Example #1
def list_pseudo(sym, name, tags):
    """
    List Gaussian Pseudopotentials
    """
    from aiida.orm.querybuilder import QueryBuilder

    from aiida_gaussian_datatypes.pseudopotential.data import Pseudopotential
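    # `echo` and `_formatted_table_list` come from the surrounding CLI module (not shown here).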

    query = QueryBuilder()
    query.append(Pseudopotential)

    if sym:
        query.add_filter(Pseudopotential, {'attributes.element': {'==': sym}})

    if name:
        query.add_filter(Pseudopotential,
                         {'attributes.aliases': {
                             'contains': [name]
                         }})

    if tags:
        query.add_filter(Pseudopotential,
                         {'attributes.tags': {
                             'contains': tags
                         }})

    if not query.count():
        echo.echo("No Gaussian Pseudopotentials found.")
        return

    echo.echo_report("{} Gaussian Pseudopotentials found:\n".format(
        query.count()))
    echo.echo(_formatted_table_list(pseudo for [pseudo] in query.iterall()))
    echo.echo("")
Example #2
def _rehash_cmd(all, class_name, pks):
    try_load_dbenv()
    from aiida.orm.querybuilder import QueryBuilder
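    # `sys`, `click`, `try_load_dbenv`, `load_class` and `ClassNotFoundException`
    # come from module-level imports not shown in this excerpt.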

    # Get the Node class to match
    try:
        node_class = load_class(class_name)
    except ClassNotFoundException:
        click.echo("Could not load class '{}'.\nAborted!".format(class_name))
        sys.exit(1)

    # Add the filters for the class and PKs.
    qb = QueryBuilder()
    qb.append(node_class, tag='node')
    if pks:
        qb.add_filter('node', {'id': {'in': pks}})
    else:
        if not all:
            click.echo(
                "Nothing specified, nothing re-hashed.\nExplicitly specify the PK of the nodes, or use '--all'."
            )
            return

    if not qb.count():
        click.echo('No matching nodes found.')
        return
    for i, (node, ) in enumerate(qb.iterall()):
        if i % 100 == 0:
            click.echo('.', nl=False)
        node.rehash()
    click.echo('\nAll done! {} node(s) re-hashed.'.format(i + 1))
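
A minimal sketch (not from the original source) of how this helper might be exposed as a click command; the option and argument names are assumptions:

import click

@click.command('rehash')
@click.option('-a', '--all', 'all_nodes', is_flag=True, help='Re-hash all nodes of the given class.')
@click.option('-c', '--class-name', default='aiida.orm.node.Node', help='Restrict to this node class.')
@click.argument('pks', nargs=-1, type=int)
def rehash(all_nodes, class_name, pks):
    """Recompute the hash of the given nodes."""
    _rehash_cmd(all_nodes, class_name, pks)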
Example #3
def list_basisset(sym, name, tags):
    """
    List Gaussian Basis Sets
    """

    from aiida.orm.querybuilder import QueryBuilder

    from aiida_gaussian_datatypes.basisset.data import BasisSet
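    # `echo` and `_formatted_table_list` come from the surrounding CLI module (not shown here).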

    query = QueryBuilder()
    query.append(BasisSet)

    if sym:
        query.add_filter(BasisSet, {'attributes.element': {'==': sym}})

    if name:
        query.add_filter(BasisSet,
                         {'attributes.aliases': {
                             'contains': [name]
                         }})

    if tags:
        query.add_filter(BasisSet, {'attributes.tags': {'contains': tags}})

    if not query.count():
        echo.echo("No Gaussian Basis Sets found.")
        return

    echo.echo_report("{} Gaussian Basis Sets found:\n".format(query.count()))
    echo.echo(_formatted_table_list(bs for [bs] in query.iterall()))
    echo.echo("")
Example #4
    def __init__(self):
        # Find all process labels
        qb = QueryBuilder()
        qb.append(WorkCalculation,
                  project="attributes._process_label",
                  filters={
                      'attributes': {'!has_key': 'source_code'}
                  }
        )
        qb.order_by({WorkCalculation: {'ctime': 'desc'}})
        process_labels = []
        for label, in qb.iterall():
            if label not in process_labels:
                process_labels.append(label)

        layout = ipw.Layout(width="900px")

        self.mode = ipw.RadioButtons(options=['all', 'uploaded', 'edited', 'calculated'],
                                     layout=ipw.Layout(width="25%"))

        # Date range: the default window is the last 7 days.
        self.dt_now = datetime.datetime.now()
        # NB: despite its name, `dt_end` is the default *start* of the range (used in search()).
        self.dt_end = self.dt_now - datetime.timedelta(days=7)
        self.date_start = ipw.Text(value='',
                                   description='From: ',
                                   style={'description_width': '120px'})

        self.date_end = ipw.Text(value='',
                                 description='To: ')

        self.date_text = ipw.HTML(value='<p>Select the date range:</p>')
        
        self.btn_date = ipw.Button(description='Search',
                                   layout={'margin': '1em 0 0 0'})

        self.age_selection = ipw.VBox([self.date_text, ipw.HBox([self.date_start, self.date_end]), self.btn_date],
                                      layout={'border': '1px solid #fafafa', 'padding': '1em'})

        # Labels
        self.drop_label = ipw.Dropdown(options=(['All'] + process_labels),
                                       description='Process Label',
                                       style={'description_width': '120px'},
                                       layout={'width': '50%'})

        self.btn_date.on_click(self.search)
        self.mode.observe(self.search, names='value')
        self.drop_label.observe(self.search, names='value')
        
        hr = ipw.HTML('<hr>')
        box = ipw.VBox([self.age_selection,
                        hr,
                        ipw.HBox([self.mode, self.drop_label])])
        
        self.results = ipw.Dropdown(layout=layout)
        self.search()
        super(StructureBrowser, self).__init__([box, hr, self.results])
Example #5
    def get(cls,
            element,
            name=None,
            version="latest",
            match_aliases=True,
            group_label=None,
            n_el=None):
        from aiida.orm.querybuilder import QueryBuilder

        query = QueryBuilder()

        params = {}

        if group_label:
            query.append(Group, filters={"label": group_label}, tag="group")
            params["with_group"] = "group"

        query.append(BasisSet, **params)

        filters = {"attributes.element": {"==": element}}

        if version != "latest":
            filters["attributes.version"] = {"==": version}

        if name:
            if match_aliases:
                filters["attributes.aliases"] = {"contains": [name]}
            else:
                filters["attributes.name"] = {"==": name}

        if n_el:
            filters["attributes.n_el"] = {"==": n_el}

        query.add_filter(BasisSet, filters)

        # SQLA ORM only solution:
        # query.order_by({BasisSet: [{"attributes.version": {"cast": "i", "order": "desc"}}]})
        # items = query.first()

        items = sorted(query.iterall(),
                       key=lambda b: b[0].version,
                       reverse=True)

        if not items:
            raise NotExistent(
                f"No Gaussian Basis Set found for element={element}, name={name}, version={version}"
            )

        # if we get different names there is no well-ordering: sorting by version only works if the names are equal
        if len(set(b[0].name for b in items)) > 1:
            raise MultipleObjectsError(
                f"Multiple Gaussian Basis Sets found for element={element}, name={name}, version={version}"
            )

        return items[0][0]
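
A hedged usage sketch based on the signature above; the element and basis-set name are illustrative values, not from the original source:

# fetch the newest matching basis set for hydrogen (aliases are matched by default)
basis = BasisSet.get("H", name="DZVP-MOLOPT-GTH")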
Example #6
def dump_pseudo(sym, name, tags, output_format, data):
    """
    Print specified Pseudopotentials
    """

    from aiida.orm.querybuilder import QueryBuilder

    from aiida_gaussian_datatypes.pseudopotential.data import Pseudopotential
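    # `sys`, `click` and `echo` come from module-level imports not shown in this excerpt.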

    writers = {
        "cp2k": Pseudopotential.to_cp2k,
    }

    if data:
        # if explicit nodes were given, the only thing left is to make sure no filters are present
        if sym or name or tags:
            raise click.UsageError(
                "cannot specify node IDs and filters at the same time")
    else:
        query = QueryBuilder()
        query.append(Pseudopotential, project=["*"])

        if sym:
            query.add_filter(Pseudopotential,
                             {"attributes.element": {
                                 "==": sym
                             }})

        if name:
            query.add_filter(Pseudopotential,
                             {"attributes.aliases": {
                                 "contains": [name]
                             }})

        if tags:
            query.add_filter(Pseudopotential,
                             {"attributes.tags": {
                                 "contains": tags
                             }})

        if not query.count():
            echo.echo_warning("No Gaussian Pseudopotential found.",
                              err=echo.is_stdout_redirected())
            return

        # each row from iterall() is a tuple; unpack it here
        data = [pseudo for pseudo, in query.iterall()]

    for pseudo in data:
        if echo.is_stdout_redirected():
            echo.echo_report("Dumping {}/{} ({})...".format(
                pseudo.name, pseudo.element, pseudo.uuid),
                             err=True)

        writers[output_format](pseudo, sys.stdout)
Example #7
    def add_origins_to_targets(
        self,
        origin_cls,
        target_cls,
        origin_filters=None,
        target_filters=None,
        include_target_inputs=False,
        include_target_outputs=False,
        origin_style=(),
        annotate_links=False
    ):
        """Add nodes and edges from all nodes of an origin class to all node of a target node class.

        :param origin_cls: origin node class
        :param target_cls: target node class
        :param origin_filters:  (Default value = None)
        :type origin_filters: dict or None
        :param target_filters:  (Default value = None)
        :type target_filters: dict or None
        :param include_target_inputs:  (Default value = False)
        :type include_target_inputs: bool
        :param include_target_outputs:  (Default value = False)
        :type include_target_outputs: bool
        :param origin_style: node style map for origin node (Default value = ())
        :type origin_style: dict or tuple
        :param annotate_links: label edges with the link 'label', 'type' or 'both' (Default value = False)
        :type annotate_links: bool

        """
        # pylint: disable=too-many-arguments
        if origin_filters is None:
            origin_filters = {}

        query = QueryBuilder(
            path=[{
                'cls': origin_cls,
                'filters': origin_filters,
                'tag': 'origin',
                'project': '*'
            }]
        )

        for (node,) in query.iterall():
            self.add_origin_to_targets(
                node,
                target_cls,
                target_filters=target_filters,
                include_target_inputs=include_target_inputs,
                include_target_outputs=include_target_outputs,
                origin_style=origin_style,
                annotate_links=annotate_links
            )
Example #8
def dump_basisset(sym, name, tags, output_format, data):
    """
    Print specified Basis Sets
    """

    from aiida.orm.querybuilder import QueryBuilder

    from aiida_gaussian_datatypes.basisset.data import BasisSet
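    # `sys`, `click` and `echo` come from module-level imports not shown in this excerpt.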

    writers = {
        "cp2k": BasisSet.to_cp2k,
    }

    if data:
        # if explicit nodes were given, the only thing left is to make sure no filters are present
        if sym or name or tags:
            raise click.UsageError(
                "cannot specify node IDs and filters at the same time")
    else:
        query = QueryBuilder()
        query.append(BasisSet, project=['*'])

        if sym:
            query.add_filter(BasisSet, {'attributes.element': {'==': sym}})

        if name:
            query.add_filter(BasisSet,
                             {'attributes.aliases': {
                                 'contains': [name]
                             }})

        if tags:
            query.add_filter(BasisSet, {'attributes.tags': {'contains': tags}})

        if not query.count():
            echo.echo_warning("No Gaussian Basis Sets found.",
                              err=echo.is_stdout_redirected())
            return

        # each row from iterall() is a tuple; unpack it here
        data = [bset for bset, in query.iterall()]

    for bset in data:
        if echo.is_stdout_redirected():
            echo.echo_report("Dumping {}/{} ({})...".format(
                bset.name, bset.element, bset.uuid),
                             err=True)

        writers[output_format](bset, sys.stdout)
Example #9
    def search(self, c=None):
        try:  # If the date range is valid, use it for the search
            self.start_date = datetime.datetime.strptime(
                self.date_start.value, '%Y-%m-%d')
            self.end_date = datetime.datetime.strptime(
                self.date_end.value, '%Y-%m-%d') + datetime.timedelta(hours=24)
        except ValueError:  # Otherwise revert to the standard (i.e. last 7 days)
            self.start_date = self.dt_end
            self.end_date = self.dt_now + datetime.timedelta(hours=24)

            self.date_start.value = self.start_date.strftime('%Y-%m-%d')
            self.date_end.value = self.end_date.strftime('%Y-%m-%d')

        qb = QueryBuilder()
        qb.append(Node,
                  filters={
                      'type': 'data.folder.FolderData.',
                      'ctime': {
                          'and': [{
                              '<=': self.end_date
                          }, {
                              '>': self.start_date
                          }]
                      }
                  },
                  tag='output')
        qb.append(Node, filters={'label': 'phonons_opt'}, input_of='output')

        matches = {n[0] for n in qb.iterall()}
        matches = sorted(matches, reverse=True, key=lambda n: n.ctime)

        c = len(matches)
        options = OrderedDict()
        options["Select a Structure (%d found)" % c] = False

        for n in matches:
            file_path = n.out.retrieved.folder.abspath + "/path/aiida-VIBRATIONS-1.mol"
            if os.path.isfile(file_path):
                label = "PK: %d" % n.pk
                label += " | " + n.ctime.strftime("%Y-%m-%d %H:%M")
                options[label] = n

        self.results.options = options
Example #10
#This code works only in this specific case, at this moment in time, when
#I don't have any other calculations in the database.
#I will be more systematic with labels, descriptions and extras in the future
#in order to make querying easier.
#(presumably run with `verdi run` or in a `verdi shell`, where load_node is predefined)

from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.calculation.work import WorkCalculation
from common_wf import fit_birch_murnaghan_params, birch_murnaghan

q = QueryBuilder()
q.append(WorkCalculation,
         tag="wc",
         filters={'attributes._process_label': 'EquationOfStatesHe'})
q.add_projection('wc', "id")

for i in q.iterall():
    p = load_node(i[0])
    if i[0] == 507:
        print i[0]
    else:
        print i[0], p.inp.pseudo_path  #, p.out.result.get_attr("eos_data")
        # number of sites in the initial structure
        k = len(p.out.initial_structure.sites)
        vol = []
        en = []
        for s in range(5):
            # print float(p.out.result.get_attr("eos_data")[s][0]/k), float(p.out.result.get_attr("eos_data")[s][1]/k)
            vol.append(float(p.out.result.get_attr("eos_data")[s][0]) / k)
            en.append(float(p.out.result.get_attr("eos_data")[s][1]) / k)
        #print vol, en
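
The script imports fit_birch_murnaghan_params but never calls it; a plausible next step, assuming it accepts the per-atom volume and energy lists (the real signature lives in the local common_wf module and is not shown):

params = fit_birch_murnaghan_params(vol, en)  # assumed signature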
Example #11
    def search(self, c=None):
        self.preprocess()

        qb = QueryBuilder()
        try:  # If the date range is valid, use it for the search
            self.start_date = datetime.datetime.strptime(
                self.date_start.value, '%Y-%m-%d')
            self.end_date = datetime.datetime.strptime(
                self.date_end.value, '%Y-%m-%d') + datetime.timedelta(hours=24)
        except ValueError:  # Otherwise revert to the standard (i.e. last 7 days)
            self.start_date = self.dt_end
            self.end_date = self.dt_now + datetime.timedelta(hours=24)

            self.date_start.value = self.start_date.strftime('%Y-%m-%d')
            self.date_end.value = self.end_date.strftime('%Y-%m-%d')

        filters = {}
        filters['ctime'] = {
            'and': [{
                '<=': self.end_date
            }, {
                '>': self.start_date
            }]
        }
        if self.drop_label.value != 'All':
            qb.append(
                WorkCalculation,
                filters={'attributes._process_label': self.drop_label.value})

            qb.append(JobCalculation, output_of=WorkCalculation)

            qb.append(StructureData, output_of=JobCalculation, filters=filters)
        else:
            if self.mode.value == "uploaded":
                qb2 = QueryBuilder()
                qb2.append(StructureData, project=["id"])
                qb2.append(Node, input_of=StructureData)
                processed_nodes = [n[0] for n in qb2.all()]
                if processed_nodes:
                    filters['id'] = {"!in": processed_nodes}
                qb.append(StructureData, filters=filters)

            elif self.mode.value == "calculated":
                qb.append(JobCalculation)
                qb.append(StructureData,
                          output_of=JobCalculation,
                          filters=filters)

            elif self.mode.value == "edited":
                qb.append(WorkCalculation)
                qb.append(StructureData,
                          output_of=WorkCalculation,
                          filters=filters)

            else:  # mode == "all"
                qb.append(StructureData, filters=filters)

        qb.order_by({StructureData: {'ctime': 'desc'}})
        matches = {n[0] for n in qb.iterall()}
        matches = sorted(matches, reverse=True, key=lambda n: n.ctime)

        c = len(matches)
        options = OrderedDict()
        options["Select a Structure (%d found)" % c] = False

        for n in matches:
            label = "PK: %d" % n.pk
            label += " | " + n.ctime.strftime("%Y-%m-%d %H:%M")
            label += " | " + n.get_extra("formula")
            label += " | " + n.description
            options[label] = n

        self.results.options = options
Example #12
ParameterData = DataFactory("parameter")
#PwCalculation = DataFactory("calculation")

# `path`, `QueryBuilder` and `DataFactory` are defined elsewhere (e.g. in a verdi shell)
qb = QueryBuilder()
qb.append(ParameterData,
          project=["attributes.step0", "attributes.steps"],
          filters={"id": {"==": 5615}})

ok = 0
a = qb.count()
outfile = open(path + "results-parabola-dict", 'w')

for i in qb.iterall():
    outfile.write("{}\n\n{}\n\n{}\n\n{}".format(i[0], i[1][0], i[1][1], i[1][2]))
    ok += 1
    print i[0]['dE']
    print len(i[0])

outfile.close()

# NB: `i` keeps its value from the last loop iteration
new_dict = {'0': i[0], '1': i[1][0], '2': i[1][1], '3': i[1][2]}

#print new_dict
print
Example #13
    def get(cls,
            element,
            name=None,
            version="latest",
            match_aliases=True,
            group_label=None,
            n_el=None):
        """
        Get the first matching Pseudopotential for the given parameters.

        :param element: The atomic symbol
        :param name: The name of the pseudo
        :param version: A specific version (if more than one in the database and not the highest/latest)
        :param match_aliases: Whether to look in the list of of aliases for a matching name
        """
        from aiida.orm.querybuilder import QueryBuilder

        query = QueryBuilder()

        params = {}

        if group_label:
            query.append(Group, filters={"label": group_label}, tag="group")
            params["with_group"] = "group"

        query.append(Pseudopotential, **params)

        filters = {"attributes.element": {"==": element}}

        if version != "latest":
            filters["attributes.version"] = {"==": version}

        if name:
            if match_aliases:
                filters["attributes.aliases"] = {"contains": [name]}
            else:
                filters["attributes.name"] = {"==": name}

        query.add_filter(Pseudopotential, filters)

        # SQLA ORM only solution:
        # query.order_by({Pseudopotential: [{"attributes.version": {"cast": "i", "order": "desc"}}]})
        # items = query.first()

        all_iter = query.iterall()

        if n_el:
            all_iter = filter(lambda p: sum(p[0].n_el) == n_el, all_iter)

        items = sorted(all_iter, key=lambda p: p[0].version, reverse=True)

        if not items:
            raise NotExistent(
                f"No Gaussian Pseudopotential found for element={element}, name={name}, version={version}"
            )

        # if we get different names there is no well-ordering: sorting by version only works if the names are equal
        if len(set(p[0].name for p in items)) > 1:
            raise MultipleObjectsError(
                f"Multiple Gaussian Pseudopotentials found for element={element}, name={name}, version={version}"
            )

        return items[0][0]
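
A hedged usage sketch based on the signature and docstring above; the element, name and electron count are illustrative values:

# fetch the newest GTH-PBE pseudopotential for oxygen with 6 valence electrons
pseudo = Pseudopotential.get("O", name="GTH-PBE", n_el=6)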
Example #14
# Query examples from the tutorial
# (run in a verdi shell, where Node and DataFactory are predefined)

from aiida.orm.querybuilder import QueryBuilder

qb = QueryBuilder()
qb.append(Node)  # match all nodes
qb.all()         # all results, one row per node
qb.count()       # number of matching rows

# print the <pk> for each matching node
for node, in qb.iterall():
    print node

# this line may be needed if StructureData is not yet defined:
StructureData = DataFactory("structure")
qb = QueryBuilder()
qb.append(StructureData)  # qb.all() returns one row per structure
qb.all()

Example #15
    def code_list(self, *args):
        """
        List available codes
        """
        import argparse

        parser = argparse.ArgumentParser(
            prog=self.get_full_command_name(),
            description='List the codes in the database.')
        # The default states are those that are shown if no option is given
        parser.add_argument(
            '-c',
            '--computer',
            help="Filter only codes on a given computer",
        )
        parser.add_argument(
            '-p',
            '--plugin',
            help="Filter only calculation with a given plugin",
        )
        parser.add_argument(
            '-A',
            '--all-users',
            dest='all_users',
            action='store_true',
            help="Show codes of all users",
        )
        parser.add_argument(
            '-o',
            '--show-owner',
            dest='show_owner',
            action='store_true',
            help="Show also the owner of the code",
        )
        parser.add_argument(
            '-a',
            '--all-codes',
            action='store_true',
            help="Show also hidden codes",
        )
        parser.set_defaults(all_users=False, hidden=False)
        parsed_args = parser.parse_args(args)
        computer_filter = parsed_args.computer
        plugin_filter = parsed_args.plugin
        all_users = parsed_args.all_users
        show_owner = parsed_args.show_owner
        reveal_filter = parsed_args.all_codes

        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.code import Code
        from aiida.orm.computer import Computer
        from aiida.orm.user import User
        from aiida.backends.utils import get_automatic_user

        qb_user_filters = dict()
        if not all_users:
            user = User(dbuser=get_automatic_user())
            qb_user_filters['email'] = user.email

        qb_computer_filters = dict()
        if computer_filter is not None:
            qb_computer_filters['name'] = computer_filter

        qb_code_filters = dict()
        if plugin_filter is not None:
            qb_code_filters['attributes.input_plugin'] = plugin_filter

        if not reveal_filter:
            qb_code_filters['attributes.hidden'] = {"~==": True}

        qb = QueryBuilder()
        qb.append(Code,
                  tag="code",
                  filters=qb_code_filters,
                  project=["id", "label"])
        qb.append(Computer,
                  computer_of="code",
                  project=["name"],
                  filters=qb_computer_filters)
        qb.append(User,
                  creator_of="code",
                  project=["email"],
                  filters=qb_user_filters)

        print "# List of configured codes:"
        print "# (use 'verdi code show CODEID' to see the details)"
        if qb.count() > 0:
            for pk, label, computername, useremail in qb.iterall():
                if show_owner:
                    owner_string = " ({})".format(useremail)
                else:
                    owner_string = ""
                if computername is None:
                    computernamestring = ""
                else:
                    computernamestring = "@{}".format(computername)
                print "* pk {} - {}{}{}".format(pk, label, computernamestring,
                                                owner_string)
        else:
            print "# No codes found matching the specified criteria."
Example #16
    def computers(self, info) -> typing.List[Computer]:
        # requested_fields = [s.name.value for s in info.field_nodes[0].selection_set.selections]

        q = QueryBuilder()
        q.append(orm.Computer, project=["*"])
        # each row from iterall() is a one-element list; unpack it
        return [Computer.from_orm(entry) for (entry,) in q.iterall()]
Example #17
# To establish links between states
from aiida.orm.querybuilder import QueryBuilder

# RemoteData, Group and `path` are assumed to be defined elsewhere in the script
qb1 = QueryBuilder()
qb1.append(RemoteData, tag="remote", project=["*"])
qb1.append(Group, group_of="remote")

qb2 = QueryBuilder()
qb2.append(RemoteData, project=["*"])

qb3 = QueryBuilder()
qb3.append(Group)


# qb.append(ParameterData, project=["attributes.energy_smearing"]) #, filters=)
# qb.append(ParameterData, project=["attributes.element"])

f1 = open(path + "remoteData_Group", "w")
f2 = open(path + "remoteData", "w")
f3 = open(path + "Group", "w")

for i in qb1.iterall():
    f1.write(str(i) + "\n")

for j in qb2.iterall():
    f2.write(str(j) + "\n")

for k in qb3.iterall():
    f3.write(str(k) + "\n")


f1.close()
f2.close()
f3.close()
Example #18
    def add_origin_to_targets(
        self,
        origin,
        target_cls,
        target_filters=None,
        include_target_inputs=False,
        include_target_outputs=False,
        origin_style=(),
        annotate_links=False
    ):
        """Add nodes and edges from an origin node to all nodes of a target node class.

        :param origin: node or node pk/uuid
        :type origin: aiida.orm.nodes.node.Node or int
        :param target_cls: target node class
        :param target_filters:  (Default value = None)
        :type target_filters: dict or None
        :param include_target_inputs:  (Default value = False)
        :type include_target_inputs: bool
        :param include_target_outputs:  (Default value = False)
        :type include_target_outputs: bool
        :param origin_style: node style map for origin node (Default value = ())
        :type origin_style: dict or tuple
        :param annotate_links: label edges with the link 'label', 'type' or 'both' (Default value = False)
        :type annotate_links: bool

        """
        # pylint: disable=too-many-arguments
        origin_node = self._load_node(origin)

        if target_filters is None:
            target_filters = {}

        self.add_node(origin_node, style_override=dict(origin_style))

        query = QueryBuilder(
            path=[{
                'cls': origin_node.__class__,
                'filters': {
                    'id': origin_node.pk
                },
                'tag': 'origin'
            }, {
                'cls': target_cls,
                'filters': target_filters,
                'with_ancestors': 'origin',
                'tag': 'target',
                'project': '*'
            }]
        )

        for (target_node,) in query.iterall():
            self.add_node(target_node)
            self.add_edge(origin_node, target_node, style={'style': 'dashed', 'color': 'grey'})

            if include_target_inputs:
                self.add_incoming(target_node, annotate_links=annotate_links)

            if include_target_outputs:
                self.add_outgoing(target_node, annotate_links=annotate_links)
Example #19
def draw_graph(origin_node,
               ancestor_depth=None,
               descendant_depth=None,
               format='dot',
               include_calculation_inputs=False,
               include_calculation_outputs=False):
    """
    The algorithm starts from the original node and goes both input-ward and output-ward via a breadth-first algorithm.

    :param origin_node: An Aiida node, the starting point for drawing the graph
    :param int ancestor_depth: The maximum depth of the ancestors drawn. If left to None, we recurse until the graph is fully explored
    :param int descendant_depth: The maximum depth of the descendants drawn. If left to None, we recurse until the graph is fully explored
    :param str format: The format, by default dot

    :returns: The exit_status of the os.system call that produced the valid file
    :returns: The file name of the final output

    ..note::
        If an invalid format is provided graphviz prints a helpful message, so this doesn't need to be implemented here.
    """
    # The search proceeds until the connected part of the graph that contains
    # the root_pk is fully explored.
    # TODO this command deserves to be improved, with options and further subcommands
    import os
    import tempfile

    from aiida.orm.calculation import Calculation
    from aiida.orm.calculation.job import JobCalculation
    from aiida.orm.code import Code
    from aiida.orm.node import Node
    from aiida.common.links import LinkType
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm.data.structure import StructureData
    from aiida.orm.data.parameter import ParameterData

    def draw_node_settings(node, **kwargs):
        """
        Return a string with all the info needed in a .dot file to define a node of the graph.
        :param node:
        :param kwargs: Additional key-value pairs to be added to the returned string
        :return: a string
        """
        if kwargs:
            additional_params = ",{}".format(",".join(
                '{}="{}"'.format(k, v) for k, v in kwargs.iteritems()))
        else:
            additional_params = ""

        if isinstance(node, Calculation):
            shape = "shape=polygon,sides=4"
            additional_params = additional_params + ', style="filled", fillcolor="sandybrown"'
        elif isinstance(node, Code):
            shape = "shape=diamond"
            additional_params = additional_params + ', style="filled", fillcolor="seagreen3"'
        else:
            shape = "shape=ellipse"
            if isinstance(node, StructureData):
                additional_params = additional_params + ', style="filled", fillcolor="skyblue"'
            if isinstance(node, ParameterData):
                additional_params = additional_params + ', style="filled", fillcolor="#fcd975"'

        if node.label:
            label_string = "\n'{}'".format(node.label)
            additional_string = ""
        else:
            additional_string = "\n {}".format(node.get_desc())
            label_string = ""
        labelstring = 'label=<<B>{} ({})<BR/>{}{}</B>>'.format(
            node.__class__.__name__, node.pk, label_string, additional_string)
        return "N{} [{},{}{}];".format(node.pk, shape, labelstring,
                                       additional_params)

    def draw_link_settings(inp_id, out_id, link_label, link_type):
        if link_type in (LinkType.CREATE.value, LinkType.INPUT.value):
            style = 'solid'  # Solid lines and black colors
            color = "0.0 0.0 0.0"  # for CREATE and INPUT (The provenance graph)
        elif link_type == LinkType.RETURN.value:
            style = 'dotted'  # Dotted lines and
            color = "0.0 0.0 0.0"  # black color for RETURN links
        elif link_type == LinkType.CALL.value:
            style = 'bold'  # Bold lines and
            color = "0.0 1.0 1.0"  # bright red for CALL links
        else:
            style = 'solid'  # Solid and
            color = "0.0 0.0 0.5"  # grey lines for unspecified links!
        return '    {} -> {} [label=<<B>{}</B>>, color="{}", style="{}", penwidth=3];'.format(
            "N{}".format(inp_id), "N{}".format(out_id), link_label, color,
            style)

    # Breadth-first search of all ancestors and descendant nodes of the given node
    links = {}  # Accumulate link specs here
    nodes = {
        origin_node.pk:
        draw_node_settings(origin_node,
                           style='filled',
                           fillcolor='lightblue',
                           penwidth=3)
    }  # Accumulate node specs here
    # Additional nodes: the ones added when include_calculation_inputs or
    # include_calculation_outputs is set to True. They live in a separate dictionary
    # because `nodes` drives the recursion, whereas these must not be recursed into:
    additional_nodes = {}

    last_nodes = [origin_node]  # Nodes whose links have not been scanned yet

    # Go through the graph upward (i.e. look at inputs)
    depth = 0
    while last_nodes:
        # I augment depth every time I get through a new iteration
        depth += 1
        # I check whether I should stop here:
        if ancestor_depth is not None and depth > ancestor_depth:
            break
        # I continue by adding new nodes here!
        new_nodes = []
        for node in last_nodes:
            # This query gives me all the inputs of this node, and link labels and types!
            input_query = QueryBuilder()
            input_query.append(Node, filters={'id': node.pk}, tag='n')
            input_query.append(Node,
                               input_of='n',
                               edge_project=('id', 'label', 'type'),
                               project='*',
                               tag='inp')
            for inp, link_id, link_label, link_type in input_query.iterall():
                # I removed this check, to me there is no way that this link was already referred to!
                # if link_id not in links:
                links[link_id] = draw_link_settings(inp.pk, node.pk,
                                                    link_label, link_type)
                # For the nodes I need to check, maybe this same node is referred to multiple times.
                if inp.pk not in nodes:
                    nodes[inp.pk] = draw_node_settings(inp, penwidth=3)
                    new_nodes.append(inp)

            # Checking whether I also should include all the outputs of a calculation into the drawing:
            if include_calculation_outputs and isinstance(node, Calculation):
                # Query for the outputs, giving me also link labels and types:
                output_query = QueryBuilder()
                output_query.append(Node, filters={'id': node.pk}, tag='n')
                output_query.append(Node,
                                    output_of='n',
                                    edge_project=('id', 'label', 'type'),
                                    project='*',
                                    tag='out')
                # Iterate through results
                for out, link_id, link_label, link_type in output_query.iterall():
                    # This link might have been drawn already, because the output is maybe
                    # already drawn.
                    # To check: Maybe it's more efficient not to check this, since
                    # the dictionaries are large and contain many keys...
                    # I.e. just always draw, also when overwriting an existing (identical) entry.
                    if link_id not in links:
                        links[link_id] = draw_link_settings(
                            node.pk, out.pk, link_label, link_type)
                    if out.pk not in nodes and out.pk not in additional_nodes:
                        additional_nodes[out.pk] = draw_node_settings(
                            out, penwidth=3)

        last_nodes = new_nodes

    # Go through the graph down-ward (i.e. look at outputs)
    last_nodes = [origin_node]
    depth = 0
    while last_nodes:
        depth += 1
        # Also here, stop if a maximum descendant depth is set and exceeded.
        if descendant_depth is not None and depth > descendant_depth:
            break
        new_nodes = []

        for node in last_nodes:
            # Query for the outputs:
            output_query = QueryBuilder()
            output_query.append(Node, filters={'id': node.pk}, tag='n')
            output_query.append(Node,
                                output_of='n',
                                edge_project=('id', 'label', 'type'),
                                project='*',
                                tag='out')

            for out, link_id, link_label, link_type in output_query.iterall():
                # Draw the link
                links[link_id] = draw_link_settings(node.pk, out.pk,
                                                    link_label, link_type)
                if out.pk not in nodes:
                    nodes[out.pk] = draw_node_settings(out, penwidth=3)
                    new_nodes.append(out)

            if include_calculation_inputs and isinstance(node, Calculation):
                input_query = QueryBuilder()
                input_query.append(Node, filters={'id': node.pk}, tag='n')
                input_query.append(Node,
                                   input_of='n',
                                   edge_project=('id', 'label', 'type'),
                                   project='*',
                                   tag='inp')
                for inp, link_id, link_label, link_type in input_query.iterall():
                    # Also here, maybe it's just better not to check?
                    if link_id not in links:
                        links[link_id] = draw_link_settings(
                            inp.pk, node.pk, link_label, link_type)
                    if inp.pk not in nodes and inp.pk not in additional_nodes:
                        additional_nodes[inp.pk] = draw_node_settings(
                            inp, penwidth=3)
        last_nodes = new_nodes

    # Writing the graph to a temporary file
    fd, fname = tempfile.mkstemp(suffix='.dot')
    os.close(fd)  # close the low-level handle; the file is re-opened by name below
    with open(fname, 'w') as fout:
        fout.write("digraph G {\n")
        for l_name, l_values in links.iteritems():
            fout.write('    {}\n'.format(l_values))
        for n_name, n_values in nodes.iteritems():
            fout.write("    {}\n".format(n_values))
        for n_name, n_values in additional_nodes.iteritems():
            fout.write("    {}\n".format(n_values))
        fout.write("}\n")

    # Now I am producing the output file
    output_file_name = "{0}.{format}".format(origin_node.pk, format=format)
    exit_status = os.system('dot -T{format} {0} -o {1}'.format(
        fname, output_file_name, format=format))
    # cleaning up by removing the temporary file
    os.remove(fname)
    return exit_status, output_file_name
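
A hedged usage sketch based on the documented signature; it assumes graphviz's dot is on the PATH and that load_node is importable as below:

from aiida.orm import load_node

# render the provenance around node 1234, at most two levels of ancestors deep
exit_status, output_file = draw_graph(load_node(1234), ancestor_depth=2, format='pdf')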
Example #20
    def nodes(self, info) -> typing.List[Node]:
        q = QueryBuilder()
        q.append(orm.Node, project=["*"])
        # each row from iterall() is a one-element list; unpack it
        return [Node.from_orm(entry) for (entry,) in q.iterall()]
Example #21
    def calculations(self, info) -> typing.List[Calculation]:
        q = QueryBuilder()
        q.append(orm.Calculation, project=["*"])
        # each row from iterall() is a one-element list; unpack it
        return [Calculation.from_orm(entry) for (entry,) in q.iterall()]
Example #22
def delete_nodes(pks,
                 follow_calls=False,
                 follow_returns=False,
                 dry_run=False,
                 force=False,
                 disable_checks=False,
                 verbosity=0):
    """
    Delete nodes by a list of pks

    :note: The script will also delete all children calculations generated from the specified nodes.

    :param pks: a list of the PKs of the nodes to delete
    :param bool follow_calls: Follow calls
    :param bool follow_returns:
        Follow returns. This is a very dangerous option, since anything returned by a workflow might have
        been used as input in many other calculations. Use with care, and never combine with force.
    :param bool dry_run: Do not delete, a dry run, with statistics printed according to verbosity levels.
    :param bool force: Do not ask for confirmation to delete nodes.
    :param bool disable_checks:
        If True, will not check whether calculations are losing created data or called instances.
        If checks are disabled, also logging is disabled.
    :param int verbosity:
        The verbosity levels, 0 prints nothing, 1 prints just sums and total, 2 prints individual nodes.
    """

    from aiida.orm.querybuilder import QueryBuilder
    from aiida.common.links import LinkType
    from aiida.orm.node import Node
    from aiida.orm.calculation import Calculation
    from aiida.orm.data import Data
    from aiida.orm import load_node
    from aiida.orm.backend import construct_backend
    from aiida.backends.utils import delete_nodes_and_connections

    backend = construct_backend()
    user_email = backend.users.get_automatic_user().email

    if not pks:
        # If I was passed an empty list, I don't do anything.
        # I prefer checking explicitly; an empty set might be problematic for the queries done below.
        if verbosity:
            print "Nothing to delete"
        return

    # The following code is just for the querying of downwards provenance.
    # Ideally, there should be a module to interface with, but this is the solution for now.
    # By only dealing with ids, and keeping track of what has been already
    # visited in the query, there's good performance and no infinite loops.
    link_types_to_follow = [LinkType.CREATE.value, LinkType.INPUT.value]
    if follow_calls:
        link_types_to_follow.append(LinkType.CALL.value)
    if follow_returns:
        link_types_to_follow.append(LinkType.RETURN.value)

    edge_filters = {'type': {'in': link_types_to_follow}}

    # Operational set always includes the recently (in the last iteration added) nodes.
    operational_set = set().union(set(pks))  # Union to copy the set!
    pks_set_to_delete = set().union(set(pks))
    while operational_set:
        # new_pks_set are the pks of all nodes that are connected to the operational node set
        # with the links specified.
        new_pks_set = set([
            i for i, in QueryBuilder().
            append(Node, filters={
                'id': {
                    'in': operational_set
                }
            }).append(Node, project='id', edge_filters=edge_filters).iterall()
        ])
        # The operational set is only those pks that haven't been yet put into the pks_set_to_delete.
        operational_set = new_pks_set.difference(pks_set_to_delete)

        # I add these pks in the pks_set_to_delete with a union
        pks_set_to_delete = pks_set_to_delete.union(new_pks_set)

    if verbosity > 0:
        print "I {} delete {} node{}".format(
            'would' if dry_run else 'will', len(pks_set_to_delete),
            's' if len(pks_set_to_delete) > 1 else '')
        if verbosity > 1:
            qb = QueryBuilder().append(
                Node,
                filters={'id': {
                    'in': pks_set_to_delete
                }},
                project=('uuid', 'id', 'type', 'label'))
            print "The nodes I {} delete:".format(
                'would' if dry_run else 'will')
            for uuid, pk, type_string, label in qb.iterall():
                try:
                    short_type_string = type_string.split('.')[-2]
                except IndexError:
                    short_type_string = type_string
                print "   {} {} {} {}".format(uuid, pk, short_type_string,
                                              label)

    # Here I am checking whether I am deleting:
    ## A data instance without also deleting the creator, which breaks the relationship between a calculation and its data
    ## A calculation instance that was called, without also deleting the caller.

    if not disable_checks:
        called_qb = QueryBuilder()
        called_qb.append(Calculation,
                         filters={'id': {
                             '!in': pks_set_to_delete
                         }},
                         project='id')
        called_qb.append(Calculation,
                         project='type',
                         edge_project='label',
                         filters={'id': {
                             'in': pks_set_to_delete
                         }},
                         edge_filters={'type': {
                             '==': LinkType.CALL.value
                         }})
        caller_to_called2delete = called_qb.all()

        if verbosity > 0 and caller_to_called2delete:
            calculation_pks_losing_called = set(
                zip(*caller_to_called2delete)[0])
            print "\n{} calculation{} {} lose at least one called instance".format(
                len(calculation_pks_losing_called),
                's' if len(calculation_pks_losing_called) > 1 else '',
                'would' if dry_run else 'will')
            if verbosity > 1:
                print "These are the calculations that {} lose a called instance:".format(
                    'would' if dry_run else 'will')
                for calc_losing_called_pk in calculation_pks_losing_called:
                    print '  ', load_node(calc_losing_called_pk)

        created_qb = QueryBuilder()
        created_qb.append(Calculation,
                          filters={'id': {
                              '!in': pks_set_to_delete
                          }},
                          project='id')
        created_qb.append(Data,
                          project='type',
                          edge_project='label',
                          filters={'id': {
                              'in': pks_set_to_delete
                          }},
                          edge_filters={'type': {
                              '==': LinkType.CREATE.value
                          }})

        creator_to_created2delete = created_qb.all()
        if verbosity > 0 and creator_to_created2delete:
            calculation_pks_losing_created = set(
                zip(*creator_to_created2delete)[0])
            print "\n{} calculation{} {} lose at least one created data-instance".format(
                len(calculation_pks_losing_created),
                's' if len(calculation_pks_losing_created) > 1 else '',
                'would' if dry_run else 'will')
            if verbosity > 1:
                print "These are the calculations that {} lose a created data-instance:".format(
                    'would' if dry_run else 'will')
                for calc_losing_created_pk in calculation_pks_losing_created:
                    print '  ', load_node(calc_losing_created_pk)

    if dry_run:
        if verbosity > 0:
            print "\nThis was a dry run, exiting without deleting anything"
        return

    # Asking for user confirmation here
    if force:
        pass
    else:
        print "YOU ARE ABOUT TO DELETE {} NODES! THIS CANNOT BE UNDONE!".format(
            len(pks_set_to_delete))
        if raw_input("Shall I continue? [Y/N] ").lower() != 'y':
            print "Exiting without deleting"
            return

    # Recover the list of folders to delete before actually deleting
    # the nodes.  I will delete the folders only later, so that if
    # there is a problem during the deletion of the nodes in
    # the DB, I don't delete the folders
    folders = [load_node(_).folder for _ in pks_set_to_delete]

    delete_nodes_and_connections(pks_set_to_delete)

    if not disable_checks:
        # I pass now to the log the information for calculations losing created data or called instances
        for calc_pk, calc_type_string, link_label in caller_to_called2delete:
            calc = load_node(calc_pk)
            calc.logger.warning("User {} deleted "
                                "an instance of type {} "
                                "called with the label {} "
                                "by this calculation".format(
                                    user_email, calc_type_string, link_label))

        for calc_pk, data_type_string, link_label in creator_to_created2delete:
            calc = load_node(calc_pk)
            calc.logger.warning("User {} deleted "
                                "an instance of type {} "
                                "created with the label {} "
                                "by this calculation".format(
                                    user_email, data_type_string, link_label))

    # If we are here, we managed to delete the entries from the DB.
    # I can now delete the folders
    for f in folders:
        f.erase()
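
A minimal usage sketch grounded in the docstring above; the PKs are illustrative:

# preview what would be deleted, without touching the database
delete_nodes([1234, 5678], dry_run=True, verbosity=2)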
Example #23
    def multiple(self, info) -> typing.List[data_class]:
        q = QueryBuilder()
        q.append(orm_class, project=["*"])
        # each row from iterall() is a one-element list; unpack it
        return [data_class.from_orm(entry) for (entry,) in q.iterall()]
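
Note: QueryBuilder.iterall() yields one list per result row, with one entry per projection, which is why the snippets above unpack single-projection rows. A minimal sketch of the general pattern (the projected keys are illustrative):

from aiida.orm.querybuilder import QueryBuilder
from aiida.orm import Node

qb = QueryBuilder()
qb.append(Node, project=["id", "ctime"])
for pk, ctime in qb.iterall():  # two projections -> two-element rows
    print(pk, ctime)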