Example #1
def node_show(nodes, print_groups):
    """Show generic information on one or more nodes."""
    from aiida.cmdline.utils.common import get_node_info

    for node in nodes:
        # pylint: disable=fixme
        # TODO: Add a check here on the node type, otherwise it might try to access
        # attributes such as code which are not necessarily there
        echo.echo(get_node_info(node))

        if print_groups:
            from aiida.orm.querybuilder import QueryBuilder
            from aiida.orm.groups import Group
            from aiida.orm import Node  # pylint: disable=redefined-outer-name

            # pylint: disable=invalid-name
            qb = QueryBuilder()
            qb.append(Node, tag='node', filters={'id': {'==': node.pk}})
            qb.append(Group,
                      tag='groups',
                      with_node='node',
                      project=['id', 'label', 'type_string'])

            echo.echo('#### GROUPS:')

            if qb.count() == 0:
                echo.echo(f'Node {node.pk} does not belong to any group')
            else:
                echo.echo(f'Node {node.pk} belongs to the following groups:')
                res = qb.iterdict()
                table = [(gr['groups']['id'], gr['groups']['label'],
                          gr['groups']['type_string']) for gr in res]
                table.sort()

                echo.echo(
                    tabulate.tabulate(table,
                                      headers=['PK', 'Label', 'Group type']))
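A standalone sketch of the same group lookup against the aiida-core >= 1.0 API (the PK 123 is a placeholder):

from aiida import load_profile
from aiida.orm import Group, Node, QueryBuilder

load_profile()

qb = QueryBuilder()
qb.append(Node, tag='node', filters={'id': {'==': 123}})  # placeholder PK
qb.append(Group, with_node='node', project=['id', 'label', 'type_string'])
for group_pk, label, type_string in qb.all():
    print(group_pk, label, type_string)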
Example #2
def localhost(aiida_env, localhost_dir):
    """Fixture for a local computer called localhost"""
    from aiida.orm import Computer
    from aiida.orm.querybuilder import QueryBuilder
    query_builder = QueryBuilder()
    query_builder.append(Computer, tag='comp')
    query_builder.add_filter('comp', {'name': {'==': 'localhost'}})
    query_results = query_builder.all()
    if query_results:
        computer = query_results[0][0]
    else:
        computer = Computer(name='localhost',
                            description='description',
                            hostname='localhost',
                            workdir=localhost_dir.strpath,
                            transport_type='local',
                            scheduler_type='direct',
                            mpirun_command=[],
                            enabled_state=True)
    return computer
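The same get-or-create lookup can be written compactly; a sketch against a recent aiida-core, where the attribute queried is label (it was name in the pre-1.0 API the fixture above targets):

from aiida.orm import Computer, QueryBuilder

qb = QueryBuilder()
qb.append(Computer, filters={'label': {'==': 'localhost'}})
result = qb.first()
computer = result[0] if result else None  # build and store a new Computer here if None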
Example #3
def rehash(nodes, entry_point, force):
    """Recompute the hash for nodes in the database.

    The set of nodes that will be rehashed can be filtered by their identifier and/or based on their class.
    """
    from aiida.orm import Data, ProcessNode, QueryBuilder

    if not force:
        echo.echo_warning(
            'This command will recompute and overwrite the hashes of all nodes.'
        )
        echo.echo_warning(
            'Note that this can take a lot of time for big databases.')
        echo.echo_warning('')
        echo.echo_warning('', nl=False)

        confirm_message = 'Do you want to continue?'

        try:
            click.confirm(text=confirm_message, abort=True)
        except click.Abort:
            echo.echo('\n')
            echo.echo_critical(
                'Migration aborted, the data has not been affected.')

    # If no explicit entry point is defined, rehash all nodes, which are either Data nodes or ProcessNodes
    if entry_point is None:
        entry_point = (Data, ProcessNode)

    if nodes:
        to_hash = [(node, ) for node in nodes if isinstance(node, entry_point)]
        num_nodes = len(to_hash)
    else:
        builder = QueryBuilder()
        builder.append(entry_point, tag='node')
        to_hash = builder.all()
        num_nodes = builder.count()

    if not to_hash:
        echo.echo_critical('no matching nodes found')

    with click.progressbar(to_hash, label='Rehashing Nodes:') as iter_hash:
        for node, in iter_hash:
            node.rehash()

    echo.echo_success('{} nodes re-hashed.'.format(num_nodes))
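Before running such a rehash, the number of affected nodes can be checked with the same tuple-of-classes append that the command uses internally; a minimal sketch assuming a loaded profile:

from aiida.orm import Data, ProcessNode, QueryBuilder

total = QueryBuilder().append((Data, ProcessNode)).count()
print('{} nodes would be rehashed'.format(total))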
Example #4
    def get_subtree(pk, level=0):
        qb = QueryBuilder()
        qb.append(cls=WorkCalculation,
                  filters={'id': pk},
                  tag='workcalculation')
        qb.append(
            cls=WorkCalculation,
            project=['id'],
            # In the future, we should specify here the type of link
            # for now, CALL links are the only ones allowing calc-calc
            # (we here really want instead to follow CALL links)
            output_of='workcalculation',
            tag='subworkchains')
        result = list(itertools.chain(*qb.distinct().all()))

        # This will return a single flat list of tuples, where the first element
        # corresponds to the WorkChain pk and the second element is an integer
        # that represents its level of nesting within the chain
        return [(pk, level)] + list(
            itertools.chain(
                *[get_subtree(subpk, level=level + 1) for subpk in result]))
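The itertools.chain call above flattens the one-column rows returned by all(); in aiida-core >= 1.0 the same flattening is available directly, as in this sketch:

from aiida.orm import Node, QueryBuilder

qb = QueryBuilder()
qb.append(Node, project=['id'])
pks = qb.distinct().all(flat=True)  # flat list of ids instead of one-element rows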
Example #5
    def get_subclass_from_uuid(cls, uuid):
        from aiida.orm.querybuilder import QueryBuilder
        from sqlalchemy.exc import DatabaseError
        try:
            qb = QueryBuilder()
            qb.append(cls, filters={'uuid': {'==': str(uuid)}})

            if qb.count() == 0:
                raise NotExistent("No entry with UUID={} found".format(uuid))

            node = qb.first()[0]

            if not isinstance(node, cls):
                raise NotExistent("UUID={} is not an instance of {}".format(
                    uuid, cls.__name__))
            return node
        except DatabaseError as de:
            raise ValueError(str(de))
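In current aiida-core the same lookup is normally delegated to load_node, which wraps an equivalent query; a sketch (the UUID is a placeholder):

from aiida.orm import Node, QueryBuilder, load_node

node = load_node(uuid='6f8e...')  # placeholder UUID; raises NotExistent if absent

# the equivalent explicit query
qb = QueryBuilder()
qb.append(Node, filters={'uuid': {'==': '6f8e...'}})
result = qb.first()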
Example #6
def vac_BE():

    qb = QueryBuilder()

    qb.append(Group,
              tag="group",
              project="id",
              filters={"id": {
                  "in": [
                      263,
                      265,
                      266,
                      267,
                      268,
                      278,
                  ]
              }})
    qb.append(JobCalculation,
              member_of="group",
              tag="calculation",
              filters={"state": {
                  "==": "FINISHED"
              }},
              project=["id"])
    calc_list = qb.dict()

    print "Total slab structures in vacuum %s . . ." % len(calc_list)

    with open("vacuum_slab_BE.txt", "w") as f:

        for bulk_calc in calc_list:
            shift_energy = shift_fermi(bulk_calc['calculation']['id'])
            VBM, CBM = BE(bulk_calc['calculation']['id'], shift_energy)
            A_site, B_site, term_site = site_term_atm(
                bulk_calc['calculation']['id'])
            f.write(
                str(A_site) + "    " + str(B_site) + "    " + str(term_site) +
                "    " + str(VBM) + "    " + str(CBM) + "    " +
                str(bulk_calc['calculation']['id']) + "\n")
Example #7
    def _get_query_builder_label_identifier(cls,
                                            identifier,
                                            classes,
                                            operator='==',
                                            project='*'):
        """
        Return the query builder instance that attempts to map the identifier onto an entity of the orm class,
        defined for this loader class, interpreting the identifier as a LABEL like identifier

        :param identifier: the LABEL identifier
        :param classes: a tuple of orm classes to which the identifier should be mapped
        :param operator: the operator to use in the query
        :param project: the property or properties to project for entities matching the query
        :returns: the query builder instance that should retrieve the entity corresponding to the identifier
        :raises ValueError: if the identifier is invalid
        :raises aiida.common.NotExistent: if the orm base class does not support a LABEL like identifier
        """
        from aiida.common.escaping import escape_for_sql_like
        from aiida.orm import Computer

        try:
            identifier, _, machinename = identifier.partition('@')
        except AttributeError:
            raise ValueError('the identifier needs to be a string')

        if operator == 'like':
            identifier = f'{escape_for_sql_like(identifier)}%'

        builder = QueryBuilder()
        builder.append(cls=classes,
                       tag='code',
                       project=project,
                       filters={'label': {
                           operator: identifier
                       }})

        if machinename:
            builder.append(Computer,
                           filters={'name': {
                               '==': machinename
                           }},
                           with_node='code')

        return builder
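A usage sketch of the label@machine convention this loader implements (the code label 'pw' and computer 'localhost' are placeholders; note the snippet filters the computer on name, which newer releases call label):

from aiida.orm import Code, Computer, QueryBuilder

builder = QueryBuilder()
builder.append(Code, tag='code', project='*', filters={'label': {'==': 'pw'}})
builder.append(Computer, filters={'label': {'==': 'localhost'}}, with_node='code')
result = builder.first()
code = result[0] if result else None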
Example #8
def get_data_aiida(quantities):
    """Query the AiiDA database for a list of quantities (other appl results).
    Return list of entries like [mat_id, mat_name, mat_class, quantity-1, quantity-2, ..., quantity-n].
    """

    qb = QueryBuilder()
    qb.append(Group,
              filters={'label': {
                  'like': GROUP_DIR + "%"
              }},
              tag='g',
              project=[
                  'extras.mat_id', 'extras.name_conventional',
                  'extras.class_material'
              ])

    for q in quantities:
        qb.append(Dict,
                  project=['attributes.{}'.format(q['key'])],
                  filters={'extras.{}'.format(TAG_KEY): q['dict']},
                  with_group='g')

    return qb.all()
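The dotted projections above work for any dict-backed column, attributes and extras alike; a small sketch with placeholder key names:

from aiida.orm import Dict, QueryBuilder

qb = QueryBuilder()
qb.append(Dict, project=['attributes.energy', 'extras.mat_id'])  # placeholder keys
for energy, mat_id in qb.all():
    print(energy, mat_id)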
Example #9
def querry_for_ref_structure(element_string):
    """
    This method finds StructureData nodes with the following extras:
    extras.type = 'bulk'  (this should be done by looking at pbc, but I could
    not get the query to work),
    extras.specification = 'reference',
    extras.elemental = True,
    extras.element = element_string

    :param element_string: string of an element
    :return: the latest StructureData node that was found
    """

    #query db
    q = QueryBuilder()
    q.append(StructureData,
             filters={
                 'extras.type': {
                     '==': 'bulk'
                 },
                 'extras.specification': {
                     '==': 'reference'
                 },
                 'extras.elemental': {
                     '==': True
                 },
                 'extras.element': {
                     '==': element_string
                 }
             })
    q.order_by({StructureData: 'ctime'})  #always use the most recent
    structures = q.all()

    if structures:
        return structures[-1][0]
    else:
        return None
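Ordering descending and limiting to one row avoids materializing every match just to take the last; a sketch of the same lookup with a placeholder element:

from aiida.orm import QueryBuilder, StructureData

q = QueryBuilder()
q.append(StructureData, filters={'extras.element': {'==': 'Si'}})  # placeholder element
q.order_by({StructureData: {'ctime': 'desc'}})
q.limit(1)
result = q.first()
structure = result[0] if result else None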
Example #10
def mock_vasp(aiida_env, localhost):
    """Points to a mock-up of a VASP executable."""
    from aiida.orm import Code
    from aiida.orm.querybuilder import QueryBuilder
    query_builder = QueryBuilder()
    query_builder.append(Code, tag='code')
    query_builder.add_filter('code', {'label': {'==': 'mock-vasp'}})
    query_results = query_builder.all()
    if query_results:
        code = query_results[0][0]
    else:
        os_env = os.environ.copy()
        if not localhost.pk:
            localhost.store()
        mock_vasp_path = sp.check_output(['which', 'mock-vasp'],
                                         env=os_env).strip()
        code = Code()
        code.label = 'mock-vasp'
        code.description = 'Mock VASP for tests'
        code.set_remote_computer_exec((localhost, mock_vasp_path))
        code.set_input_plugin_name('vasp.vasp')
        aiidapath = py_path.local(aiida_env.root_dir).join('.aiida')
        code.set_prepend_text('export AIIDA_PATH={}'.format(aiidapath))
    return code
Example #11
    def get_subclass_from_pk(cls, pk):
        from aiida.orm.querybuilder import QueryBuilder
        from sqlalchemy.exc import DatabaseError
        if not isinstance(pk, int):
            raise ValueError("pk must be an integer")

        try:
            qb = QueryBuilder()
            qb.append(cls, filters={'id': {'==': pk}})

            if qb.count() == 0:
                raise NotExistent("No entry with pk= {} found".format(pk))

            node = qb.first()[0]

            if not isinstance(node, cls):
                raise NotExistent("pk= {} is not an instance of {}".format(
                    pk, cls.__name__))
            return node
        except DatabaseError as de:
            raise ValueError(str(de))
Example #12
def test_uploadfamily_again(fresh_aiida_env, potcar_family, cmd_params):
    """
    Re-upload a potcar family.

    Test:
        * Does not require description
        * Must succeed
        * Adds no nodes
        * Adds no groups
    """
    from aiida.orm import Node, Group
    from aiida.orm.querybuilder import QueryBuilder

    node_qb = QueryBuilder(path=[Node])
    node_count = node_qb.count()
    group_qb = QueryBuilder(path=[Group])
    group_count = group_qb.count()

    result = run_cmd('uploadfamily',
                     [cmd_params.PATH_OPTION, cmd_params.NAME_OPTION])

    assert not result.exception

    node_qb = QueryBuilder(path=[Node])
    assert node_count == node_qb.count()
    group_qb = QueryBuilder(path=[Group])
    assert group_count == group_qb.count()
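The before/after bookkeeping reduces to two counts; a minimal sketch:

from aiida.orm import Group, Node, QueryBuilder

node_count = QueryBuilder().append(Node).count()
group_count = QueryBuilder().append(Group).count()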
Example #13
class BaseTranslator(object):
    """
    Generic class for translator. It contains the methods
    required to build a related QueryBuilder object
    """
    # pylint: disable=too-many-instance-attributes,fixme

    # A label associated to the present class
    __label__ = None
    # The AiiDA class one-to-one associated to the present class
    _aiida_class = None
    # The string name of the AiiDA class
    _aiida_type = None

    # If True (False) the corresponding AiiDA class has (no) uuid property
    _has_uuid = None

    _result_type = __label__

    _default = _default_projections = ['**']

    _is_qb_initialized = False
    _is_id_query = None
    _total_count = None

    def __init__(self, **kwargs):
        """
        Initialise the parameters.
        Create the basic query_help

        keyword Class (default None, i.e. this class): the class from which
        the initial values of the attributes are taken. By default it is this
        class, so that class attributes are translated into object attributes.
        In case of inheritance one can use the same constructor but pass the
        inheriting class to pick up its attributes.
        """
        # Basic filter (dict) to set the identity of the uuid. None if
        #  no specific node is requested
        self._id_filter = None

        # basic query_help object
        self._query_help = {
            'path': [{
                'cls': self._aiida_class,
                'tag': self.__label__
            }],
            'filters': {},
            'project': {},
            'order_by': {}
        }
        # query_builder object (No initialization)
        self.qbobj = QueryBuilder()

        self.limit_default = kwargs['LIMIT_DEFAULT']
        self.schema = None

    def __repr__(self):
        """
        This function is required for the caching system to be able to compare
        two NodeTranslator objects. Comparison is done on the value returned by __repr__

        :return: representation of NodeTranslator objects. Returns an empty
            string because the inputs of self.get_nodes are sufficient to determine the
            identity of two queries.
        """
        return ''

    @staticmethod
    def get_projectable_properties():
        """
        This method is extended in specific translators classes.
        It returns a dict as follows:
        dict(fields=projectable_properties, ordering=ordering)
        where projectable_properties is a dict and ordering is a list
        """
        return {}

    def init_qb(self):
        """
        Initialize query builder object by means of _query_help
        """
        self.qbobj.__init__(**self._query_help)
        self._is_qb_initialized = True

    def count(self):
        """
        Count the number of rows returned by the query and set total_count
        """
        if self._is_qb_initialized:
            self._total_count = self.qbobj.count()
        else:
            raise InvalidOperation(
                'query builder object has not been initialized.')

            # def caching_method(self):
            #     """
            #     class method for caching. It is a wrapper of the
            # flask_cache memoize
            #     method. To be used as a decorator
            #     :return: the flask_cache memoize method with the timeout kwarg
            #     corrispondent to the class
            #     """
            #     return cache.memoize()
            #

            #    @cache.memoize(timeout=CACHING_TIMEOUTS[self.__label__])

    def get_total_count(self):
        """
        Returns the number of rows of the query.

        :return: total_count
        """
        ## Count the results if needed
        if not self._total_count:
            self.count()

        return self._total_count

    def set_filters(self, filters=None):
        """
        Add filters in query_help.

        :param filters: it is a dictionary where keys are the tag names
            given in the path in query_help and their values are the dictionary
            of filters want to add for that tag name. Format for the Filters
            dictionary::

                filters = {
                    "tag1" : {k1:v1, k2:v2},
                    "tag2" : {k1:v1, k2:v2},
                }

        :return: query_help dict including filters if any.
        """
        if filters is None:
            filters = {}

        if isinstance(filters, dict):  # pylint: disable=too-many-nested-blocks
            if filters:
                for tag, tag_filters in filters.items():
                    if tag_filters and isinstance(tag_filters, dict):
                        self._query_help['filters'][tag] = {}
                        for filter_key, filter_value in tag_filters.items():
                            if filter_key == 'pk':
                                filter_key = PK_DBSYNONYM
                            self._query_help['filters'][tag][filter_key] \
                                = filter_value
        else:
            raise InputValidationError(
                'Pass data in dictionary format where '
                'keys are the tag names given in the '
                'path in query_help and their values '
                'are the dictionary of filters you want '
                'to add for that tag name.')

    def get_default_projections(self):
        """
        method to get default projections of the node
        :return: self._default_projections
        """
        return self._default_projections

    def set_default_projections(self):
        """
        It calls the set_projections() method internally to add the
        default projections in query_help

        :return: None
        """
        self.set_projections({self.__label__: self._default_projections})

    def set_projections(self, projections):
        """
        add the projections in query_help

        :param projections: it is a dictionary where keys are the tag names
         given in the path in query_help and values are the list of the names
         you want to project in the final output
        :return: updated query_help with projections
        """
        if isinstance(projections, dict):
            if projections:
                for project_key, project_list in projections.items():
                    self._query_help['project'][project_key] = project_list

        else:
            raise InputValidationError('Pass data in dictionary format where '
                                       'keys are the tag names given in the '
                                       'path in query_help and values are the '
                                       'list of the names you want to project '
                                       'in the final output')

    def set_order(self, orders):
        """
        Add order_by clause in query_help
        :param orders: dictionary of orders you want to apply on final
        results
        :return: None or exception if any.
        """
        ## Validate input
        if not isinstance(orders, dict):
            raise InputValidationError('orders has to be a dictionary '
                                       "compatible with the 'order_by' section "
                                       'of the query_help')

        ## Auxiliary function to get the ordering criterion
        def def_order(columns):
            """
            Takes a list of signed column names ex. ['id', '-ctime',
            '+mtime']
            and transforms it in a order_by compatible dictionary
            :param columns: (list of strings)
            :return: a dictionary
            """
            from collections import OrderedDict
            order_dict = OrderedDict()
            for column in columns:
                if column[0] == '-':
                    order_dict[column[1:]] = 'desc'
                elif column[0] == '+':
                    order_dict[column[1:]] = 'asc'
                else:
                    order_dict[column] = 'asc'
            if 'pk' in order_dict:
                order_dict[PK_DBSYNONYM] = order_dict.pop('pk')
            return order_dict

        ## Assign orderby field query_help
        if 'id' not in orders[self._result_type] and '-id' not in orders[
                self._result_type]:
            orders[self._result_type].append('id')
        for tag, columns in orders.items():
            self._query_help['order_by'][tag] = def_order(columns)

    def set_query(self,
                  filters=None,
                  orders=None,
                  projections=None,
                  query_type=None,
                  node_id=None,
                  attributes=None,
                  attributes_filter=None,
                  extras=None,
                  extras_filter=None):
        # pylint: disable=too-many-arguments,unused-argument,too-many-locals,too-many-branches
        """
        Adds filters, default projections, order specs to the query_help,
        and initializes the qb object

        :param filters: dictionary with the filters
        :param orders: dictionary with the order for each tag
        :param projections: dictionary with the projection. It is discarded
            if query_type=='attributes'/'extras'
        :param query_type: (string) specify the result or the content ("attr")
        :param node_id: (integer) id of a specific node
        :param attributes: flag to show attributes in nodes endpoint
        :param attributes_filter: list of node attributes to query
        :param extras: flag to show extras in nodes endpoint
        :param extras_filter: list of node extras to query
        """

        tagged_filters = {}

        ## Check if filters are well defined and construct an ad-hoc filter
        # for id_query
        if node_id is not None:
            self._is_id_query = True
            if self._result_type == self.__label__ and filters:
                raise RestInputValidationError(
                    'selecting a specific id does not allow to specify filters'
                )

            try:
                self._check_id_validity(node_id)
            except RestValidationError as exc:
                raise RestValidationError(str(exc))
            else:
                tagged_filters[self.__label__] = self._id_filter
                if self._result_type is not self.__label__:
                    tagged_filters[self._result_type] = filters
        else:
            tagged_filters[self.__label__] = filters

        ## Add filters
        self.set_filters(tagged_filters)

        ## Add projections
        if projections is None:
            if attributes is None and extras is None:
                self.set_default_projections()
            else:
                default_projections = self.get_default_projections()

                if attributes is True:
                    if attributes_filter is None:
                        default_projections.append('attributes')
                    else:
                        ## Check if attributes_filter is not a list
                        if not isinstance(attributes_filter, list):
                            attributes_filter = [attributes_filter]
                        for attr in attributes_filter:
                            default_projections.append('attributes.' +
                                                       str(attr))
                elif attributes is not None and attributes is not False:
                    raise RestValidationError(
                        'The attributes filter is false by default and can only be set to true.'
                    )

                if extras is True:
                    if extras_filter is None:
                        default_projections.append('extras')
                    else:
                        ## Check if extras_filter is not a list
                        if not isinstance(extras_filter, list):
                            extras_filter = [extras_filter]
                        for extra in extras_filter:
                            default_projections.append('extras.' + str(extra))
                elif extras is not None and extras is not False:
                    raise RestValidationError(
                        'The extras filter is false by default and can only be set to true.'
                    )

                self.set_projections({self.__label__: default_projections})
        else:
            tagged_projections = {self._result_type: projections}
            self.set_projections(tagged_projections)

        ##Add order_by
        if orders is not None:
            tagged_orders = {self._result_type: orders}
            self.set_order(tagged_orders)

        ## Initialize the query_object
        self.init_qb()

    def get_query_help(self):
        """
        :return: return QB json dictionary
        """
        return self._query_help

    def set_limit_offset(self, limit=None, offset=None):
        """
        sets limits and offset directly to the query_builder object

        :param limit:
        :param offset:
        :return:
        """

        ## mandatory params
        # none

        ## non-mandatory params
        if limit is not None:
            try:
                limit = int(limit)
            except ValueError:
                raise InputValidationError('Limit value must be an integer')
            if limit > self.limit_default:
                raise RestValidationError(
                    'Limit and perpage cannot be bigger than {}'.format(
                        self.limit_default))
        else:
            limit = self.limit_default

        if offset is not None:
            try:
                offset = int(offset)
            except ValueError:
                raise InputValidationError('Offset value must be an integer')

        if self._is_qb_initialized:
            if limit is not None:
                self.qbobj.limit(limit)
            else:
                pass
            if offset is not None:
                self.qbobj.offset(offset)
            else:
                pass
        else:
            raise InvalidOperation(
                'query builder object has not been initialized.')

    def get_formatted_result(self, label):
        """
        Runs the query and retrieves results tagged as "label".

        :param label: the tag of the results to be extracted out of
          the query rows.
        :type label: str
        :return: a list of the query results
        """

        if not self._is_qb_initialized:
            raise InvalidOperation(
                'query builder object has not been initialized.')

        results = []
        if self._total_count > 0:
            for res in self.qbobj.dict():
                tmp = res[label]

                # Note: In code cleanup and design change, remove this node dependant part
                # from base class and move it to node translator.
                if self._result_type in ['with_outgoing', 'with_incoming']:
                    tmp['link_type'] = res[self.__label__ + '--' +
                                           label]['type']
                    tmp['link_label'] = res[self.__label__ + '--' +
                                            label]['label']
                results.append(tmp)

        # TODO think how to make it less hardcoded
        if self._result_type == 'with_outgoing':
            result = {'incoming': results}
        elif self._result_type == 'with_incoming':
            result = {'outgoing': results}
        else:
            result = {self.__label__: results}

        return result

    def get_results(self):
        """
        Returns either list of nodes or details of single node from database.

        :return: either list of nodes or details of single node from database
        """

        ## Check whether the querybuilder object has been initialized
        if not self._is_qb_initialized:
            raise InvalidOperation(
                'query builder object has not been initialized.')

        ## Count the total number of rows returned by the query (if not
        # already done)
        if self._total_count is None:
            self.count()

        ## Retrieve data
        data = self.get_formatted_result(self._result_type)
        return data

    def _check_id_validity(self, node_id):
        """
        Checks whether id corresponds to an object of the expected type,
        whenever type is a valid column of the database (ex. for nodes,
        but not for users)

        :param node_id: id (or id starting pattern)

        :return: True if node_id valid, False if invalid. If True, sets the id
          filter attribute correctly

        :raise RestValidationError: if no node is found or id pattern does
          not identify a unique node
        """
        from aiida.common.exceptions import MultipleObjectsError, NotExistent
        from aiida.orm.utils.loaders import IdentifierType, get_loader

        loader = get_loader(self._aiida_class)

        if self._has_uuid:

            # For consistency check that id is a string
            if not isinstance(node_id, six.string_types):
                raise RestValidationError('parameter id has to be a string')

            identifier_type = IdentifierType.UUID
            qbobj, _ = loader.get_query_builder(
                node_id, identifier_type, sub_classes=(self._aiida_class, ))
        else:

            # Similarly, check that id is an integer
            if not isinstance(node_id, int):
                raise RestValidationError('parameter id has to be an integer')

            identifier_type = IdentifierType.ID
            qbobj, _ = loader.get_query_builder(
                node_id, identifier_type, sub_classes=(self._aiida_class, ))

        # For efficiency I don't go further than two results
        qbobj.limit(2)

        try:
            pk = qbobj.one()[0].pk
        except MultipleObjectsError:
            raise RestInputValidationError(
                'More than one node found. Provide longer starting pattern for id.'
            )
        except NotExistent:
            raise RestInputValidationError(
                "either no object's id starts"
                " with '{}' or the corresponding object"
                ' is not of type aiida.orm.{}'.format(node_id,
                                                      self._aiida_type))
        else:
            # create a permanent filter
            self._id_filter = {'id': {'==': pk}}
            return True
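The _query_help dict built by this class is the serializable 'queryhelp' form that QueryBuilder also accepts directly as keyword arguments (exactly what init_qb does); a minimal round-trip sketch assuming a loaded profile:

from aiida.orm import Node, QueryBuilder

query_help = {
    'path': [{'cls': Node, 'tag': 'node'}],
    'filters': {'node': {'id': {'>': 0}}},
    'project': {'node': ['id']},
}
qb = QueryBuilder(**query_help)
print(qb.count())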
Example #14
    def computer_configure(self, *args):
        """
        Configure the authentication information for a given computer
        """
        if not is_dbenv_loaded():
            load_dbenv()

        import readline
        import inspect

        from django.core.exceptions import ObjectDoesNotExist

        from aiida.common.exceptions import (NotExistent, ValidationError)
        from aiida.backends.utils import get_automatic_user
        from aiida.common.utils import get_configured_user_email
        from aiida.backends.settings import BACKEND
        from aiida.backends.profile import BACKEND_SQLA, BACKEND_DJANGO

        import argparse

        parser = argparse.ArgumentParser(
            prog=self.get_full_command_name(),
            description='Configure a computer for a given AiiDA user.')
        # The default states are those that are shown if no option is given
        parser.add_argument(
            '-u',
            '--user',
            type=str,
            metavar='EMAIL',
            help=
            "Configure the computer for the given AiiDA user (otherwise, configure the current default user)",
        )
        parser.add_argument(
            'computer',
            type=str,
            help="The name of the computer that you want to configure")

        parsed_args = parser.parse_args(args)

        user_email = parsed_args.user
        computername = parsed_args.computer

        try:
            computer = self.get_computer(name=computername)
        except NotExistent:
            print >> sys.stderr, "No computer exists with name '{}'".format(
                computername)
            sys.exit(1)
        if user_email is None:
            user = get_automatic_user()
        else:
            from aiida.orm.querybuilder import QueryBuilder
            qb = QueryBuilder()
            qb.append(type="user", filters={'email': user_email})
            result = qb.first()
            if result is None:
                print("No user with email '{}' in the "
                      "database.".format(user_email), file=sys.stderr)
                sys.exit(1)
            user = result[0]

        if BACKEND == BACKEND_DJANGO:
            from aiida.backends.djsite.db.models import DbAuthInfo

            try:
                authinfo = DbAuthInfo.objects.get(
                    dbcomputer=computer.dbcomputer, aiidauser=user)

                old_authparams = authinfo.get_auth_params()
            except ObjectDoesNotExist:
                authinfo = DbAuthInfo(dbcomputer=computer.dbcomputer,
                                      aiidauser=user)
                old_authparams = {}

        elif BACKEND == BACKEND_SQLA:
            from aiida.backends.sqlalchemy.models.authinfo import DbAuthInfo
            from aiida.backends.sqlalchemy import session

            authinfo = session.query(DbAuthInfo).filter(
                DbAuthInfo.dbcomputer == computer.dbcomputer).filter(
                    DbAuthInfo.aiidauser == user).first()
            if authinfo is None:
                authinfo = DbAuthInfo(dbcomputer=computer.dbcomputer,
                                      aiidauser=user)
                old_authparams = {}
            else:
                old_authparams = authinfo.get_auth_params()
        else:
            raise Exception("Unknown backend {}".format(BACKEND))
        Transport = computer.get_transport_class()

        print("Configuring computer '{}' for the AiiDA user '{}'".format(
            computername, user.email))

        print "Computer {} has transport of type {}".format(
            computername, computer.get_transport_type())

        if user.email != get_configured_user_email():
            print "*" * 72
            print "** {:66s} **".format("WARNING!")
            print "** {:66s} **".format(
                "  You are configuring a different user.")
            print "** {:66s} **".format(
                "  Note that the default suggestions are taken from your")
            print "** {:66s} **".format(
                "  local configuration files, so they may be incorrect.")
            print "*" * 72

        valid_keys = Transport.get_valid_auth_params()

        default_authparams = {}
        for k in valid_keys:
            if k in old_authparams:
                default_authparams[k] = old_authparams.pop(k)
        if old_authparams:
            print(
                "WARNING: the following keys were previously in the "
                "authorization parameters,")
            print "but have not been recognized and have been deleted:"
            print ", ".join(old_authparams.keys())

        if not valid_keys:
            print "There are no special keys to be configured. Configuration completed."
            authinfo.set_auth_params({})
            authinfo.save()
            return

        print ""
        print "Note: to leave a field unconfigured, leave it empty and press [Enter]"

        # I strip out the old auth_params that are not among the valid keys

        new_authparams = {}

        for k in valid_keys:
            key_set = False
            while not key_set:
                try:
                    converter_name = '_convert_{}_fromstring'.format(k)
                    try:
                        converter = dict(
                            inspect.getmembers(Transport))[converter_name]
                    except KeyError:
                        print >> sys.stderr, (
                            "Internal error! "
                            "No {} defined in Transport {}".format(
                                converter_name, computer.get_transport_type()))
                        sys.exit(1)

                    if k in default_authparams:
                        readline.set_startup_hook(lambda: readline.insert_text(
                            str(default_authparams[k])))
                    else:
                        # Use suggestion only if parameters were not already set
                        suggester_name = '_get_{}_suggestion_string'.format(k)
                        try:
                            suggester = dict(
                                inspect.getmembers(Transport))[suggester_name]
                            suggestion = suggester(computer)
                            readline.set_startup_hook(
                                lambda: readline.insert_text(suggestion))
                        except KeyError:
                            readline.set_startup_hook()

                    txtval = input("=> {} = ".format(k))
                    if txtval:
                        new_authparams[k] = converter(txtval)
                    key_set = True
                except ValidationError as e:
                    print "Error in the inserted value: {}".format(e.message)

        authinfo.set_auth_params(new_authparams)
        authinfo.save()
        print "Configuration stored for your user on computer '{}'.".format(
            computername)
Example #15
# example queries from the tutorial
from sys import argv
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.data.remote import RemoteData
from aiida.orm.group import Group  # needed for the Group join below

path = "/home/aiida/Documents/seb352-travail/essais-tuto/res/"
StructureData = DataFactory("structure")
ParameterData = DataFactory("parameter")

qb1 = QueryBuilder()
qb2 = QueryBuilder()
qb3 = QueryBuilder()
# qb.append(Node, project=["id"])

# enumerate the <pk> for each query key
# for node, in qb.iterall():
# 	print node
# print
# print("Number of species "+str( qb.count()))

# qb.append(StructureData, project=["id", "uuid"],
# 	filters={"or":[
# 	{"id":{"==":285}}, {"id":{"==":3512}} ] })


# To establish links between states
qb1.append(RemoteData, tag="remote", project=["*"])
qb1.append(Group, group_of="remote")

qb2.append(RemoteData, project=["*"])
Example #16
def listfamilies(element, with_description):
    """
    Print on screen the list of installed PSF-pseudo families.
    """
    from aiida import is_dbenv_loaded, load_dbenv
    if not is_dbenv_loaded():
        load_dbenv()

    from aiida.orm import DataFactory
    from aiida_siesta.data.psf import PSFGROUP_TYPE

    PsfData = DataFactory('siesta.psf')
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm.group import Group
    qb = QueryBuilder()
    qb.append(PsfData, tag='psfdata')

    if element:
        qb.add_filter(PsfData, {'attributes.element': {'in': element}})

    qb.append(
        Group,
        group_of='psfdata',
        tag='group',
        project=["name", "description"],
        filters={
            "type": {
                '==': PSFGROUP_TYPE
            }
        })

    qb.distinct()
    if qb.count() > 0:
        for res in qb.dict():
            group_name = res.get("group").get("name")
            group_desc = res.get("group").get("description")
            qb = QueryBuilder()
            qb.append(
                Group, tag='thisgroup', filters={
                    "name": {
                        'like': group_name
                    }
                })
            qb.append(PsfData, project=["id"], member_of='thisgroup')

            if with_description:
                description_string = ": {}".format(group_desc)
            else:
                description_string = ""

            click.echo("* {} [{} pseudos]{}".format(group_name,
                                                    qb.count(),
                                                    description_string))

    else:
        click.echo("No valid PSF pseudopotential family found.", err=True)
Example #17
    def get_bands_and_parents_structure(self, args):
        """
        Search for bands and return bands and the closest structure that is a parent of the instance.
        This is the backend-independent way; it can be overridden for performance reasons.

        :returns:
            A list of sublists, each containing (in order):
                pk as string, formula as string, creation date, bandsdata-label
        """

        import datetime
        from aiida.utils import timezone
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.backends.utils import get_automatic_user
        from aiida.orm.implementation import User
        from aiida.orm.implementation import Group
        from aiida.orm.data.structure import (get_formula, get_symbols_string)
        from aiida.orm.data.array.bands import BandsData
        from aiida.orm.data.structure import StructureData

        qb = QueryBuilder()
        if args.all_users is False:
            au = get_automatic_user()
            user = User(dbuser=au)
            qb.append(User, tag="creator", filters={"email": user.email})
        else:
            qb.append(User, tag="creator")

        bdata_filters = {}
        if args.past_days is not None:
            now = timezone.now()
            n_days_ago = now - datetime.timedelta(days=args.past_days)
            bdata_filters.update({"ctime": {'>=': n_days_ago}})

        qb.append(BandsData,
                  tag="bdata",
                  created_by="creator",
                  filters=bdata_filters,
                  project=["id", "label", "ctime"])

        group_filters = {}

        if args.group_name is not None:
            group_filters.update({"name": {"in": args.group_name}})
        if args.group_pk is not None:
            group_filters.update({"id": {"in": args.group_pk}})
        if group_filters:
            qb.append(Group,
                      tag="group",
                      filters=group_filters,
                      group_of="bdata")

        qb.append(
            StructureData,
            tag="sdata",
            ancestor_of="bdata",
            # We don't care about the creator of StructureData
            project=["id", "attributes.kinds", "attributes.sites"])

        qb.order_by({StructureData: {'ctime': 'desc'}})

        list_data = qb.distinct()

        entry_list = []
        already_visited_bdata = set()

        for [bid, blabel, bdate, sid, akinds, asites] in list_data.all():

            # We process only one StructureData per BandsData.
            # We want to process the closest StructureData to
            # every BandsData.
            # We hope that the StructureData with the latest
            # creation time is the closest one.
            # This will be updated when the QueryBuilder supports
            # order_by by the distance of two nodes.
            if bid in already_visited_bdata:
                continue
            already_visited_bdata.add(bid)

            if args.element is not None:
                all_symbols = [_["symbols"][0] for _ in akinds]
                if not any([s in args.element for s in all_symbols]):
                    continue

            if args.element_only is not None:
                all_symbols = [_["symbols"][0] for _ in akinds]
                if not all([s in all_symbols for s in args.element_only]):
                    continue

            # We want only the StructureData that have attributes
            if akinds is None or asites is None:
                continue

            symbol_dict = {}
            for k in akinds:
                symbols = k['symbols']
                weights = k['weights']
                symbol_dict[k['name']] = get_symbols_string(symbols, weights)

            try:
                symbol_list = []
                for s in asites:
                    symbol_list.append(symbol_dict[s['kind_name']])
                formula = get_formula(symbol_list, mode=args.formulamode)
            # If for some reason there is no kind with the name
            # referenced by the site
            except KeyError:
                formula = "<<UNKNOWN>>"
            entry_list.append(
                [str(bid),
                 str(formula),
                 bdate.strftime('%d %b %Y'), blabel])

        return entry_list
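The creator restriction at the top of this method maps onto the with_user relationship of aiida-core >= 1.0; a compact sketch of a 'my recent bands' query with a placeholder email:

import datetime

from aiida.common import timezone
from aiida.orm import BandsData, QueryBuilder, User

qb = QueryBuilder()
qb.append(User, tag='creator', filters={'email': {'==': 'user@example.com'}})  # placeholder
qb.append(BandsData, with_user='creator',
          filters={'ctime': {'>=': timezone.now() - datetime.timedelta(days=7)}},
          project=['id', 'label', 'ctime'])
print(qb.all())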
Example #18
# example queries from the tutorial

from aiida.orm.querybuilder import QueryBuilder

qb = QueryBuilder()
qb.append(Node)  # Node and DataFactory are preloaded in the verdi shell
qb.all()
qb.count()

# enumerate the <pk> for each query key
for node, in qb.iterall():
    print(node)

# may need this line
StructureData = DataFactory("structure")
qb = QueryBuilder()
qb.append(StructureData)  # gives the pk for each structure if you call qb.all()
qb.all()

Example #19
File: node.py  Project: asle85/aiida-core
    def get_io_tree(self, nodeId, maxDepth=None):
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.node import Node

        def addNodes(nodeId, maxDepth, nodes, addedNodes, addedEdges,
                     edgeType):
            qb = QueryBuilder()
            qb.append(Node, tag="main", filters={"id": {"==": nodeId}})
            if edgeType == "ancestors":
                qb.append(Node,
                          tag=edgeType,
                          project=['id', 'type'],
                          edge_project=['path', 'depth'],
                          ancestor_of_beta='main',
                          edge_filters={'depth': {
                              '<=': maxDepth
                          }})
            elif edgeType == "desc":
                qb.append(Node,
                          tag=edgeType,
                          project=['id', 'type'],
                          edge_project=['path', 'depth'],
                          descendant_of_beta='main',
                          edge_filters={'depth': {
                              '<=': maxDepth
                          }})

            if (qb.count() > 0):
                qbResults = qb.get_results_dict()

                for resultDict in qbResults:
                    if resultDict[edgeType]["id"] not in addedNodes:
                        nodes.append({
                            "id":
                            len(addedNodes),
                            "nodeid":
                            resultDict[edgeType]["id"],
                            "nodetype":
                            resultDict[edgeType]["type"],
                            "group":
                            edgeType + "-" +
                            str(resultDict["main--" + edgeType]["depth"])
                        })
                        addedNodes.append(resultDict[edgeType]["id"])

                    path = resultDict["main--" + edgeType]["path"]
                    if edgeType == "ancestors":
                        startEdge = path[0]
                        endEdge = path[1]
                    elif edgeType == "desc":
                        startEdge = path[-2]
                        endEdge = path[-1]
                    if startEdge not in addedEdges.keys():
                        addedEdges[startEdge] = [endEdge]
                    elif endEdge not in addedEdges[startEdge]:
                        addedEdges[startEdge].append(endEdge)

            return nodes, addedNodes, addedEdges

        def addEdges(edges, addedNodes, addedEdges):
            for fromNodeId in addedEdges.keys():
                fromNodeIdIndex = addedNodes.index(fromNodeId)
                for toNodeId in addedEdges[fromNodeId]:
                    toNodeIdIndex = addedNodes.index(toNodeId)
                    edges.append({
                        "from": fromNodeIdIndex,
                        "to": toNodeIdIndex,
                        "arrows": "to",
                        "color": {
                            "inherit": 'from'
                        }
                    })

            return edges

        nodes = []
        edges = []
        addedNodes = []
        addedEdges = {}

        if maxDepth is None:
            from aiida.restapi.common.config import MAX_TREE_DEPTH
            maxDepth = MAX_TREE_DEPTH

        qb = QueryBuilder()
        qb.append(Node,
                  tag="main",
                  project=["id", "type"],
                  filters={"id": {
                      "==": nodeId
                  }})
        if qb.count() > 0:
            mainNode = qb.first()
            nodes.append({
                "id": 0,
                "nodeid": mainNode[0],
                "nodetype": mainNode[1],
                "group": "mainNode"
            })
            addedNodes.append(mainNode[0])

        # add all ancestors, then all descendants
        nodes, addedNodes, addedEdges = addNodes(nodeId, maxDepth, nodes,
                                                 addedNodes, addedEdges,
                                                 "ancestors")
        nodes, addedNodes, addedEdges = addNodes(nodeId, maxDepth, nodes,
                                                 addedNodes, addedEdges,
                                                 "desc")

        edges = addEdges(edges, addedNodes, addedEdges)

        return {"nodes": nodes, "edges": edges}
Example #20
def query(datatype, project, past_days, group_pks, all_users):
    """
    Perform the query
    """
    import datetime

    from aiida.orm.implementation import Group
    from aiida.orm.user import User
    from aiida.orm.backend import construct_backend
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.utils import timezone

    backend = construct_backend()

    qbl = QueryBuilder()
    if all_users is False:
        user = backend.users.get_automatic_user()
        qbl.append(User, tag="creator", filters={"email": user.email})
    else:
        qbl.append(User, tag="creator")

    # If there is a time restriction
    data_filters = {}
    if past_days is not None:
        now = timezone.now()
        n_days_ago = now - datetime.timedelta(days=past_days)
        data_filters.update({"ctime": {'>=': n_days_ago}})

    qbl.append(datatype, tag="data", created_by="creator", filters=data_filters, project=project)

    # If there is a group restriction
    if group_pks is not None:
        group_filters = dict()
        group_filters.update({"id": {"in": group_pks}})
        qbl.append(Group, tag="group", filters=group_filters, group_of="data")

    qbl.order_by({datatype: {'ctime': 'asc'}})

    object_list = qbl.distinct()
    return object_list.all()
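A standalone sketch of the past-days filter used above, against the current layout (aiida.utils.timezone became aiida.common.timezone in aiida-core 1.0):

import datetime

from aiida.common import timezone
from aiida.orm import Data, QueryBuilder

n_days_ago = timezone.now() - datetime.timedelta(days=7)
qb = QueryBuilder()
qb.append(Data, filters={'ctime': {'>=': n_days_ago}}, project=['id', 'ctime'])
print(qb.count())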
Example #21
def preprocess_spm_calcs(
        workchain_list=['STMWorkChain', 'PdosWorkChain', 'AfmWorkChain']):
    qb = QueryBuilder()
    qb.append(WorkCalculation,
              filters={
                  'attributes._process_label': {
                      'in': workchain_list
                  },
                  'or': [
                      {
                          'extras': {
                              '!has_key': 'preprocess_version'
                          }
                      },
                      {
                          'extras.preprocess_version': {
                              '<': PREPROCESS_VERSION
                          }
                      },
                  ],
              })
    qb.order_by({WorkCalculation: {'ctime': 'asc'}})

    for m in qb.all():
        n = m[0]
        ## ---------------------------------------------------------------
        ## calculation not finished
        if not n.is_sealed:
            print("Skipping underway workchain PK %d" % n.pk)
            continue
        calc_states = [out.get_state() for out in n.get_outputs()]
        if 'WITHSCHEDULER' in calc_states:
            print("Skipping underway workchain PK %d" % n.pk)
            continue
        ## ---------------------------------------------------------------

        if 'obsolete' not in n.get_extras():
            n.set_extra('obsolete', False)
        if n.get_extra('obsolete'):
            continue

        wc_name = n.get_attrs()['_process_label']

        try:
            if not all(
                [calc.get_state() == 'FINISHED' for calc in n.get_outputs()]):
                raise Exception("Not all calculations are 'FINISHED'")

            preprocess_one(n)
            print("Preprocessed PK %d (%s)" % (n.pk, wc_name))

            n.set_extra('preprocess_successful', True)
            n.set_extra('preprocess_version', PREPROCESS_VERSION)

            if 'preprocess_error' in n.get_extras():
                n.del_extra('preprocess_error')

        except Exception as e:
            n.set_extra('preprocess_successful', False)
            n.set_extra('preprocess_error', str(e))
            n.set_extra('preprocess_version', PREPROCESS_VERSION)
            print("Failed to preprocess PK %d (%s): %s" % (n.pk, wc_name, e))
Example #22
# example queries from the tutorial

from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.data.remote import RemoteData
from aiida.orm.group import Group  # needed for the Group join below

qb = QueryBuilder()
#qb.append(Node, project=["id"])
StructureData = DataFactory("structure")
ParameterData = DataFactory("parameter")

#enumerate the <pk> for each query key
#for node, in qb.iterall():
#	print node
#print
#print("Number of species "+str( qb.count()))

#qb.append(StructureData, project=["id", "uuid"], 
#	filters={"or":[
#	{"id":{"==":285}}, {"id":{"==":3512}} ] })



# To establish links between states
qb.append(RemoteData, tag="remote", project=["*"])
qb.append(Group, group_of="remote")

#qb.append(ParameterData, project=["attributes.energy_smearing"]) #, filters=)
#qb.append(ParameterData, project=["attributes.element"])

#for i in qb.iterall():
#	print i
Example #23
import argparse
import json

from aiida.orm.querybuilder import QueryBuilder
# DataFactory, JobCalculation and Group are preloaded when run with `verdi run`

parser = argparse.ArgumentParser()
parser.add_argument("code",
                    help="code and machine where you would like to run")
parser.add_argument("json_hpc", help="json file with HPC parameters")
parser.add_argument("json_pw", help="json file with PW parameters")
args = parser.parse_args()

StructureData = DataFactory('structure')
UpfData = DataFactory('upf')
ParameterData = DataFactory('parameter')
KpointsData = DataFactory('array.kpoints')

with open(args.json_hpc) as data_file:
    json_hpc = json.load(data_file)

qb = QueryBuilder()
qb.append(JobCalculation, tag="mycalculation", project=["*"])
qb.append(Group,
          filters={"name": json_hpc["query_group"]},
          group_of="mycalculation")
calcs_list = qb.all()

pseudo_family = json_hpc['pseudo']
structures_wf = []
kpoints_wf = []
pw_parameters_wf = []
hpc_workflow_params = {}
keys = []
count = 0

#for i in calcs_list[0:1]:
Example #24
# example queries from the tutorial

from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.data.remote import RemoteData
from aiida.orm.group import Group  # needed for the Group join below

StructureData = DataFactory("structure")
ParameterData = DataFactory("parameter")

qb = QueryBuilder()
qb.append(RemoteData, tag="remote", project=["*"])
qb.append(Group,group_of="remote",
	filters={"name":{"in": ["tutorial_pbesol", "tutorial_lda", "tutorial_pbe"]   }})

qb.append(ParameterData, project=["attributes.energy_smearing"]


#qb.append(ParameterData, project=["attributes.energy_smearing"],
#	 filters={"id":{"==":1}} )

#qb.append(ParameterData, project=["attributes.energy_smearing"]

qb.all()

Example #25
# example queries from the tutorial
from sys import argv
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.data.remote import RemoteData
from aiida.orm.calculation import *

path = "/home/aiida/Documents/seb352-travail/essais-tuto/res/"
StructureData = DataFactory("structure")
ParameterData = DataFactory("parameter")
# PwCalculation= DataFactory("calculation")

qb = QueryBuilder()


qb.append

# essai une query sur PwClaculation
qb.append(
    Calculation,
    # filters={"id":{"==":4285}},
    tag="calculation",
    output_of="structure",
)


# donne juste les nom qui sont dans les groupes
qb.append(
    Group,
    group_of="calculation",
    project=["name"],
    filters={"name": {"in": ["tutorial_pbesol", "tutorial_lda", "tutorial_pbe"]}},
Example #26
    def _add_dblink_from(self,
                         src,
                         label=None,
                         link_type=LinkType.UNSPECIFIED):
        from aiida.backends.sqlalchemy import get_scoped_session
        from aiida.orm.querybuilder import QueryBuilder
        session = get_scoped_session()
        if not isinstance(src, Node):
            raise ValueError("src must be a Node instance")
        if self.uuid == src.uuid:
            raise ValueError("Cannot link to itself")

        if self._to_be_stored:
            raise ModificationNotAllowed(
                "Cannot call the internal _add_dblink_from if the "
                "destination node is not stored")
        if src._to_be_stored:
            raise ModificationNotAllowed(
                "Cannot call the internal _add_dblink_from if the "
                "source node is not stored")

        # Check for cycles. This works if the transitive closure is enabled; if
        # it isn't, this test will never fail, but then having a circular link
        # is not meaningful but does not pose a huge threat
        #
        # I am linking src->self; a loop would be created if a DbPath exists
        # already in the TC table from self to src
        if link_type is LinkType.CREATE or link_type is LinkType.INPUT:
            if QueryBuilder().append(
                    Node, filters={
                        'id': self.pk
                    }, tag='parent').append(
                        Node,
                        filters={
                            'id': src.pk
                        },
                        tag='child',
                        descendant_of='parent').count() > 0:
                raise ValueError(
                    "The link you are attempting to create would generate a loop"
                )

        if label is None:
            autolabel_idx = 1

            # Materialize the labels, so the membership test below compares
            # strings rather than (label,) result tuples
            existing_from_autolabels = [
                label for label, in session.query(DbLink.label).filter(
                    DbLink.output_id == self.dbnode.id,
                    DbLink.label.like("link%"))
            ]

            while "link_{}".format(autolabel_idx) in existing_from_autolabels:
                autolabel_idx += 1

            safety_counter = 0
            while True:
                safety_counter += 1
                if safety_counter > 100:
                    # Well, if you have more than 100 concurrent addings
                    # to the same node, you are clearly doing something wrong...
                    raise InternalError(
                        "Hey! We found more than 100 concurrent"
                        " adds of links "
                        "to the same nodes! Are you really doing that??")
                try:
                    self._do_create_link(src, "link_{}".format(autolabel_idx),
                                         link_type)
                    break
                except UniquenessError:
                    # Retry until a free label is found
                    autolabel_idx += 1
        else:
            self._do_create_link(src, label, link_type)
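
The cycle check buried in the middle of this method stands on its own: before creating a CREATE or INPUT link src -> dest, it asks whether dest already reaches src through the transitive closure. A minimal standalone sketch of the same QueryBuilder pattern:

from aiida.orm import Node
from aiida.orm.querybuilder import QueryBuilder

def would_create_loop(src, dest):
    """Return True if a path dest -> ... -> src already exists, in which
    case adding the link src -> dest would close a cycle."""
    qb = QueryBuilder()
    qb.append(Node, filters={'id': dest.pk}, tag='parent')
    qb.append(Node, filters={'id': src.pk}, tag='child', descendant_of='parent')
    return qb.count() > 0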
Example #27
File: node.py  Project: asle85/aiida-core
    def get_statistics(self, tclass, users=[]):
        from aiida.orm.querybuilder import QueryBuilder as QB
        from aiida.orm import User
        from collections import Counter

        def count_statistics(dataset):
            def get_statistics_dict(dataset):
                results = {}
                for count, typestring in sorted(
                    (v, k) for k, v in dataset.iteritems())[::-1]:
                    results[typestring] = count
                return results

            count_dict = {}

            types = Counter([r[3] for r in dataset])
            count_dict["types"] = get_statistics_dict(types)

            ctimelist = [r[1].strftime("%Y-%m") for r in dataset]
            ctime = Counter(ctimelist)
            count_dict["ctime_by_month"] = get_statistics_dict(ctime)

            ctimelist = [r[1].strftime("%Y-%m-%d") for r in dataset]
            ctime = Counter(ctimelist)
            count_dict["ctime_by_day"] = get_statistics_dict(ctime)

            mtimelist = [r[2].strftime("%Y-%m") for r in dataset]
            mtime = Counter(mtimelist)
            count_dict["mtime_by_month"] = get_statistics_dict(mtime)

            mtimelist = [r[2].strftime("%Y-%m-%d") for r in dataset]
            mtime = Counter(mtimelist)
            count_dict["mtime_by_day"] = get_statistics_dict(mtime)

            return count_dict

        statistics = {}

        q = QB()
        q.append(tclass, project=['id', 'ctime', 'mtime', 'type'], tag='node')
        q.append(User, creator_of='node', project='email')
        qb_res = q.all()

        # total count
        statistics["total"] = len(qb_res)

        node_users = Counter([r[4] for r in qb_res])
        statistics["users"] = {}

        if isinstance(users, basestring):
            users = [users]
        if len(users) == 0:
            users = node_users

        for user in users:
            user_data = [r for r in qb_res if r[4] == user]
            # statistics for user data
            statistics["users"][user] = count_statistics(user_data)
            statistics["users"][user]["total"] = node_users[user]

        # statistics for node data
        statistics.update(count_statistics(qb_res))

        return statistics
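
The inner get_statistics_dict() helper is the core trick here: count occurrences with a Counter, then rebuild a plain dict while visiting typestrings from most to least frequent. The same pattern in isolation, with illustrative values:

from collections import Counter

dataset = ['Data', 'Calc', 'Data', 'Data', 'Calc', 'Work']
counts = Counter(dataset)

results = {}
for count, typestring in sorted((v, k) for k, v in counts.items())[::-1]:
    results[typestring] = count

print results   # e.g. {'Data': 3, 'Calc': 2, 'Work': 1}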
Example #28
    def run(self, *args):
        load_dbenv()

        import argparse
        import sys

        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm import Group, Node, Computer
        from aiida.orm.importexport import export, export_zip

        parser = argparse.ArgumentParser(
            prog=self.get_full_command_name(),
            description='Export data from the DB.')
        parser.add_argument('-c',
                            '--computers',
                            nargs='+',
                            type=int,
                            metavar="PK",
                            help="Export the given computers")
        parser.add_argument('-n',
                            '--nodes',
                            nargs='+',
                            type=int,
                            metavar="PK",
                            help="Export the given nodes")
        parser.add_argument('-g',
                            '--groups',
                            nargs='+',
                            metavar="GROUPNAME",
                            help="Export all nodes in the given group(s), "
                            "identified by name.",
                            type=str)
        parser.add_argument('-G',
                            '--group_pks',
                            nargs='+',
                            metavar="PK",
                            help="Export all nodes in the given group(s), "
                            "identified by pk.",
                            type=str)
        parser.add_argument('-P',
                            '--no-parents',
                            dest='no_parents',
                            action='store_true',
                            help="Store only the nodes that are explicitly "
                            "given, without exporting the parents")
        parser.set_defaults(no_parents=False)
        parser.add_argument('-O',
                            '--no-calc-outputs',
                            dest='no_calc_outputs',
                            action='store_true',
                            help="If a calculation is included in the list of "
                            "nodes to export, do not export its outputs")
        parser.set_defaults(no_calc_outputs=False)
        parser.add_argument('-y',
                            '--overwrite',
                            dest='overwrite',
                            action='store_true',
                            help="Overwrite the output file, if it exists")
        parser.set_defaults(overwrite=False)

        zipsubgroup = parser.add_mutually_exclusive_group()
        zipsubgroup.add_argument(
            '-z',
            '--zipfile-compressed',
            dest='zipfilec',
            action='store_true',
            help="Store as zip file (experimental, should be "
            "faster")
        zipsubgroup.add_argument('-Z',
                                 '--zipfile-uncompressed',
                                 dest='zipfileu',
                                 action='store_true',
                                 help="Store as uncompressed zip file "
                                 "(experimental, should be faster")
        parser.set_defaults(zipfilec=False)
        parser.set_defaults(zipfileu=False)

        parser.add_argument('output_file',
                            type=str,
                            help='The output file name for the export file')

        parsed_args = parser.parse_args(args)

        if parsed_args.nodes is None:
            node_id_set = set()
        else:
            node_id_set = set(parsed_args.nodes)

        group_dict = dict()

        if parsed_args.groups is not None:
            qb = QueryBuilder()
            qb.append(Group,
                      tag='group',
                      project=['*'],
                      filters={'name': {
                          'in': parsed_args.groups
                      }})
            qb.append(Node, tag='node', member_of='group', project=['id'])
            res = qb.dict()

            group_dict.update(
                {_['group']['*'].name: _['group']['*'].dbgroup
                 for _ in res})
            node_id_set.update([_['node']['id'] for _ in res])

        if parsed_args.group_pks is not None:
            qb = QueryBuilder()
            qb.append(Group,
                      tag='group',
                      project=['*'],
                      filters={'id': {
                          'in': parsed_args.group_pks
                      }})
            qb.append(Node, tag='node', member_of='group', project=['id'])
            res = qb.dict()

            group_dict.update(
                {_['group']['*'].name: _['group']['*'].dbgroup
                 for _ in res})
            node_id_set.update([_['node']['id'] for _ in res])

        # The db_groups that correspond to what was searched above
        dbgroups_list = group_dict.values()

        # Getting the nodes that correspond to the ids that were found above
        if len(node_id_set) > 0:
            qb = QueryBuilder()
            qb.append(Node,
                      tag='node',
                      project=['*'],
                      filters={'id': {
                          'in': node_id_set
                      }})
            node_list = [_[0] for _ in qb.all()]
        else:
            node_list = list()

        # Check if any of the nodes wasn't found in the database.
        missing_nodes = node_id_set.difference(_.id for _ in node_list)
        for node_id in missing_nodes:
            print >> sys.stderr, ("WARNING! Node with pk= {} "
                                  "not found, skipping.".format(node_id))

        # The dbnodes of the above node list
        dbnode_list = [_.dbnode for _ in node_list]

        if parsed_args.computers is not None:
            qb = QueryBuilder()
            qb.append(Computer,
                      tag='comp',
                      project=['*'],
                      filters={'id': {
                          'in': set(parsed_args.computers)
                      }})
            computer_list = [_[0] for _ in qb.all()]
            missing_computers = set(parsed_args.computers).difference(
                _.id for _ in computer_list)
            for computer_id in missing_computers:
                print >> sys.stderr, ("WARNING! Computer with pk= {} "
                                      "not found, skipping.".format(computer_id))
        else:
            computer_list = []

        # The dbcomputers of the above computer list
        dbcomputer_list = [_.dbcomputer for _ in computer_list]

        what_list = dbnode_list + dbcomputer_list + dbgroups_list

        export_function = export
        additional_kwargs = {}
        if parsed_args.zipfileu:
            export_function = export_zip
            additional_kwargs.update({"use_compression": False})
        elif parsed_args.zipfilec:
            export_function = export_zip
            additional_kwargs.update({"use_compression": True})
        try:
            export_function(what=what_list,
                            also_parents=not parsed_args.no_parents,
                            also_calc_outputs=not parsed_args.no_calc_outputs,
                            outfile=parsed_args.output_file,
                            overwrite=parsed_args.overwrite,
                            **additional_kwargs)
        except IOError as e:
            print >> sys.stderr, "IOError: {}".format(e.message)
            sys.exit(1)
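
Stripped of the argument parsing, the command boils down to a single call. A hedged sketch of invoking the same export function directly from a verdi shell; the PKs are placeholders, while the keyword arguments mirror exactly what the command passes:

from aiida.orm import load_node
from aiida.orm.importexport import export_zip

# Placeholder PKs: export two stored nodes, their parents, and calc outputs.
what_list = [load_node(pk).dbnode for pk in (1234, 1235)]

export_zip(what=what_list,
           also_parents=True,
           also_calc_outputs=True,
           outfile='export.zip',
           overwrite=False,
           use_compression=True)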
Example #29
    def test_creation_and_deletion(self):
        from aiida.backends.djsite.db.models import DbLink  # Direct links
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.common.links import LinkType

        n1 = Node().store()
        n2 = Node().store()
        n3 = Node().store()
        n4 = Node().store()
        n5 = Node().store()
        n6 = Node().store()
        n7 = Node().store()
        n8 = Node().store()
        n9 = Node().store()

        # I create a strange graph, inserting links in a order
        # such that I often have to create the transitive closure
        # between two graphs
        n3.add_link_from(n2, link_type=LinkType.CREATE)
        n2.add_link_from(n1, link_type=LinkType.CREATE)
        n5.add_link_from(n3, link_type=LinkType.CREATE)
        n5.add_link_from(n4, link_type=LinkType.CREATE)
        n4.add_link_from(n2, link_type=LinkType.CREATE)

        n7.add_link_from(n6, link_type=LinkType.CREATE)
        n8.add_link_from(n7, link_type=LinkType.CREATE)

        # So far, no paths from 1 to 8
        self.assertEquals(
            QueryBuilder().append(Node, filters={
                'id': n1.pk
            }, tag='anc').append(Node,
                                 descendant_of='anc',
                                 filters={
                                     'id': n8.pk
                                 }).count(), 0)

        n6.add_link_from(n5, link_type=LinkType.INPUT)

        # But now there are 2 paths from 1 to 8
        self.assertEquals(
            QueryBuilder().append(Node, filters={
                'id': n1.pk
            }, tag='anc').append(Node,
                                 descendant_of='anc',
                                 filters={
                                     'id': n8.pk
                                 }).count(), 2)

        n7.add_link_from(n9, link_type=LinkType.INPUT)
        # Still two paths...
        self.assertEquals(
            QueryBuilder().append(Node, filters={
                'id': n1.pk
            }, tag='anc').append(Node,
                                 descendant_of='anc',
                                 filters={
                                     'id': n8.pk
                                 }).count(), 2)

        n9.add_link_from(n6, link_type=LinkType.INPUT)
        # And now there should be 4 paths
        self.assertEquals(
            QueryBuilder().append(Node, filters={
                'id': n1.pk
            }, tag='anc').append(Node,
                                 descendant_of='anc',
                                 filters={
                                     'id': n8.pk
                                 }).count(), 4)

        ### I start deleting now

        # I cut one branch below: I should lose 2 paths
        DbLink.objects.filter(input=n6, output=n9).delete()

        self.assertEquals(
            QueryBuilder().append(Node, filters={
                'id': n1.pk
            }, tag='anc').append(Node,
                                 descendant_of='anc',
                                 filters={
                                     'id': n8.pk
                                 }).count(), 2)

        DbLink.objects.filter(input=n2, output=n4).delete()

        self.assertEquals(
            QueryBuilder().append(Node, filters={
                'id': n1.pk
            }, tag='anc').append(Node,
                                 descendant_of='anc',
                                 filters={
                                     'id': n8.pk
                                 }).count(), 1)
        #~ self.assertEquals(
        #~ len(DbPath.objects.filter(parent=n1, child=n8).distinct()), 1)

        # Another cut should delete all links
        DbLink.objects.filter(input=n3, output=n5).delete()
        self.assertEquals(
            QueryBuilder().append(Node, filters={
                'id': n1.pk
            }, tag='anc').append(Node,
                                 descendant_of='anc',
                                 filters={
                                     'id': n8.pk
                                 }).count(), 0)
        #~ self.assertEquals(
        #~ len(DbPath.objects.filter(parent=n1, child=n8).distinct()), 0)

        # But I did not delete everything! For instance, I can check
        # the following paths
        self.assertEquals(
            QueryBuilder().append(Node, filters={
                'id': n4.pk
            }, tag='anc').append(Node,
                                 descendant_of='anc',
                                 filters={
                                     'id': n8.pk
                                 }).count(), 1)
        self.assertEquals(
            QueryBuilder().append(Node, filters={
                'id': n5.pk
            }, tag='anc').append(Node,
                                 descendant_of='anc',
                                 filters={
                                     'id': n7.pk
                                 }).count(), 1)
        #~ self.assertEquals(
        #~ len(DbPath.objects.filter(parent=n4, child=n8).distinct()), 1)
        #~ self.assertEquals(
        #~ len(DbPath.objects.filter(parent=n5, child=n7).distinct()), 1)

        # Finally, I reconnect in a different way the two graphs and
        # check that 1 and 8 are again connected
        n4.add_link_from(n3, link_type=LinkType.INPUT)

        self.assertEquals(
            QueryBuilder().append(Node, filters={
                'id': n1.pk
            }, tag='anc').append(Node,
                                 descendant_of='anc',
                                 filters={
                                     'id': n8.pk
                                 }).count(), 1)
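
The asserted counts are path counts, not link counts: after n6.add_link_from(n5) there are two routes from n1 to n8 because n2 fans out to n3 and n4, which both reach n5. That reasoning can be checked with a few lines of plain Python on the same graph:

# Count distinct directed paths in a small DAG (mirrors the test graph
# right after the n5 -> n6 link is added).
def count_paths(graph, src, dst):
    if src == dst:
        return 1
    return sum(count_paths(graph, nxt, dst) for nxt in graph.get(src, ()))

graph = {1: [2], 2: [3, 4], 3: [5], 4: [5], 5: [6], 6: [7], 7: [8]}
print count_paths(graph, 1, 8)   # 2, matching the QueryBuilder count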
Example #30
    def get_upf_groups(cls, filter_elements=None, user=None):
        """Return all names of groups of type UpfFamily, possibly with some filters.

        :param filter_elements: A string or a list of strings.
            If present, returns only the groups that contains one UPF for every element present in the list. The default
            is `None`, meaning that all families are returned.
        :param user: if None (default), return the groups for all users.
            If defined, it should be either a `User` instance or the user email.
        :return: list of `Group` entities of type UPF.
        """
        from aiida.orm import Group
        from aiida.orm import QueryBuilder
        from aiida.orm import User

        builder = QueryBuilder()
        builder.append(
            Group,
            filters={'type_string': {
                '==': cls.upffamily_type_string
            }},
            tag='group',
            project='*')

        if user:
            builder.append(User,
                           filters={'email': {
                               '==': user
                           }},
                           with_group='group')

        if isinstance(filter_elements, six.string_types):
            filter_elements = [filter_elements]

        if filter_elements is not None:
            builder.append(
                UpfData,
                filters={'attributes.element': {
                    'in': filter_elements
                }},
                with_group='group')

        builder.order_by({Group: {'id': 'asc'}})

        return [group for group, in builder.all()]
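
A hedged usage sketch for the classmethod above; the import path is an assumption and the element list is illustrative:

from aiida.orm.nodes.data.upf import UpfData  # import path is an assumption

# Families that provide a pseudopotential for both Si and O.
for group in UpfData.get_upf_groups(filter_elements=['Si', 'O']):
    print(group.label)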
Example #31
    def get_creation_statistics(self, user_email=None):
        """
        Return a dictionary with the statistics of node creation, summarized by day.

        :note: Days when no nodes were created are not present in the returned `ctime_by_day` dictionary.

        :param user_email: If None (default), return statistics for all users.
            If an email is specified, return only the statistics for the given user.

        :return: a dictionary as
            follows::

                {
                   "total": TOTAL_NUM_OF_NODES,
                   "types": {TYPESTRING1: count, TYPESTRING2: count, ...},
                   "ctime_by_day": {'YYYY-MMM-DD': count, ...}

            where in `ctime_by_day` the key is a string in the format 'YYYY-MM-DD' and the value is
            an integer with the number of nodes created that day.
        """
        from aiida.orm.querybuilder import QueryBuilder as QB
        from aiida.orm import User, Node
        from collections import Counter
        import datetime

        def count_statistics(dataset):
            def get_statistics_dict(dataset):
                results = {}
                for count, typestring in sorted(
                    (v, k) for k, v in dataset.iteritems())[::-1]:
                    results[typestring] = count
                return results

            count_dict = {}

            types = Counter([r[2] for r in dataset])
            count_dict["types"] = get_statistics_dict(types)

            ctimelist = [r[1].strftime("%Y-%m-%d") for r in dataset]
            ctime = Counter(ctimelist)

            if len(ctimelist) > 0:

                # For the way the string is formatted, we can just sort it alphabetically
                firstdate = datetime.datetime.strptime(
                    sorted(ctimelist)[0], '%Y-%m-%d')
                lastdate = datetime.datetime.strptime(
                    sorted(ctimelist)[-1], '%Y-%m-%d')

                curdate = firstdate
                outdata = {}

                while curdate <= lastdate:
                    curdatestring = curdate.strftime('%Y-%m-%d')
                    outdata[curdatestring] = ctime.get(curdatestring, 0)
                    curdate += datetime.timedelta(days=1)
                count_dict["ctime_by_day"] = outdata

            else:
                count_dict["ctime_by_day"] = {}

            return count_dict

        statistics = {}

        q = QB()
        q.append(Node, project=['id', 'ctime', 'type'], tag='node')
        if user_email is not None:
            q.append(User,
                     creator_of='node',
                     project='email',
                     filters={'email': user_email})
        qb_res = q.all()

        # total count
        statistics["total"] = len(qb_res)
        statistics.update(count_statistics(qb_res))

        return statistics
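
The detail that distinguishes this from get_statistics() in Example #27 is the gap filling: every day between the first and last creation date is materialized, with zero counts where nothing was created. The date-walking pattern in isolation, with illustrative dates:

import datetime
from collections import Counter

ctime = Counter(['2023-01-01', '2023-01-01', '2023-01-04'])
first = datetime.datetime.strptime(min(ctime), '%Y-%m-%d')
last = datetime.datetime.strptime(max(ctime), '%Y-%m-%d')

outdata = {}
curdate = first
while curdate <= last:
    key = curdate.strftime('%Y-%m-%d')
    outdata[key] = ctime.get(key, 0)
    curdate += datetime.timedelta(days=1)

print outdata   # 2023-01-02 and 2023-01-03 appear with count 0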
Example #32
def create(outfile, computers, groups, nodes, group_names, input_forward,
           create_reversed, return_reversed, call_reversed, overwrite,
           archive_format):
    """
    Export nodes and groups of nodes to an archive file for backup or sharing purposes
    """
    import sys
    from aiida.backends.utils import load_dbenv, is_dbenv_loaded
    # TODO: Replace with aiida.cmdline.utils.decorators.with_dbenv decorator
    # TODO: when we merge to develop
    if not is_dbenv_loaded():
        load_dbenv()
    from aiida.orm import Group, Node, Computer
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm.importexport import export, export_zip

    node_id_set = set(nodes)
    group_dict = dict()

    if group_names:
        qb = QueryBuilder()
        qb.append(Group, tag='group', project=['*'], filters={'name': {'in': group_names}})
        qb.append(Node, tag='node', member_of='group', project=['id'])
        res = qb.dict()

        group_dict.update(
            {group['group']['*'].id: group['group']['*'] for group in res})
        node_id_set.update([node['node']['id'] for node in res])

    if groups:
        qb = QueryBuilder()
        qb.append(Group, tag='group', project=['*'], filters={'id': {'in': groups}})
        qb.append(Node, tag='node', member_of='group', project=['id'])
        res = qb.dict()

        group_dict.update(
            {group['group']['*'].id: group['group']['*'] for group in res})
        node_id_set.update([node['node']['id'] for node in res])

    groups_list = group_dict.values()

    # Getting the nodes that correspond to the ids that were found above
    if len(node_id_set) > 0:
        qb = QueryBuilder()
        qb.append(Node, tag='node', project=['*'], filters={'id': {'in': node_id_set}})
        node_list = [node[0] for node in qb.all()]
    else:
        node_list = list()

    # Check if any of the nodes wasn't found in the database.
    missing_nodes = node_id_set.difference(node.id for node in node_list)
    for node_id in missing_nodes:
        print >> sys.stderr, ('WARNING! Node with pk={} not found, skipping'.format(node_id))

    if computers:
        qb = QueryBuilder()
        qb.append(Computer, tag='comp', project=['*'], filters={'id': {'in': set(computers)}})
        computer_list = [computer[0] for computer in qb.all()]
        missing_computers = set(computers).difference(computer.id for computer in computer_list)

        for computer_id in missing_computers:
            print >> sys.stderr, ('WARNING! Computer with pk={} not found, skipping'.format(computer_id))
    else:
        computer_list = []

    what_list = node_list + computer_list + groups_list
    additional_kwargs = dict()

    if archive_format == 'zip':
        export_function = export_zip
        additional_kwargs.update({'use_compression': True})
    elif archive_format == 'zip-uncompressed':
        export_function = export_zip
        additional_kwargs.update({'use_compression': False})
    elif archive_format == 'tar.gz':
        export_function = export
    else:
        print >> sys.stderr, 'invalid --archive-format value {}'.format(
            archive_format)
        sys.exit(1)

    try:
        export_function(
            what=what_list, input_forward=input_forward,
            create_reversed=create_reversed,
            return_reversed=return_reversed,
            call_reversed=call_reversed, outfile=outfile,
            overwrite=overwrite, **additional_kwargs
        )

    except IOError as e:
        print >> sys.stderr, 'IOError: {}'.format(e.message)
        sys.exit(1)
Example #33
    def query_jobcalculations_by_computer_user_state(
            self,
            state,
            computer=None,
            user=None,
            only_computer_user_pairs=False,
            only_enabled=True,
            limit=None):
        """
        Filter all calculations with a given state.

        Issue a warning if the state is not in the list of valid states.

        :param string state: The state to be used to filter (should be a string among
                those defined in aiida.common.datastructures.calc_states)
        :param computer: a Django DbComputer entry, or a Computer object, of a
                computer in the DbComputer table.
                A string for the hostname is also valid.
        :param user: a Django entry (or its pk) of a user in the DbUser table;
                if present, the results are restricted to calculations of that
                specific user
        :param bool only_computer_user_pairs: if False (default) return a queryset
                where each element is a suitable instance of Node (it should
                be an instance of Calculation, if everything goes right!)
                If True, return only a list of tuples, where each tuple is
                in the format
                ('dbcomputer__id', 'user__id')
                [where the IDs are the IDs of the respective tables]
        :param bool only_enabled: if True (default), restrict the query to
                calculations on enabled computers
        :param int limit: Limit the number of rows returned

        :return: a list of calculation objects matching the filters.
        """
        # I assume that calc_states are strings. If this changes in the future,
        # update the filter below from dbattributes__tval to the correct field.
        from aiida.orm.computer import Computer
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.user import User
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.common.exceptions import InputValidationError
        from aiida.common.datastructures import calc_states

        if state not in calc_states:
            raise InputValidationError(
                "querying for calculation state='{}', but it "
                "is not a valid calculation state".format(state))

        calcfilter = {'state': {'==': state}}
        # Honor the only_enabled flag instead of silently ignoring it
        computerfilter = {"enabled": {'==': True}} if only_enabled else {}
        userfilter = {}

        if computer is None:
            pass
        elif isinstance(computer, int):
            computerfilter.update({'id': {'==': computer}})
        elif isinstance(computer, Computer):
            computerfilter.update({'id': {'==': computer.pk}})
        else:
            try:
                computerfilter.update({'id': {'==': computer.id}})
            except AttributeError as e:
                raise Exception("{} is not a valid computer\n{}".format(
                    computer, e))
        if user is None:
            pass
        elif isinstance(user, int):
            userfilter.update({'id': {'==': user}})
        else:
            try:
                userfilter.update({'id': {'==': int(user.id)}})
                # Is that safe?
            except (AttributeError, TypeError, ValueError):
                raise Exception("{} is not a valid user".format(user))

        qb = QueryBuilder()
        qb.append(type="computer", tag='computer', filters=computerfilter)
        qb.append(JobCalculation,
                  filters=calcfilter,
                  tag='calc',
                  has_computer='computer')
        qb.append(type="user",
                  tag='user',
                  filters=userfilter,
                  creator_of="calc")

        if only_computer_user_pairs:
            qb.add_projection("computer", "*")
            qb.add_projection("user", "*")
            returnresult = qb.distinct().all()
        else:
            qb.add_projection("calc", "*")
            if limit is not None:
                qb.limit(limit)
            returnresult = qb.all()
            returnresult = zip(*returnresult)[0]
        return returnresult
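
The closing zip(*returnresult)[0] is just column extraction: with a single projection, qb.all() returns rows that are one-element lists, and transposing them yields a flat tuple of calculations. In isolation (Python 2, as in the snippet; note it raises IndexError when no rows match):

rows = [['calc_a'], ['calc_b'], ['calc_c']]   # shape of qb.all() with one projection
print zip(*rows)[0]                           # ('calc_a', 'calc_b', 'calc_c')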
Example #34
def test_uploadfamily_dryrun(fresh_aiida_env, cmd_params):
    """Make sure --dry-run does not affect the db"""
    from aiida.orm import Node, Group
    from aiida.orm.querybuilder import QueryBuilder

    node_qb = QueryBuilder(path=[Node])
    node_count = node_qb.count()
    group_qb = QueryBuilder(path=[Group])
    group_count = group_qb.count()

    result = run_cmd('uploadfamily', [
        cmd_params.PATH_OPTION, cmd_params.NAME_OPTION, cmd_params.DESC_OPTION,
        '--dry-run'
    ])

    assert not result.exception

    node_qb = QueryBuilder(path=[Node])
    assert node_count == node_qb.count()
    group_qb = QueryBuilder(path=[Group])
    assert group_count == group_qb.count()
Example #35
def create(outfile, computers, groups, nodes, group_names, no_parents,
           no_calc_outputs, overwrite, archive_format):
    """
    Export nodes and groups of nodes to an archive file for backup or sharing purposes
    """
    import sys
    from aiida.backends.utils import load_dbenv
    load_dbenv()
    from aiida.orm import Group, Node, Computer
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm.importexport import export, export_zip

    node_id_set = set(nodes)
    group_dict = dict()

    if group_names:
        qb = QueryBuilder()
        qb.append(Group,
                  tag='group',
                  project=['*'],
                  filters={'name': {
                      'in': group_names
                  }})
        qb.append(Node, tag='node', member_of='group', project=['id'])
        res = qb.dict()

        group_dict.update({
            group['group']['*'].name: group['group']['*'].dbgroup
            for group in res
        })
        node_id_set.update([node['node']['id'] for node in res])

    if groups:
        qb = QueryBuilder()
        qb.append(Group,
                  tag='group',
                  project=['*'],
                  filters={'id': {
                      'in': groups
                  }})
        qb.append(Node, tag='node', member_of='group', project=['id'])
        res = qb.dict()

        group_dict.update({
            group['group']['*'].name: group['group']['*'].dbgroup
            for group in res
        })
        node_id_set.update([node['node']['id'] for node in res])

    # The db_groups that correspond to what was searched above
    dbgroups_list = group_dict.values()

    # Getting the nodes that correspond to the ids that were found above
    if len(node_id_set) > 0:
        qb = QueryBuilder()
        qb.append(Node,
                  tag='node',
                  project=['*'],
                  filters={'id': {
                      'in': node_id_set
                  }})
        node_list = [node[0] for node in qb.all()]
    else:
        node_list = list()

    # Check if any of the nodes wasn't found in the database.
    missing_nodes = node_id_set.difference(node.id for node in node_list)
    for node_id in missing_nodes:
        print >> sys.stderr, (
            'WARNING! Node with pk={} not found, skipping'.format(node_id))

    # The dbnodes of the above node list
    dbnode_list = [node.dbnode for node in node_list]

    if computers:
        qb = QueryBuilder()
        qb.append(Computer,
                  tag='comp',
                  project=['*'],
                  filters={'id': {
                      'in': set(computers)
                  }})
        computer_list = [computer[0] for computer in qb.all()]
        missing_computers = set(computers).difference(
            computer.id for computer in computer_list)

        for computer_id in missing_computers:
            print >> sys.stderr, (
                'WARNING! Computer with pk={} not found, skipping'.format(
                    computer_id))
    else:
        computer_list = []

    # The dbcomputers of the above computer list
    dbcomputer_list = [computer.dbcomputer for computer in computer_list]

    what_list = dbnode_list + dbcomputer_list + dbgroups_list
    additional_kwargs = dict()

    if archive_format == 'zip':
        export_function = export_zip
        additional_kwargs.update({'use_compression': True})
    elif archive_format == 'zip-uncompressed':
        export_function = export_zip
        additional_kwargs.update({'use_compression': False})
    elif archive_format == 'tar.gz':
        export_function = export
    else:
        print >> sys.stderr, 'invalid --archive-format value {}'.format(
            archive_format)
        sys.exit(1)

    try:
        export_function(what=what_list,
                        also_parents=not no_parents,
                        also_calc_outputs=not no_calc_outputs,
                        outfile=outfile,
                        overwrite=overwrite,
                        **additional_kwargs)
    except IOError as e:
        print >> sys.stderr, 'IOError: {}'.format(e.message)
        sys.exit(1)

# Queries example from the tutorial
from sys import argv
from aiida.orm import DataFactory
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.data.remote import RemoteData
from aiida.orm.calculation import *

path = "/home/aiida/Documents/seb352-travail/essais-tuto/res/"
StructureData = DataFactory("structure")
ParameterData = DataFactory("parameter")
#PwCalculation = DataFactory("calculation")

qb = QueryBuilder()

qb.append(ParameterData,
          project=["attributes.step0", "attributes.steps"],
          filters={"id": {"==": 5615}})

ok = 0
count = qb.count()
outfile = open(path + "results-parabola-dict", 'w')

for i in qb.iterall():
    outfile.write("{}\n\n{}\n\n{}\n\n{}".format(i[0], i[1][0], i[1][1], i[1][2]))
    ok += 1
    print i[0]['dE']
    print len(i[0])

outfile.close()