예제 #1
0
    def get_code_helper(cls, label, machinename=None):
        """
        Load a single code identified by its label and, optionally, the
        computer it is configured on.

        :param label: the code label identifying the code to load
        :param machinename: the machine name where code is setup
        :return: the matching code instance

        :raise aiida.common.NotExistent: if no code identified by the given string is found
        :raise aiida.common.MultipleObjectsError: if the string cannot identify uniquely
            a code
        """
        from aiida.common.exceptions import NotExistent, MultipleObjectsError
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computers import Computer

        query = QueryBuilder()
        query.append(cls, filters={'label': {'==': label}}, project=['*'], tag='code')
        if machinename:
            query.append(Computer, filters={'name': {'==': machinename}}, with_node='code')

        # Run the count query once instead of twice.
        num_matches = query.count()
        if num_matches == 0:
            raise NotExistent("'{}' is not a valid code name.".format(label))
        if num_matches > 1:
            codes = query.all(flat=True)
            # Sort numerically by pk: sorting the *string* form would order
            # e.g. '10' before '2'.
            id_list = ', '.join(str(pk) for pk in sorted(code.pk for code in codes))
            retstr = ("There are multiple codes with label '{}', having IDs: ".format(label))
            retstr += id_list + '.\n'
            retstr += ('Relabel them (using their ID), or refer to them with their ID.')
            raise MultipleObjectsError(retstr)
        return query.first()[0]
예제 #2
0
    def get_subclass_from_pk(cls, pk):
        """
        Load the node with the given pk, checking it is an instance of ``cls``.

        :param pk: the primary key (anything convertible to ``int``)
        :return: the matching node instance
        :raise ValueError: if ``pk`` cannot be converted to an integer, or if
            a database error occurs during the query
        :raise NotExistent: if no entry with the given pk exists, or the entry
            is not an instance of ``cls``
        """
        from aiida.orm.querybuilder import QueryBuilder
        from sqlalchemy.exc import DatabaseError
        # If it is not an int make a final attempt to convert to an integer.
        # Catch only the conversion errors: a bare ``except`` would also
        # swallow KeyboardInterrupt/SystemExit.
        try:
            pk = int(pk)
        except (TypeError, ValueError):
            raise ValueError("Incorrect type for int")

        try:
            qb = QueryBuilder()
            qb.append(cls, filters={'id': {'==': pk}})

            if qb.count() == 0:
                raise NotExistent("No entry with pk= {} found".format(pk))

            node = qb.first()[0]

            if not isinstance(node, cls):
                raise NotExistent("pk= {} is not an instance of {}".format(
                    pk, cls.__name__))
            return node
        except DatabaseError as de:
            # Exceptions have no ``.message`` attribute on Python 3; use str().
            raise ValueError(str(de))
예제 #3
0
def get_calc_by_label(workcalc, label):
    """
    Return the unique, successfully finished calculation attached to a
    work chain via the given link label.

    :param workcalc: the parent work chain node (matched by its uuid)
    :param label: the ``label`` attribute of the wanted calculation
    :return: the matching ``CalcJob`` node
    :raise AssertionError: if the match is not unique or the calculation
        did not finish successfully
    """
    qb = QueryBuilder()
    qb.append(WorkChainNode, filters={'uuid': workcalc.uuid})
    qb.append(CalcJob, with_incoming=WorkChainNode, filters={'label': label})
    # Explicit raises instead of ``assert``: assert statements are stripped
    # when Python runs with -O, silently disabling these sanity checks.
    if qb.count() != 1:
        raise AssertionError(
            "Expected exactly one calculation with label '{}'".format(label))
    calc = qb.first()[0]
    if not calc.is_finished_ok:
        raise AssertionError(
            "Calculation with label '{}' did not finish ok".format(label))
    return calc
def get_calc_by_label(workcalc, label):
    """
    Return the unique FINISHED calculation attached to a work calculation
    via the given link label (legacy pre-1.0 AiiDA API).

    :param workcalc: the parent work calculation node (matched by its uuid)
    :param label: the ``label`` attribute of the wanted calculation
    :return: the matching ``JobCalculation`` node
    :raise AssertionError: if the match is not unique or the calculation
        is not in the FINISHED state
    """
    qb = QueryBuilder()
    qb.append(WorkCalculation, filters={'uuid': workcalc.uuid})
    qb.append(JobCalculation,
              output_of=WorkCalculation,
              filters={'label': label})
    # Explicit raises instead of ``assert``: assert statements are stripped
    # when Python runs with -O, silently disabling these sanity checks.
    if qb.count() != 1:
        raise AssertionError(
            "Expected exactly one calculation with label '{}'".format(label))
    calc = qb.first()[0]
    if calc.get_state() != 'FINISHED':
        raise AssertionError(
            "Calculation with label '{}' is not FINISHED".format(label))
    return calc
예제 #5
0
    def node(self, info, uuid: strawberry.ID) -> typing.Optional[Node]:
        """Resolve a single AiiDA node by UUID, or return ``None`` if no match."""
        builder = QueryBuilder()
        builder.append(orm.Node, filters={"uuid": {"==": uuid}}, project=["*"])
        match = builder.first()

        if not match:
            return None

        # TODO: call function to return proper type depending AiiDA node_type, e.g. the generic version of:
        # if match[0].node_type == "data.gaussian.basisset.BasisSet.":
        #     return GaussianBasisset.from_orm(match)
        return BareNode.from_orm(match)
예제 #6
0
    def computer(self, info, uuid: strawberry.ID) -> typing.Optional[Computer]:
        """Resolve a single computer by UUID, validating the UUID format first.

        Returns ``None`` when no computer matches; raises ``ValueError`` for a
        malformed UUID string.
        """
        try:
            UUID(uuid)
        except ValueError:
            # mask original exception completely
            raise ValueError("invalid value passed for uuid") from None

        builder = QueryBuilder()
        builder.append(orm.Computer, filters={"uuid": {"==": uuid}}, project=["*"])
        match = builder.first()

        if not match:
            return None
        return Computer.from_orm(match)
예제 #7
0
    def get_subclass_from_uuid(cls, uuid):
        """
        Load the node with the given UUID, checking it is an instance of ``cls``.

        :param uuid: the UUID identifying the node (converted to ``str``)
        :return: the matching node instance
        :raise ValueError: if a database error occurs during the query
        :raise NotExistent: if no entry with the given UUID exists, or the
            entry is not an instance of ``cls``
        """
        from aiida.orm.querybuilder import QueryBuilder
        from sqlalchemy.exc import DatabaseError
        try:
            qb = QueryBuilder()
            qb.append(cls, filters={'uuid': {'==': str(uuid)}})

            if qb.count() == 0:
                raise NotExistent("No entry with UUID={} found".format(uuid))

            node = qb.first()[0]

            if not isinstance(node, cls):
                raise NotExistent("UUID={} is not an instance of {}".format(
                    uuid, cls.__name__))
            return node
        except DatabaseError as de:
            # Exceptions have no ``.message`` attribute on Python 3; use str().
            raise ValueError(str(de))
예제 #8
0
def _retrieve_basis_sets(files, stop_if_existing):
    """ get existing basis sets or create if not

    :param files: list of basis set file paths
    :param stop_if_existing: if True, check for the md5 of the files and,
        if the file already exists in the DB, raises a ValueError.
        If False, simply adds the existing BasisSetData node to the group.
    :return: list of (basisset, created) pairs, where ``created`` actually
        means "to be created" (the node is not yet stored)
    """
    from aiida.orm.querybuilder import QueryBuilder

    basis_and_created = []
    for fname in files:
        _, content = parse_basis(fname)
        md5sum = md5_from_string(content)
        qb = QueryBuilder()
        qb.append(BasisSetData, filters={'attributes.md5': {'==': md5sum}})
        existing_basis = qb.first()

        if existing_basis is None:
            # return the basis set data instances, not stored
            basisset, created = BasisSetData.get_or_create(fname,
                                                           use_first=True,
                                                           store_basis=False)
            # to check whether only one basis set per element exists
            # NOTE: actually, created has the meaning of "to_be_created"
            basis_and_created.append((basisset, created))
        else:
            if stop_if_existing:
                raise ValueError("A Basis Set with identical MD5 to "
                                 " {} cannot be added with stop_if_existing"
                                 "".format(fname))
            # Reuse the already-stored node instead of creating a duplicate.
            basis_and_created.append((existing_basis[0], False))

    return basis_and_created
예제 #9
0
    def get_io_tree(self, uuid_pattern, tree_in_limit, tree_out_limit):
        # pylint: disable=too-many-locals
        """
        json data to display nodes in tree format
        :param uuid_pattern: main node uuid
        :param tree_in_limit: maximum number of incoming links to return
        :param tree_out_limit: maximum number of outgoing links to return
        :return: json data to display node tree
        """
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm import Node

        def get_node_description(node):
            """
            Get the description of the node.
            CalcJobNodes migrated from AiiDA < 1.0.0 do not have a valid CalcJobState,
            in this case the function returns as description the type of the node (CalcJobNode)
            :param node: node object
            :return: description of the node
            """
            try:
                description = node.get_description()
            except ValueError:
                description = node.node_type.split('.')[-2]
            return description

        def collect_links(tag, limit, incoming):
            """Return (count, entries) of linked nodes in one direction.

            The incoming/outgoing handling was previously duplicated; this
            helper serializes both directions identically, differing only in
            the query edge direction.
            """
            qb_links = QueryBuilder()
            qb_links.append(Node, tag='main', project=['*'], filters=self._id_filter)
            direction = {'with_outgoing': 'main'} if incoming else {'with_incoming': 'main'}
            qb_links.append(Node,
                            tag=tag,
                            project=['*'],
                            edge_project=['label', 'type'],
                            **direction).order_by({tag: [{'id': {'order': 'asc'}}]})
            if limit is not None:
                qb_links.limit(limit)

            count = qb_links.count()
            entries = []
            for row in qb_links.iterdict():
                node = row[tag]['*']
                edge = row['main--' + tag]
                entries.append({
                    'ctime': node.ctime,
                    'mtime': node.mtime,
                    'id': node.pk,
                    'uuid': node.uuid,
                    'node_type': node.node_type,
                    'node_label': node.label,
                    'description': get_node_description(node),
                    'link_label': edge['label'],
                    'link_type': edge['type']
                })
            return count, entries

        def count_links(tag, incoming):
            """Total number of links in one direction, without any limit."""
            builder = QueryBuilder()
            builder.append(Node,
                           tag='main',
                           project=['id'],
                           filters=self._id_filter)
            if incoming:
                builder.append(Node, tag=tag, project=['id'], with_outgoing='main')
            else:
                builder.append(Node, tag=tag, project=['id'], with_incoming='main')
            return builder.count()

        # Check whether uuid_pattern identifies a unique node
        self._check_id_validity(uuid_pattern)

        qb_obj = QueryBuilder()
        qb_obj.append(Node, tag='main', project=['*'], filters=self._id_filter)

        nodes = []

        if qb_obj.count() > 0:
            main_node = qb_obj.first()[0]
            nodes.append({
                'ctime': main_node.ctime,
                'mtime': main_node.mtime,
                'id': main_node.pk,
                'uuid': main_node.uuid,
                'node_type': main_node.node_type,
                'node_label': main_node.label,
                'description': get_node_description(main_node),
                'incoming': [],
                'outgoing': []
            })

        # Collect (possibly limited) incoming and outgoing link entries.
        sent_no_of_incomings, incoming_entries = collect_links(
            'in', tree_in_limit, incoming=True)
        if incoming_entries:
            nodes[0]['incoming'].extend(incoming_entries)

        sent_no_of_outgoings, outgoing_entries = collect_links(
            'out', tree_out_limit, incoming=False)
        if outgoing_entries:
            nodes[0]['outgoing'].extend(outgoing_entries)

        metadata = [{
            'total_no_of_incomings': count_links('in', incoming=True),
            'total_no_of_outgoings': count_links('out', incoming=False),
            'sent_no_of_incomings': sent_no_of_incomings,
            'sent_no_of_outgoings': sent_no_of_outgoings
        }]

        return {'nodes': nodes, 'metadata': metadata}
예제 #10
0
def upload_usp_family(folder,
                      group_label,
                      group_description,
                      stop_if_existing=True):
    """
    Upload a set of usp/recpot files in a give group

    :param folder: a path containing all UPF files to be added.
        Only files ending in .usp/.recpot/.uspcc are considered.
    :param group_label: the name of the group to create. If it exists and is
        non-empty, a UniquenessError is raised.
    :param group_description: a string to be set as the group description.
        Overwrites previous descriptions, if the group was existing.
    :param stop_if_existing: if True, check for the md5 of the files and,
        if the file already exists in the DB, raises a ValueError.
        If False, simply adds the existing UspData node to the group.
    :return: tuple (number of files found, number of newly stored nodes)
    """
    import os

    from aiida.common import UniquenessError, NotExistent
    from aiida.orm.querybuilder import QueryBuilder
    from .otfg import OTFGGroup

    # Resolve symlinks and keep only pseudopotential files.
    files = [
        os.path.realpath(os.path.join(folder, i)) for i in os.listdir(folder)
        if os.path.isfile(os.path.join(folder, i)) and (
            i.lower().endswith('.usp') or i.lower().endswith('recpot')
            or i.lower().endswith('.uspcc'))
    ]

    nfiles = len(files)

    try:
        group = OTFGGroup.get(label=group_label)
        group_created = False
    except NotExistent:
        group = OTFGGroup(label=group_label)
        group_created = True

    # Update the description even if the group already existed
    group.description = group_description

    pseudo_and_created = []  # A list of records (UspData, created)

    for fpath in files:

        md5sum = md5_file(fpath)
        qb = QueryBuilder()
        qb.append(UspData, filters={'attributes.md5': {'==': md5sum}})
        existing_usp = qb.first()

        # Add the file if it is not in the database yet
        if existing_usp is None:
            pseudo, created = UspData.get_or_create(fpath,
                                                    use_first=True,
                                                    store_usp=False)
            pseudo_and_created.append((pseudo, created))

        # The same file is there already
        else:
            if stop_if_existing:
                raise ValueError("A usp/recpot with identical MD5 to"
                                 " {} cannot be added with stop_if_existing"
                                 "".format(fpath))
            pseudo_and_created.append((existing_usp[0], False))

    # Check for unique per element
    elements = [(i[0].element, i[0].md5sum) for i in pseudo_and_created]

    # Check if we will duplicate after insertion

    if not group_created:
        for aiida_n in group.nodes:
            if not isinstance(aiida_n, UspData):
                continue
            elements.append((aiida_n.element, aiida_n.md5sum))

    # Discard duplicated pairs
    elements = set(elements)
    elements_names = [e[0] for e in elements]

    # Check the uniqueness of the complete group
    if len(elements_names) != len(set(elements_names)):
        duplicates = {x for x in elements_names if elements_names.count(x) > 1}
        dup_string = ", ".join(duplicates)
        raise UniquenessError(
            "More than one usp/recpot found for the elements: " + dup_string +
            ".")

    if group_created:
        group.store()

    # Save the new usp nodes in the database, then add all of them to the group
    for pseudo, created in pseudo_and_created:
        if created:
            pseudo.store()

    nodes_new = [
        pseudo for pseudo, created in pseudo_and_created if created is True
    ]
    group.add_nodes([pseudo for pseudo, created in pseudo_and_created])

    return nfiles, len(nodes_new)
예제 #11
0
def upload_psml_family(folder,
                       group_label,
                       group_description,
                       stop_if_existing=True):
    """
    Upload a set of PSML files in a given group.

    Deprecated: emits an ``AiidaSiestaDeprecationWarning``; superseded by
    ``fam.create_from_folder`` from ``aiida_pseudo`` (see warning below).

    :param folder: a path containing all PSML files to be added.
        Only files ending in .psml (case sensitive) are considered.
    :param group_label: the name of the group to create. If it exists and is
        non-empty, a UniquenessError is raised.
    :param group_description: a string to be set as the group description.
        Overwrites previous descriptions, if the group was existing.
    :param stop_if_existing: if True, check for the md5 of the files and,
        if the file already exists in the DB, raises a ValueError.
        If False, simply adds the existing PsmlData node to the group.
    :return: a tuple (nfiles, nuploaded): number of files found in the folder
        and number of newly stored nodes.
    """
    import os
    from aiida import orm
    from aiida.common import AIIDA_LOGGER as aiidalogger
    from aiida.common.exceptions import UniquenessError
    from aiida.orm.querybuilder import QueryBuilder
    from aiida_siesta.groups.pseudos import PsmlFamily

    message = (  #pylint: disable=invalid-name
        'This function has been deprecated and will be removed in `v2.0.0`. ' +
        '`upload_psml_family` is substitued by `fam.create_from_folder` ' +
        'where `fam` is an instance of the families classes in `aiida_pseudo.groups.family`.'
    )

    warnings.warn(message, AiidaSiestaDeprecationWarning)

    if not os.path.isdir(folder):
        raise ValueError("folder must be a directory")

    # only files, and only those ending with .psml;
    # go to the real file if it is a symlink
    files = [
        os.path.realpath(os.path.join(folder, i)) for i in os.listdir(folder)
        if os.path.isfile(os.path.join(folder, i)) and i.endswith('.psml')
    ]

    nfiles = len(files)

    automatic_user = orm.User.objects.get_default()
    #group, group_created = orm.Group.objects.get_or_create(
    #    label=group_label, type_string=PSMLGROUP_TYPE, user=automatic_user
    #)
    group, group_created = PsmlFamily.objects.get_or_create(
        label=group_label, user=automatic_user)

    # Refuse to touch a family owned by a different user.
    if group.user.email != automatic_user.email:
        raise UniquenessError(
            "There is already a PsmlFamily group with name {}"
            ", but it belongs to user {}, therefore you "
            "cannot modify it".format(group_label, group.user.email))

    # Always update description, even if the group already existed
    group.description = group_description

    # NOTE: GROUP SAVED ONLY AFTER CHECKS OF UNICITY

    pseudo_and_created = []

    for afile in files:
        md5sum = md5_file(afile)
        qb = QueryBuilder()
        qb.append(PsmlData, filters={'attributes.md5': {'==': md5sum}})
        existing_psml = qb.first()

        #existing_psml = PsmlData.query(dbattributes__key="md5",
        #                            dbattributes__tval = md5sum)

        if existing_psml is None:
            # return the psmldata instances, not stored
            pseudo, created = PsmlData.get_or_create(afile,
                                                     use_first=True,
                                                     store_psml=False)
            # to check whether only one psml per element exists
            # NOTE: actually, created has the meaning of "to_be_created"
            pseudo_and_created.append((pseudo, created))
        else:
            if stop_if_existing:
                raise ValueError("A PSML with identical MD5 to "
                                 " {} cannot be added with stop_if_existing"
                                 "".format(afile))
            # Reuse the already-stored node (qb.first() returns a list).
            existing_psml = existing_psml[0]
            pseudo_and_created.append((existing_psml, False))

    # check whether pseudo are unique per element
    elements = [(i[0].element, i[0].md5sum) for i in pseudo_and_created]
    # If group already exists, check also that I am not inserting more than
    # once the same element
    if not group_created:
        for aiida_n in group.nodes:
            # Skip non-pseudos
            if not isinstance(aiida_n, PsmlData):
                continue
            elements.append((aiida_n.element, aiida_n.md5sum))

    elements = set(elements)  # Discard elements with the same MD5, that would
    # not be stored twice
    elements_names = [e[0] for e in elements]

    # Two entries with the same element but different MD5 would collide.
    if not len(elements_names) == len(set(elements_names)):
        duplicates = {x for x in elements_names if elements_names.count(x) > 1}
        duplicates_string = ", ".join(i for i in duplicates)
        raise UniquenessError("More than one PSML found for the elements: " +
                              duplicates_string + ".")

    # At this point, save the group, if still unstored
    if group_created:
        group.store()

    # save the psml in the database, and add them to group
    for pseudo, created in pseudo_and_created:
        if created:
            pseudo.store()

            aiidalogger.debug("New node {} created for file {}".format(
                pseudo.uuid, pseudo.filename))
        else:
            aiidalogger.debug("Reusing node {} for file {}".format(
                pseudo.uuid, pseudo.filename))

    # Add all nodes to the group together
    group.add_nodes([pseudo for pseudo, created in pseudo_and_created])

    nuploaded = len([_ for _, created in pseudo_and_created if created])

    return nfiles, nuploaded
예제 #12
0
def upload_psf_family(folder,
                      group_name,
                      group_description,
                      stop_if_existing=True):
    """
    Upload a set of PSF files in a given group.

    NOTE(review): uses pre-1.0 AiiDA APIs (``aiida.backends.utils.
    get_automatic_user``, ``Group(name=...)``, ``aiida.common.aiidalogger``);
    will not run on modern AiiDA.

    :param folder: a path containing all PSF files to be added.
        Only files ending in .PSF (case-insensitive) are considered.
    :param group_name: the name of the group to create. If it exists and is
        non-empty, a UniquenessError is raised.
    :param group_description: a string to be set as the group description.
        Overwrites previous descriptions, if the group was existing.
    :param stop_if_existing: if True, check for the md5 of the files and,
        if the file already exists in the DB, raises a ValueError.
        If False, simply adds the existing PsfData node to the group.
    :return: a tuple (nfiles, nuploaded): number of files found in the folder
        and number of newly stored nodes.
    """
    import os
    import aiida.common
    from aiida.common import aiidalogger
    from aiida.orm import Group
    from aiida.common.exceptions import UniquenessError, NotExistent
    from aiida.backends.utils import get_automatic_user
    from aiida.orm.querybuilder import QueryBuilder
    if not os.path.isdir(folder):
        raise ValueError("folder must be a directory")

    # only files, and only those ending with .psf or .PSF;
    # go to the real file if it is a symlink
    files = [
        os.path.realpath(os.path.join(folder, i)) for i in os.listdir(folder)
        if os.path.isfile(os.path.join(folder, i))
        and i.lower().endswith('.psf')
    ]

    nfiles = len(files)

    try:
        group = Group.get(name=group_name, type_string=PSFGROUP_TYPE)
        group_created = False
    except NotExistent:
        group = Group(name=group_name,
                      type_string=PSFGROUP_TYPE,
                      user=get_automatic_user())
        group_created = True

    # Refuse to touch a family owned by a different user.
    if group.user != get_automatic_user():
        raise UniquenessError("There is already a PsfFamily group with name {}"
                              ", but it belongs to user {}, therefore you "
                              "cannot modify it".format(
                                  group_name, group.user.email))

    # Always update description, even if the group already existed
    group.description = group_description

    # NOTE: GROUP SAVED ONLY AFTER CHECKS OF UNICITY

    pseudo_and_created = []

    for f in files:
        md5sum = aiida.common.utils.md5_file(f)
        qb = QueryBuilder()
        qb.append(PsfData, filters={'attributes.md5': {'==': md5sum}})
        existing_psf = qb.first()

        #existing_psf = PsfData.query(dbattributes__key="md5",
        #                            dbattributes__tval = md5sum)

        if existing_psf is None:
            # return the psfdata instances, not stored
            pseudo, created = PsfData.get_or_create(f,
                                                    use_first=True,
                                                    store_psf=False)
            # to check whether only one psf per element exists
            # NOTE: actually, created has the meaning of "to_be_created"
            pseudo_and_created.append((pseudo, created))
        else:
            if stop_if_existing:
                raise ValueError("A PSF with identical MD5 to "
                                 " {} cannot be added with stop_if_existing"
                                 "".format(f))
            # Reuse the already-stored node (qb.first() returns a list).
            existing_psf = existing_psf[0]
            pseudo_and_created.append((existing_psf, False))

    # check whether pseudo are unique per element
    elements = [(i[0].element, i[0].md5sum) for i in pseudo_and_created]
    # If group already exists, check also that I am not inserting more than
    # once the same element
    if not group_created:
        for aiida_n in group.nodes:
            # Skip non-pseudos
            if not isinstance(aiida_n, PsfData):
                continue
            elements.append((aiida_n.element, aiida_n.md5sum))

    elements = set(elements)  # Discard elements with the same MD5, that would
    # not be stored twice
    elements_names = [e[0] for e in elements]

    # Two entries with the same element but different MD5 would collide.
    if not len(elements_names) == len(set(elements_names)):
        duplicates = set(
            [x for x in elements_names if elements_names.count(x) > 1])
        duplicates_string = ", ".join(i for i in duplicates)
        raise UniquenessError("More than one PSF found for the elements: " +
                              duplicates_string + ".")

    # At this point, save the group, if still unstored
    if group_created:
        group.store()

    # save the psf in the database, and add them to group
    for pseudo, created in pseudo_and_created:
        if created:
            pseudo.store()

            aiidalogger.debug("New node {} created for file {}".format(
                pseudo.uuid, pseudo.filename))
        else:
            aiidalogger.debug("Reusing node {} for file {}".format(
                pseudo.uuid, pseudo.filename))

    # Add all nodes to the group together
    group.add_nodes(pseudo for pseudo, created in pseudo_and_created)

    nuploaded = len([_ for _, created in pseudo_and_created if created])

    return nfiles, nuploaded
예제 #13
0
    def test_same_computer_different_name_import(self):
        """
        This test checks that if the computer is re-imported with a different
        name to the same database, then the original computer will not be
        renamed. It also checks that the names were correctly imported (without
        any change since there is no computer name collision)

        NOTE(review): relies on Python-2-only constructs (``unicode``) and
        pre-1.0 AiiDA import paths; will not run on Python 3 / modern AiiDA.
        """
        import os
        import shutil
        import tempfile

        from aiida.orm.importexport import export
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computer import Computer
        from aiida.orm.calculation.job import JobCalculation

        # Creating a folder for the import/export files
        # (unpack_tmp_folder is only cleaned up in the finally block;
        # presumably kept for symmetry with sibling tests — TODO confirm)
        export_file_tmp_folder = tempfile.mkdtemp()
        unpack_tmp_folder = tempfile.mkdtemp()

        try:
            # Store a calculation
            calc1_label = "calc1"
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_resources({
                "num_machines": 1,
                "num_mpiprocs_per_machine": 1
            })
            calc1.label = calc1_label
            calc1.store()
            calc1._set_state(u'RETRIEVING')

            # Store locally the computer name
            comp1_name = unicode(self.computer.name)

            # Export the first job calculation
            filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
            export([calc1.dbnode], outfile=filename1, silent=True)

            # Rename the computer
            self.computer.set_name(comp1_name + "_updated")

            # Store a second calculation
            calc2_label = "calc2"
            calc2 = JobCalculation()
            calc2.set_computer(self.computer)
            calc2.set_resources({
                "num_machines": 2,
                "num_mpiprocs_per_machine": 2
            })
            calc2.label = calc2_label
            calc2.store()
            calc2._set_state(u'RETRIEVING')

            # Export the second job calculation
            filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
            export([calc2.dbnode], outfile=filename2, silent=True)

            # Clean the local database
            self.clean_db()

            # Check that there are no computers
            qb = QueryBuilder()
            qb.append(Computer, project=['*'])
            self.assertEqual(
                qb.count(), 0, "There should not be any computers"
                "in the database at this point.")

            # Check that there are no calculations
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['*'])
            self.assertEqual(
                qb.count(), 0, "There should not be any "
                "calculations in the database at "
                "this point.")

            # Import the first calculation
            import_data(filename1, silent=True)

            # Check that the calculation computer is imported correctly.
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['label'])
            self.assertEqual(qb.count(), 1, "Only one calculation should be "
                             "found.")
            self.assertEqual(unicode(qb.first()[0]), calc1_label,
                             "The calculation label is not correct.")

            # Check that the referenced computer is imported correctly.
            qb = QueryBuilder()
            qb.append(Computer, project=['name', 'uuid', 'id'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                             "found.")
            self.assertEqual(unicode(qb.first()[0]), comp1_name,
                             "The computer name is not correct.")

            # Import the second calculation
            import_data(filename2, silent=True)

            # Check that the number of computers remains the same and its data
            # did not change (the original name wins over the renamed export).
            qb = QueryBuilder()
            qb.append(Computer, project=['name'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                             "found.")
            self.assertEqual(unicode(qb.first()[0]), comp1_name,
                             "The computer name is not correct.")

        finally:
            # Deleting the created temporary folders
            shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
            shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
예제 #14
0
파일: node.py 프로젝트: asle85/aiida-core
    def get_io_tree(self, nodeId, maxDepth=None):
        """Build a node/edge graph of the ancestors and descendants of a node.

        :param nodeId: pk of the node at the centre of the tree
        :param maxDepth: maximum edge depth to traverse; when ``None`` the
            REST API default ``MAX_TREE_DEPTH`` is used
        :return: dict ``{"nodes": [...], "edges": [...]}`` in a format
            suitable for a network-visualisation frontend
        """
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.node import Node

        def addNodes(nodeId, maxDepth, nodes, addedNodes, addedEdges,
                     edgeType):
            # Query either the ancestors or the descendants ("desc") of
            # nodeId, limited to maxDepth, and accumulate results into the
            # mutable arguments: ``nodes`` (display dicts), ``addedNodes``
            # (pks already seen; a pk's position doubles as its display id)
            # and ``addedEdges`` (dict mapping from-pk -> list of to-pks).
            qb = QueryBuilder()
            qb.append(Node, tag="main", filters={"id": {"==": nodeId}})
            if edgeType == "ancestors":
                qb.append(Node,
                          tag=edgeType,
                          project=['id', 'type'],
                          edge_project=['path', 'depth'],
                          ancestor_of_beta='main',
                          edge_filters={'depth': {
                              '<=': maxDepth
                          }})
            elif edgeType == "desc":
                qb.append(Node,
                          tag=edgeType,
                          project=['id', 'type'],
                          edge_project=['path', 'depth'],
                          descendant_of_beta='main',
                          edge_filters={'depth': {
                              '<=': maxDepth
                          }})

            if (qb.count() > 0):
                qbResults = qb.get_results_dict()

                for resultDict in qbResults:
                    # Register the node the first time it appears; its
                    # display id is its insertion index in ``addedNodes``.
                    # The group name encodes direction and depth, e.g.
                    # "ancestors-2".
                    if resultDict[edgeType]["id"] not in addedNodes:
                        nodes.append({
                            "id":
                            len(addedNodes),
                            "nodeid":
                            resultDict[edgeType]["id"],
                            "nodetype":
                            resultDict[edgeType]["type"],
                            "group":
                            edgeType + "-" +
                            str(resultDict["main--" + edgeType]["depth"])
                        })
                        addedNodes.append(resultDict[edgeType]["id"])

                    # Record one edge segment per result: the first two
                    # entries of the path for ancestors, the last two for
                    # descendants. Duplicate segments are skipped.
                    path = resultDict["main--" + edgeType]["path"]
                    if edgeType == "ancestors":
                        startEdge = path[0]
                        endEdge = path[1]
                    elif edgeType == "desc":
                        startEdge = path[-2]
                        endEdge = path[-1]
                    if startEdge not in addedEdges.keys():
                        addedEdges[startEdge] = [endEdge]
                    elif endEdge not in addedEdges[startEdge]:
                        addedEdges[startEdge].append(endEdge)

            return nodes, addedNodes, addedEdges

        def addEdges(edges, addedNodes, addedEdges):
            # Translate the pk-based edge map into display-id based edges
            # (a pk's display id is its index inside ``addedNodes``).
            for fromNodeId in addedEdges.keys():
                fromNodeIdIndex = addedNodes.index(fromNodeId)
                for toNodeId in addedEdges[fromNodeId]:
                    toNodeIdIndex = addedNodes.index(toNodeId)
                    edges.append({
                        "from": fromNodeIdIndex,
                        "to": toNodeIdIndex,
                        "arrows": "to",
                        "color": {
                            "inherit": 'from'
                        }
                    })

            return edges

        nodes = []
        edges = []
        addedNodes = []
        addedEdges = {}

        if maxDepth is None:
            from aiida.restapi.common.config import MAX_TREE_DEPTH
            maxDepth = MAX_TREE_DEPTH

        # Seed the graph with the requested node itself (display id 0).
        qb = QueryBuilder()
        qb.append(Node,
                  tag="main",
                  project=["id", "type"],
                  filters={"id": {
                      "==": nodeId
                  }})
        if qb.count() > 0:
            mainNode = qb.first()
            nodes.append({
                "id": 0,
                "nodeid": mainNode[0],
                "nodetype": mainNode[1],
                "group": "mainNode"
            })
            addedNodes.append(mainNode[0])

        # Add all ancestors, then all descendants, of the main node.
        nodes, addedNodes, addedEdges = addNodes(nodeId, maxDepth, nodes,
                                                 addedNodes, addedEdges,
                                                 "ancestors")
        nodes, addedNodes, addedEdges = addNodes(nodeId, maxDepth, nodes,
                                                 addedNodes, addedEdges,
                                                 "desc")

        edges = addEdges(edges, addedNodes, addedEdges)

        return {"nodes": nodes, "edges": edges}
예제 #15
0
    def get_io_tree(self, uuid_pattern, tree_in_limit, tree_out_limit):
        """Return the direct inputs and outputs of one node as a graph.

        :param uuid_pattern: identifier that must resolve to exactly one
            node (validated by ``_check_id_validity``)
        :param tree_in_limit: maximum number of incoming links to return
            (``None`` means no limit)
        :param tree_out_limit: maximum number of outgoing links to return
            (``None`` means no limit)
        :return: dict with ``nodes``/``edges`` display lists plus the total
            and actually-returned counts of incoming and outgoing links
        """
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.node import Node

        def get_node_shape(ntype):
            # Map the top-level part of the node type string to a shape
            # for the graph frontend (calculation/code/everything else).
            # NOTE(review): ``type`` shadows the builtin within this helper.
            type = ntype.split(".")[0]

            # default and data node shape
            shape = "dot"

            if type == "calculation":
                shape = "square"
            elif type == "code":
                shape = "triangle"

            return shape

        # Check whether uuid_pattern identifies a unique node
        self._check_id_validity(uuid_pattern)

        qb = QueryBuilder()
        qb.append(Node, tag="main", project=["*"], filters=self._id_filter)

        nodes = []
        edges = []
        nodeCount = 0

        # Add the central node itself (display id 0).
        if qb.count() > 0:
            mainNode = qb.first()[0]
            pk = mainNode.pk
            uuid = mainNode.uuid
            nodetype = mainNode.type
            nodelabel = mainNode.label
            display_type = nodetype.split('.')[-2]
            description = mainNode.get_desc()
            if description == '':
                description = mainNode.type.split('.')[-2]

            nodes.append({
                "id": nodeCount,
                "nodeid": pk,
                "nodeuuid": uuid,
                "nodetype": nodetype,
                "nodelabel": nodelabel,
                "displaytype": display_type,
                "group": "mainNode",
                "description": description,
                "shape": get_node_shape(nodetype)
            })
        # Display id 0 is reserved for the main node; the counter advances
        # unconditionally.
        nodeCount += 1

        # Collect all direct inputs (one edge per incoming link),
        # optionally limited to ``tree_in_limit`` links.
        qb = QueryBuilder()
        qb.append(Node, tag="main", project=['*'], filters=self._id_filter)
        qb.append(Node,
                  tag="in",
                  project=['*'],
                  edge_project=['label', 'type'],
                  input_of='main')
        if tree_in_limit is not None:
            qb.limit(tree_in_limit)

        # Maps input-node pk -> its display id, to de-duplicate nodes that
        # are linked to the main node more than once.
        input_node_pks = {}
        sent_no_of_incomings = qb.count()

        if sent_no_of_incomings > 0:
            for input in qb.iterdict():
                node = input['in']['*']
                pk = node.pk
                linklabel = input['main--in']['label']
                linktype = input['main--in']['type']

                # add node if it is not present
                if pk not in input_node_pks.keys():
                    input_node_pks[pk] = nodeCount
                    uuid = node.uuid
                    nodetype = node.type
                    nodelabel = node.label
                    display_type = nodetype.split('.')[-2]
                    description = node.get_desc()
                    if description == '':
                        description = node.type.split('.')[-2]

                    nodes.append({
                        "id": nodeCount,
                        "nodeid": pk,
                        "nodeuuid": uuid,
                        "nodetype": nodetype,
                        "nodelabel": nodelabel,
                        "displaytype": display_type,
                        "group": "inputs",
                        "description": description,
                        "linklabel": linklabel,
                        "linktype": linktype,
                        "shape": get_node_shape(nodetype)
                    })
                    nodeCount += 1

                # One edge per link, even when the node was de-duplicated.
                from_edge = input_node_pks[pk]
                edges.append({
                    "from": from_edge,
                    "to": 0,
                    "arrows": "to",
                    "color": {
                        "inherit": 'from'
                    },
                    "label": linktype,
                })

        # Collect all direct outputs (one edge per outgoing link),
        # optionally limited to ``tree_out_limit`` links.
        qb = QueryBuilder()
        qb.append(Node, tag="main", project=['*'], filters=self._id_filter)
        qb.append(Node,
                  tag="out",
                  project=['*'],
                  edge_project=['label', 'type'],
                  output_of='main')
        if tree_out_limit is not None:
            qb.limit(tree_out_limit)

        # Maps output-node pk -> its display id (same de-duplication scheme
        # as for the inputs above).
        output_node_pks = {}
        sent_no_of_outgoings = qb.count()

        if sent_no_of_outgoings > 0:
            for output in qb.iterdict():
                node = output['out']['*']
                pk = node.pk
                linklabel = output['main--out']['label']
                linktype = output['main--out']['type']

                # add node if it is not present
                if pk not in output_node_pks.keys():
                    output_node_pks[pk] = nodeCount
                    uuid = node.uuid
                    nodetype = node.type
                    nodelabel = node.label
                    display_type = nodetype.split('.')[-2]
                    description = node.get_desc()
                    if description == '':
                        description = node.type.split('.')[-2]

                    nodes.append({
                        "id": nodeCount,
                        "nodeid": pk,
                        "nodeuuid": uuid,
                        "nodetype": nodetype,
                        "nodelabel": nodelabel,
                        "displaytype": display_type,
                        "group": "outputs",
                        "description": description,
                        "linklabel": linklabel,
                        "linktype": linktype,
                        "shape": get_node_shape(nodetype)
                    })
                    nodeCount += 1

                to_edge = output_node_pks[pk]
                edges.append({
                    "from": 0,
                    "to": to_edge,
                    "arrows": "to",
                    "color": {
                        "inherit": 'to'
                    },
                    "label": linktype
                })

        # Count the total numbers of incoming and outgoing links,
        # independently of the limits applied above.
        qb = QueryBuilder()
        qb.append(Node, tag="main", project=['id'], filters=self._id_filter)
        qb.append(Node, tag="in", project=['id'], input_of='main')
        total_no_of_incomings = qb.count()

        qb = QueryBuilder()
        qb.append(Node, tag="main", project=['id'], filters=self._id_filter)
        qb.append(Node, tag="out", project=['id'], output_of='main')
        total_no_of_outgoings = qb.count()

        return {
            "nodes": nodes,
            "edges": edges,
            "total_no_of_incomings": total_no_of_incomings,
            "total_no_of_outgoings": total_no_of_outgoings,
            "sent_no_of_incomings": sent_no_of_incomings,
            "sent_no_of_outgoings": sent_no_of_outgoings
        }
예제 #16
0
    def get_io_tree(self, uuid_pattern):
        """Return the direct inputs and outputs of one node as a graph.

        :param uuid_pattern: identifier that must resolve to exactly one
            node (validated by ``_check_id_validity``)
        :return: dict with ``nodes`` and ``edges`` display lists for a
            network-visualisation frontend
        """
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.node import Node

        def get_node_shape(ntype):
            # Map the top-level part of the node type string to a shape
            # for the graph frontend (calculation/code/everything else).
            # NOTE(review): ``type`` shadows the builtin within this helper.
            type = ntype.split(".")[0]

            # default and data node shape
            shape = "dot"

            if type == "calculation":
                shape = "square"
            elif type == "code":
                shape = "triangle"

            return shape

        # Check whether uuid_pattern identifies a unique node
        self._check_id_validity(uuid_pattern)

        qb = QueryBuilder()
        qb.append(Node, tag="main", project=["*"],
                  filters=self._id_filter)

        nodes = []
        edges = []
        nodeCount = 0

        # Add the central node itself (display id 0).
        if qb.count() > 0:
            mainNode = qb.first()[0]
            pk = mainNode.pk
            uuid = mainNode.uuid
            nodetype = mainNode.type
            display_type = nodetype.split('.')[-2]
            description = mainNode.get_desc()
            if description == '':
                description = mainNode.type.split('.')[-2]

            nodes.append({
                "id": nodeCount,
                "nodeid": pk,
                "nodeuuid": uuid,
                "nodetype": nodetype,
                "displaytype": display_type,
                "group": "mainNode",
                "description": description,
                "shape": get_node_shape(nodetype)
            })
        # Display id 0 is reserved for the main node; the counter advances
        # unconditionally.
        nodeCount += 1

        # Collect all direct inputs, one node and one edge per link
        # (unlike the limited variant, this one does not de-duplicate).
        qb = QueryBuilder()
        qb.append(Node, tag="main", project=['*'],
                  filters=self._id_filter)
        qb.append(Node, tag="in", project=['*'], edge_project=['label'],
                  input_of='main')

        if qb.count() > 0:
            for input in qb.iterdict():
                node = input['in']['*']
                linktype = input['main--in']['label']
                pk = node.pk
                uuid = node.uuid
                nodetype = node.type
                display_type = nodetype.split('.')[-2]
                description = node.get_desc()
                if description == '':
                    description = node.type.split('.')[-2]

                nodes.append({
                    "id": nodeCount,
                    "nodeid": pk,
                    "nodeuuid": uuid,
                    "nodetype": nodetype,
                    "displaytype": display_type,
                    "group": "inputs",
                    "description": description,
                    "linktype": linktype,
                    "shape": get_node_shape(nodetype)
                })
                edges.append({
                    "from": nodeCount,
                    "to": 0,
                    "arrows": "to",
                    "color": {"inherit": 'from'},
                    "linktype": linktype,
                })
                nodeCount += 1

        # Collect all direct outputs, one node and one edge per link.
        qb = QueryBuilder()
        qb.append(Node, tag="main", project=['*'],
                  filters=self._id_filter)
        qb.append(Node, tag="out", project=['*'], edge_project=['label'],
                  output_of='main')
        if qb.count() > 0:
            for output in qb.iterdict():
                node = output['out']['*']
                linktype = output['main--out']['label']
                pk = node.pk
                uuid = node.uuid
                nodetype = node.type
                display_type = nodetype.split('.')[-2]
                description = node.get_desc()
                if description == '':
                    description = node.type.split('.')[-2]

                nodes.append({
                    "id": nodeCount,
                    "nodeid": pk,
                    "nodeuuid": uuid,
                    "nodetype": nodetype,
                    "displaytype": display_type,
                    "group": "outputs",
                    "description": description,
                    "linktype": linktype,
                    "shape": get_node_shape(nodetype)
                })
                edges.append({
                    "from": 0,
                    "to": nodeCount,
                    "arrows": "to",
                    "color": {"inherit": 'to'},
                    "linktype": linktype
                })
                nodeCount += 1

        return {"nodes": nodes, "edges": edges}
예제 #17
0
def configure_computer_v012(computer, user_email=None, authparams=None):
    """Configure the authentication information for a given computer

    adapted from aiida-core v0.12.2:
    aiida_core.aiida.cmdline.commands.computer.Computer.computer_configure

    :param computer: the computer to authenticate against
    :param user_email: the user email (otherwise use default)
    :param authparams: a dictionary of additional authorisation parameters
        to use (values given in string format)
    :raises ValueError: if the user email is unknown, if ``authparams``
        does not supply exactly the transport's valid keys, or if a value
        cannot be converted from its string form
    """
    import inspect

    from aiida.common.exceptions import ValidationError
    from aiida.backends.utils import get_automatic_user
    # aiida-core v1
    # from aiida.orm.backend import construct_backend
    # backend = construct_backend()
    # get_automatic_user = backend.users.get_automatic_user

    authparams = {} if authparams is None else authparams
    transport = computer.get_transport_class()
    valid_keys = transport.get_valid_auth_params()

    # Resolve the AiiDA user: the default one, or look it up by email.
    if user_email is None:
        user = get_automatic_user()
    else:
        from aiida.orm.querybuilder import QueryBuilder
        qb = QueryBuilder()
        qb.append(type="user", filters={'email': user_email})
        user = qb.first()
        if not user:
            raise ValueError("user email not found: {}".format(user_email))
        user = user[0]._dbuser  # for Django, the wrong user class is returned

    authinfo, old_authparams = _get_auth_info(computer, user)

    # Seed missing entries of ``authparams`` with the previously stored
    # values, but only for keys the transport still recognises.
    default_authparams = {}
    for k in valid_keys:
        if k in old_authparams:
            default_authparams[k] = old_authparams.pop(k)
            if k not in authparams:
                authparams[k] = default_authparams[k]

    # Anything left in old_authparams is no longer a valid key for this
    # transport; warn that it is being dropped.
    if old_authparams:
        print("WARNING: the following keys were previously in the "
              "authorization parameters, but have not been recognized "
              "and have been deleted: {}".format(", ".join(
                  old_authparams.keys())))

    if set(authparams.keys()) != set(valid_keys):
        raise ValueError(
            "new_authparams should contain only the keys: {}".format(
                valid_keys))

    # Convert each value from its string form using the transport's
    # per-key converter (e.g. ``_convert_<key>_fromstring``).
    transport_members = dict(inspect.getmembers(transport))
    for k, txtval in authparams.items():

        converter_name = '_convert_{}_fromstring'.format(k)
        suggester_name = '_get_{}_suggestion_string'.format(k)
        if converter_name not in transport_members:
            raise ValueError("No {} defined in Transport {}".format(
                converter_name, computer.get_transport_type()))
        converter = transport_members[converter_name]

        # Work out a suggested value to include in the error message in
        # case the conversion fails.
        suggestion = ""
        if k in default_authparams:
            suggestion = default_authparams[k]
        elif suggester_name in transport_members:
            suggestion = transport_members[suggester_name](computer)

        try:
            authparams[k] = converter(txtval)
        # 'except X as e' replaces the py2-only 'except X, e' syntax and is
        # valid on both Python 2.6+ and Python 3 (PEP 3110).
        except ValidationError as err:
            raise ValueError("error in the authparam "
                             "{0}: {1}, suggested value: {2}".format(
                                 k, err, suggestion))
예제 #18
0
    def get_from_string(cls, code_string):
        """
        Load a Code from an identifier string.

        The string may be a numeric ID (pk), a unique label, or a pair in
        the form ``label@machinename``.

        .. note:: An integer-looking string is first tried as a pk; only
          when no code with that pk exists is it re-interpreted as a label
          (so a code whose label equals another code's pk cannot be loaded
          by label). The string is split at the leftmost '@', hence code
          labels must not contain '@' (computer names may).

        :param code_string: the code string identifying the code to load

        :raise NotExistent: if no code identified by the given string is found
        :raise MultipleObjectsError: if the string cannot identify uniquely
            a code
        """
        from aiida.common.exceptions import NotExistent, MultipleObjectsError
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computer import Computer
        from aiida.orm.code import Code

        # First interpretation: the string is a numeric pk.
        try:
            code_pk = int(code_string)
        except ValueError:
            pass
        else:
            try:
                return cls.get_subclass_from_pk(code_pk)
            except NotExistent:
                # No code with that pk: fall through to the label lookup.
                pass
            except MultipleObjectsError:
                raise MultipleObjectsError("More than one code in the DB "
                                           "with pk='{}'!".format(code_string))

        # Second interpretation: a (unique) label, optionally followed by
        # '@computername'. Split at the leftmost '@' only.
        codename, separator, computername = code_string.partition('@')
        query = QueryBuilder()
        query.append(cls,
                     filters={'label': {
                         '==': codename
                     }},
                     project=['*'],
                     tag='code')
        if separator:
            query.append(Computer,
                         filters={'name': {
                             '==': computername
                         }},
                         computer_of='code')

        num_found = query.count()
        if num_found == 0:
            raise NotExistent("'{}' is not a valid code "
                              "ID or label.".format(code_string))
        if num_found > 1:
            matching = [entry for [entry] in query.all()]
            message = ("There are multiple codes with label '{}', "
                       "having IDs: ".format(code_string))
            message += ", ".join(sorted([str(c.pk) for c in matching])) + ".\n"
            message += ("Relabel them (using their ID), or refer to them "
                        "with their ID.")
            raise MultipleObjectsError(message)
        return query.first()[0]
예제 #19
0
    def computer_configure(self, *args):
        """
        Configure the authentication information for a given computer

        Interactively prompts (via ``readline``/``raw_input``) for each
        transport authentication parameter, pre-filling stored values or
        transport-provided suggestions, and stores the result in the
        ``DbAuthInfo`` entry for the selected user. Accepts command-line
        style arguments: ``[-u/--user EMAIL] computer``.
        """
        if not is_dbenv_loaded():
            load_dbenv()

        import readline
        import inspect

        from django.core.exceptions import ObjectDoesNotExist

        from aiida.common.exceptions import (NotExistent, ValidationError)
        from aiida.backends.utils import get_automatic_user
        from aiida.common.utils import get_configured_user_email
        from aiida.backends.settings import BACKEND
        from aiida.backends.profile import BACKEND_SQLA, BACKEND_DJANGO

        import argparse

        # Parse the command-line style arguments passed to this command.
        parser = argparse.ArgumentParser(
            prog=self.get_full_command_name(),
            description='Configure a computer for a given AiiDA user.')
        # The default states are those that are shown if no option is given
        parser.add_argument(
            '-u',
            '--user',
            type=str,
            metavar='EMAIL',
            help=
            "Configure the computer for the given AiiDA user (otherwise, configure the current default user)",
        )
        parser.add_argument(
            'computer',
            type=str,
            help="The name of the computer that you want to configure")

        parsed_args = parser.parse_args(args)

        user_email = parsed_args.user
        computername = parsed_args.computer

        try:
            computer = self.get_computer(name=computername)
        except NotExistent:
            print >> sys.stderr, "No computer exists with name '{}'".format(
                computername)
            sys.exit(1)
        # Resolve the AiiDA user: the default one, or look it up by email.
        if user_email is None:
            user = get_automatic_user()
        else:
            from aiida.orm.querybuilder import QueryBuilder
            qb = QueryBuilder()
            qb.append(type="user", filters={'email': user_email})
            user = qb.first()
            if user is None:
                print >> sys.stderr, ("No user with email '{}' in the "
                                      "database.".format(user_email))
                sys.exit(1)

        # Fetch the existing DbAuthInfo entry for this computer/user pair
        # (or create a fresh one), in a backend-specific way.
        if BACKEND == BACKEND_DJANGO:
            from aiida.backends.djsite.db.models import DbAuthInfo

            try:
                authinfo = DbAuthInfo.objects.get(
                    dbcomputer=computer.dbcomputer, aiidauser=user)

                old_authparams = authinfo.get_auth_params()
            except ObjectDoesNotExist:
                authinfo = DbAuthInfo(dbcomputer=computer.dbcomputer,
                                      aiidauser=user)
                old_authparams = {}

        elif BACKEND == BACKEND_SQLA:
            from aiida.backends.sqlalchemy.models.authinfo import DbAuthInfo
            from aiida.backends.sqlalchemy import session

            authinfo = session.query(DbAuthInfo).filter(
                DbAuthInfo.dbcomputer == computer.dbcomputer).filter(
                    DbAuthInfo.aiidauser == user).first()
            if authinfo is None:
                authinfo = DbAuthInfo(dbcomputer=computer.dbcomputer,
                                      aiidauser=user)
                old_authparams = {}
            else:
                old_authparams = authinfo.get_auth_params()
        else:
            raise Exception("Unknown backend {}".format(BACKEND))
        Transport = computer.get_transport_class()

        print("Configuring computer '{}' for the AiiDA user '{}'".format(
            computername, user.email))

        print "Computer {} has transport of type {}".format(
            computername, computer.get_transport_type())

        if user.email != get_configured_user_email():
            print "*" * 72
            print "** {:66s} **".format("WARNING!")
            print "** {:66s} **".format(
                "  You are configuring a different user.")
            print "** {:66s} **".format(
                "  Note that the default suggestions are taken from your")
            print "** {:66s} **".format(
                "  local configuration files, so they may be incorrect.")
            print "*" * 72

        valid_keys = Transport.get_valid_auth_params()

        # Keep the stored parameters the transport still accepts; anything
        # left over is reported and dropped.
        default_authparams = {}
        for k in valid_keys:
            if k in old_authparams:
                default_authparams[k] = old_authparams.pop(k)
        if old_authparams:
            print(
                "WARNING: the following keys were previously in the "
                "authorization parameters,")
            print "but have not been recognized and have been deleted:"
            print ", ".join(old_authparams.keys())

        if not valid_keys:
            print "There are no special keys to be configured. Configuration completed."
            authinfo.set_auth_params({})
            authinfo.save()
            return

        print ""
        print "Note: to leave a field unconfigured, leave it empty and press [Enter]"

        # I strip out the old auth_params that are not among the valid keys

        new_authparams = {}

        # Prompt for every valid key; pre-fill the readline buffer with the
        # stored value or, failing that, the transport's suggestion. Loop
        # until the entered value converts without a ValidationError.
        for k in valid_keys:
            key_set = False
            while not key_set:
                try:
                    converter_name = '_convert_{}_fromstring'.format(k)
                    try:
                        converter = dict(
                            inspect.getmembers(Transport))[converter_name]
                    except KeyError:
                        print >> sys.stderr, (
                            "Internal error! "
                            "No {} defined in Transport {}".format(
                                converter_name, computer.get_transport_type()))
                        sys.exit(1)

                    if k in default_authparams:
                        readline.set_startup_hook(lambda: readline.insert_text(
                            str(default_authparams[k])))
                    else:
                        # Use suggestion only if parameters were not already set
                        suggester_name = '_get_{}_suggestion_string'.format(k)
                        try:
                            suggester = dict(
                                inspect.getmembers(Transport))[suggester_name]
                            suggestion = suggester(computer)
                            readline.set_startup_hook(
                                lambda: readline.insert_text(suggestion))
                        except KeyError:
                            readline.set_startup_hook()

                    txtval = raw_input("=> {} = ".format(k))
                    if txtval:
                        new_authparams[k] = converter(txtval)
                    key_set = True
                except ValidationError as e:
                    print "Error in the inserted value: {}".format(e.message)

        authinfo.set_auth_params(new_authparams)
        authinfo.save()
        print "Configuration stored for your user on computer '{}'.".format(
            computername)
예제 #20
0
    def test_same_computer_import(self):
        """
        Test that you can import nodes in steps without any problems. In this
        test we will import a first calculation and then a second one. The
        import should work as expected and have in the end two job
        calculations.

        Each calculation is related to the same computer. In the end we should
        have only one computer
        """
        import os
        import shutil
        import tempfile

        from aiida.orm.importexport import export
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computer import Computer
        # NOTE(review): this span is the body of a test method whose `def` line is
        # above this chunk — presumably it verifies that two separate export
        # archives referencing the same computer re-import onto a single,
        # deduplicated Computer record (matched by UUID, not by pk). Confirm
        # against the method name/docstring.
        from aiida.orm.calculation.job import JobCalculation

        # Creating a folder for the import/export files
        export_file_tmp_folder = tempfile.mkdtemp()
        unpack_tmp_folder = tempfile.mkdtemp()

        try:
            # Store two job calculation related to the same computer
            calc1_label = "calc1"
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_resources({
                "num_machines": 1,
                "num_mpiprocs_per_machine": 1
            })
            calc1.label = calc1_label
            calc1.store()
            # Internal API: forces the calculation into a stored state —
            # presumably required so the exporter accepts it; verify.
            calc1._set_state(u'RETRIEVING')

            calc2_label = "calc2"
            calc2 = JobCalculation()
            calc2.set_computer(self.computer)
            calc2.set_resources({
                "num_machines": 2,
                "num_mpiprocs_per_machine": 2
            })
            calc2.label = calc2_label
            calc2.store()
            calc2._set_state(u'RETRIEVING')

            # Store locally the computer name
            # (captured before clean_db() wipes the database below;
            # `unicode` builtin => this file targets Python 2)
            comp_name = unicode(self.computer.name)
            comp_uuid = unicode(self.computer.uuid)

            # Export the first job calculation
            filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
            export([calc1.dbnode], outfile=filename1, silent=True)

            # Export the second job calculation
            # (each archive independently embeds the shared computer)
            filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
            export([calc2.dbnode], outfile=filename2, silent=True)

            # Clean the local database
            self.clean_db()

            # Check that there are no computers
            qb = QueryBuilder()
            qb.append(Computer, project=['*'])
            self.assertEqual(
                qb.count(), 0, "There should not be any computers"
                "in the database at this point.")

            # Check that there are no calculations
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['*'])
            self.assertEqual(
                qb.count(), 0, "There should not be any "
                "calculations in the database at "
                "this point.")

            # Import the first calculation
            import_data(filename1, silent=True)

            # Check that the calculation computer is imported correctly.
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['label'])
            self.assertEqual(qb.count(), 1, "Only one calculation should be "
                             "found.")
            self.assertEqual(unicode(qb.first()[0]), calc1_label,
                             "The calculation label is not correct.")

            # Check that the referenced computer is imported correctly.
            qb = QueryBuilder()
            qb.append(Computer, project=['name', 'uuid', 'id'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                             "found.")
            self.assertEqual(unicode(qb.first()[0]), comp_name,
                             "The computer name is not correct.")
            self.assertEqual(unicode(qb.first()[1]), comp_uuid,
                             "The computer uuid is not correct.")

            # Store the id of the computer
            # (pk may differ from the pre-clean_db one; compared again after
            # the second import to prove no duplicate computer was created)
            comp_id = qb.first()[2]

            # Import the second calculation
            import_data(filename2, silent=True)

            # Check that the number of computers remains the same and its data
            # did not change.
            qb = QueryBuilder()
            qb.append(Computer, project=['name', 'uuid', 'id'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                             "found.")
            self.assertEqual(unicode(qb.first()[0]), comp_name,
                             "The computer name is not correct.")
            self.assertEqual(unicode(qb.first()[1]), comp_uuid,
                             "The computer uuid is not correct.")
            self.assertEqual(qb.first()[2], comp_id,
                             "The computer id is not correct.")

            # Check that now you have two calculations attached to the same
            # computer.
            qb = QueryBuilder()
            qb.append(Computer, tag='comp')
            qb.append(JobCalculation, has_computer='comp', project=['label'])
            self.assertEqual(qb.count(), 2, "Two calculations should be "
                             "found.")
            # qb.all() yields one-element rows ([label]); unpack into a set so
            # the comparison is order-independent.
            ret_labels = set(_ for [_] in qb.all())
            self.assertEqual(
                ret_labels, set([calc1_label, calc2_label]),
                "The labels of the calculations are not correct.")

        finally:
            # Deleting the created temporary folders
            # (ignore_errors so cleanup never masks a test failure raised above)
            shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
            shutil.rmtree(unpack_tmp_folder, ignore_errors=True)