Example #1
    def upload_potcar_family(cls,
                             source,
                             group_name,
                             group_description=None,
                             stop_if_existing=True,
                             dry_run=False):
        """
        Upload a set of POTCAR potentials as a family.

        :param source: a path containing all POTCAR files to be added.
        :param group_name: the name of the group to create. If it exists and is
            non-empty, a UniquenessError is raised.
        :param group_description: a string to be set as the group description.
            Overwrites the previous description if the group already existed.
        :param stop_if_existing: if True, check the sha512 hash of each file
            and raise a MultipleObjectsError if the file already exists in the
            DB. If False, simply add the existing PotcarData node to the group.
        :param dry_run: if True, do not change the database.
        """
        group = cls._prepare_group_for_upload(group_name,
                                              group_description,
                                              dry_run=dry_run)

        potcar_finder = PotcarWalker(source)
        potcar_finder.walk()
        num_files = len(potcar_finder.potcars)
        family_nodes_uuid = [node.uuid
                             for node in group.nodes] if not dry_run else []
        potcars_tried_upload = cls._try_upload_potcars(
            potcar_finder.potcars,
            stop_if_existing=stop_if_existing,
            dry_run=dry_run)
        new_potcars_added = [
            (potcar, created, file_path)
            for potcar, created, file_path in potcars_tried_upload
            if potcar.uuid not in family_nodes_uuid
        ]

        for potcar, created, file_path in new_potcars_added:
            if created:
                aiidalogger.debug(
                    'New PotcarData node %s created while uploading file %s for family %s',
                    potcar.uuid, file_path, group_name)
            else:
                aiidalogger.debug(
                    'PotcarData node %s used instead of uploading file %s to family %s',
                    potcar.uuid, file_path, group_name)

        if not dry_run:
            group.add_nodes(
                [potcar for potcar, created, file_path in new_potcars_added])

        num_added = len(new_potcars_added)
        num_uploaded = len([item for item in new_potcars_added
                            if item[1]])  # item[1] refers to 'created'

        return num_files, num_added, num_uploaded
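A minimal usage sketch for the classmethod above; the import path, file paths
and labels are illustrative assumptions, with `PotcarData` taken to be the
aiida-vasp class on which `upload_potcar_family` is defined:

# Hypothetical usage sketch, not part of the original snippet.
from aiida_vasp.data.potcar import PotcarData

num_files, num_added, num_uploaded = PotcarData.upload_potcar_family(
    source='/path/to/potpaw_PBE',
    group_name='PBE.54',
    group_description='PBE PAW potentials, version 54',
    stop_if_existing=False,  # reuse nodes already in the DB instead of raising
    dry_run=True)            # report what would happen without touching the DB
print(num_files, num_added, num_uploaded)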
Example #2
def deserialize_dict(mainitem, subitems, sep, original_class, original_pk,
                     lesserrors):
    """Deserialize a Python dictionary."""
    # pylint: disable=protected-access
    # subitems contains all subitems; here I keep only those at
    # depth 1, i.e. for subitems '0', '1' and '1.c' I keep
    # only '0' and '1'
    from aiida.common import AIIDA_LOGGER

    firstlevelsubdict = {k: v for k, v in subitems.items() if sep not in k}

    if len(firstlevelsubdict) != mainitem['ival']:
        if (original_class is not None
                and original_class._subspecifier_field_name is not None):
            subspecifier_string = '{}={} and '.format(
                original_class._subspecifier_field_name, original_pk)
        else:
            subspecifier_string = ''
        if original_class is None:
            sourcestr = 'the data passed'
        else:
            sourcestr = original_class.__name__

        msg = ('Wrong dict length stored in {} for '
               "{}key='{}' ({} vs {})".format(sourcestr, subspecifier_string,
                                              mainitem['key'],
                                              len(firstlevelsubdict),
                                              mainitem['ival']))
        if lesserrors:
            AIIDA_LOGGER.error(msg)
        else:
            raise DeserializationException(msg)

    # I get the values in memory as a dictionary
    tempdict = {}
    for firstsubk, firstsubv in firstlevelsubdict.items():
        # I call recursively the same function to get subitems
        newsubitems = {
            k[len(firstsubk) + len(sep):]: v
            for k, v in subitems.items() if k.startswith(firstsubk + sep)
        }
        tempdict[firstsubk] = _deserialize_attribute(
            mainitem=firstsubv,
            subitems=newsubitems,
            sep=sep,
            original_class=original_class,
            original_pk=original_pk)

    return tempdict
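To make the flattened layout concrete, here is an illustrative sketch of the
`mainitem`/`subitems` pair this function would receive for the attribute
{'a': 1, 'b': {'c': 2}} stored under key 'mydict' with sep='.' (value fields
other than the ones shown are omitted; this is not the exact DB row format):

# Illustrative input shapes only.
mainitem = {'key': 'mydict', 'datatype': 'dict', 'ival': 2}  # ival = dict length
subitems = {
    'a':   {'key': 'mydict.a',   'datatype': 'int',  'ival': 1},
    'b':   {'key': 'mydict.b',   'datatype': 'dict', 'ival': 1},
    'b.c': {'key': 'mydict.b.c', 'datatype': 'int',  'ival': 2},
}
# The first-level filter keeps 'a' and 'b'; 'b.c' is handed to the recursive
# call for 'b' with its prefix stripped, i.e. as subitem 'c'.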
Example #3
    def __init__(self, calculation):
        """Initialize the instance of YamboParser"""
        from aiida.common import AIIDA_LOGGER
        self._logger = AIIDA_LOGGER.getChild('parser').getChild(
            self.__class__.__name__)
        # check for valid input
        if calculation.process_type == 'aiida.calculations:yambo.yambo':
            yambo_parent = True
        else:
            raise OutputParsingError(
                "Input calculation must be a YamboCalculation, not {}".format(
                    calculation.process_type))

        self._calc = calculation
        self.last_job_info = self._calc.get_last_job_info()
        self._eels_array_linkname = 'array_eels'
        self._eps_array_linkname = 'array_eps'
        self._alpha_array_linkname = 'array_alpha'
        self._qp_array_linkname = 'array_qp'
        self._ndb_linkname = 'array_ndb'
        self._ndb_QP_linkname = 'array_ndb_QP'
        self._ndb_HF_linkname = 'array_ndb_HFlocXC'
        self._lifetime_bands_linkname = 'bands_lifetime'
        self._quasiparticle_bands_linkname = 'bands_quasiparticle'
        self._parameter_linkname = 'output_parameters'
        self._system_info_linkname = 'system_info'
        super(YamboParser, self).__init__(calculation)
Example #4
    def __init__(self, **kwargs):  # pylint: disable=unused-argument
        super(BaseFileParser, self).__init__()
        self._logger = aiidalogger.getChild(self.__class__.__name__)
        self._exit_code = None
        self._parsable_items = self.PARSABLE_ITEMS
        self._parsed_data = {}
        if 'file_path' in kwargs:
            self._data_obj = SingleFile(path=kwargs['file_path'])
        elif 'data' in kwargs:
            self._data_obj = SingleFile(data=kwargs['data'])
        else:
            self._data_obj = None
Example #5
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        """
        __init__ method of the Transport base class.
        """
        from aiida.common import AIIDA_LOGGER
        self._safe_open_interval = kwargs.pop('safe_interval',
                                              self._DEFAULT_SAFE_OPEN_INTERVAL)
        self._logger = AIIDA_LOGGER.getChild('transport').getChild(
            self.__class__.__name__)
        self._logger_extra = None
        self._is_open = False
        self._enters = 0
Example #6
    def __init__(self, calc_parser_cls=None, **kwargs):  # pylint: disable=unused-argument
        super(BaseFileParser, self).__init__()
        self._logger = aiidalogger.getChild(self.__class__.__name__)
        self._vasp_parser = calc_parser_cls
        self.settings = None

        if calc_parser_cls is not None:
            calc_parser_cls.get_quantity.append(self.get_quantity)
            self.settings = calc_parser_cls.settings

        self.parsable_items = {}
        self._parsed_data = {}
        self._data_obj = None
Example #7
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        """
        __init__ method of the Transport base class.

        :param safe_interval: (optional, default self._DEFAULT_SAFE_OPEN_INTERVAL)
           Minimum time interval in seconds between opening new connections.
        :param use_login_shell: (optional, default True)
           if False, do not use a login shell when executing command
        """
        from aiida.common import AIIDA_LOGGER
        self._safe_open_interval = kwargs.pop('safe_interval',
                                              self._DEFAULT_SAFE_OPEN_INTERVAL)
        self._use_login_shell = kwargs.pop('use_login_shell', True)
        if self._use_login_shell:
            self._bash_command_str = 'bash -l '
        else:
            self._bash_command_str = 'bash '

        self._logger = AIIDA_LOGGER.getChild('transport').getChild(
            self.__class__.__name__)
        self._logger_extra = None
        self._is_open = False
        self._enters = 0
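A short sketch of how these keyword arguments are consumed by a concrete
transport; `SshTransport` stands in for any real transport plugin and the host
name is hypothetical:

# Illustrative only: open new connections at most every 5 seconds and run
# remote commands through 'bash ' instead of a login shell ('bash -l ').
from aiida.transports.plugins.ssh import SshTransport

transport = SshTransport(machine='remote.host.example',
                         safe_interval=5.0,
                         use_login_shell=False)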
Example #8
def upload_upf_family(folder,
                      group_label,
                      group_description,
                      stop_if_existing=True):
    """Upload a set of UPF files in a given group.

    :param folder: a path containing all UPF files to be added.
        Only files ending in .UPF (case-insensitive) are considered.
    :param group_label: the name of the group to create. If it exists and is non-empty, a UniquenessError is raised.
    :param group_description: string to be set as the group description. Overwrites previous descriptions.
    :param stop_if_existing: if True, check for the md5 of the files and, if the file already exists in the DB, raises a
        MultipleObjectsError. If False, simply adds the existing UPFData node to the group.
    """
    # pylint: disable=too-many-locals,too-many-branches
    import os

    from aiida import orm
    from aiida.common import AIIDA_LOGGER
    from aiida.common.exceptions import UniquenessError
    from aiida.common.files import md5_file

    if not os.path.isdir(folder):
        raise ValueError('folder must be a directory')

    # only files, and only those ending with .upf or .UPF;
    # go to the real file if it is a symlink
    filenames = [
        os.path.realpath(os.path.join(folder, i)) for i in os.listdir(folder)
        if os.path.isfile(os.path.join(folder, i))
        and i.lower().endswith('.upf')
    ]

    nfiles = len(filenames)

    automatic_user = orm.User.objects.get_default()
    group, group_created = orm.Group.objects.get_or_create(
        label=group_label, type_string=UPFGROUP_TYPE, user=automatic_user)

    if group.user.email != automatic_user.email:
        raise UniquenessError(
            'There is already a UpfFamily group with label {}'
            ', but it belongs to user {}, therefore you '
            'cannot modify it'.format(group_label, group.user.email))

    # Always update description, even if the group already existed
    group.description = group_description

    # NOTE: the group is only saved after the uniqueness checks

    pseudo_and_created = []

    for filename in filenames:
        md5sum = md5_file(filename)
        builder = orm.QueryBuilder()
        builder.append(UpfData, filters={'attributes.md5': {'==': md5sum}})
        existing_upf = builder.first()

        if existing_upf is None:
            # return the upfdata instances, not stored
            pseudo, created = UpfData.get_or_create(filename,
                                                    use_first=True,
                                                    store_upf=False)
            # to check whether only one upf per element exists
            # NOTE: actually, created has the meaning of "to_be_created"
            pseudo_and_created.append((pseudo, created))
        else:
            if stop_if_existing:
                raise ValueError('A UPF with the same MD5 as {} already exists '
                                 'in the DB; it cannot be added while '
                                 'stop_if_existing is True'.format(filename))
            existing_upf = existing_upf[0]
            pseudo_and_created.append((existing_upf, False))

    # check that pseudos are unique per element
    elements = [(i[0].element, i[0].md5sum) for i in pseudo_and_created]
    # If group already exists, check also that I am not inserting more than
    # once the same element
    if not group_created:
        for aiida_n in group.nodes:
            # Skip non-pseudos
            if not isinstance(aiida_n, UpfData):
                continue
            elements.append((aiida_n.element, aiida_n.md5sum))

    # Discard duplicate (element, md5) pairs; identical files are not stored twice
    elements = set(elements)
    elements_names = [e[0] for e in elements]

    if not len(elements_names) == len(set(elements_names)):
        duplicates = {x for x in elements_names if elements_names.count(x) > 1}
        duplicates_string = ', '.join(i for i in duplicates)
        raise UniquenessError('More than one UPF found for the elements: ' +
                              duplicates_string + '.')

    # At this point, save the group, if still unstored
    if group_created:
        group.store()

    # save the upf in the database, and add them to group
    for pseudo, created in pseudo_and_created:
        if created:
            pseudo.store()

            AIIDA_LOGGER.debug('New node {} created for file {}'.format(
                pseudo.uuid, pseudo.filename))
        else:
            AIIDA_LOGGER.debug('Reusing node {} for file {}'.format(
                pseudo.uuid, pseudo.filename))

    # Add all the nodes to the group together
    group.add_nodes([pseudo for pseudo, created in pseudo_and_created])

    nuploaded = len([_ for _, created in pseudo_and_created if created])

    return nfiles, nuploaded
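A hedged usage sketch for the function above; the folder and labels are
hypothetical:

# Register a directory of UPF files as a new pseudopotential family.
nfiles, nuploaded = upload_upf_family(
    folder='/path/to/pseudos',
    group_label='my_upf_family',
    group_description='Test UPF family',
    stop_if_existing=False)  # reuse existing UpfData nodes instead of raising
print('{} files found, {} new nodes uploaded'.format(nfiles, nuploaded))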
Example #9
def parse_upf(fname, check_filename=True):
    """
    Try to get relevant information from the UPF. For the moment, only the
    element name. Note that even UPF v.2 cannot be parsed with the XML minidom!
    (e.g. due to the & characters in the human-readable section).

    If check_filename is True, raise a ParsingError exception if the filename
    does not start with the element name.
    """
    import io
    import os

    from aiida.common.exceptions import ParsingError
    from aiida.common import AIIDA_LOGGER
    from aiida.orm.nodes.data.structure import _valid_symbols

    parsed_data = {}

    try:
        upf_contents = fname.read()
        fname = fname.name
    except AttributeError:
        with io.open(fname, encoding='utf8') as handle:
            upf_contents = handle.read()

    match = REGEX_UPF_VERSION.search(upf_contents)
    if match:
        version = match.group('version')
        AIIDA_LOGGER.debug('Version found: {} for file {}'.format(
            version, fname))
    else:
        AIIDA_LOGGER.debug('Assuming version 1 for file {}'.format(fname))
        version = '1'

    parsed_data['version'] = version
    try:
        version_major = int(version.partition('.')[0])
    except ValueError:
        # If the part before the first dot is not a valid integer,
        # fall back to version 1
        AIIDA_LOGGER.debug('Falling back to version 1 for file {}, '
                           "version string '{}' unrecognized".format(
                               fname, version))
        version_major = 1

    element = None
    if version_major == 1:
        match = REGEX_ELEMENT_V1.search(upf_contents)
        if match:
            element = match.group('element_name')
    else:  # all versions > 1
        match = REGEX_ELEMENT_V2.search(upf_contents)
        if match:
            element = match.group('element_name')

    if element is None:
        raise ParsingError(
            'Unable to find the element of UPF {}'.format(fname))
    element = element.capitalize()
    if element not in _valid_symbols:
        raise ParsingError('Unknown element symbol {} for file {}'.format(
            element, fname))
    if check_filename:
        if not os.path.basename(fname).lower().startswith(element.lower()):
            raise ParsingError('Filename {0} was recognized for element '
                               '{1}, but the filename does not start '
                               'with {1}'.format(fname, element))

    parsed_data['element'] = element

    return parsed_data
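A usage sketch: `parse_upf` accepts either a path or an open file handle. The
filename is hypothetical; with check_filename=True it must start with the
element symbol:

import io

# Path variant
parsed = parse_upf('/path/to/Si.pbe-rrkjus.UPF')
print(parsed['element'], parsed['version'])

# File-handle variant
with io.open('/path/to/Si.pbe-rrkjus.UPF', encoding='utf8') as handle:
    parsed = parse_upf(handle)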
Example #10
import os

from six.moves import zip

from aiida.common import AIIDA_LOGGER, exceptions
from aiida.common.datastructures import CalcJobState
from aiida.common.folders import SandboxFolder
from aiida.common.links import LinkType
from aiida.orm import FolderData
from aiida.orm.utils.log import get_dblogger_extra
from aiida.plugins import DataFactory
from aiida.schedulers.datastructures import JobState

REMOTE_WORK_DIRECTORY_LOST_FOUND = 'lost+found'

execlogger = AIIDA_LOGGER.getChild('execmanager')


def upload_calculation(node,
                       transport,
                       calc_info,
                       script_filename,
                       dry_run=False):
    """Upload a `CalcJob` instance

    :param node: the `CalcJobNode`.
    :param transport: an already opened transport to use to submit the calculation.
    :param calc_info: the calculation info datastructure returned by `CalcJobNode.presubmit`
    :param script_filename: the job launch script returned by `CalcJobNode.presubmit`
    :return: tuple of ``calc_info`` and ``script_filename``
    """
Example #11
"""Data structures used by `Scheduler` instances.

In particular, there is the definition of the possible job states (JobState),
the data structure to be filled for job submission (JobTemplate), and
the data structure that is returned when querying for jobs in the scheduler
(JobInfo).
"""
import abc
import enum

from aiida.common import AIIDA_LOGGER
from aiida.common.extendeddicts import AttributeDict, DefaultFieldsAttributeDict

SCHEDULER_LOGGER = AIIDA_LOGGER.getChild('scheduler')

__all__ = (
    'JobState', 'JobResource', 'JobTemplate', 'JobInfo', 'NodeNumberJobResource', 'ParEnvJobResource', 'MachineInfo'
)


class JobState(enum.Enum):
    """Enumeration of possible scheduler states of a CalcJob.

    There is no FAILED state as every completed job is put in DONE, regardless of success.
    """

    UNDETERMINED = 'undetermined'
    QUEUED = 'queued'
    QUEUED_HELD = 'queued held'
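The enumeration is truncated here in the original snippet. A brief
illustration of lookup by value, which is how scheduler plugins typically map
raw scheduler codes onto `JobState`:

# Enum members can be looked up by their string value.
assert JobState('queued') is JobState.QUEUED
assert JobState.QUEUED.value == 'queued'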
Example #12
    def __init__(self):
        self._cache = {}
        self._logger = AIIDA_LOGGER.getChild('plugin_version_provider')
Example #13
def deserialize_list(mainitem, subitems, sep, original_class, original_pk,
                     lesserrors):
    """Deserialize a Python list."""
    # pylint: disable=protected-access
    # subitems contains all subitems; here I keep only those at
    # depth 1, i.e. for subitems '0', '1' and '1.c' I keep
    # only '0' and '1'

    from aiida.common import AIIDA_LOGGER

    firstlevelsubdict = {k: v for k, v in subitems.items() if sep not in k}

    # For checking, I verify the expected values
    expected_set = {'{:d}'.format(i) for i in range(mainitem['ival'])}
    received_set = set(firstlevelsubdict.keys())
    # If there are more entries than expected, but all expected
    # ones are there, I just issue an error but I do not stop.

    if not expected_set.issubset(received_set):
        if (original_class is not None
                and original_class._subspecifier_field_name is not None):
            subspecifier_string = '{}={} and '.format(
                original_class._subspecifier_field_name, original_pk)
        else:
            subspecifier_string = ''
        if original_class is None:
            sourcestr = 'the data passed'
        else:
            sourcestr = original_class.__name__

        raise DeserializationException('Wrong list elements stored in {} for '
                                       "{}key='{}' ({} vs {})".format(
                                           sourcestr, subspecifier_string,
                                           mainitem['key'], expected_set,
                                           received_set))
    if expected_set != received_set:
        if (original_class is not None
                and original_class._subspecifier_field_name is not None):
            subspecifier_string = '{}={} and '.format(
                original_class._subspecifier_field_name, original_pk)
        else:
            subspecifier_string = ''

        sourcestr = 'the data passed' if original_class is None else original_class.__name__

        msg = ('Wrong list elements stored in {} for '
               "{}key='{}' ({} vs {})".format(sourcestr, subspecifier_string,
                                              mainitem['key'], expected_set,
                                              received_set))
        if lesserrors:
            AIIDA_LOGGER.error(msg)
        else:
            raise DeserializationException(msg)

    # I get the values in memory as a dictionary
    tempdict = {}
    for firstsubk, firstsubv in firstlevelsubdict.items():
        # I call recursively the same function to get subitems
        newsubitems = {
            k[len(firstsubk) + len(sep):]: v
            for k, v in subitems.items() if k.startswith(firstsubk + sep)
        }
        tempdict[firstsubk] = _deserialize_attribute(
            mainitem=firstsubv,
            subitems=newsubitems,
            sep=sep,
            original_class=original_class,
            original_pk=original_pk)

    # And then I put them in a list
    retlist = [tempdict['{:d}'.format(i)] for i in range(mainitem['ival'])]
    return retlist
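For comparison with the dict case, an illustrative sketch of the flattened
input for the list ['x', 'y'] stored under key 'mylist' (value fields other
than the ones shown are omitted):

# Illustrative input shapes only.
mainitem = {'key': 'mylist', 'datatype': 'list', 'ival': 2}  # ival = list length
subitems = {
    '0': {'key': 'mylist.0', 'datatype': 'txt', 'tval': 'x'},
    '1': {'key': 'mylist.1', 'datatype': 'txt', 'tval': 'y'},
}
# expected_set = {'0', '1'} must be a subset of the received keys; items are
# deserialized recursively and reassembled in index order.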
Example #14
def _deserialize_attribute(mainitem,
                           subitems,
                           sep,
                           original_class=None,
                           original_pk=None,
                           lesserrors=False):
    """
    Deserialize a single attribute.

    :param mainitem: the main item (either the attribute itself for base
      types (None, string, ...), or the main item for lists and dicts).
      Must contain the 'key' key and also the following keys:
      datatype, tval, fval, ival, bval, dval.
      NOTE that a type check is not performed! tval is expected to be a string,
      dval a date, etc.
    :param subitems: must be a dictionary of dictionaries. In the top-level dictionary,
      the key must be the key of the attribute, stripped of all prefixes
      (i.e., if the mainitem has key 'a.b' and we pass subitems
      'a.b.0', 'a.b.1', 'a.b.1.c', their keys must be '0', '1', '1.c').
      It must be None if the value is not iterable (int, str,
      float, ...).
      It is an empty dictionary if there are no subitems.
    :param sep: a string, the separator between subfields (to separate the
      name of a dictionary from the keys it contains, for instance)
    :param original_class: if these elements come from a specific subclass
      of DbMultipleValueAttributeBaseClass, pass here the class (note: the class,
      not the instance!). This is used only in case the wrong number of elements
      is found in the raw data, to print a more meaningful message (if the class
      has a dbnode associated to it)
    :param original_pk: if the elements come from a specific subclass
      of DbMultipleValueAttributeBaseClass that has a dbnode associated to it,
      pass here the PK integer. This is used only in case the wrong number
      of elements is found in the raw data, to print a more meaningful message
    :param lesserrors: If set to True, in some cases where the content of the
      DB is not consistent but data is still recoverable,
      it will just log the message rather than raising
      an exception (e.g. if the number of elements of a dictionary is different
      from the number declared in the ival field).

    :return: the deserialized value
    :raise aiida.backends.djsite.db.migrations.DeserializationException: if an error occurs
    """
    from aiida.common import json
    from aiida.common.timezone import (is_naive, make_aware,
                                       get_current_timezone)

    from aiida.common import AIIDA_LOGGER

    if mainitem['datatype'] == 'none':
        if subitems:
            raise DeserializationException("'{}' is of a base type, "
                                           'but has subitems!'.format(
                                               mainitem['key']))
        return None
    elif mainitem['datatype'] == 'bool':
        if subitems:
            raise DeserializationException("'{}' is of a base type, "
                                           'but has subitems!'.format(
                                               mainitem['key']))
        return mainitem['bval']
    elif mainitem['datatype'] == 'int':
        if subitems:
            raise DeserializationException("'{}' is of a base type, "
                                           'but has subitems!'.format(
                                               mainitem['key']))
        return mainitem['ival']
    elif mainitem['datatype'] == 'float':
        if subitems:
            raise DeserializationException("'{}' is of a base type, "
                                           'but has subitems!'.format(
                                               mainitem['key']))
        return mainitem['fval']
    elif mainitem['datatype'] == 'txt':
        if subitems:
            raise DeserializationException("'{}' is of a base type, "
                                           'but has subitems!'.format(
                                               mainitem['key']))
        return mainitem['tval']
    elif mainitem['datatype'] == 'date':
        if subitems:
            raise DeserializationException("'{}' is of a base type, "
                                           'but has subitems!'.format(
                                               mainitem['key']))
        if is_naive(mainitem['dval']):
            return make_aware(mainitem['dval'], get_current_timezone())
        else:
            return mainitem['dval']

    elif mainitem['datatype'] == 'list':
        # subitems contains all subitems; here I keep only those at
        # depth 1, i.e. for subitems '0', '1' and '1.c' I keep
        # only '0' and '1'
        firstlevelsubdict = {k: v for k, v in subitems.items() if sep not in k}

        # For checking, I verify the expected values
        expected_set = {'{:d}'.format(i) for i in range(mainitem['ival'])}
        received_set = set(firstlevelsubdict.keys())
        # If there are more entries than expected, but all expected
        # ones are there, I just issue an error but I do not stop.

        if not expected_set.issubset(received_set):
            if (original_class is not None
                    and original_class._subspecifier_field_name is not None):
                subspecifier_string = '{}={} and '.format(
                    original_class._subspecifier_field_name, original_pk)
            else:
                subspecifier_string = ''
            if original_class is None:
                sourcestr = 'the data passed'
            else:
                sourcestr = original_class.__name__

            raise DeserializationException(
                'Wrong list elements stored in {} for '
                "{}key='{}' ({} vs {})".format(sourcestr, subspecifier_string,
                                               mainitem['key'], expected_set,
                                               received_set))
        if expected_set != received_set:
            if (original_class is not None
                    and original_class._subspecifier_field_name is not None):
                subspecifier_string = '{}={} and '.format(
                    original_class._subspecifier_field_name, original_pk)
            else:
                subspecifier_string = ''
            if original_class is None:
                sourcestr = 'the data passed'
            else:
                sourcestr = original_class.__name__

            msg = ('Wrong list elements stored in {} for '
                   "{}key='{}' ({} vs {})".format(sourcestr,
                                                  subspecifier_string,
                                                  mainitem['key'],
                                                  expected_set, received_set))
            if lesserrors:
                AIIDA_LOGGER.error(msg)
            else:
                raise DeserializationException(msg)

        # I get the values in memory as a dictionary
        tempdict = {}
        for firstsubk, firstsubv in firstlevelsubdict.items():
            # I call recursively the same function to get subitems
            newsubitems = {
                k[len(firstsubk) + len(sep):]: v
                for k, v in subitems.items() if k.startswith(firstsubk + sep)
            }
            tempdict[firstsubk] = _deserialize_attribute(
                mainitem=firstsubv,
                subitems=newsubitems,
                sep=sep,
                original_class=original_class,
                original_pk=original_pk)

        # And then I put them in a list
        retlist = [tempdict['{:d}'.format(i)] for i in range(mainitem['ival'])]
        return retlist
    elif mainitem['datatype'] == 'dict':
        # subitems contains all subitems; here I keep only those at
        # depth 1, i.e. for subitems '0', '1' and '1.c' I keep
        # only '0' and '1'
        firstlevelsubdict = {k: v for k, v in subitems.items() if sep not in k}

        if len(firstlevelsubdict) != mainitem['ival']:
            if (original_class is not None
                    and original_class._subspecifier_field_name is not None):
                subspecifier_string = '{}={} and '.format(
                    original_class._subspecifier_field_name, original_pk)
            else:
                subspecifier_string = ''
            if original_class is None:
                sourcestr = 'the data passed'
            else:
                sourcestr = original_class.__name__

            msg = ('Wrong dict length stored in {} for '
                   "{}key='{}' ({} vs {})".format(sourcestr,
                                                  subspecifier_string,
                                                  mainitem['key'],
                                                  len(firstlevelsubdict),
                                                  mainitem['ival']))
            if lesserrors:
                AIIDA_LOGGER.error(msg)
            else:
                raise DeserializationException(msg)

        # I get the values in memory as a dictionary
        tempdict = {}
        for firstsubk, firstsubv in firstlevelsubdict.items():
            # I call recursively the same function to get subitems
            newsubitems = {
                k[len(firstsubk) + len(sep):]: v
                for k, v in subitems.items() if k.startswith(firstsubk + sep)
            }
            tempdict[firstsubk] = _deserialize_attribute(
                mainitem=firstsubv,
                subitems=newsubitems,
                sep=sep,
                original_class=original_class,
                original_pk=original_pk)

        return tempdict
    elif mainitem['datatype'] == 'json':
        try:
            return json.loads(mainitem['tval'])
        except ValueError:
            raise DeserializationException(
                'Error in the content of the json field')
    else:
        raise DeserializationException(
            "The type field '{}' is not recognized".format(
                mainitem['datatype']))
Example #15
def upload_psf_family(folder, group_label, group_description, stop_if_existing=True):
    """
    Upload a set of PSF files in a given group.

    :param folder: a path containing all PSF files to be added.
        Only files ending in .PSF (case-insensitive) are considered.
    :param group_label: the name of the group to create. If it exists and is
        non-empty, a UniquenessError is raised.
    :param group_description: a string to be set as the group description.
        Overwrites previous descriptions, if the group was existing.
    :param stop_if_existing: if True, check for the md5 of the files and,
        if the file already exists in the DB, raises a MultipleObjectsError.
        If False, simply adds the existing PsfData node to the group.
    """
    import os
    import warnings

    from aiida import orm
    from aiida.common import AIIDA_LOGGER as aiidalogger
    from aiida.common.exceptions import UniquenessError
    from aiida.common.files import md5_file
    from aiida.orm.querybuilder import QueryBuilder
    from aiida_siesta.data.psf import PsfData
    from aiida_siesta.groups.pseudos import PsfFamily
    # Module path assumed for the deprecation warning class used below:
    from aiida_siesta.utils.warn import AiidaSiestaDeprecationWarning

    message = (  #pylint: disable=invalid-name
        'This function has been deprecated and will be removed in `v2.0.0`. ' +
        '`upload_psf_family` is substituted by `fam.create_from_folder` ' +
        'where `fam` is an instance of the families classes in `aiida_pseudo.groups.family`.'
    )

    warnings.warn(message, AiidaSiestaDeprecationWarning)

    if not os.path.isdir(folder):
        raise ValueError("folder must be a directory")

    # only files, and only those ending with .psf or .PSF;
    # go to the real file if it is a symlink
    files = [
        os.path.realpath(os.path.join(folder, i))
        for i in os.listdir(folder)
        if os.path.isfile(os.path.join(folder, i)) and i.lower().endswith('.psf')
    ]

    nfiles = len(files)

    automatic_user = orm.User.objects.get_default()
    group, group_created = PsfFamily.objects.get_or_create(label=group_label, user=automatic_user)

    if group.user.email != automatic_user.email:
        raise UniquenessError(
            "There is already a PsfFamily group with name {}"
            ", but it belongs to user {}, therefore you "
            "cannot modify it".format(group_label, group.user.email)
        )

    # Always update description, even if the group already existed
    group.description = group_description

    # NOTE: the group is only saved after the uniqueness checks

    pseudo_and_created = []

    for afile in files:
        md5sum = md5_file(afile)
        qb = QueryBuilder()
        qb.append(PsfData, filters={'attributes.md5': {'==': md5sum}})
        existing_psf = qb.first()

        if existing_psf is None:
            # return the psfdata instances, not stored
            pseudo, created = PsfData.get_or_create(afile, use_first=True, store_psf=False)
            # to check whether only one psf per element exists
            # NOTE: actually, created has the meaning of "to_be_created"
            pseudo_and_created.append((pseudo, created))
        else:
            if stop_if_existing:
                raise ValueError(
                    "A PSF with the same MD5 as {} already exists in the DB; "
                    "it cannot be added while stop_if_existing is True".format(afile)
                )
            existing_psf = existing_psf[0]
            pseudo_and_created.append((existing_psf, False))

    # check that pseudos are unique per element
    elements = [(i[0].element, i[0].md5sum) for i in pseudo_and_created]
    # If group already exists, check also that I am not inserting more than
    # once the same element
    if not group_created:
        for aiida_n in group.nodes:
            # Skip non-pseudos
            if not isinstance(aiida_n, PsfData):
                continue
            elements.append((aiida_n.element, aiida_n.md5sum))

    # Discard duplicate (element, md5) pairs; identical files are not stored twice
    elements = set(elements)
    elements_names = [e[0] for e in elements]

    if not len(elements_names) == len(set(elements_names)):
        duplicates = {x for x in elements_names if elements_names.count(x) > 1}
        duplicates_string = ", ".join(i for i in duplicates)
        raise UniquenessError("More than one PSF found for the elements: " + duplicates_string + ".")

    # At this point, save the group, if still unstored
    if group_created:
        group.store()

    # save the psf in the database, and add them to group
    for pseudo, created in pseudo_and_created:
        if created:
            pseudo.store()

            aiidalogger.debug("New node {} created for file {}".format(pseudo.uuid, pseudo.filename))
        else:
            aiidalogger.debug("Reusing node {} for file {}".format(pseudo.uuid, pseudo.filename))

    # Add all the nodes to the group together
    group.add_nodes([pseudo for pseudo, created in pseudo_and_created])

    nuploaded = len([_ for _, created in pseudo_and_created if created])

    return nfiles, nuploaded
Example #16
from collections.abc import Mapping
from tempfile import NamedTemporaryFile
from typing import Any, List, Optional, Mapping as MappingType, Tuple, Union

from aiida.common import AIIDA_LOGGER, exceptions
from aiida.common.datastructures import CalcInfo
from aiida.common.folders import SandboxFolder
from aiida.common.links import LinkType
from aiida.orm import load_node, CalcJobNode, Code, FolderData, Node, RemoteData
from aiida.orm.utils.log import get_dblogger_extra
from aiida.plugins import DataFactory
from aiida.schedulers.datastructures import JobState
from aiida.transports import Transport

REMOTE_WORK_DIRECTORY_LOST_FOUND = 'lost+found'

EXEC_LOGGER = AIIDA_LOGGER.getChild('execmanager')


def _find_data_node(inputs: MappingType[str, Any],
                    uuid: str) -> Optional[Node]:
    """Find and return the node with the given UUID from a nested mapping of input nodes.

    :param inputs: (nested) mapping of nodes
    :param uuid: UUID of the node to find
    :return: instance of `Node` or `None` if not found
    """
    data_node = None

    for input_node in inputs.values():
        if isinstance(input_node, Mapping):
            data_node = _find_data_node(input_node, uuid)
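        # NOTE: the original snippet is truncated at this point. What follows
        # is a plausible reconstruction of the rest of the loop, based on how
        # aiida-core's execmanager implements it; it is not part of the snippet.
        elif isinstance(input_node, Node) and input_node.uuid == uuid:
            data_node = input_node

        if data_node is not None:
            break

    return data_node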