Example 1
    def __getitem__(self, key):
        """
        Description:
            Go through self and the sequence of maps to find the first match for the given
            key

        Input:
            key: key for the item to get
        """
        log = LoggerAdapter(logger, {'name_ext': 'NsidChainMap.__getitem__'})
        value = self._LOOKUP_FAIL_CANARY
        try:
            value = self.data[key]
            return value
        except KeyError:
            log.debug("{} not found in local dict".format(key))
            for m_nsid in self.map_nsids:
                try:
                    nsid_map = self.nsroot._lookup(m_nsid)
                except NamespaceLookupError:
                    log.warning('Unable to lookup map: {}'.format(m_nsid))
                    #- skip maps that fail to resolve rather than reusing a stale one
                    continue

                try:
                    value = nsid_map[key]
                    break
                except KeyError:
                    log.debug('KeyError in {}. Trying next...'.format(m_nsid))
                    continue

            if value is self._LOOKUP_FAIL_CANARY:
                return self.__missing__(key)
            else:
                return value
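These snippets all attach a `name_ext` field to log records through LoggerAdapter. A minimal sketch of how that extra can be rendered, assuming a format string that references the key (the formatter setup below is hypothetical, not part of the original class):

import logging

# Hypothetical setup: render the 'name_ext' extra injected by the adapter.
logging.basicConfig(format='%(name)s [%(name_ext)s] %(levelname)s: %(message)s')
logger = logging.getLogger('example')
log = logging.LoggerAdapter(logger, {'name_ext': 'NsidChainMap.__getitem__'})
log.warning('lookup failed for key %s', 'some_key')
# example [NsidChainMap.__getitem__] WARNING: lookup failed for key some_key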
Example 2
def load_formatters(path=None, prefix='kaleidoscope.formatter'):
    log_name = '{}.load_formatters'.format(__name__)
    log = LoggerAdapter(logger, {'name_ext': log_name})

    log.debug("Loading formatters...")

    if path is None:
        path = [os.path.split(__file__)[0]]

    if prefix[-1] != '.':
        prefix += '.'

    log.debug("Walking packages. path: {} | prefix: {}".format(path, prefix))
    all_module_infos = list(pkgutil.walk_packages(path=path, prefix=prefix))
    log.debug("Package Walk generated {} ModInfos: {}".format(
        len(all_module_infos), all_module_infos))

    #- packages are kept as ModuleInfo entries; plain modules are imported below
    all_pkgs = filter(lambda x: x.ispkg, all_module_infos)
    all_modules = itertools.filterfalse(lambda x: x.ispkg, all_module_infos)

    successful_imports = list(all_pkgs)

    for modinfo in all_modules:
        try:
            new_mod = importlib.import_module(modinfo.name)
            successful_imports.append(new_mod)
        except ImportError as err:
            log.warning("Failed to import formatter module: {}: {}".format(\
                modinfo.name, err))

    return successful_imports
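A minimal usage sketch: discover everything under the default prefix and print what came back. Note that packages are returned as ModuleInfo entries while plain modules come back already imported.

loaded = load_formatters()
for entry in loaded:
    # ModuleInfo entries expose .name; imported modules expose __name__
    print(getattr(entry, '__name__', getattr(entry, 'name', entry)))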
Example 3
    def __init__(self, implementor_namespace, implementor, nsroot=None,
        addendum=None, formatter=None, implementor_key=None,
        implementor_state_namespace=None, pre_exec=None, post_exec=None):
        """
        Input:
            addendum: string to be concatenated on to implementor object and eval'd
            formatter: callable to pass the returned value of the implementor. Final
                Return value comes from this method
            implementor_namespace: where to look up the implementor NSIDs
            implementor: NSID of object that implements the operations, or direct object
                to use as an override
            implementor_state_namespace: optional namespace to look into to dynamically
                control which specific implementors are being used
                Namespace structure is expected to mirror the implementors namespace and
                have the nodes support a "state" attribute that returns "on" or "off".
            pre_exec: what to call before evaluating the provider's implementation
            post_exec: what to call after the provider's implementation has run

        Notes:
            roughly equivalent to:
                return self.formatter(eval("self.implementor{}".format(addendum)))
        """
        log = LoggerAdapter(logger, {'name_ext': 'AddendumFormatter.__init__'})
        self._pre_exec = pre_exec
        self._post_exec = post_exec
        self.implementor_ns = implementor_namespace
        self.implementor_state_ns = implementor_state_namespace

        if isinstance(implementor, str):
            #- treat as NSID
            self.implementor_nsid = implementor
            self.implementor = None
        else:
            self.implementor_nsid = None
            self.implementor = implementor

        #- addendums can be single string or list
        self._addendums = list()
        if isinstance(addendum, str):
            self._addendums.append(addendum)
        elif isinstance(addendum, collections.abc.Sequence):
            self._addendums.extend(addendum)

        self.nsroot = nsroot
        self.key = implementor_key
        if callable(formatter):
            self.formatter = formatter
        else:
            self.formatter = lambda x: x

            if formatter is not None:
                log.warning("Formatter not callable: {}".format(formatter))
                msg = " will use direct return value of implementor {}".format(\
                        self.implementor_nsid)
                log.warning(msg)
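The Notes above describe the addendum mechanism in one line; here is a minimal self-contained sketch of that pattern, with a hypothetical implementor and formatter (none of these names come from the class itself):

# The addendum string is appended to the implementor expression, eval'd,
# and the formatter post-processes whatever comes back.
implementor = {'a': 1, 'b': 2}
addendum = ".keys()"
formatter = sorted
print(formatter(eval("implementor{}".format(addendum))))  # ['a', 'b']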
Example 4
    def render_object(self,
                      obj,
                      spec=None,
                      specname=None,
                      attributes=None,
                      align=True):
        """
        Description:
            Top-level object rendering method.
        Input:
            obj: the object (or collection of objects) to render
            spec: ObjectModelSpec (overrides specname and attributes)
            specname: name of an ObjectModelSpec to look up (overrides attributes)
            attributes: list of attributes to render (w/out an existing spec)
            align: whether or not to try to align the attributes if the object is a
                collection of objects to render
        """
        log = LoggerAdapter(
            logger, {'name_ext': f'{self.__class__.__name__}.render_object'})

        log.debug("Entering: spec: {} | specname: {} | attributes: {} | align: {}".format(\
                spec, specname, attributes, align))
        if spec:
            return self.render_object_from_spec(obj, spec, align=align)
        elif specname:
            return self.render_object_from_specname(obj, specname, align=align)
        elif attributes:
            return self.render_object_from_attributes(obj,
                                                      attributes,
                                                      align=align)
        else:
            specname = self.make_default_specname_from_object(obj)
            log.debug("made specname: {}".format(specname))
            try:
                return self.render_object_from_specname(obj,
                                                        specname,
                                                        align=align)
            except NamespaceLookupError:
                log.warning(f"kaleidoscope can't find specname {specname}")
                return obj
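The branching above gives a strict precedence: an explicit spec wins over a specname lookup, which wins over a bare attribute list, with a derived default specname as the fallback. A toy sketch of that resolution order (not the renderer itself):

def resolve(spec=None, specname=None, attributes=None):
    # Mirrors render_object's precedence, returning which branch would run.
    if spec:
        return ('spec', spec)
    elif specname:
        return ('specname', specname)
    elif attributes:
        return ('attributes', attributes)
    return ('default', 'specname derived from the object')

print(resolve(specname='user.default'))    # ('specname', 'user.default')
print(resolve(attributes=['id', 'name']))  # ('attributes', ['id', 'name'])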
Example 5
def scan_robots_txt(
        lines: Iterable[str],
        logger: LoggerAdapter) -> Iterator[Iterable[Tuple[int, str, str]]]:
    """Tokenizes the contents of a C{robots.txt} file.

    @param lines:
        Contents of a C{robots.txt} file.
    @param logger:
        Problems found while scanning are logged here.
    @return:
        Yields records, where each record is a sequence of
        C{(lineno, token, value)} triples.
    """
    record: List[Tuple[int, str, str]] = []
    for lineno, line in enumerate(lines, 1):
        stripped_line = line.lstrip()
        if stripped_line.startswith('#'):
            # Comment-only lines are discarded and do not end records.
            continue
        if not stripped_line:
            # Empty lines end records.
            if record:
                yield record
                record = []
            continue
        if len(stripped_line) != len(line):
            logger.warning('Line %d has whitespace before field', lineno)

        nocomment_line = stripped_line.split('#', 1)[0]
        try:
            field, value = nocomment_line.split(':', 1)
        except ValueError:
            logger.error('Line %d contains no ":"; ignoring line', lineno)
        else:
            record.append((lineno, field.casefold(), value.strip()))

    if record:
        yield record
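A minimal usage sketch: feed scan_robots_txt a few lines and print the records it yields. The LoggerAdapter here is just a bare wrapper so warnings have somewhere to go.

import logging

log = logging.LoggerAdapter(logging.getLogger('robots'), {})
lines = [
    'User-agent: *',
    'Disallow: /private',
    '',
    'Sitemap: https://example.com/sitemap.xml',
]
for record in scan_robots_txt(lines, log):
    print(record)
# [(1, 'user-agent', '*'), (2, 'disallow', '/private')]
# [(4, 'sitemap', 'https://example.com/sitemap.xml')]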
Example 6
def upload_calculation(node,
                       transport,
                       calc_info,
                       script_filename,
                       dry_run=False):
    """Upload a `CalcJob` instance

    :param node: the `CalcJobNode`.
    :param transport: an already opened transport to use to submit the calculation.
    :param calc_info: the calculation info datastructure returned by `CalcJobNode.presubmit`
    :param script_filename: the job launch script returned by `CalcJobNode.presubmit`
    :return: tuple of ``calc_info`` and ``script_filename``
    """
    from logging import LoggerAdapter
    from tempfile import NamedTemporaryFile
    from aiida.orm import load_node, Code, RemoteData

    # If the calculation already has a `remote_folder`, simply return. The upload was apparently already completed
    # before, which can happen if the daemon is restarted and it shuts down after uploading but before getting the
    # chance to perform the state transition. Upon reloading this calculation, it will re-attempt the upload.
    link_label = 'remote_folder'
    if node.get_outgoing(RemoteData, link_label_filter=link_label).first():
        execlogger.warning(
            'CalcJobNode<{}> already has a `{}` output: skipping upload'.
            format(node.pk, link_label))
        return calc_info, script_filename

    computer = node.computer

    codes_info = calc_info.codes_info
    input_codes = [
        load_node(_.code_uuid, sub_classes=(Code, )) for _ in codes_info
    ]

    logger_extra = get_dblogger_extra(node)
    transport.set_logger_extra(logger_extra)
    logger = LoggerAdapter(logger=execlogger, extra=logger_extra)

    if not dry_run and node.has_cached_links():
        raise ValueError(
            'Cannot submit calculation {} because it has cached input links! If you just want to test the '
            'submission, set `metadata.dry_run` to True in the inputs.'.format(
                node.pk))

    folder = node._raw_input_folder

    # If we are performing a dry-run, the working directory should actually be a local folder that should already exist
    if dry_run:
        workdir = transport.getcwd()
    else:
        remote_user = transport.whoami()
        # TODO Doc: {username} field
        # TODO: if something is changed here, fix also 'verdi computer test'
        remote_working_directory = computer.get_workdir().format(
            username=remote_user)
        if not remote_working_directory.strip():
            raise exceptions.ConfigurationError(
                "[submission of calculation {}] No remote_working_directory configured for computer '{}'"
                .format(node.pk, computer.name))

        # If it already exists, no exception is raised
        try:
            transport.chdir(remote_working_directory)
        except IOError:
            logger.debug(
                '[submission of calculation {}] Unable to chdir in {}, trying to create it'
                .format(node.pk, remote_working_directory))
            try:
                transport.makedirs(remote_working_directory)
                transport.chdir(remote_working_directory)
            except EnvironmentError as exc:
                raise exceptions.ConfigurationError(
                    '[submission of calculation {}] '
                    'Unable to create the remote directory {} on '
                    "computer '{}': {}".format(node.pk,
                                               remote_working_directory,
                                               computer.name, exc))
        # Store remotely with sharding (here is where we choose the folder
        # structure of remote jobs; then I store this in the calculation
        # properties using _set_remote_dir and I do not have to know the
        # logic, but I just need to read the absolute path from the
        # calculation properties).
        transport.mkdir(calc_info.uuid[:2], ignore_existing=True)
        transport.chdir(calc_info.uuid[:2])
        transport.mkdir(calc_info.uuid[2:4], ignore_existing=True)
        transport.chdir(calc_info.uuid[2:4])

        try:
            # The final directory may already exist, most likely because this function was already executed once, but
            # failed and as a result was rescheduled by the engine. In this case it would be fine to delete the folder
            # and create it from scratch, except that we cannot be sure that this is the actual case. Therefore, to err
            # on the safe side, we move the folder to the lost+found directory before recreating the folder from scratch
            transport.mkdir(calc_info.uuid[4:])
        except OSError:
            # Move the existing directory to lost+found, log a warning and create a clean directory anyway
            path_existing = os.path.join(transport.getcwd(),
                                         calc_info.uuid[4:])
            path_lost_found = os.path.join(remote_working_directory,
                                           REMOTE_WORK_DIRECTORY_LOST_FOUND)
            path_target = os.path.join(path_lost_found, calc_info.uuid)
            logger.warning(
                'tried to create path {} but it already exists, moving the entire folder to {}'
                .format(path_existing, path_target))

            # Make sure the lost+found directory exists, then copy the existing folder there and delete the original
            transport.mkdir(path_lost_found, ignore_existing=True)
            transport.copytree(path_existing, path_target)
            transport.rmtree(path_existing)

            # Now we can create a clean folder for this calculation
            transport.mkdir(calc_info.uuid[4:])
        finally:
            transport.chdir(calc_info.uuid[4:])

        # I store the workdir of the calculation for later file retrieval
        workdir = transport.getcwd()
        node.set_remote_workdir(workdir)

    # I first create the code files, so that the code can put
    # default files to be overwritten by the plugin itself.
    # Still, beware! The code file itself could be overwritten...
    # But I checked for this earlier.
    for code in input_codes:
        if code.is_local():
            # Note: this will possibly overwrite files
            for f in code.get_folder_list():
                transport.put(code.get_abs_path(f), f)
            transport.chmod(code.get_local_executable(), 0o755)  # rwxr-xr-x

    # In a dry_run, the working directory is the raw input folder, which will already contain these resources
    if not dry_run:
        for filename in folder.get_content_list():
            logger.debug(
                '[submission of calculation {}] copying file/folder {}...'.
                format(node.pk, filename))
            transport.put(folder.get_abs_path(filename), filename)

    # local_copy_list is a list of tuples, each with (uuid, filename, target)
    # NOTE: validation of these lists is done inside calculation.presubmit()
    local_copy_list = calc_info.local_copy_list or []
    remote_copy_list = calc_info.remote_copy_list or []
    remote_symlink_list = calc_info.remote_symlink_list or []

    for uuid, filename, target in local_copy_list:
        logger.debug(
            '[submission of calculation {}] copying local file/folder to {}'.
            format(node.pk, target))

        try:
            data_node = load_node(uuid=uuid)
        except exceptions.NotExistent:
            logger.warning(
                'failed to load Node<{}> specified in the `local_copy_list`'.
                format(uuid))
            # Without the source node there is nothing to copy for this entry
            continue

        # Note, once #2579 is implemented, use the `node.open` method instead of the named temporary file in
        # combination with the new `Transport.put_object_from_filelike`
        # Since the content of the node could potentially be binary, we read the raw bytes and pass them on
        with NamedTemporaryFile(mode='wb+') as handle:
            handle.write(data_node.get_object_content(filename, mode='rb'))
            handle.flush()
            handle.seek(0)
            transport.put(handle.name, target)

    if dry_run:
        if remote_copy_list:
            with open(os.path.join(workdir, '_aiida_remote_copy_list.txt'),
                      'w') as handle:
                for remote_computer_uuid, remote_abs_path, dest_rel_path in remote_copy_list:
                    handle.write(
                        'would have copied {} to {} in working directory on remote {}'
                        .format(remote_abs_path, dest_rel_path, computer.name))

        if remote_symlink_list:
            with open(os.path.join(workdir, '_aiida_remote_symlink_list.txt'),
                      'w') as handle:
                for remote_computer_uuid, remote_abs_path, dest_rel_path in remote_symlink_list:
                    handle.write(
                        'would have created symlinks from {} to {} in working directory on remote {}'
                        .format(remote_abs_path, dest_rel_path, computer.name))

    else:

        for (remote_computer_uuid, remote_abs_path,
             dest_rel_path) in remote_copy_list:
            if remote_computer_uuid == computer.uuid:
                logger.debug(
                    '[submission of calculation {}] copying {} remotely, directly on the machine {}'
                    .format(node.pk, dest_rel_path, computer.name))
                try:
                    transport.copy(remote_abs_path, dest_rel_path)
                except (IOError, OSError):
                    logger.warning(
                        '[submission of calculation {}] Unable to copy remote resource from {} to {}! '
                        'Stopping.'.format(node.pk, remote_abs_path,
                                           dest_rel_path))
                    raise
            else:
                raise NotImplementedError(
                    '[submission of calculation {}] Remote copy between two different machines is '
                    'not implemented yet'.format(node.pk))

        for (remote_computer_uuid, remote_abs_path,
             dest_rel_path) in remote_symlink_list:
            if remote_computer_uuid == computer.uuid:
                logger.debug(
                    '[submission of calculation {}] copying {} remotely, directly on the machine {}'
                    .format(node.pk, dest_rel_path, computer.name))
                try:
                    transport.symlink(remote_abs_path, dest_rel_path)
                except (IOError, OSError):
                    logger.warning(
                        '[submission of calculation {}] Unable to create remote symlink from {} to {}! '
                        'Stopping.'.format(node.pk, remote_abs_path,
                                           dest_rel_path))
                    raise
            else:
                raise IOError(
                    'It is not possible to create a symlink between two different machines for '
                    'calculation {}'.format(node.pk))

    if not dry_run:
        # Make sure that attaching the `remote_folder` with a link is the last thing we do. This gives the biggest
        # chance of making this method idempotent. That is to say, if a runner gets interrupted during this action, it
        # will simply retry the upload, unless we got here and managed to link it up, in which case we move to the next
        # task. Because in that case, the check for the existence of this link at the top of this function will exit
        # early from this command.
        remotedata = RemoteData(computer=computer, remote_path=workdir)
        remotedata.add_incoming(node,
                                link_type=LinkType.CREATE,
                                link_label='remote_folder')
        remotedata.store()

    return calc_info, script_filename
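The chained mkdir/chdir calls above shard the remote working directory by calculation UUID. A short sketch of the resulting layout, using an example UUID value:

import os

# First two, next two, and remaining UUID characters become path segments.
uuid = '6f7ce1d4-95d0-4d90-a2d9-3b2c0e2a1c5e'  # example value
print(os.path.join(uuid[:2], uuid[2:4], uuid[4:]))
# 6f/7c/e1d4-95d0-4d90-a2d9-3b2c0e2a1c5e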
Example 7
def upload_calculation(node,
                       transport,
                       calc_info,
                       folder,
                       inputs=None,
                       dry_run=False):
    """Upload a `CalcJob` instance

    :param node: the `CalcJobNode`.
    :param transport: an already opened transport to use to submit the calculation.
    :param calc_info: the calculation info datastructure returned by `CalcJob.presubmit`
    :param folder: temporary local file system folder containing the inputs written by `CalcJob.prepare_for_submission`
    :param inputs: optional nested mapping of input nodes, used to resolve entries of the `local_copy_list`
    :param dry_run: if True, stage everything to the local working directory instead of the remote computer
    """
    # pylint: disable=too-many-locals,too-many-branches,too-many-statements
    from logging import LoggerAdapter
    from tempfile import NamedTemporaryFile
    from aiida.orm import load_node, Code, RemoteData

    # If the calculation already has a `remote_folder`, simply return. The upload was apparently already completed
    # before, which can happen if the daemon is restarted and it shuts down after uploading but before getting the
    # chance to perform the state transition. Upon reloading this calculation, it will re-attempt the upload.
    link_label = 'remote_folder'
    if node.get_outgoing(RemoteData, link_label_filter=link_label).first():
        execlogger.warning(
            f'CalcJobNode<{node.pk}> already has a `{link_label}` output: skipping upload'
        )
        return calc_info

    computer = node.computer

    codes_info = calc_info.codes_info
    input_codes = [
        load_node(_.code_uuid, sub_classes=(Code, )) for _ in codes_info
    ]

    logger_extra = get_dblogger_extra(node)
    transport.set_logger_extra(logger_extra)
    logger = LoggerAdapter(logger=execlogger, extra=logger_extra)

    if not dry_run and node.has_cached_links():
        raise ValueError(
            'Cannot submit calculation {} because it has cached input links! If you just want to test the '
            'submission, set `metadata.dry_run` to True in the inputs.'.format(
                node.pk))

    # If we are performing a dry-run, the working directory should actually be a local folder that should already exist
    if dry_run:
        workdir = transport.getcwd()
    else:
        remote_user = transport.whoami()
        remote_working_directory = computer.get_workdir().format(
            username=remote_user)
        if not remote_working_directory.strip():
            raise exceptions.ConfigurationError(
                "[submission of calculation {}] No remote_working_directory configured for computer '{}'"
                .format(node.pk, computer.label))

        # If it already exists, no exception is raised
        try:
            transport.chdir(remote_working_directory)
        except IOError:
            logger.debug(
                '[submission of calculation {}] Unable to chdir in {}, trying to create it'
                .format(node.pk, remote_working_directory))
            try:
                transport.makedirs(remote_working_directory)
                transport.chdir(remote_working_directory)
            except EnvironmentError as exc:
                raise exceptions.ConfigurationError(
                    '[submission of calculation {}] '
                    'Unable to create the remote directory {} on '
                    "computer '{}': {}".format(node.pk,
                                               remote_working_directory,
                                               computer.label, exc))
        # Store remotely with sharding (here is where we choose the folder
        # structure of remote jobs; then I store this in the calculation
        # properties using _set_remote_dir and I do not have to know the
        # logic, but I just need to read the absolute path from the
        # calculation properties).
        transport.mkdir(calc_info.uuid[:2], ignore_existing=True)
        transport.chdir(calc_info.uuid[:2])
        transport.mkdir(calc_info.uuid[2:4], ignore_existing=True)
        transport.chdir(calc_info.uuid[2:4])

        try:
            # The final directory may already exist, most likely because this function was already executed once, but
            # failed and as a result was rescheduled by the engine. In this case it would be fine to delete the folder
            # and create it from scratch, except that we cannot be sure that this is the actual case. Therefore, to err
            # on the safe side, we move the folder to the lost+found directory before recreating the folder from scratch
            transport.mkdir(calc_info.uuid[4:])
        except OSError:
            # Move the existing directory to lost+found, log a warning and create a clean directory anyway
            path_existing = os.path.join(transport.getcwd(),
                                         calc_info.uuid[4:])
            path_lost_found = os.path.join(remote_working_directory,
                                           REMOTE_WORK_DIRECTORY_LOST_FOUND)
            path_target = os.path.join(path_lost_found, calc_info.uuid)
            logger.warning(
                f'tried to create path {path_existing} but it already exists, moving the entire folder to {path_target}'
            )

            # Make sure the lost+found directory exists, then copy the existing folder there and delete the original
            transport.mkdir(path_lost_found, ignore_existing=True)
            transport.copytree(path_existing, path_target)
            transport.rmtree(path_existing)

            # Now we can create a clean folder for this calculation
            transport.mkdir(calc_info.uuid[4:])
        finally:
            transport.chdir(calc_info.uuid[4:])

        # I store the workdir of the calculation for later file retrieval
        workdir = transport.getcwd()
        node.set_remote_workdir(workdir)

    # I first create the code files, so that the code can put
    # default files to be overwritten by the plugin itself.
    # Still, beware! The code file itself could be overwritten...
    # But I checked for this earlier.
    for code in input_codes:
        if code.is_local():
            # Note: this will possibly overwrite files
            for filename in code.list_object_names():
                # Note, once #2579 is implemented, use the `node.open` method instead of the named temporary file in
                # combination with the new `Transport.put_object_from_filelike`
                # Since the content of the node could potentially be binary, we read the raw bytes and pass them on
                with NamedTemporaryFile(mode='wb+') as handle:
                    handle.write(code.get_object_content(filename, mode='rb'))
                    handle.flush()
                    transport.put(handle.name, filename)
            transport.chmod(code.get_local_executable(), 0o755)  # rwxr-xr-x

    # local_copy_list is a list of tuples, each with (uuid, filename, target)
    # NOTE: validation of these lists is done inside calculation.presubmit()
    local_copy_list = calc_info.local_copy_list or []
    remote_copy_list = calc_info.remote_copy_list or []
    remote_symlink_list = calc_info.remote_symlink_list or []
    provenance_exclude_list = calc_info.provenance_exclude_list or []

    def find_data_node(inputs, uuid):
        """Find and return the node with the given UUID from a nested mapping of input nodes.

        :param inputs: (nested) mapping of nodes
        :param uuid: UUID of the node to find
        :return: instance of `Node` or `None` if not found
        """
        from collections.abc import Mapping
        data_node = None

        for input_node in inputs.values():
            if isinstance(input_node, Mapping):
                data_node = find_data_node(input_node, uuid)
            elif isinstance(input_node, Node) and input_node.uuid == uuid:
                data_node = input_node
            if data_node is not None:
                break

        return data_node

    for uuid, filename, target in local_copy_list:
        logger.debug(
            f'[submission of calculation {node.uuid}] copying local file/folder to {target}'
        )

        try:
            data_node = load_node(uuid=uuid)
        except exceptions.NotExistent:
            data_node = find_data_node(inputs, uuid)

        if data_node is None:
            logger.warning(
                f'failed to load Node<{uuid}> specified in the `local_copy_list`'
            )
        else:
            dirname = os.path.dirname(target)
            if dirname:
                os.makedirs(os.path.join(folder.abspath, dirname),
                            exist_ok=True)
            with folder.open(target, 'wb') as handle:
                with data_node.open(filename, 'rb') as source:
                    shutil.copyfileobj(source, handle)
            provenance_exclude_list.append(target)

    # In a dry_run, the working directory is the raw input folder, which will already contain these resources
    if not dry_run:
        for filename in folder.get_content_list():
            logger.debug(
                f'[submission of calculation {node.pk}] copying file/folder {filename}...'
            )
            transport.put(folder.get_abs_path(filename), filename)

        for (remote_computer_uuid, remote_abs_path,
             dest_rel_path) in remote_copy_list:
            if remote_computer_uuid == computer.uuid:
                logger.debug(
                    '[submission of calculation {}] copying {} remotely, directly on the machine {}'
                    .format(node.pk, dest_rel_path, computer.label))
                try:
                    transport.copy(remote_abs_path, dest_rel_path)
                except (IOError, OSError):
                    logger.warning(
                        '[submission of calculation {}] Unable to copy remote resource from {} to {}! '
                        'Stopping.'.format(node.pk, remote_abs_path,
                                           dest_rel_path))
                    raise
            else:
                raise NotImplementedError(
                    '[submission of calculation {}] Remote copy between two different machines is '
                    'not implemented yet'.format(node.pk))

        for (remote_computer_uuid, remote_abs_path,
             dest_rel_path) in remote_symlink_list:
            if remote_computer_uuid == computer.uuid:
                logger.debug(
                    '[submission of calculation {}] copying {} remotely, directly on the machine {}'
                    .format(node.pk, dest_rel_path, computer.label))
                try:
                    transport.symlink(remote_abs_path, dest_rel_path)
                except (IOError, OSError):
                    logger.warning(
                        '[submission of calculation {}] Unable to create remote symlink from {} to {}! '
                        'Stopping.'.format(node.pk, remote_abs_path,
                                           dest_rel_path))
                    raise
            else:
                raise IOError(
                    f'It is not possible to create a symlink between two different machines for calculation {node.pk}'
                )
    else:

        if remote_copy_list:
            with open(os.path.join(workdir, '_aiida_remote_copy_list.txt'),
                      'w') as handle:
                for remote_computer_uuid, remote_abs_path, dest_rel_path in remote_copy_list:
                    handle.write(
                        'would have copied {} to {} in working directory on remote {}'
                        .format(remote_abs_path, dest_rel_path,
                                computer.label))

        if remote_symlink_list:
            with open(os.path.join(workdir, '_aiida_remote_symlink_list.txt'),
                      'w') as handle:
                for remote_computer_uuid, remote_abs_path, dest_rel_path in remote_symlink_list:
                    handle.write(
                        'would have created symlinks from {} to {} in working directory on remote {}'
                        .format(remote_abs_path, dest_rel_path,
                                computer.label))

    # Loop recursively over content of the sandbox folder copying all that are not in `provenance_exclude_list`. Note
    # that directories are not created explicitly. The `node.put_object_from_filelike` call will create intermediate
    # directories for nested files automatically when needed. This means though that empty folders in the sandbox or
    # folders that would be empty when considering the `provenance_exclude_list` will *not* be copied to the repo. The
    # advantage of this explicit copying instead of deleting the files from `provenance_exclude_list` from the sandbox
    # first before moving the entire remaining content to the node's repository, is that in this way we are guaranteed
    # not to accidentally move files to the repository that should not go there at all cost. Note that all entries in
    # the provenance exclude list are normalized first, just as the paths that are in the sandbox folder, otherwise the
    # direct equality test may fail, e.g.: './path/file.txt' != 'path/file.txt' even though they reference the same file
    provenance_exclude_list = [
        os.path.normpath(entry) for entry in provenance_exclude_list
    ]

    for root, _, filenames in os.walk(folder.abspath):
        for filename in filenames:
            filepath = os.path.join(root, filename)
            relpath = os.path.normpath(
                os.path.relpath(filepath, folder.abspath))
            if relpath not in provenance_exclude_list:
                with open(filepath, 'rb') as handle:
                    node._repository.put_object_from_filelike(handle,
                                                              relpath,
                                                              'wb',
                                                              force=True)  # pylint: disable=protected-access

    if not dry_run:
        # Make sure that attaching the `remote_folder` with a link is the last thing we do. This gives the biggest
        # chance of making this method idempotent. That is to say, if a runner gets interrupted during this action, it
        # will simply retry the upload, unless we got here and managed to link it up, in which case we move to the next
        # task. Because in that case, the check for the existence of this link at the top of this function will exit
        # early from this command.
        remotedata = RemoteData(computer=computer, remote_path=workdir)
        remotedata.add_incoming(node,
                                link_type=LinkType.CREATE,
                                link_label='remote_folder')
        remotedata.store()
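The long comment about provenance_exclude_list turns on one detail: plain string comparison fails for equivalent paths written differently, which is why both sides are passed through os.path.normpath first. A tiny demonstration:

import os

print('./path/file.txt' == 'path/file.txt')                    # False
print(os.path.normpath('./path/file.txt') == 'path/file.txt')  # True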
Example 8
    def provide(self, *args, request_id=None, show_progress=True, **kwargs):
        """
        Description:
            perform the implementation for the requested service

        Input:
            *args: passed into self.get_addendum method
            request_id: None (unused in this provider)
            show_progress: show a message when calling each implementor object
            **kwargs: passed into self.get_addendum method

        Notes:
            If an object is set for the 'implementor' attribute, it will be used as the
            implementor object; otherwise, we default to looking up the implementor in a
            root node and iterating over all the sub-nodes.
        """
        log = LoggerAdapter(logger, {'name_ext': 'AddendumFormatter.provide'})
        log.debug("Entering")
        log.debug("varargs: {}".format(args))
        log.debug("kwargs: {}".format(kwargs))

        imp_iter = self._get_implementor_iterator()
        log.debug("got implementor iterator: {}".format(imp_iter))
        #- loop through all the implementors and apply the addendum
        all_outputs = list()
        all_formatted_outputs = list()
        for nsid, implementor in imp_iter:
            if self.implementor_state_ns:
                try:
                    log.debug(
                        "checking implementor flipswitch via nsid: {}".format(
                            nsid))
                    if not self.implementor_state_ns._lookup(nsid):
                        #- skip til next implementor
                        log.debug(
                            "Skipping inactive implementor: {}".format(nsid))
                        continue
                except NamespaceLookupError:
                    log.warning(
                        "No dynamic state for implementor: {}".format(nsid))

            #- per-implementor addendums use key method
            addendum = self.get_addendum(nsid, implementor, *args, **kwargs)
            if show_progress:
                log.info("Calling: {}{}".format(nsid, addendum))

            #TODO: define globals and locals
            outputs = eval("implementor{}".format(addendum), globals(),
                           locals())
            all_outputs += outputs
            formatted_outputs = self.formatter(outputs)
            all_formatted_outputs += formatted_outputs
            if show_progress:
                try:
                    #- outputs must be a sized collection here; an iterator
                    #- would already have been drained by `all_outputs += outputs`
                    n = len(outputs)
                except TypeError:
                    n = 1 if outputs else 0
                log.info("        {} objects returned".format(n))
        log.info("Total: {}".format(len(all_formatted_outputs)))
        return all_formatted_outputs
Example 9
def decode_and_report(
        data: bytes,
        encoding_options: Iterable[Tuple[Optional[str], str]],
        logger: LoggerAdapter
    ) -> Tuple[str, str]:
    """Attempt to decode text using several encoding options in order.

    @param data:
        Encoded version of the text.
    @param encoding_options: C{(encoding | None, source)*}
        Each option is a pair of encoding name and a description of
        where this encoding suggestion originated.
        If the encoding name is C{None}, the option is skipped.
    @param logger:
        Non-fatal problems are logged here.
        Such problems include an unknown or differing encodings
        among the options.
    @return: C{(text, encoding)}
        The decoded string and the encoding used to decode it.
    @raise ValueError:
        If the text could not be decoded.
    """

    # Filter and remember encoding options.
    options = [
        (encoding, source)
        for encoding, source in encoding_options
        if encoding is not None
        ]

    encodings = [encoding for encoding, source in options]
    # Always try to decode as UTF-8, since that is the most common encoding
    # these days, plus it's a superset of ASCII so it also works for old or
    # simple documents.
    encodings.append('utf-8')
    text, used_encoding = try_decode(data, encodings)

    # Report differences between suggested encodings and the one we
    # settled on.
    for encoding, source in options:
        try:
            codec = lookup_codec(encoding)
        except LookupError:
            logger.warning(
                '%s specifies encoding "%s", which is unknown to Python',
                source, encoding
                )
            continue

        std_name = standard_codec_name(codec.name)
        if std_name != used_encoding:
            logger.warning(
                '%s specifies encoding "%s", '
                'while actual encoding seems to be "%s"',
                source, encoding, used_encoding
                )
        elif std_name != encoding:
            logger.info(
                '%s specifies encoding "%s", '
                'which is not the standard name "%s"',
                source, encoding, used_encoding
                )

    return text, used_encoding
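A minimal usage sketch, assuming try_decode() attempts the candidate encodings in order: the declared ASCII encoding fails on the accented byte, so the UTF-8 fallback appended by decode_and_report itself is used, and a warning is logged about the mismatch.

import logging

log = logging.LoggerAdapter(logging.getLogger('decode'), {})
data = 'café'.encode('utf-8')
text, encoding = decode_and_report(
    data,
    [('ascii', 'HTTP Content-Type header'), (None, 'BOM sniffing')],
    log,
)
print(text, encoding)  # café utf-8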
Example 10
class StatusBase:
    """
    Track the status of a potentially-lengthy action like moving or triggering.

    Parameters
    ----------
    timeout: float, optional
        The amount of time to wait before marking the Status as failed.  If
        ``None`` (default) wait forever. It is strongly encouraged to set a
        finite timeout.  If settle_time below is set, that time is added to the
        effective timeout.
    settle_time: float, optional
        The amount of time to wait between the caller specifying that the
        status has completed to running callbacks. Default is 0.


    Notes
    -----

    Theory of operation:

    This employs two ``threading.Event`` objects, one thread that runs for
    (timeout + settle_time) seconds, and one thread that runs for
    settle_time seconds (if settle_time is nonzero).

    At __init__ time, a *timeout* and *settle_time* are specified. A thread
    is started, on which user callbacks, registered after __init__ time via
    :meth:`add_callback`, will eventually be run. The thread waits for an
    Event to be set or for (timeout + settle_time) seconds to pass, whichever
    happens first.

    If (timeout + settle_time) expires and the Event has not
    been set, an internal Exception is set to ``StatusTimeoutError``, and a
    second Event is set, marking the Status as done and failed. The
    callbacks are run.

    If a callback is registered after the Status is done, it will be run
    immediately.

    If the first Event is set before (timeout + settle_time) expires,
    then the second Event is set and no internal Exception is set, marking
    the Status as done and successful. The callbacks are run.

    There are two methods that directly set the first Event. One,
    :meth:`set_exception`, sets it directly after setting the internal
    Exception.  The other, :meth:`set_finished`, starts a
    ``threading.Timer`` that will set it after a delay (the settle_time).
    Only one of these methods may be called, and at most once. If one is
    called twice or if both are called, ``InvalidState`` is raised. If they
    are called too late to prevent a ``StatusTimeoutError``, they are ignored
    but one call is still allowed. Thus, an external callback, e.g. pyepics,
    may report success or failure after the Status object has expired, but
    to no effect because the callbacks have already been called and the
    program has moved on.

    """
    def __init__(self,
                 *,
                 timeout=None,
                 settle_time=0,
                 done=None,
                 success=None):
        super().__init__()
        self._tname = None
        self._lock = threading.RLock()
        self._event = threading.Event()  # state associated with done-ness
        self._settled_event = threading.Event()
        # "Externally initiated" means set_finished() or set_exception(exc) was
        # called, as opposed to completion via an internal timeout.
        self._externally_initiated_completion_lock = threading.Lock()
        self._externally_initiated_completion = False
        self._callbacks = deque()
        self._exception = None

        self.log = LoggerAdapter(logger=logger, extra={'status': self})

        if settle_time is None:
            settle_time = 0.0

        self._settle_time = float(settle_time)

        if timeout is not None:
            timeout = float(timeout)
        self._timeout = timeout

        # We cannot know that we are successful if we are not done.
        if success and not done:
            raise ValueError(
                "Cannot initialize with done=False but success=True.")
        if done is not None or success is not None:
            warn(
                "The 'done' and 'success' parameters will be removed in a "
                "future release. Use the methods set_finished() or "
                "set_exception(exc) to mark success or failure, respectively, "
                "after the Status has been instantiated.", DeprecationWarning)

        self._callback_thread = threading.Thread(target=self._run_callbacks,
                                                 daemon=True,
                                                 name=self._tname)
        self._callback_thread.start()

        if done:
            if success:
                self.set_finished()
            else:
                exc = UnknownStatusFailure(
                    f"The status {self!r} has failed. To obtain more specific, "
                    "helpful errors in the future, update the Device to use "
                    "set_exception(...) instead of setting success=False "
                    "at __init__ time.")
                self.set_exception(exc)

    @property
    def timeout(self):
        """
        The timeout for this action.

        This is set when the Status is created, and it cannot be changed.
        """
        return self._timeout

    @property
    def settle_time(self):
        """
        A delay between when :meth:`set_finished` is called and when the
        Status is marked as done.

        This is set when the Status is created, and it cannot be changed.
        """
        return self._settle_time

    @property
    def done(self):
        """
        Boolean indicating whether the associated operation has completed.

        This is set to True at __init__ time or by calling
        :meth:`set_finished`, :meth:`set_exception`, or (deprecated)
        :meth:`_finished`. Once True, it can never become False.
        """
        return self._event.is_set()

    @done.setter
    def done(self, value):
        # For now, allow this setter to work only if it has no effect.
        # In a future release, make this property not settable.
        if bool(self._event.is_set()) != bool(value):
            raise RuntimeError(
                "The done-ness of a status object cannot be changed by "
                "setting its `done` attribute directly. Call `set_finished()` "
                "or `set_exception(exc).")
        warn(
            "Do not set the `done` attribute of a status object directly. "
            "It should only be set indirectly by calling `set_finished()` "
            "or `set_exception(exc)`. "
            "Direct setting was never intended to be supported and it will be "
            "disallowed in a future release of ophyd, causing this code path "
            "to fail.", UserWarning)

    @property
    def success(self):
        """
        Boolean indicating whether associated operation has completed.

        This is set to True at __init__ time or by calling
        :meth:`set_finished`, :meth:`set_exception`, or (deprecated)
        :meth:`_finished`. Once True, it can never become False.
        """
        return self.done and self._exception is None

    @success.setter
    def success(self, value):
        # For now, allow this setter to work only if it has no effect.
        # In a future release, make this property not settable.
        if bool(self.success) != bool(value):
            raise RuntimeError(
                "The success state of a status object cannot be changed by "
                "setting its `success` attribute directly. Call "
                "`set_finished()` or `set_exception(exc)`.")
        warn(
            "Do not set the `success` attribute of a status object directly. "
            "It should only be set indirectly by calling `set_finished()` "
            "or `set_exception(exc)`. "
            "Direct setting was never intended to be supported and it will be "
            "disallowed in a future release of ophyd, causing this code path "
            "to fail.", UserWarning)

    def _handle_failure(self):
        pass

    def _settled(self):
        """Hook for when status has completed and settled"""
        pass

    def _run_callbacks(self):
        """
        Set the Event and run the callbacks.
        """
        if self.timeout is None:
            timeout = None
        else:
            timeout = self.timeout + self.settle_time
        if not self._settled_event.wait(timeout):
            # We have timed out. It's possible that set_finished() has already
            # been called but we got here before the settle_time timer expired.
            # And it's possible that, in the space between the above statement
            # timing out and grabbing the lock just below, set_exception(exc)
            # has been called. Both of these possibilities are accounted for.
            self.log.warning("%r has timed out", self)
            with self._externally_initiated_completion_lock:
                # Set the exception and mark the Status as done, unless
                # set_exception(exc) was called externally before we grabbed
                # the lock.
                if self._exception is None:
                    exc = StatusTimeoutError(
                        f"Status {self!r} failed to complete in specified timeout."
                    )
                    self._exception = exc
        # Mark this as "settled".
        try:
            self._settled()
        except Exception:
            # No alternative but to log this. We can't supersede set_exception,
            # and we have to continue and run the callbacks.
            self.log.exception("%r encountered error during _settled()", self)
        # Now we know whether we have succeeded or failed, either by the
        # timeout above or by set_exception(exc), so we can set the Event that
        # will mark this Status as done.
        with self._lock:
            self._event.set()
        if self._exception is not None:
            try:
                self._handle_failure()
            except Exception:
                self.log.exception(
                    "%r encountered an error during _handle_failure()", self)
        # The callbacks have access to self, from which they can distinguish
        # success or failure.
        for cb in self._callbacks:
            try:
                cb(self)
            except Exception:
                self.log.exception(
                    "An error was raised on a background thread while "
                    "running the callback %r(%r).", cb, self)
        self._callbacks.clear()

    def set_exception(self, exc):
        """
        Mark as finished but failed with the given Exception.

        This method should generally not be called by the *recipient* of this
        Status object, but only by the object that created and returned it.

        Parameters
        ----------
        exc: Exception
        """
        # Since we rely on this being raise-able later, check proactively to
        # avoid potentially very confusing failures.
        if not (isinstance(exc, Exception)
                or isinstance(exc, type) and issubclass(exc, Exception)):
            # Note that Python allows `raise Exception` or `raise Exception()`
            # so we allow a class or an instance here too.
            raise ValueError(f"Expected an Exception, got {exc!r}")

        # Ban certain Timeout subclasses that have special significance. This
        # would probably never come up except due to some rare user error, but
        # if it did it could be very confusing indeed!
        for exc_class in (StatusTimeoutError, WaitTimeoutError):
            if (isinstance(exc, exc_class)
                    or isinstance(exc, type) and issubclass(exc, exc_class)):
                raise ValueError(
                    f"{exc_class} has special significance and cannot be set "
                    "as the exception. Use a plain TimeoutError or some other "
                    "subclass thereof.")

        with self._externally_initiated_completion_lock:
            if self._externally_initiated_completion:
                raise InvalidState(
                    "Either set_finished() or set_exception() has "
                    f"already been called on {self!r}")
            self._externally_initiated_completion = True
            if isinstance(self._exception, StatusTimeoutError):
                # We have already timed out.
                return
            self._exception = exc
            self._settled_event.set()

    def set_finished(self):
        """
        Mark as finished successfully.

        This method should generally not be called by the *recipient* of this
        Status object, but only by the object that created and returned it.
        """
        with self._externally_initiated_completion_lock:
            if self._externally_initiated_completion:
                raise InvalidState(
                    "Either set_finished() or set_exception() has "
                    f"already been called on {self!r}")
            self._externally_initiated_completion = True
        # Note that in either case, the callbacks themselves are run from the
        # same thread. This just sets an Event, either from this thread (the
        # one calling set_finished) or the thread created below.
        if self.settle_time > 0:
            threading.Timer(self.settle_time, self._settled_event.set).start()
        else:
            self._settled_event.set()

    def _finished(self, success=True, **kwargs):
        """
        Inform the status object that it is done and if it succeeded.

        This method is deprecated. Please use :meth:`set_finished` or
        :meth:`set_exception`.

        .. warning::

           kwargs are not used, but are accepted because pyepics passes
           in a bunch of kwargs that we don't care about.  This allows
           the status object to be handed directly to pyepics (but
           this is probably a bad idea for other reasons).

           This may be deprecated in the future.

        Parameters
        ----------
        success : bool, optional
           if the action succeeded.
        """
        if success:
            self.set_finished()
        else:
            # success=False does not give any information about *why* it
            # failed, so set a generic exception.
            exc = UnknownStatusFailure(
                f"The status {self!r} has failed. To obtain more specific, "
                "helpful errors in the future, update the Device to use "
                "set_exception(...) instead of _finished(success=False).")
            self.set_exception(exc)

    def exception(self, timeout=None):
        """
        Return the exception raised by the action.

        If the action has completed successfully, return ``None``. If it has
        finished in error, return the exception.

        Parameters
        ----------
        timeout: Union[Number, None], optional
            If None (default) wait indefinitely until the status finishes.

        Raises
        ------
        WaitTimeoutError
            If the status has not completed within ``timeout`` (starting from
            when this method was called, not from the beginning of the action).
        """
        if not self._event.wait(timeout=timeout):
            raise WaitTimeoutError("Status has not completed yet.")
        return self._exception

    def wait(self, timeout=None):
        """
        Block until the action completes.

        When the action has finished successfully, return ``None``. If the
        action has failed, raise the exception.

        Parameters
        ----------
        timeout: Union[Number, None], optional
            If None (default) wait indefinitely until the status finishes.

        Raises
        ------
        WaitTimeoutError
            If the status has not completed within ``timeout`` (starting from
            when this method was called, not from the beginning of the action).
        StatusTimeoutError
            If the status has failed because the *timeout* that it was
            initialized with has expired.
        Exception
            This is ``status.exception()``, raised if the status has finished
            with an error.  This may include ``TimeoutError``, which
            indicates that the action itself raised ``TimeoutError``, distinct
            from ``WaitTimeoutError`` above.
        """
        if not self._event.wait(timeout=timeout):
            raise WaitTimeoutError("Status has not completed yet.")
        if self._exception is not None:
            raise self._exception

    @property
    def callbacks(self):
        """
        Callbacks to be run when the status is marked as finished
        """
        return self._callbacks

    @property
    def finished_cb(self):
        with self._lock:
            if len(self.callbacks) == 1:
                warn(
                    "The property `finished_cb` is deprecated, and must raise "
                    "an error if a status object has multiple callbacks. Use "
                    "the `callbacks` property instead.",
                    stacklevel=2)
                cb, = self.callbacks
                assert cb is not None
                return cb
            else:
                raise UseNewProperty(
                    "The deprecated `finished_cb` property "
                    "cannot be used for status objects that have "
                    "multiple callbacks. Use the `callbacks` "
                    "property instead.")

    def add_callback(self, callback):
        """
        Register a callback to be called once when the Status finishes.

        The callback will be called exactly once. If the Status is finished
        before a callback is added, it will be called immediately. This is
        threadsafe.

        The callback will be called regardless of success or failure. The
        callback has access to this status object, so it can distinguish success
        or failure by inspecting the object.

        Parameters
        ----------
        callback: callable
            Expected signature: ``callback(status)``.

            The signature ``callback()`` is also supported for
            backward-compatibility but will issue warnings. Support will be
            removed in a future release of ophyd.
        """
        # Handle func with signature callback() for back-compat.
        callback = adapt_old_callback_signature(callback)
        with self._lock:
            if self.done:
                # Call it once and do not hold a reference to it.
                callback(self)
            else:
                # Hold a strong reference to this. In other contexts we tend to
                # hold weak references to callbacks, but this is a single-shot
                # callback, so we will hold a strong reference until we call it,
                # and then clear this cache to drop the reference(s).
                self._callbacks.append(callback)

    @finished_cb.setter
    def finished_cb(self, cb):
        with self._lock:
            if not self.callbacks:
                warn(
                    "The setter `finished_cb` is deprecated, and must raise "
                    "an error if a status object already has one callback. Use "
                    "the `add_callback` method instead.",
                    stacklevel=2)
                self.add_callback(cb)
            else:
                raise UseNewProperty(
                    "The deprecated `finished_cb` setter cannot "
                    "be used for status objects that already "
                    "have one callback. Use the `add_callbacks` "
                    "method instead.")

    def __and__(self, other):
        """
        Returns a new 'composite' status object, AndStatus,
        with the same base API.

        It will finish when both `self` and `other` finish.
        """
        return AndStatus(self, other)
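
A minimal usage sketch of the Status API shown above (illustrative only: it
assumes a recent ophyd where `set_finished`, `wait`, `exception`, and
`add_callback` behave as documented here; the import path is an assumption):

from ophyd.status import StatusBase

st = StatusBase()

def on_done(status):
    # Runs exactly once when the status finishes, success or failure.
    print("finished:", status)

st.add_callback(on_done)

st.set_finished()       # producer side: mark the action as successful
st.wait(timeout=5)      # consumer side: returns None on success
assert st.exception() is None

# Composition: an AndStatus that finishes when both operands finish.
both = StatusBase() & StatusBase()
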
Example n. 11
0
class StatusBase:
    """
    This is a base class that provides callbacks to run when the
    specific operation has finished.

    Parameters
    ----------
    timeout : float, optional
        The default timeout to use for a blocking wait, and the amount of
        time to wait before marking the operation as failed
    settle_time : float, optional
        The amount of time to wait between the caller marking the status as
        completed and running the callbacks
    """
    def __init__(self,
                 *,
                 timeout=None,
                 settle_time=None,
                 done=False,
                 success=False):
        super().__init__()
        self._tname = None
        self._lock = threading.RLock()
        self._callbacks = deque()
        self._done = done
        self.success = success
        self.timeout = None

        self.log = LoggerAdapter(logger=logger, extra={'status': self})

        if settle_time is None:
            settle_time = 0.0

        self.settle_time = float(settle_time)

        if timeout is not None:
            self.timeout = float(timeout)

        if self.done:
            # in the case of a pre-completed status object,
            # don't handle timeout
            return

        if self.timeout is not None and self.timeout > 0.0:
            thread = threading.Thread(target=self._wait_and_cleanup,
                                      daemon=True,
                                      name=self._tname)
            self._timeout_thread = thread
            self._timeout_thread.start()

    @property
    def done(self):
        """
        Boolean indicating whether the associated operation has completed.

        This is set to True at __init__ time or by calling `_finished()`. Once
        True, it can never become False.
        """
        return self._done

    @done.setter
    def done(self, value):
        # For now, allow this setter to work only if it has no effect.
        # In a future release, make this property not settable.
        if bool(self._done) != bool(value):
            raise RuntimeError(
                "The done-ness of a status object cannot be changed by "
                "setting its `done` attribute directly. Call `_finished()`.")
        warn(
            "Do not set the `done` attribute of a status object directly. "
            "It should only be set indirectly by calling `_finished()`. "
            "Direct setting was never intended to be supported and it will be "
            "disallowed in a future release of ophyd, causing this code path "
            "to fail.", UserWarning)

    def _wait_and_cleanup(self):
        """Handle timeout"""
        try:
            if self.timeout is not None:
                timeout = self.timeout + self.settle_time
            else:
                timeout = None
            wait(self, timeout=timeout, poll_rate=0.2)
        except TimeoutError:
            with self._lock:
                if self.done:
                    # Avoid race condition with settling.
                    return
                self.log.warning('timeout after %.2f seconds', timeout)
                try:
                    self._handle_failure()
                finally:
                    self._finished(success=False)
        except RuntimeError:
            pass
        finally:
            self._timeout_thread = None

    def _handle_failure(self):
        pass

    def _settled(self):
        """Hook for when status has completed and settled"""
        pass

    def _settle_then_run_callbacks(self, success=True):
        # wait until the settling time is done to mark completion
        if self.settle_time > 0.0:
            time.sleep(self.settle_time)

        with self._lock:
            if self.done:
                # We timed out while waiting for the settle time.
                return
            self.success = success
            self._done = True
            self._settled()

            for cb in self._callbacks:
                cb()
            self._callbacks.clear()

    def _finished(self, success=True, **kwargs):
        """Inform the status object that it is done and if it succeeded

        .. warning::

           kwargs are not used, but are accepted because pyepics passes
           in a bunch of kwargs that we don't care about.  This allows
           the status object to be handed directly to pyepics (but
           this is probably a bad idea for other reasons).

           This may be deprecated in the future.

        Parameters
        ----------
        success : bool, optional
           Whether the action succeeded.
        """
        if self.done:
            self.log.info('finished')
            return

        if success and self.settle_time > 0:
            # delay gratification until the settle time is up
            self._settle_thread = threading.Thread(
                target=self._settle_then_run_callbacks,
                daemon=True,
                kwargs=dict(success=success),
            )
            self._settle_thread.start()
        else:
            self._settle_then_run_callbacks(success=success)
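
    # Illustrative aside, not part of the class: the timeout thread above
    # waits for ``timeout + settle_time`` so that an action that finished
    # in time is not failed while it is still settling, e.g.:
    #
    #     st = StatusBase(timeout=1.0, settle_time=0.5)
    #     # _finished(success=True) at t=0.9 starts a 0.5 s settle;
    #     # the timeout thread allows 1.5 s, so st ends up successful.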

    @property
    def callbacks(self):
        """
        Callbacks to be run when the status is marked as finished

        The callback has no arguments ::

            def cb() -> None:

        """
        return self._callbacks

    @property
    @_locked
    def finished_cb(self):
        if len(self.callbacks) == 1:
            warn(
                "The property `finished_cb` is deprecated, and must raise "
                "an error if a status object has multiple callbacks. Use "
                "the `callbacks` property instead.",
                stacklevel=2)
            cb, = self.callbacks
            assert cb is not None
            return cb
        else:
            raise UseNewProperty("The deprecated `finished_cb` property "
                                 "cannot be used for status objects that have "
                                 "multiple callbacks. Use the `callbacks` "
                                 "property instead.")

    @_locked
    def add_callback(self, cb):
        if self.done:
            cb()
        else:
            self._callbacks.append(cb)

    @finished_cb.setter
    @_locked
    def finished_cb(self, cb):
        if not self.callbacks:
            warn(
                "The setter `finished_cb` is deprecated, and must raise "
                "an error if a status object already has one callback. Use "
                "the `add_callback` method instead.",
                stacklevel=2)
            self.add_callback(cb)
        else:
            raise UseNewProperty("The deprecated `finished_cb` setter cannot "
                                 "be used for status objects that already "
                                 "have one callback. Use the `add_callbacks` "
                                 "method instead.")

    def __and__(self, other):
        """
        Returns a new 'composite' status object, AndStatus,
        with the same base API.

        It will finish when both `self` and `other` finish.
        """
        return AndStatus(self, other)
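
A hedged sketch of this older StatusBase's timeout path (illustrative only:
it assumes the class above, with its module-level `wait` helper and `logger`
in scope; the timings are approximate because the timeout thread polls):

import time

st = StatusBase(timeout=0.5)   # a background timeout thread starts here

# Old-style callback: takes no arguments (see the `callbacks` docstring).
st.add_callback(lambda: print("done, success =", st.success))

time.sleep(1.0)                    # give the timeout thread time to fire
assert st.done and not st.success  # timed out, so marked as failed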