Example #1
def move_with_path_safety_checks(full_source_path: T.Path,
                                 full_dest_path: T.Path) -> None:
    """
    Method that creates a hardlink at destination, with the latest
    mtime, and the hardlink from source, with checks that the source and
    destination are well-behaved

    @param full_source_path full path to the source file
    @param full_dest_path location full path to the destination

    """

    if not full_source_path.exists():
        raise exception.NoSourceFound(
            f"Source file {full_source_path} does not exist")
    if not full_dest_path.parent.exists():
        raise exception.NoParentForDestination(
            f"Source path exists {full_source_path} but destination parent {full_dest_path.parent} does not exist"
        )
    if full_dest_path.exists():
        raise exception.DestinationAlreadyExists(
            f"Destination {full_dest_path} already has an existing file")

    full_source_path.replace(full_dest_path)
    log.debug(f"{full_source_path} moved to {full_dest_path} ")
    file.touch(full_dest_path)
    log.info(f"File has been restored at {full_dest_path}")
Example #2
def drain(persistence: core.persistence.base.Persistence, *,
          force: bool = False) -> int:
    """ Drain phase """
    handler = _Handler(config.archive.handler)
    criteria = Filter(state=State.Staged(notified=True), stakeholder=Anything)

    try:
        with persistence.files(criteria) as staging_queue:
            # NOTE The returned files will be purged on exit of this
            # context manager. An exception MUST be raised to avoid that
            # (e.g., if we need to cancel the drain, or if the
            # downstream handler fails, etc.)
            if (count := len(staging_queue)) == 0:
                raise _StagingQueueEmpty()

            if count < config.archive.threshold and not force:
                raise _StagingQueueUnderThreshold(
                    f"Only {count} files to archive; use --force-drain to ignore the threshold")

            required_capacity = staging_queue.accumulator
            log.info(
                f"Checking downstream handler is ready for {human_size(required_capacity)}B...")
            handler.preflight(required_capacity)

            log.info("Handler is ready; beginning drain...")
            handler.consume(f.key for f in staging_queue)
            log.info(
                f"Successfully drained {count} files into the downstream handler")

    except _StagingQueueEmpty:
        log.info("Staging queue is empty")

    except _StagingQueueUnderThreshold as e:
        log.info(f"Skipping: {e}")

    except _HandlerBusy:
        log.warning("The downstream handler is busy; try again later...")

    except _DownstreamFull:
        log.error(
            "The downstream handler is reporting it is out of capacity and cannot proceed")
        return 1

    except _UnknownHandlerError:
        log.critical(
            "The downstream handler failed unexpectedly; please check its logs for details...")
        return 1

    return 0
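
The drain relies on raising an exception inside the persistence.files context manager to stop the staged files from being purged. A minimal sketch of how the sentinel exceptions caught above could be declared, assuming they are plain Exception subclasses (the real definitions may differ):

class _StagingQueueEmpty(Exception):
    """ Sketch: raised to abort the drain when there is nothing to archive """

class _StagingQueueUnderThreshold(Exception):
    """ Sketch: raised when the queue is below the configured archive threshold """

class _HandlerBusy(Exception):
    """ Sketch: raised when the downstream handler cannot accept work right now """

class _DownstreamFull(Exception):
    """ Sketch: raised when the downstream handler reports it is out of capacity """

class _UnknownHandlerError(Exception):
    """ Sketch: raised when the downstream handler fails for an unspecified reason """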
Example #3
def untrack(files: T.Iterable[T.Path]) -> None:
    """ Untrack the given files """
    for f in files:
        if not file.is_regular(f):
            # Skip non-regular files
            log.warning(f"Cannot untrack {f}: Doesn't exist or is not regular")
            log.info(
                "Contact HGI if a file exists in the vault, but has been deleted outside"
            )
            continue

        vault = _create_vault(f)
        if (branch := vault.branch(f)) is not None:
            try:
                vault.remove(branch, f)

            except core.vault.exception.VaultCorruption as e:
                # Corruption detected
                log.critical(f"Corruption detected: {e}")
                log.info("Contact HGI to resolve this corruption")

            except core.vault.exception.PermissionDenied as e:
                # User doesn't have permission to remove files
                log.error(f"Permission denied: {e}")

            except core.idm.exception.NoSuchIdentity as e:
                # IdM doesn't know about the vault's group
                log.critical(f"Unknown vault group: {e}")
                log.info("Contact HGI to resolve this inconsistency")

            except core.vault.exception.PhysicalVaultFile:
                # Untracking a physical vault file makes no sense, so skip it without logging
                pass
Example #4
def view(branch: Branch,
         view_mode: ViewContext,
         absolute: bool,
         idm: IDMBase.IdentityManager = idm) -> None:
    """ List the contents of the given branch

    :param branch: Which Vault branch we're going to look at
    :param view_mode:
        ViewContext.All: list all files,
        ViewContext.Here: list files in the current directory,
        ViewContext.Mine: list files owned by the current user
    :param absolute: Whether to print absolute paths instead of relative ones
    :param idm: Identity manager passed through to the vault
    """
    cwd = file.cwd()
    vault = _create_vault(cwd, idm)
    count = 0
    for path, _limbo_file in vault.list(branch):
        relative_path = relativise(path, cwd)
        if view_mode == ViewContext.Here and "/" in str(relative_path):
            continue
        elif (view_mode == ViewContext.Mine
              and path.stat().st_uid != os.getuid()):
            continue

        if branch == Branch.Limbo:
            time_to_live = config.deletion.limbo - (
                time.now() - time.epoch(_limbo_file.stat().st_mtime))
            print(path if absolute else relative_path,
                  f"{round(time_to_live / time.delta(hours=1), 1)} hours",
                  sep="\t")
        else:
            print(path if absolute else relative_path)

        count += 1
    context = (" in the current directory" if view_mode == ViewContext.Here
               else " owned by the current user" if view_mode == ViewContext.Mine
               else "")
    log.info(f"{branch} branch of the vault in {vault.root} "
             f"contains {count} files{context}")
Example #5
    def consume(self, files: T.Iterator[T.Path]) -> None:
        """
        Drain the files, NULL-delimited, through the handler's stdin

        @param   files                 File queue
        @raises  _UnknownHandlerError  Handler did not accept the queue
        """
        log = self.log
        handler = subprocess.Popen(self._handler, stdin=PIPE,
                                   stdout=DEVNULL,
                                   stderr=DEVNULL)
        for file in files:
            if not is_regular(file):
                log.error(
                    f"Skipping: {file} is not a regular file or does not exist")
                continue

            log.info(f"Draining: {file}")
            handler.stdin.write(bytes(file))
            handler.stdin.write(b"\0")

        handler.stdin.close()
        if handler.wait() != 0:
            raise _UnknownHandlerError()
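
consume() writes each path to the handler's stdin as a NUL-delimited byte string, so a downstream handler only has to split its stdin on b"\0". A minimal sketch of such a handler, which logs each path and exits non-zero on failure (a non-zero exit is what consume() surfaces as _UnknownHandlerError); this script is hypothetical:

#!/usr/bin/env python3
import sys

def main() -> int:
    # Read the whole NUL-delimited queue from stdin
    data = sys.stdin.buffer.read()
    for raw in data.split(b"\0"):
        if not raw:
            continue  # skip the empty field after the final NUL
        path = raw.decode(sys.getfilesystemencoding(), "surrogateescape")
        print(f"archiving {path}", file=sys.stderr)
    return 0

if __name__ == "__main__":
    sys.exit(main())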
Example #6
def add(branch: Branch, files: T.Iterable[T.Path]) -> None:
    """ Add the given files to the appropriate branch """
    for f in files:
        if not file.is_regular(f):
            # Skip non-regular files
            log.warning(f"Cannot add {f}: Doesn't exist or is not regular")
            continue

        try:
            vault = _create_vault(f)
            vault.add(branch, f)

        except core.vault.exception.VaultCorruption as e:
            # Corruption detected
            log.critical(f"Corruption detected: {e}")
            log.info("Contact HGI to resolve this corruption")

        except core.vault.exception.PermissionDenied as e:
            # User doesn't have permission to add files
            log.error(f"Permission denied: {e}")

        except core.vault.exception.PhysicalVaultFile as e:
            # Trying to add a vault file to the vault
            log.error(f"Cannot add: {e}")
Example #7
def main(argv: T.List[str] = sys.argv) -> None:
    args = usage.parse_args(argv[1:])

    log.info("Enter Sandman")

    # Cheery thoughts
    if args.weaponise:
        log.warning("Weaponised: Now I am become Death, "
                    "the destroyer of worlds")
    else:
        log.info("Dry Run: The filesystem will not be affected "
                 "and the drain phase will not run")

    persistence = Persistence(config.persistence, idm)

    # Sweep Phase
    log.info("Starting the sweep phase")

    try:
        if args.stats is not None:
            log.info(f"Walking mpistat output from {args.stats}")
            log.warning("mpistat data may not be up to date")
            walker = mpistatWalker(args.stats, *args.vaults)

        else:
            log.info("Walking the filesystem directly")
            log.warning("This is an expensive operation")
            walker = FilesystemWalker(*args.vaults)

    except InvalidVaultBases as e:
        # Safety checks failed on input Vault paths
        log.critical(e)
        sys.exit(1)

    Sweeper(walker, persistence, args.weaponise)

    # Drain Phase
    if args.weaponise:
        log.info("Starting the drain phase")
        if (exit_code := drain(persistence, force=args.force_drain)) != 0:
            sys.exit(exit_code)

    log.info("Off to Never, Neverland")