Example #1
def _local_to_local(
    worker_id: int,
    src: str,
    dest: str,
    status: Status,
    manager: SyncManager,
    hooks: Hooks,
    dryrun: bool,
    monitoring_interval: Union[int, float],
    sync_interval: Union[int, float],
):
    hooks.run_before()

    # Determine this worker's byte offset and how many blocks it owns.
    startpos, maxblock = _get_range(worker_id, status)
    _log(worker_id, f"Start sync({src} -> {dest}) {maxblock} blocks")
    srcdev = io.open(src, "rb+")
    destdev = io.open(dest, "rb+")
    srcdev.seek(startpos)
    destdev.seek(startpos)

    t_last = timeit.default_timer()
    try:
        for src_block, dest_block, _ in zip(
                _get_blocks(srcdev, status.block_size),
                _get_blocks(destdev, status.block_size),
                range(maxblock),
        ):
            if manager.suspended:
                _log(worker_id, "Waiting for resume...")
                manager._wait_resuming()
            if manager.canceled:
                break

            if src_block != dest_block:
                if not dryrun:
                    # Rewind dest by the block just read and overwrite it
                    # with the source block.
                    destdev.seek(-len(src_block), io.SEEK_CUR)
                    destdev.write(src_block)
                    destdev.flush()
                status.add_block("diff")
            else:
                status.add_block("same")

            t_cur = timeit.default_timer()
            if monitoring_interval <= t_cur - t_last:
                hooks.run_monitor(status)
                t_last = t_cur
            if 0 < sync_interval:
                time.sleep(sync_interval)
    except Exception as e:
        _log(worker_id, msg=str(e), exc_info=True)
        hooks.run_on_error(e, status)
    finally:
        srcdev.close()
        destdev.close()
    hooks.run_after(status)
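The worker above leans on a few helpers that these examples do not show, most importantly the _get_blocks generator it zips over. A minimal sketch of what such a helper could look like, assuming it simply yields fixed-size reads until EOF (the library's actual implementation may differ):

from typing import BinaryIO, Generator


def _get_blocks(fileobj: BinaryIO, block_size: int) -> Generator[bytes, None, None]:
    # Yield consecutive blocks of at most `block_size` bytes until EOF.
    while True:
        block = fileobj.read(block_size)
        if not block:
            return
        yield block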
Example #2
def fake_status():
    return Status(
        workers=2,
        block_size=500,
        src_size=1_000,
        dest_size=1_000,
    )
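fake_status reads like a test fixture: two workers over a 1,000-byte source and destination split into 500-byte blocks. A hypothetical test driving it the same way the worker loop in Example #1 does (only add_block, which Example #1 calls, is assumed to exist on Status):

def test_fake_status_block_accounting():
    status = fake_status()
    # 1_000 bytes at 500 bytes per block -> 2 blocks in total.
    assert status.src_size // status.block_size == 2
    # The worker loop records each compared block as "same" or "diff".
    status.add_block("same")
    status.add_block("diff")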
Example #3
def local_to_local(
    src: str,
    dest: str,
    block_size: Union[str, int] = ByteSizes.MiB,
    workers: int = 1,
    create_dest: bool = False,
    wait: bool = False,
    dryrun: bool = False,
    on_before: Optional[Callable[..., Any]] = None,
    on_after: Optional[Callable[[Status], Any]] = None,
    monitor: Optional[Callable[[Status], Any]] = None,
    on_error: Optional[Callable[[Exception, Status], Any]] = None,
    monitoring_interval: Union[int, float] = 1,
    sync_interval: Union[int, float] = 0,
) -> Tuple[Optional[SyncManager], Status]:
    status = Status(
        workers=workers,
        block_size=_get_block_size(block_size),
        src_size=_get_size(src),
    )
    if create_dest:
        _do_create(dest, status.src_size)
    status.dest_size = _get_size(dest)
    manager = SyncManager()
    sync_options = {
        "src": src,
        "dest": dest,
        "status": status,
        "manager": manager,
        "hooks": Hooks(
            on_before=on_before,
            on_after=on_after,
            monitor=monitor,
            on_error=on_error,
        ),
        "dryrun": dryrun,
        "monitoring_interval": monitoring_interval,
        "sync_interval": sync_interval,
    }
    return _sync(manager, status, workers, _local_to_local, sync_options, wait)
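A hedged usage sketch of local_to_local; the paths are illustrative, and the monitor callback only relies on the Status object being printable:

if __name__ == "__main__":
    # Illustrative paths; src and dest can be regular files or block devices.
    manager, status = local_to_local(
        "/tmp/source.img",
        "/tmp/target.img",
        block_size=ByteSizes.MiB,    # default shown in the signature above
        workers=2,
        create_dest=True,            # size dest to match src before syncing
        wait=True,                   # block until the workers finish
        monitor=lambda s: print(s),  # invoked roughly once per monitoring_interval per worker
    )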
Example #4
def local_to_remote(
    src: str,
    dest: str,
    block_size: Union[str, int] = ByteSizes.MiB,
    workers: int = 1,
    create_dest: bool = False,
    wait: bool = False,
    dryrun: bool = False,
    on_before: Optional[Callable[..., Any]] = None,
    on_after: Optional[Callable[[Status], Any]] = None,
    monitor: Optional[Callable[[Status], Any]] = None,
    on_error: Optional[Callable[[Exception, Status], Any]] = None,
    monitoring_interval: Union[int, float] = 1,
    sync_interval: Union[int, float] = 0,
    hash1: str = "sha256",
    read_server_command: Optional[str] = None,
    write_server_command: Optional[str] = None,
    allow_load_system_host_keys: bool = True,
    compress: bool = True,
    **ssh_config,
) -> Tuple[Optional[SyncManager], Status]:
    status: Status = Status(
        workers=workers,
        block_size=_get_block_size(block_size),
        src_size=_get_size(src),
    )

    ssh = _connect_ssh(allow_load_system_host_keys, compress, **ssh_config)
    if sftp := ssh.open_sftp():
        if read_server_command is None:
            sftp.put(DEFAULT_READ_SERVER_SCRIPT_PATH, READ_SERVER_SCRIPT_NAME)
            read_server_command = f"python3 {READ_SERVER_SCRIPT_NAME}"
        if write_server_command is None:
            sftp.put(DEFAULT_WRITE_SERVER_SCRIPT_PATH,
                     WRITE_SERVER_SCRIPT_NAME)
            write_server_command = f"python3 {WRITE_SERVER_SCRIPT_NAME}"
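Example #4 is cut off before it builds its sync options, but the signature already shows how it is meant to be called. A hypothetical invocation, assuming the extra **ssh_config keywords are forwarded to paramiko's SSHClient.connect:

manager, status = local_to_remote(
    "/dev/sdb",                     # local source device (illustrative)
    "/dev/vg0/backup",              # path on the remote host (illustrative)
    workers=4,
    create_dest=True,
    wait=True,
    hostname="backup.example.com",  # assumed to be passed through to paramiko
    username="root",
)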
Example #5
def _remote_to_local(
    worker_id: int,
    ssh: paramiko.SSHClient,
    src: str,
    dest: str,
    status: Status,
    manager: SyncManager,
    dryrun: bool,
    monitoring_interval: Union[int, float],
    sync_interval: Union[int, float],
    hash1: str,
    read_server_command: str,
    hooks: Hooks,
):
    hash_ = getattr(hashlib, hash1)
    hash_len = hash_().digest_size

    hooks.run_before()

    # exec_command() returns (stdin, stdout, stderr); keep stdin for commands
    # and reopen the channel's stdout in binary mode for exact-length reads.
    reader_stdin, *_ = ssh.exec_command(read_server_command)
    reader_stdout = reader_stdin.channel.makefile("rb")
    reader_stdin.write(f"{src}\n")
    reader_stdout.readline()
    startpos, maxblock = _get_range(worker_id, status)
    _log(worker_id, f"Start sync({src} -> {dest}) {maxblock} blocks")
    reader_stdin.write(
        f"{status.block_size}\n{hash1}\n{startpos}\n{maxblock}\n")

    t_last = timeit.default_timer()
    with open(dest, "rb+") as fileobj:
        fileobj.seek(startpos)
        try:
            for dest_block, _ in zip(_get_blocks(fileobj, status.block_size),
                                     range(maxblock)):
                if manager.suspended:
                    _log(worker_id, "Waiting for resume...")
                    manager._wait_resuming()
                if manager.canceled:
                    break

                src_block_hash: bytes = reader_stdout.read(hash_len)
                dest_block_hash: bytes = hash_(dest_block).digest()
                if src_block_hash != dest_block_hash:
                    if not dryrun:
                        reader_stdin.write(DIFF)
                        src_block = reader_stdout.read(status.block_size)
                        fileobj.seek(-len(src_block), io.SEEK_CUR)
                        fileobj.write(src_block)
                        fileobj.flush()
                    else:
                        reader_stdin.write(SKIP)
                    status.add_block("diff")
                else:
                    reader_stdin.write(SKIP)
                    status.add_block("same")

                t_cur = timeit.default_timer()
                if monitoring_interval <= t_cur - t_last:
                    hooks.run_monitor(status)
                    t_last = t_cur
                if 0 < sync_interval:
                    time.sleep(sync_interval)
        except Exception as e:
            _log(worker_id, msg=str(e), exc_info=True)
            hooks.run_on_error(e, status)
        finally:
            reader_stdin.close()
            reader_stdout.close()
        hooks.run_after(status)
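The client side above implies the rough shape of the remote read server: it receives a path, answers with one line (Example #6 uses the same command to query the source size), then streams one digest per block and sends the raw block only when asked. The sketch below is an assumption, not the library's bundled script; the DIFF/SKIP byte values and framing are illustrative:

import hashlib
import io
import sys

DIFF, SKIP = b"1", b"0"  # assumed one-byte control codes


def serve(stdin=sys.stdin.buffer, stdout=sys.stdout.buffer):
    path = stdin.readline().strip().decode()
    with open(path, "rb") as dev:
        dev.seek(0, io.SEEK_END)
        stdout.write(f"{dev.tell()}\n".encode())  # the client reads one line back
        stdout.flush()
        block_size = int(stdin.readline())
        hash_name = stdin.readline().strip().decode()
        startpos = int(stdin.readline())
        maxblock = int(stdin.readline())
        dev.seek(startpos)
        for _ in range(maxblock):
            block = dev.read(block_size)
            if not block:
                break
            stdout.write(getattr(hashlib, hash_name)(block).digest())
            stdout.flush()
            if stdin.read(len(DIFF)) == DIFF:  # client requests the full block
                stdout.write(block)
                stdout.flush()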
Example #6
    sync_interval: Union[int, float] = 0,
    hash1: str = "sha256",
    allow_load_system_host_keys: bool = True,
    compress: bool = True,
    read_server_command: Optional[str] = None,
    **ssh_config,
):
    ssh = _connect_ssh(allow_load_system_host_keys, compress, **ssh_config)
    if read_server_command is None and (sftp := ssh.open_sftp()):
        sftp.put(DEFAULT_READ_SERVER_SCRIPT_PATH, READ_SERVER_SCRIPT_NAME)
        read_server_command = f"python3 {READ_SERVER_SCRIPT_NAME}"

    status = Status(
        workers=workers,
        block_size=(ByteSizes.parse_readable_byte_size(block_size)
                    if isinstance(block_size, str) else block_size),
        src_size=_get_remotedev_size(
            ssh, read_server_command, src),  # type: ignore[arg-type]
    )
    if create_dest:
        _do_create(dest, status.src_size)
    status.dest_size = _get_size(dest)
    manager = SyncManager()
    sync_options = {
        "ssh": ssh,
        "src": src,
        "dest": dest,
        "status":