def test_wait_sync():
    """wait_sync() must join every registered worker exactly once."""
    first, second = Mock(), Mock()
    manager = SyncManager()
    manager.workers.extend([first, second])
    manager.wait_sync()
    for worker in (first, second):
        worker.join.assert_called_once()
Exemple #2
0
def _local_to_local(
    worker_id: int,
    src: str,
    dest: str,
    status: Status,
    manager: SyncManager,
    hooks: Hooks,
    dryrun: bool,
    monitoring_interval: Union[int, float],
    sync_interval: Union[int, float],
):
    """Worker loop: sync one byte range of a local file onto another local file.

    Compares ``src`` and ``dest`` block-by-block over the range assigned to
    this worker (from ``_get_range``) and overwrites differing destination
    blocks unless ``dryrun`` is set. Honors suspend/cancel signals from
    ``manager`` and fires ``hooks`` callbacks before/during/after the loop.

    Args:
        worker_id: 1-based id of this worker; selects its block range.
        src: path of the source file (opened read/write, only read here).
        dest: path of the destination file (opened read/write).
        status: shared Status object; receives per-block "diff"/"same" counts.
        manager: shared SyncManager used for suspend/resume/cancel control.
        hooks: callback bundle (before/monitor/on_error/after).
        dryrun: when True, count diffs but never write to dest.
        monitoring_interval: minimum seconds between monitor callbacks.
        sync_interval: optional sleep between blocks to limit I/O pressure.
    """
    hooks.run_before()

    startpos, maxblock = _get_range(worker_id, status)
    _log(worker_id, f"Start sync({src} -> {dest}) {maxblock} blocks")
    srcdev = io.open(src, "rb+")
    destdev = io.open(dest, "rb+")
    srcdev.seek(startpos)
    destdev.seek(startpos)

    t_last = timeit.default_timer()
    try:
        # range(maxblock) caps the number of blocks this worker handles.
        for src_block, dest_block, _ in zip(
                _get_blocks(srcdev, status.block_size),
                _get_blocks(destdev, status.block_size),
                range(maxblock),
        ):
            if manager.suspended:
                _log(worker_id, "Waiting for resume...")
                manager._wait_resuming()
            if manager.canceled:
                break

            if src_block != dest_block:
                if not dryrun:
                    # Rewind over the block just read, then overwrite it.
                    destdev.seek(-len(src_block), io.SEEK_CUR)
                    # BUG FIX: write the source block to the *destination*
                    # device. The original wrote into srcdev, which never
                    # updated dest and clobbered the next source block.
                    destdev.write(src_block)
                    destdev.flush()
                status.add_block("diff")
            else:
                status.add_block("same")

            t_cur = timeit.default_timer()
            # Throttle monitor callbacks to at most once per interval.
            if monitoring_interval <= t_cur - t_last:
                hooks.run_monitor(status)
                t_last = t_cur
            # Optional pacing between blocks.
            if 0 < sync_interval:
                time.sleep(sync_interval)
    except Exception as e:
        _log(worker_id, msg=str(e), exc_info=True)
        hooks.run_on_error(e, status)
    finally:
        srcdev.close()
        destdev.close()
    hooks.run_after(status)
def test_suspend_and_resume():
    """suspend()/resume() toggle the flag; _wait_resuming waits on the event."""
    manager = SyncManager()

    manager.suspend()
    assert manager.suspended
    manager.resume()
    assert not manager.suspended

    # Replace the internal event so we can observe the wait() call.
    manager._suspend = Mock()
    manager._wait_resuming()
    manager._suspend.wait.assert_called_once()
Exemple #4
0
def _sync(
    manager: SyncManager,
    status: Status,
    workers: int,
    sync: Callable,
    sync_options: Dict[str, Any],
    wait: bool = False,
) -> Tuple[Optional[SyncManager], Status]:
    for i in range(1, workers + 1):
        sync_options["worker_id"] = i
        worker = threading.Thread(target=sync, kwargs=sync_options)
        worker.start()
        manager.workers.append(worker)
    if wait:
        manager.wait_sync()
        return None, status
    return manager, status
def test_finished():
    """finished is True only while no registered worker thread is alive."""
    fake_worker = Mock()
    fake_worker.is_alive.return_value = False
    manager = SyncManager()
    manager.workers.append(fake_worker)
    assert manager.finished

    fake_worker.is_alive.return_value = True
    assert not manager.finished
Exemple #6
0
def local_to_local(
    src: str,
    dest: str,
    block_size: Union[str, int] = ByteSizes.MiB,
    workers: int = 1,
    create_dest: bool = False,
    wait: bool = False,
    dryrun: bool = False,
    on_before: Optional[Callable[..., Any]] = None,
    on_after: Optional[Callable[[Status], Any]] = None,
    monitor: Optional[Callable[[Status], Any]] = None,
    on_error: Optional[Callable[[Exception, Status], Any]] = None,
    monitoring_interval: Union[int, float] = 1,
    sync_interval: Union[int, float] = 0,
) -> Tuple[Optional[SyncManager], Status]:
    """Synchronize a local file onto another local file using worker threads.

    Builds the shared Status/SyncManager/Hooks objects, optionally creates
    the destination file, then hands off to ``_sync`` with
    ``_local_to_local`` as the per-worker routine.

    Returns:
        ``(manager, status)``; the manager is None when ``wait`` is True
        (all workers have already been joined by ``_sync``).
    """
    status = Status(
        workers=workers,
        block_size=_get_block_size(block_size),
        src_size=_get_size(src),
    )
    # Optionally pre-create dest sized like src before measuring it.
    if create_dest:
        _do_create(dest, status.src_size)
    status.dest_size = _get_size(dest)

    manager = SyncManager()
    hooks = Hooks(
        on_before=on_before,
        on_after=on_after,
        monitor=monitor,
        on_error=on_error,
    )
    worker_options = {
        "src": src,
        "dest": dest,
        "status": status,
        "manager": manager,
        "hooks": hooks,
        "dryrun": dryrun,
        "monitoring_interval": monitoring_interval,
        "sync_interval": sync_interval,
    }
    return _sync(manager, status, workers, _local_to_local, worker_options,
                 wait)
Exemple #7
0
def _remote_to_local(
    worker_id: int,
    ssh: paramiko.SSHClient,
    src: str,
    dest: str,
    status: Status,
    manager: SyncManager,
    dryrun: bool,
    monitoring_interval: Union[int, float],
    sync_interval: Union[int, float],
    hash1: str,
    read_server_command: str,
    hooks: Hooks,
):
    """Worker loop: pull differing blocks of a remote file into a local one.

    Starts a remote "read server" process over SSH (``read_server_command``),
    sends it the source path plus block/hash parameters, then walks the local
    destination block-by-block in lock-step with the server: for each block
    the server sends one hash; if it differs from the local block's hash the
    worker requests the raw block (DIFF) and overwrites the local block in
    place, otherwise it tells the server to skip (SKIP). Honors
    suspend/cancel from ``manager`` and fires ``hooks`` callbacks.
    """
    # Resolve the hash constructor by name (e.g. "md5" -> hashlib.md5).
    hash_ = getattr(hashlib, hash1)
    hash_len = hash_().digest_size

    hooks.run_before()

    # exec_command returns (stdin, stdout, stderr); replies are read from a
    # binary file wrapped around the same channel instead of the text stdout.
    reader_stdin, *_ = ssh.exec_command(read_server_command)
    reader_stdout = reader_stdin.channel.makefile("rb")
    # Handshake: send the source path, wait for the server's acknowledgement
    # line before continuing.
    reader_stdin.write(f"{src}\n")
    reader_stdout.readline()
    startpos, maxblock = _get_range(worker_id, status)
    _log(worker_id, f"Start sync({src} -> {dest}) {maxblock} blocks")
    # Tell the server the block size, hash algorithm, and which range of
    # blocks this worker covers.
    reader_stdin.write(
        f"{status.block_size}\n{hash1}\n{startpos}\n{maxblock}\n")

    t_last = timeit.default_timer()
    with open(dest, "rb+") as fileobj:
        fileobj.seek(startpos)
        try:
            # range(maxblock) caps the number of blocks this worker handles.
            for dest_block, _ in zip(_get_blocks(fileobj, status.block_size),
                                     range(maxblock)):
                if manager.suspended:
                    _log(worker_id, "Waiting for resume...")
                    manager._wait_resuming()
                if manager.canceled:
                    break

                # One fixed-length hash per block arrives from the server.
                src_block_hash: bytes = reader_stdout.read(hash_len)
                dest_block_hash: bytes = hash_(dest_block).digest()
                if src_block_hash != dest_block_hash:
                    if not dryrun:
                        # Request the raw block, then rewind over the block
                        # just read and overwrite it in place.
                        reader_stdin.write(DIFF)
                        src_block = reader_stdout.read(status.block_size)
                        fileobj.seek(-len(src_block), io.SEEK_CUR)
                        fileobj.write(src_block)
                        fileobj.flush()
                    else:
                        # Dry run: count the diff but let the server skip it.
                        reader_stdin.write(SKIP)
                    status.add_block("diff")
                else:
                    reader_stdin.write(SKIP)
                    status.add_block("same")

                t_cur = timeit.default_timer()
                # Throttle monitor callbacks to at most once per interval.
                if monitoring_interval <= t_cur - t_last:
                    hooks.run_monitor(status)
                    t_last = t_cur
                # Optional pacing between blocks.
                if 0 < sync_interval:
                    time.sleep(sync_interval)
        except Exception as e:
            _log(worker_id, msg=str(e), exc_info=True)
            hooks.run_on_error(e, status)
        finally:
            reader_stdin.close()
            reader_stdout.close()
        hooks.run_after(status)
Exemple #8
0
        workers=workers,
        block_size=_get_block_size(block_size),
        src_size=_get_size(src),
    )

    ssh = _connect_ssh(allow_load_system_host_keys, compress, **ssh_config)
    if sftp := ssh.open_sftp():
        if read_server_command is None:
            sftp.put(DEFAULT_READ_SERVER_SCRIPT_PATH, READ_SERVER_SCRIPT_NAME)
            read_server_command = f"python3 {READ_SERVER_SCRIPT_NAME}"
        if write_server_command is None:
            sftp.put(DEFAULT_WRITE_SERVER_SCRIPT_PATH,
                     WRITE_SERVER_SCRIPT_NAME)
            write_server_command = f"python3 {WRITE_SERVER_SCRIPT_NAME}"

    manager = SyncManager()
    sync_options = {
        "ssh":
        ssh,
        "src":
        src,
        "dest":
        dest,
        "status":
        status,
        "manager":
        manager,
        "create_dest":
        create_dest,
        "dryrun":
        dryrun,
def test_cancel_sync():
    """cancel_sync() switches the manager into the canceled state."""
    sync_manager = SyncManager()
    assert not sync_manager.canceled

    sync_manager.cancel_sync()
    assert sync_manager.canceled