def _local_to_local(
    worker_id: int,
    src: str,
    dest: str,
    status: Status,
    manager: SyncManager,
    hooks: Hooks,
    dryrun: bool,
    monitoring_interval: Union[int, float],
    sync_interval: Union[int, float],
):
    """Synchronize ``src`` onto ``dest`` block-by-block, both local files.

    Reads the two files in lockstep over this worker's block range and
    rewrites only the blocks of ``dest`` that differ from ``src``.
    Per-block results are tallied on ``status`` ("diff"/"same"); ``hooks``
    receives lifecycle and periodic monitoring callbacks; ``manager``
    provides cooperative suspend/resume and cancellation.  When ``dryrun``
    is true, differences are counted but nothing is written.

    Exceptions are logged and forwarded to ``hooks.run_on_error`` rather
    than propagated; both files are always closed.
    """
    hooks.run_before()
    startpos, maxblock = _get_range(worker_id, status)
    _log(worker_id, f"Start sync({src} -> {dest}) {maxblock} blocks")
    srcdev = io.open(src, "rb+")
    destdev = io.open(dest, "rb+")
    srcdev.seek(startpos)
    destdev.seek(startpos)
    t_last = timeit.default_timer()
    try:
        for src_block, dest_block, _ in zip(
            _get_blocks(srcdev, status.block_size),
            _get_blocks(destdev, status.block_size),
            range(maxblock),  # caps the iteration at this worker's range
        ):
            if manager.suspended:
                _log(worker_id, "Waiting for resume...")
                manager._wait_resuming()
            if manager.canceled:
                break
            if src_block != dest_block:
                if not dryrun:
                    # Rewind dest past the block just read, then overwrite
                    # it with the source data.  BUG FIX: the original wrote
                    # the block back to *srcdev*, which corrupted the source
                    # stream position and never updated the destination.
                    destdev.seek(-len(src_block), io.SEEK_CUR)
                    destdev.write(src_block)
                    destdev.flush()
                status.add_block("diff")
            else:
                status.add_block("same")
            t_cur = timeit.default_timer()
            if monitoring_interval <= t_cur - t_last:
                hooks.run_monitor(status)
                t_last = t_cur
            if 0 < sync_interval:
                # Optional throttle between blocks to limit I/O pressure.
                time.sleep(sync_interval)
    except Exception as e:
        _log(worker_id, msg=str(e), exc_info=True)
        hooks.run_on_error(e, status)
    finally:
        srcdev.close()
        destdev.close()
    hooks.run_after(status)
def test_suspend_and_resume():
    """suspend()/resume() toggle ``suspended``; ``_wait_resuming`` waits
    on the manager's internal ``_suspend`` event."""
    sm = SyncManager()

    sm.suspend()
    assert sm.suspended

    sm.resume()
    assert not sm.suspended

    # Swap the internal event for a mock so the wait call is observable.
    sm._suspend = Mock()
    sm._wait_resuming()
    sm._suspend.wait.assert_called_once()
def _remote_to_local(
    worker_id: int,
    ssh: paramiko.SSHClient,
    src: str,
    dest: str,
    status: Status,
    manager: SyncManager,
    dryrun: bool,
    monitoring_interval: Union[int, float],
    sync_interval: Union[int, float],
    hash1: str,
    read_server_command: str,
    hooks: Hooks,
):
    """Pull differing blocks of remote ``src`` into local ``dest`` over SSH.

    Launches ``read_server_command`` on the remote host and talks a simple
    line/byte protocol over its stdin/stdout: for each block the remote side
    sends the block's ``hash1`` digest; this worker compares it against the
    digest of the local block and replies ``DIFF`` (send me the block) or
    ``SKIP``.  Results are tallied on ``status``; ``hooks`` receives
    lifecycle and periodic monitoring callbacks; ``manager`` provides
    cooperative suspend/resume and cancellation.  With ``dryrun`` set,
    mismatches are counted but ``SKIP`` is sent and nothing is written.

    NOTE(review): the exact handshake (path line, then blank readline, then
    "block_size/hash/startpos/maxblock") mirrors what the remote read server
    expects — confirm against the server implementation before changing
    message order.
    """
    # Resolve the hash constructor by name (e.g. "sha1") and its digest size,
    # which fixes how many bytes to read per remote hash.
    hash_ = getattr(hashlib, hash1)
    hash_len = hash_().digest_size
    hooks.run_before()
    # exec_command returns (stdin, stdout, stderr); the reader's stdout is
    # re-wrapped as a binary file so raw digests/blocks can be read.
    reader_stdin, *_ = ssh.exec_command(read_server_command)
    reader_stdout = reader_stdin.channel.makefile("rb")
    reader_stdin.write(f"{src}\n")
    # Consume the server's acknowledgement line before sending parameters.
    reader_stdout.readline()
    startpos, maxblock = _get_range(worker_id, status)
    _log(worker_id, f"Start sync({src} -> {dest}) {maxblock} blocks")
    reader_stdin.write(
        f"{status.block_size}\n{hash1}\n{startpos}\n{maxblock}\n")
    t_last = timeit.default_timer()
    with open(dest, "rb+") as fileobj:
        fileobj.seek(startpos)
        try:
            for dest_block, _ in zip(_get_blocks(fileobj, status.block_size),
                                     range(maxblock)):
                if manager.suspended:
                    _log(worker_id, "Waiting for resume...")
                    manager._wait_resuming()
                if manager.canceled:
                    break
                # One digest per block arrives from the remote side; compare
                # against the digest of the local block just read.
                src_block_hash: bytes = reader_stdout.read(hash_len)
                dest_block_hash: bytes = hash_(dest_block).digest()
                if src_block_hash != dest_block_hash:
                    if not dryrun:
                        # Request the full block, then overwrite the local
                        # block in place (seek back past what was just read).
                        reader_stdin.write(DIFF)
                        src_block = reader_stdout.read(status.block_size)
                        fileobj.seek(-len(src_block), io.SEEK_CUR)
                        fileobj.write(src_block)
                        fileobj.flush()
                    else:
                        # Dry run: record the mismatch but skip the transfer.
                        reader_stdin.write(SKIP)
                    status.add_block("diff")
                else:
                    reader_stdin.write(SKIP)
                    status.add_block("same")
                t_cur = timeit.default_timer()
                if monitoring_interval <= t_cur - t_last:
                    hooks.run_monitor(status)
                    t_last = t_cur
                if 0 < sync_interval:
                    # Optional throttle between blocks.
                    time.sleep(sync_interval)
        except Exception as e:
            _log(worker_id, msg=str(e), exc_info=True)
            hooks.run_on_error(e, status)
        finally:
            reader_stdin.close()
            reader_stdout.close()
    hooks.run_after(status)