def test_open(tmpdir, fmt):
    disk = str(tmpdir.join("disk." + fmt))
    qemu_img.create(disk, fmt, size=1024**2)

    offset = 64 * 1024
    data = b"it works"

    with qemu_nbd.open(disk, fmt) as d:
        d.write(offset, data)
        d.flush()

    with qemu_nbd.open(disk, fmt, read_only=True) as d:
        assert d.read(offset, len(data)) == data


def test_full_backup_complete_chain(tmpdir, nbd_sock, checkpoint):
    depth = 3
    chunk_size = 1024**2
    disk_size = depth * chunk_size

    for i in range(depth):
        # Create disk based on the previous one.
        disk = str(tmpdir.join("disk.{}".format(i)))
        if i == 0:
            qemu_img.create(disk, "qcow2", size=disk_size)
        else:
            qemu_img.create(
                disk, "qcow2", backing="disk.{}".format(i - 1))

        # This data can be read only from this disk.
        with qemu_nbd.open(disk, "qcow2") as d:
            offset = i * chunk_size
            d.write(offset, b"%d\n" % offset)
            d.flush()

    # Start full backup and copy the data, verifying what we read.
    with backup.full_backup(
            tmpdir, disk, "qcow2", nbd_sock, checkpoint=checkpoint):
        verify_full_backup(nbd_sock, "sda")

    if checkpoint:
        bitmaps = list_bitmaps(disk)
        assert len(bitmaps) == 1
        assert bitmaps[0]["name"] == checkpoint


def test_full_backup_single_image(tmpdir, user_file, fmt, nbd_sock):
    chunk_size = 1024**3
    disk_size = 5 * chunk_size

    # Create disk.
    create_image(user_file.path, fmt, disk_size)

    # Populate disk with data.
    with qemu_nbd.open(user_file.path, fmt) as d:
        for offset in range(0, disk_size, chunk_size):
            d.write(offset, b"%d\n" % offset)
        d.flush()

    checkpoint = "check1" if fmt == "qcow2" else None

    # Start full backup and copy the data, verifying what we read.
    with backup.full_backup(
            tmpdir, user_file.path, fmt, nbd_sock, checkpoint=checkpoint):
        verify_full_backup(nbd_sock, "sda")

    if checkpoint:
        bitmaps = list_bitmaps(user_file.path)
        assert len(bitmaps) == 1
        assert bitmaps[0]["name"] == checkpoint


def copy_disk(nbd_url, backup_disk):
    log.info("Backing up data extents from %s to %s", nbd_url, backup_disk)

    backup_url = urlparse(nbd_url)

    with nbd.open(backup_url) as src_client, \
            qemu_nbd.open(backup_disk, "qcow2") as dst_client:
        nbdutil.copy(src_client, dst_client)


def test_options(tmpdir, fmt, cache, aio, discard):
    size = 4 * 1024**2
    chunk_size = 128 * 1024

    src = str(tmpdir.join("src." + fmt))
    qemu_img.create(src, fmt, size=size)

    with qemu_nbd.open(src, fmt) as c:
        for offset in range(0, size, chunk_size):
            c.write(offset, struct.pack(">Q", offset))
        c.flush()

    dst = str(tmpdir.join("dst." + fmt))
    qemu_img.create(dst, fmt, size=size)

    src_addr = nbd.UnixAddress(str(tmpdir.join("src.sock")))
    dst_addr = nbd.UnixAddress(str(tmpdir.join("dst.sock")))

    with qemu_nbd.run(
                src, fmt, src_addr,
                read_only=True,
                cache=cache,
                aio=aio,
                discard=discard), \
            qemu_nbd.run(
                dst, fmt, dst_addr,
                cache=cache,
                aio=aio,
                discard=discard), \
            nbd.Client(src_addr) as src_client, \
            nbd.Client(dst_addr) as dst_client:
        nbdutil.copy(src_client, dst_client)

    qemu_img.compare(src, dst)


def test_compare_different(tmpdir, src_fmt, dst_fmt):
    size = 1024**2
    src = str(tmpdir.join("src." + src_fmt))
    dst = str(tmpdir.join("dst." + dst_fmt))

    qemu_img.create(src, src_fmt, size=size)
    qemu_img.create(dst, dst_fmt, size=size)

    with qemu_nbd.open(dst, dst_fmt) as c:
        c.write(size // 2, b"x")
        c.flush()

    with pytest.raises(qemu_img.ContentMismatch):
        qemu_img.compare(src, dst)


def test_copy_nbd_to_nbd(tmpdir, src_fmt, dst_fmt, zero):
    # Make sure we have zero extents larger than MAX_ZERO_SIZE (1 GiB). It
    # would be nice to also have data extents larger than MAX_COPY_SIZE (128
    # MiB), but this is too slow for automated tests.
    size = 2 * io.MAX_ZERO_SIZE

    # Default cluster size with qcow2 format.
    cluster_size = 64 * 1024

    src = str(tmpdir.join("src." + src_fmt))
    qemu_img.create(src, src_fmt, size=size)

    with qemu_nbd.open(src, src_fmt) as c:
        # Create first data extent.
        c.write(0, b"data extent 1\n")

        # Between the data extents we have a zero extent bigger than
        # io.MAX_ZERO_SIZE.

        # Create data extent larger than io.BUFFER_SIZE.
        data = b"data extent 2\n" + b"x" * io.BUFFER_SIZE
        c.write(io.MAX_ZERO_SIZE + 2 * cluster_size, data)

        # Between the data extents we have a zero extent smaller than
        # io.MAX_ZERO_SIZE.

        # Create last data extent at the end of the image.
        c.write(size - 4096, b"data extent 3\n")

        c.flush()

    src_sock = UnixAddress(tmpdir.join("src.sock"))
    src_url = urlparse(src_sock.url())

    dst = str(tmpdir.join("dst." + dst_fmt))
    qemu_img.create(dst, dst_fmt, size=size)
    dst_sock = UnixAddress(tmpdir.join("dst.sock"))
    dst_url = urlparse(dst_sock.url())

    with qemu_nbd.run(src, src_fmt, src_sock, read_only=True), \
            qemu_nbd.run(dst, dst_fmt, dst_sock), \
            nbd.open(src_url, "r") as src_backend, \
            nbd.open(dst_url, "r+") as dst_backend:
        # Because we copy to a new image, we could always use zero=False, but
        # we test both values to verify that the result is the same.
        io.copy(src_backend, dst_backend, zero=zero)

    qemu_img.compare(src, dst)


def test_full_backup(tmpdir, fmt, transport):
    disk_size = 1024**2
    disk_part = disk_size // 4
    disk = str(tmpdir.join("disk." + fmt))
    backup_disk = str(tmpdir.join("backup.raw"))

    # Create disk.
    qemu_img.create(disk, fmt, size=disk_size)

    # Populate disk with data.
    with qemu_nbd.open(disk, fmt) as d:
        for i in range(0, disk_size, disk_part):
            data = b"%d\n" % i
            d.write(i, data.ljust(512))
        d.flush()

    if transport == "unix":
        nbd_sock = nbd.UnixAddress(tmpdir.join("nbd.sock"))
    else:
        nbd_sock = nbd.TCPAddress("localhost", testutil.random_tcp_port())

    # Backup using qemu-img convert.
    with backup.full_backup(tmpdir, disk, fmt, nbd_sock):
        log.debug("Backing up image with qemu-img")
        qemu_img.convert(
            nbd_sock.url("sda"),
            backup_disk,
            src_fmt="raw",
            dst_fmt="raw",
            progress=True)

    # Compare source and backup disks.
    with qemu_nbd.open(disk, fmt, read_only=True) as d, \
            io.open(backup_disk, "rb") as b:
        for i in range(0, disk_size, disk_part):
            b.seek(i)
            assert d.read(i, 512) == b.read(512)


def test_shared(tmpdir, fmt):
    size = 1024**2
    chunk_size = size // 2

    src = str(tmpdir.join("src." + fmt))
    qemu_img.create(src, fmt, size=size)

    with qemu_nbd.open(src, fmt) as c:
        c.write(0, b"a" * chunk_size)
        c.write(chunk_size, b"b" * chunk_size)
        c.flush()

    dst = str(tmpdir.join("dst." + fmt))
    qemu_img.create(dst, fmt, size=size)

    src_addr = nbd.UnixAddress(str(tmpdir.join("src.sock")))
    dst_addr = nbd.UnixAddress(str(tmpdir.join("dst.sock")))

    # Start 2 qemu-nbd servers, each with 2 clients that can read and write
    # in parallel for higher throughput.
    with qemu_nbd.run(src, fmt, src_addr, read_only=True, shared=2), \
            qemu_nbd.run(dst, fmt, dst_addr, shared=2), \
            nbd.Client(src_addr) as src_client1, \
            nbd.Client(src_addr) as src_client2, \
            nbd.Client(dst_addr) as dst_client1, \
            nbd.Client(dst_addr) as dst_client2:

        # Copy first half of the image with src_client1 and dst_client1 and
        # second half with src_client2 and dst_client2. In a real application
        # we would have more clients, running in multiple threads.

        chunk1 = src_client1.read(0, chunk_size)
        dst_client1.write(0, chunk1)

        chunk2 = src_client2.read(chunk_size, chunk_size)
        dst_client2.write(chunk_size, chunk2)

        dst_client1.flush()
        dst_client2.flush()

    qemu_img.compare(src, dst)


def copy_dirty(nbd_url, backup_disk):
    log.info("Backing up dirty extents from %s to %s", nbd_url, backup_disk)

    backup_url = urlparse(nbd_url)

    with nbd.open(backup_url, dirty=True) as src_client, \
            qemu_nbd.open(backup_disk, "qcow2") as dst_client:
        # Copy dirty extents in buffer-sized steps; skip over clean extents.
        buf = bytearray(4 * 1024**2)
        offset = 0
        for ext in nbdutil.extents(src_client, dirty=True):
            if ext.dirty:
                todo = ext.length
                while todo:
                    step = min(todo, len(buf))
                    view = memoryview(buf)[:step]
                    src_client.readinto(offset, view)
                    dst_client.write(offset, view)
                    offset += step
                    todo -= step
            else:
                offset += ext.length