def test_dirty_bitmap(tmpdir):
    """Writing to an image with a dirty bitmap marks the touched extent dirty."""
    size = 1024**2

    # Create an image with an empty dirty bitmap.
    img = str(tmpdir.join("img.qcow2"))
    qemu_img.create(img, "qcow2", size=size)
    qemu_img.bitmap_add(img, "b0")

    # Writing one byte allocates one cluster. Since the default bitmap
    # granularity is also one cluster, exactly the first extent becomes dirty.
    with qemu_nbd.open(img, "qcow2") as conn:
        conn.write(0, b"a")
        conn.flush()

    # Read the dirty extents back through the exported bitmap.
    with qemu_nbd.open(img, "qcow2", read_only=True, bitmap="b0") as conn:
        extents = conn.extents(0, size)[nbd.QEMU_DIRTY_BITMAP + "b0"]

    granularity = qemu_img.info(
        img)["format-specific"]["data"]["bitmaps"][0]["granularity"]
    expected = [
        nbd.Extent(length=granularity, flags=nbd.EXTENT_DIRTY),
        nbd.Extent(length=size - granularity, flags=0),
    ]
    assert extents == expected
def test_ova(tmpdir, fmt):
    """Verify that disks packed in an OVA (tar) file can be served in place.

    qemu-nbd exports a region of the archive using the tar member's offset
    and size, so no extraction is needed.
    """
    size = 1024**2
    offset = 64 * 1024
    disks = []

    # Create disks with unique content; each disk stores its own path.
    for i in range(2):
        disk = str(tmpdir.join("disk{}.{}".format(i, fmt)))
        qemu_img.create(disk, fmt, size=size)
        with qemu_nbd.open(disk, fmt) as d:
            d.write(offset, disk.encode("utf-8"))
            d.flush()
        disks.append(disk)

    ova = str(tmpdir.join("vm.ova"))

    # Create a ova file (plain tar archive).
    with tarfile.open(ova, "w") as tar:
        for disk in disks:
            tar.add(disk, arcname=os.path.basename(disk))

    # Read disks contents from the ova file, exporting every member in place
    # via its offset and size inside the archive.
    with tarfile.open(ova) as tar:
        for disk in disks:
            member = tar.getmember(os.path.basename(disk))
            with qemu_nbd.open(
                    ova,
                    fmt=fmt,
                    read_only=True,
                    offset=member.offset_data,
                    size=member.size) as d:
                assert d.export_size == size
                data = disk.encode("utf-8")
                assert d.read(offset, len(data)) == data
def test_zero_extents_qcow2(tmpdir): size = 10 * 1024**2 # Create base image with one data and one zero cluster. base = str(tmpdir.join("base.qcow2")) qemu_img.create(base, "qcow2", size=size) with qemu_nbd.open(base, "qcow2") as c: c.write(0 * CLUSTER_SIZE, b"A" * CLUSTER_SIZE) c.zero(1 * CLUSTER_SIZE, CLUSTER_SIZE) c.flush() # Create top image with one data and one zero cluster. top = str(tmpdir.join("top.qcow2")) qemu_img.create( top, "qcow2", backing_file=base, backing_format="qcow2") with qemu_nbd.open(top, "qcow2") as c: c.write(3 * CLUSTER_SIZE, b"B" * CLUSTER_SIZE) c.zero(4 * CLUSTER_SIZE, CLUSTER_SIZE) c.flush() extents = list(client.extents(top)) assert extents == [ # Extents from base... ZeroExtent( start=0 * CLUSTER_SIZE, length=CLUSTER_SIZE, zero=False, hole=False), ZeroExtent( start=1 * CLUSTER_SIZE, length=CLUSTER_SIZE, zero=True, hole=False), ZeroExtent( start=2 * CLUSTER_SIZE, length=CLUSTER_SIZE, zero=True, hole=True), # Extents from top... ZeroExtent( start=3 * CLUSTER_SIZE, length=CLUSTER_SIZE, zero=False, hole=False), ZeroExtent( start=4 * CLUSTER_SIZE, length=CLUSTER_SIZE, zero=True, hole=False), # Rest of unallocated data... ZeroExtent( start=5 * CLUSTER_SIZE, length=size - 5 * CLUSTER_SIZE, zero=True, hole=True), ]
def test_download_shallow(srv, nbd_server, tmpdir, base_fmt): size = 10 * 1024**2 # Create source base image with some data in first clusters. src_base = str(tmpdir.join("src_base." + base_fmt)) qemu_img.create(src_base, base_fmt, size=size) with qemu_nbd.open(src_base, base_fmt) as c: c.write(0 * CLUSTER_SIZE, b"a" * CLUSTER_SIZE) c.write(1 * CLUSTER_SIZE, b"b" * CLUSTER_SIZE) c.write(2 * CLUSTER_SIZE, b"c" * CLUSTER_SIZE) c.flush() # Create source top image with some data in second cluster and zero in the # third cluster. src_top = str(tmpdir.join("src_top.qcow2")) qemu_img.create(src_top, "qcow2", backing_file=src_base, backing_format=base_fmt) with qemu_nbd.open(src_top, "qcow2") as c: c.write(1 * CLUSTER_SIZE, b"B" * CLUSTER_SIZE) c.zero(2 * CLUSTER_SIZE, CLUSTER_SIZE) c.flush() # Create empty backing file for downloding top image. dst_base = str(tmpdir.join("dst_base." + base_fmt)) qemu_img.create(dst_base, base_fmt, size=size) dst_top = str(tmpdir.join("dst_top.qcow2")) # Start nbd server exporting top image without the backing file. nbd_server.image = src_top nbd_server.fmt = "qcow2" nbd_server.backing_chain = False nbd_server.shared = 8 nbd_server.start() # Upload using nbd backend. url = prepare_transfer(srv, nbd_server.sock.url(), size=size) client.download(url, dst_top, srv.config.tls.ca_file, backing_file=dst_base, backing_format=base_fmt) # Stop the server to allow comparing. nbd_server.stop() # To compare we need to remove its backing files. qemu_img.unsafe_rebase(src_top, "") qemu_img.unsafe_rebase(dst_top, "") qemu_img.compare(src_top, dst_top, format1="qcow2", format2="qcow2", strict=True)
def test_open(tmpdir, fmt):
    """Round trip: data written through qemu_nbd.open can be read back."""
    payload = b"it works"
    offset = 64 * 1024

    image = str(tmpdir.join("disk." + fmt))
    qemu_img.create(image, fmt, size=1024**2)

    # Write the payload and persist it.
    with qemu_nbd.open(image, fmt) as backend:
        backend.write(offset, payload)
        backend.flush()

    # Reopen read-only and verify the payload survived.
    with qemu_nbd.open(image, fmt, read_only=True) as backend:
        assert backend.read(offset, len(payload)) == payload
def test_dirty_extents(tmpdir): size = 1024**2 # Create base image with empty dirty bitmap. base = str(tmpdir.join("base.qcow2")) qemu_img.create(base, "qcow2", size=size) qemu_img.bitmap_add(base, "b0") # Write data, modifying the dirty bitmap. with qemu_nbd.open(base, "qcow2") as c: c.write(0 * CLUSTER_SIZE, b"A" * CLUSTER_SIZE) c.zero(1 * CLUSTER_SIZE, CLUSTER_SIZE) c.flush() # Create top image with empty dirty bitmap. top = str(tmpdir.join("top.qcow2")) qemu_img.create(top, "qcow2", backing_file=base, backing_format="qcow2") qemu_img.bitmap_add(top, "b0") # Write data, modifying the dirty bitmap. with qemu_nbd.open(top, "qcow2") as c: c.write(3 * CLUSTER_SIZE, b"B" * CLUSTER_SIZE) c.zero(4 * CLUSTER_SIZE, CLUSTER_SIZE) c.flush() dirty_extents = list(client.extents(base, bitmap="b0")) assert dirty_extents == [ DirtyExtent(start=0 * CLUSTER_SIZE, length=2 * CLUSTER_SIZE, dirty=True), DirtyExtent(start=2 * CLUSTER_SIZE, length=size - 2 * CLUSTER_SIZE, dirty=False), ] dirty_extents = list(client.extents(top, bitmap="b0")) # Note: qemu-nbd reports dirty extents only for the top image. assert dirty_extents == [ DirtyExtent(start=0 * CLUSTER_SIZE, length=3 * CLUSTER_SIZE, dirty=False), DirtyExtent(start=3 * CLUSTER_SIZE, length=2 * CLUSTER_SIZE, dirty=True), DirtyExtent(start=5 * CLUSTER_SIZE, length=size - 5 * CLUSTER_SIZE, dirty=False), ]
def copy_disk(nbd_url, backup_disk):
    """Copy all data extents from an NBD export into a qcow2 backup disk."""
    log.info("Backing up data extents from %s to %s", nbd_url, backup_disk)
    src_url = urlparse(nbd_url)
    with nbd.open(src_url) as reader, \
            qemu_nbd.open(backup_disk, "qcow2") as writer:
        nbdutil.copy(reader, writer)
def test_zero_extents_raw(tmpdir): size = 10 * 1024**2 # Create image with some data, zero and holes. image = str(tmpdir.join("image.raw")) qemu_img.create(image, "raw", size=size) with qemu_nbd.open(image, "raw") as c: c.write(0 * CLUSTER_SIZE, b"A" * CLUSTER_SIZE) c.zero(1 * CLUSTER_SIZE, CLUSTER_SIZE) c.write(2 * CLUSTER_SIZE, b"B" * CLUSTER_SIZE) c.flush() extents = list(client.extents(image)) # Note: raw files report unallocated as zero, not a a hole. assert extents == [ ZeroExtent(start=0 * CLUSTER_SIZE, length=CLUSTER_SIZE, zero=False, hole=False), ZeroExtent(start=1 * CLUSTER_SIZE, length=CLUSTER_SIZE, zero=True, hole=False), ZeroExtent(start=2 * CLUSTER_SIZE, length=CLUSTER_SIZE, zero=False, hole=False), ZeroExtent(start=3 * CLUSTER_SIZE, length=size - 3 * CLUSTER_SIZE, zero=True, hole=False), ]
def test_ova_compressed_qcow2(tmpdir):
    """A compressed qcow2 disk inside a tar archive can be served in place."""
    size = 1024**2
    offset = 64 * 1024
    data = b"I can eat glass and it doesn't hurt me."

    # Build a sparse raw image containing the payload.
    tmp = str(tmpdir.join("disk.raw"))
    with open(tmp, "wb") as raw_file:
        raw_file.truncate(size)
        raw_file.seek(offset)
        raw_file.write(data)

    # Convert it to a compressed qcow2 disk.
    disk = str(tmpdir.join("disk.qcow2"))
    qemu_img.convert(tmp, disk, "raw", "qcow2", compressed=True)

    # Create tar file with compressed qcow2 disk.
    ova = str(tmpdir.join("vm.ova"))
    with tarfile.open(ova, "w") as tar:
        tar.add(disk, arcname=os.path.basename(disk))

    # Serve the tar member in place and verify the payload survived.
    with tarfile.open(ova) as tar:
        member = tar.getmember(os.path.basename(disk))
        with qemu_nbd.open(
                ova,
                fmt="qcow2",
                read_only=True,
                offset=member.offset_data,
                size=member.size) as export:
            assert export.export_size == size
            assert export.read(offset, len(data)) == data
def test_zero_extents_from_ova(tmpdir): size = 10 * 1024**2 # Create image with data, zero and hole clusters. disk = str(tmpdir.join("disk.qcow2")) qemu_img.create(disk, "qcow2", size=size) with qemu_nbd.open(disk, "qcow2") as c: c.write(0 * CLUSTER_SIZE, b"A" * CLUSTER_SIZE) c.zero(1 * CLUSTER_SIZE, CLUSTER_SIZE) c.flush() # Create OVA whith this image. ova = str(tmpdir.join("vm.ova")) with tarfile.open(ova, "w") as tar: tar.add(disk, arcname=os.path.basename(disk)) extents = list(client.extents(ova, member="disk.qcow2")) assert extents == [ ZeroExtent(start=0 * CLUSTER_SIZE, length=CLUSTER_SIZE, zero=False, hole=False), ZeroExtent(start=1 * CLUSTER_SIZE, length=CLUSTER_SIZE, zero=True, hole=False), ZeroExtent(start=2 * CLUSTER_SIZE, length=size - 2 * CLUSTER_SIZE, zero=True, hole=True), ]
def test_full_backup_complete_chain(tmpdir, nbd_sock, checkpoint):
    """Full backup of a complete backing chain, optionally with a checkpoint."""
    depth = 3
    chunk_size = 1024**2
    disk_size = depth * chunk_size

    for i in range(depth):
        # Create disk based on previous one.
        disk = str(tmpdir.join("disk.{}".format(i)))
        if i == 0:
            qemu_img.create(disk, "qcow2", size=disk_size)
        else:
            qemu_img.create(
                disk,
                "qcow2",
                backing_file="disk.{}".format(i - 1),
                backing_format="qcow2")

        # This data can be read only from this disk.
        with qemu_nbd.open(disk, "qcow2") as d:
            offset = i * chunk_size
            d.write(offset, b"%d\n" % offset)
            d.flush()

    # Start full backup and copy the data, verifying what we read.
    # Note: disk is the last (top) image of the chain after the loop.
    with backup.full_backup(
            tmpdir, disk, "qcow2", nbd_sock, checkpoint=checkpoint):
        verify_full_backup(nbd_sock, "sda")

    if checkpoint:
        # The checkpoint is recorded as a single bitmap on the top image.
        bitmaps = list_bitmaps(disk)
        assert len(bitmaps) == 1
        assert bitmaps[0]["name"] == checkpoint
def test_download_qcow2_as_raw(tmpdir, srv):
    """Download a qcow2 bytestream via the file backend, storing it as-is."""
    src = str(tmpdir.join("src.qcow2"))
    qemu_img.create(src, "qcow2", size=IMAGE_SIZE)

    # Allocate one cluster in the middle of the image.
    with qemu_nbd.open(src, "qcow2") as c:
        c.write(CLUSTER_SIZE, b"a" * CLUSTER_SIZE)
        c.flush()

    # The file backend serves the host file, so the transfer size is the
    # actual file size, not the virtual image size.
    actual_size = os.path.getsize(src)
    url = prepare_transfer(srv, "file://" + src, size=actual_size)
    dst = str(tmpdir.join("dst.qcow2"))

    # When downloading qcow2 image using the nbd backend, we get raw data and
    # we can convert it to any format we want. However when downloading using
    # the file backend, we get qcow2 bytestream and we cannot convert it.
    #
    # To store the qcow2 bytestream, we must use fmt="raw". This instructs
    # qemu-nbd on the client side to treat the data as raw bytes, storing them
    # without any change on the local file.
    #
    # This is basically like:
    #
    #   qemu-img convert -f raw -O raw src.qcow2 dst.qcow2
    #
    client.download(url, dst, srv.config.tls.ca_file, fmt="raw")

    # The result should be identical qcow2 image content. Allocation may
    # differ but for this test we get identical allocation.
    qemu_img.compare(src, dst, format1="qcow2", format2="qcow2", strict=True)
def test_options(tmpdir, fmt, options):
    """Copy an image between two qemu-nbd servers started with extra options."""
    size = 4 * 1024**2
    chunk_size = 128 * 1024

    # Populate the source with a unique 8-byte marker at every chunk.
    src = str(tmpdir.join("src." + fmt))
    qemu_img.create(src, fmt, size=size)
    with qemu_nbd.open(src, fmt) as c:
        for offset in range(0, size, chunk_size):
            c.write(offset, struct.pack(">Q", offset))
        c.flush()

    dst = str(tmpdir.join("dst." + fmt))
    qemu_img.create(dst, fmt, size=size)

    src_addr = nbd.UnixAddress(str(tmpdir.join("src.sock")))
    dst_addr = nbd.UnixAddress(str(tmpdir.join("dst.sock")))

    # Run both servers with the options under test, then copy src to dst
    # over NBD and verify the result.
    with qemu_nbd.run(
            src, fmt, src_addr, read_only=True, **options), \
        qemu_nbd.run(
            dst, fmt, dst_addr, **options), \
        nbd.Client(src_addr) as src_client, \
        nbd.Client(dst_addr) as dst_client:

        nbdutil.copy(src_client, dst_client)

    qemu_img.compare(src, dst)
def test_backing_chain(tmpdir):
    """Verify that the backing_chain option controls visibility of base data."""
    size = 128 * 1024
    base = str(tmpdir.join("base.raw"))
    top = str(tmpdir.join("top.qcow2"))

    base_data = b"data from base".ljust(32, b"\0")

    # Add base image with some data.
    qemu_img.create(base, "raw", size=size)
    with qemu_nbd.open(base, "raw") as c:
        c.write(0, base_data)
        c.flush()

    # Add empty overlay.
    qemu_img.create(top, "qcow2", backing_file=base, backing_format="raw")

    top_addr = nbd.UnixAddress(str(tmpdir.join("sock")))

    # By default, we see data from base.
    with qemu_nbd.run(top, "qcow2", top_addr), \
            nbd.Client(top_addr) as c:
        assert c.read(0, 32) == base_data

    # With backing chain disabled, we see data only from top.
    with qemu_nbd.run(top, "qcow2", top_addr, backing_chain=False), \
            nbd.Client(top_addr) as c:
        assert c.read(0, 32) == b"\0" * 32
def test_full_backup_single_image(tmpdir, user_file, fmt, nbd_sock):
    """Full backup of a single image; a checkpoint is used only for qcow2."""
    chunk_size = 1024**3
    disk_size = 5 * chunk_size

    # Create disk.
    create_image(user_file.path, fmt, disk_size)

    # Populate disk with data.
    with qemu_nbd.open(user_file.path, fmt) as d:
        for offset in range(0, disk_size, chunk_size):
            d.write(offset, b"%d\n" % offset)
        d.flush()

    # Create a checkpoint only for qcow2 images.
    checkpoint = "check1" if fmt == "qcow2" else None

    # Start full backup and copy the data, verifying what we read.
    with backup.full_backup(
            tmpdir, user_file.path, fmt, nbd_sock, checkpoint=checkpoint):
        verify_full_backup(nbd_sock, "sda")

    if checkpoint:
        # The checkpoint is recorded as a single bitmap on the image.
        bitmaps = list_bitmaps(user_file.path)
        assert len(bitmaps) == 1
        assert bitmaps[0]["name"] == checkpoint
def populate_image(path, fmt, extents):
    """Fill an image with a sequence of extents.

    Arguments:
        path (str): image file path.
        fmt (str): image format (e.g. "raw", "qcow2").
        extents (iterable): (kind, length) pairs. kind is one of:
            "data" - written bytes ("x" * length),
            "zero" - zeroed range with space allocated (punch_hole=False),
            "hole" - left unallocated.

    Raises:
        ValueError: if an extent kind is unknown. Previously an unknown kind
            was silently treated like a hole, hiding typos in test data.
    """
    with qemu_nbd.open(path, fmt) as c:
        offset = 0
        for kind, length in extents:
            if kind == "data":
                c.write(offset, b"x" * length)
            elif kind == "zero":
                # Zero without punching a hole so the range stays allocated.
                c.zero(offset, length, punch_hole=False)
            elif kind == "hole":
                pass  # Unallocated range - nothing to write.
            else:
                raise ValueError("Unknown extent kind: {!r}".format(kind))
            offset += length
        c.flush()
def test_upload_hole_at_start_sparse(tmpdir, srv, fmt):
    """Upload an image with a hole at the start over a pre-filled destination."""
    size = 3 * 1024**2

    # Source has 1 MiB of data at the end; the start is unallocated.
    src = str(tmpdir.join("src"))
    qemu_img.create(src, fmt, size=size)
    with qemu_nbd.open(src, fmt) as c:
        c.write(size - 1024**2, b"b" * 1024**2)
        c.flush()
        log.debug("src extents: %s", list(nbdutil.extents(c)))

    # Destination is pre-filled with data, so the hole must be zeroed
    # during the upload.
    dst = str(tmpdir.join("dst"))
    with open(dst, "wb") as f:
        f.write(b"a" * size)

    url = prepare_transfer(srv, "file://" + dst, size=size)

    client.upload(src, url, srv.config.tls.ca_file)

    with qemu_nbd.open(dst, "raw", read_only=True) as c:
        log.debug("dst extents: %s", list(nbdutil.extents(c)))

    # Strict comparison is possible only when both formats are raw.
    qemu_img.compare(
        src, dst, format1=fmt, format2="raw", strict=fmt == "raw")
def test_compare_identical_content(tmpdir, src_fmt, dst_fmt):
    """Non-strict compare succeeds when content matches despite allocation."""
    size = 1024**2
    src = str(tmpdir.join("src." + src_fmt))
    dst = str(tmpdir.join("dst." + dst_fmt))
    for path, fmt in ((src, src_fmt), (dst, dst_fmt)):
        qemu_img.create(path, fmt, size=size)

    # Allocate a zero byte in the destination so allocation differs while
    # guest-visible content stays identical.
    with qemu_nbd.open(dst, dst_fmt) as conn:
        conn.write(size // 2, b"\0")
        conn.flush()

    qemu_img.compare(src, dst, format1=src_fmt, format2=dst_fmt)
def test_zero_allocation(user_file):
    """zero() allocates with punch_hole=False and deallocates by default."""
    # Create a 2 MiB sparse file.
    with open(user_file.path, "w") as f:
        f.truncate(2 * 1024**2)

    with qemu_nbd.open(user_file.path, "raw") as conn:
        # Zero with punch_hole=False allocates space.
        conn.zero(0, conn.export_size, punch_hole=False)
        conn.flush()
        assert os.stat(user_file.path).st_blocks * 512 == conn.export_size

        # Default zero deallocates space (depends on qemu-nbd default).
        conn.zero(0, conn.export_size)
        conn.flush()
        assert os.stat(user_file.path).st_blocks == 0
def test_full_backup(tmpdir, fmt, transport):
    """Full backup flow: export over NBD, back up with qemu-img convert."""
    disk_size = 1024**2
    disk_part = disk_size // 4
    disk = str(tmpdir.join("disk." + fmt))
    backup_disk = str(tmpdir.join("backup.raw"))

    # Create disk.
    qemu_img.create(disk, fmt, size=disk_size)

    # Populate disk with data.
    with qemu_nbd.open(disk, fmt) as d:
        for i in range(0, disk_size, disk_part):
            data = b"%d\n" % i
            d.write(i, data.ljust(512))
        d.flush()

    if transport == "unix":
        # Fix: pass a str like every other UnixAddress call site;
        # tmpdir.join() returns a py.path.local object, not a str.
        nbd_sock = nbd.UnixAddress(str(tmpdir.join("nbd.sock")))
    else:
        nbd_sock = nbd.TCPAddress("localhost", testutil.random_tcp_port())

    # Backup using qemu-img convert.
    with backup.full_backup(tmpdir, disk, fmt, nbd_sock):
        log.debug("Backing up image with qemu-img")
        qemu_img.convert(
            nbd_sock.url("sda"),
            backup_disk,
            src_fmt="raw",
            dst_fmt="raw",
            progress=True)

    # Compare source and backup disks.
    with qemu_nbd.open(disk, fmt, read_only=True) as d, \
            io.open(backup_disk, "rb") as b:
        for i in range(0, disk_size, disk_part):
            b.seek(i)
            assert d.read(i, 512) == b.read(512)
def test_compare_different_content(tmpdir, src_fmt, dst_fmt):
    """compare() raises ContentMismatch when guest-visible data differs."""
    size = 1024**2
    src = str(tmpdir.join("src." + src_fmt))
    dst = str(tmpdir.join("dst." + dst_fmt))
    for path, fmt in ((src, src_fmt), (dst, dst_fmt)):
        qemu_img.create(path, fmt, size=size)

    # Write a non-zero byte so destination content differs from source.
    with qemu_nbd.open(dst, dst_fmt) as conn:
        conn.write(size // 2, b"x")
        conn.flush()

    with pytest.raises(qemu_img.ContentMismatch):
        qemu_img.compare(src, dst, format1=src_fmt, format2=dst_fmt)
def test_compare_different_allocation(tmpdir, src_fmt, dst_fmt):
    """Strict compare fails when images match in content but not allocation."""
    size = 1024**2
    src = str(tmpdir.join("src." + src_fmt))
    dst = str(tmpdir.join("dst." + dst_fmt))
    for path, fmt in ((src, src_fmt), (dst, dst_fmt)):
        qemu_img.create(path, fmt, size=size)

    # Allocate a zero byte only in the destination; content stays equal but
    # allocation now differs.
    with qemu_nbd.open(dst, dst_fmt) as conn:
        conn.write(size // 2, b"\0")
        conn.flush()

    with pytest.raises(qemu_img.ContentMismatch):
        qemu_img.compare(
            src, dst, format1=src_fmt, format2=dst_fmt, strict=True)
def test_upload_full_sparse(tmpdir, srv, fmt):
    """Uploading a fully allocated image overwrites all destination data."""
    # Source is fully allocated with known data.
    src = str(tmpdir.join("src"))
    qemu_img.create(src, fmt, size=IMAGE_SIZE)
    with qemu_nbd.open(src, fmt) as conn:
        conn.write(0, b"b" * IMAGE_SIZE)
        conn.flush()

    # Destination is pre-filled with different data.
    dst = str(tmpdir.join("dst"))
    with open(dst, "wb") as f:
        f.write(b"a" * IMAGE_SIZE)

    url = prepare_transfer(srv, "file://" + dst)

    client.upload(src, url, srv.config.tls.ca_file)

    qemu_img.compare(src, dst, strict=True)
def test_upload_hole_at_start_sparse(tmpdir, srv, fmt):
    """Upload an image with data only at the end over a pre-filled destination.

    NOTE(review): this function shares its name with another
    test_upload_hole_at_start_sparse; if both live in the same module the
    earlier definition is shadowed - consider renaming one of them.
    """
    src = str(tmpdir.join("src"))
    qemu_img.create(src, fmt, size=IMAGE_SIZE)
    with qemu_nbd.open(src, fmt) as c:
        # Only the last 6 bytes are written; everything before is a hole.
        c.write(IMAGE_SIZE - 6, b"middle")
        c.flush()

    # Destination is pre-filled with data, so the hole must be zeroed
    # during the upload.
    dst = str(tmpdir.join("dst"))
    with open(dst, "wb") as f:
        f.write(b"a" * IMAGE_SIZE)

    url = prepare_transfer(srv, "file://" + dst)

    client.upload(src, url, srv.config.tls.ca_file)

    # Strict comparison is possible only when both formats are raw.
    qemu_img.compare(
        src, dst, format1=fmt, format2="raw", strict=fmt == "raw")
def test_upload_hole_at_end_sparse(tmpdir, srv, fmt):
    """Uploading an image whose tail is a hole zeroes the destination tail."""
    size = 3 * 1024**2

    # Source has 1 MiB of data at the start; the rest is unallocated.
    src = str(tmpdir.join("src"))
    qemu_img.create(src, fmt, size=size)
    with qemu_nbd.open(src, fmt) as conn:
        conn.write(0, b"b" * 1024**2)
        conn.flush()

    # Destination is pre-filled with different data.
    dst = str(tmpdir.join("dst"))
    with open(dst, "wb") as f:
        f.write(b"a" * size)

    url = prepare_transfer(srv, "file://" + dst, size=size)

    client.upload(src, url, srv.config.tls.ca_file)

    # Strict comparison is possible only when both formats are raw.
    qemu_img.compare(
        src, dst, format1=fmt, format2="raw", strict=fmt == "raw")
def test_shared(tmpdir, fmt):
    """Copy an image using two parallel NBD connections per server."""
    size = 1024**2
    chunk_size = size // 2

    src = str(tmpdir.join("src." + fmt))
    qemu_img.create(src, fmt, size=size)
    with qemu_nbd.open(src, fmt) as c:
        c.write(0, b"a" * chunk_size)
        # Fix: write the second chunk at chunk_size, not at offset 0, so each
        # half of the image has unique content. The original second write
        # overwrote the first chunk and left the second half unallocated,
        # weakening the copy verification below.
        c.write(chunk_size, b"b" * chunk_size)
        c.flush()

    dst = str(tmpdir.join("dst." + fmt))
    qemu_img.create(dst, fmt, size=size)

    src_addr = nbd.UnixAddress(str(tmpdir.join("src.sock")))
    dst_addr = nbd.UnixAddress(str(tmpdir.join("dst.sock")))

    # Start 2 qemu-nbd servers, each with 2 clients that can read and write in
    # parallel for higher throughput.
    with qemu_nbd.run(src, fmt, src_addr, read_only=True, shared=2), \
            qemu_nbd.run(dst, fmt, dst_addr, shared=2), \
            nbd.Client(src_addr) as src_client1, \
            nbd.Client(src_addr) as src_client2, \
            nbd.Client(dst_addr) as dst_client1, \
            nbd.Client(dst_addr) as dst_client2:

        # Copy first half of the image with src_client1 and dst_client1 and
        # second half with src_client2 and dst_client2. In a real application
        # we would have more clients, running in multiple threads.

        chunk1 = src_client1.read(0, chunk_size)
        dst_client1.write(0, chunk1)

        chunk2 = src_client2.read(chunk_size, chunk_size)
        dst_client2.write(chunk_size, chunk2)

        dst_client1.flush()
        dst_client2.flush()

    qemu_img.compare(src, dst)
def test_detect_zeroes_disabled(tmpdir, fmt, detect_zeroes):
    """With zero detection disabled, written zeroes stay fully allocated."""
    size = 1024**2
    disk = str(tmpdir.join("disk." + fmt))
    qemu_img.create(disk, fmt, size=size)

    with qemu_nbd.open(disk, fmt, detect_zeroes=detect_zeroes) as c:
        # These zeroes should not be detected.
        c.write(0, b"\0" * size)
        c.flush()
        extents = c.extents(0, size)

    # The entire image is reported as allocated, non-zero data.
    assert extents["base:allocation"] == [
        nbd.Extent(length=1048576, flags=0),
    ]

    if fmt != "raw":
        # Allocation depth is only reported for non-raw formats.
        assert extents["qemu:allocation-depth"] == [
            nbd.Extent(length=1048576, flags=0),
        ]
def copy_dirty(nbd_url, backup_disk):
    """Copy only dirty extents from an NBD export into a qcow2 backup disk.

    Iterates the dirty bitmap extents of the source and copies dirty ranges
    in buffer-sized steps, skipping clean ranges entirely.
    """
    log.info("Backing up dirty extents from %s to %s", nbd_url, backup_disk)
    backup_url = urlparse(nbd_url)

    with nbd.open(backup_url, dirty=True) as src_client, \
            qemu_nbd.open(backup_disk, "qcow2") as dst_client:
        buf = bytearray(4 * 1024**2)
        offset = 0
        for ext in nbdutil.extents(src_client, dirty=True):
            if ext.dirty:
                todo = ext.length
                while todo:
                    step = min(todo, len(buf))
                    # A memoryview slice avoids copying the buffer.
                    view = memoryview(buf)[:step]
                    src_client.readinto(offset, view)
                    dst_client.write(offset, view)
                    offset += step
                    todo -= step
            else:
                offset += ext.length
        # Fix: flush the destination so copied data is persisted before the
        # connection closes. The sibling copy_disk() gets this from
        # nbdutil.copy(); here the writes were issued directly.
        dst_client.flush()
def test_file_backend(srv, client, user_file, fmt):
    """Verify the checksum request served by the file backend."""
    qemu_img.create(user_file.path, fmt, size="2m")
    with qemu_nbd.open(user_file.path, fmt) as c:
        # Add cluster with data.
        c.write(1 * 1024**2, b"some data")
        c.flush()

    # File backend operates on host data, not guest data.
    checksum = blkhash.checksum(user_file.path)

    # File backend uses actual size, not virtual size.
    size = os.path.getsize(user_file.path)
    ticket = testutil.create_ticket(url="file://" + user_file.path, size=size)
    srv.auth.add(ticket)

    res = client.request("GET", "/images/{}/checksum".format(ticket["uuid"]))
    data = res.read()
    assert res.status == 200

    res = json.loads(data)
    assert res == checksum
def test_dirty_extents(tmpdir): size = 1024**2 # Create base image with empty dirty bitmap. base = str(tmpdir.join("base.qcow2")) qemu_img.create(base, "qcow2", size=size) qemu_img.bitmap_add(base, "b0") # Write data, modifying the dirty bitmap. with qemu_nbd.open(base, "qcow2") as c: c.write(0 * CLUSTER_SIZE, b"A" * CLUSTER_SIZE) c.zero(1 * CLUSTER_SIZE, CLUSTER_SIZE) c.flush() # Create top image with empty dirty bitmap. top = str(tmpdir.join("top.qcow2")) qemu_img.create(top, "qcow2", backing_file=base, backing_format="qcow2") qemu_img.bitmap_add(top, "b0") # Write data, modifying the dirty bitmap. with qemu_nbd.open(top, "qcow2") as c: c.write(3 * CLUSTER_SIZE, b"B" * CLUSTER_SIZE) c.zero(4 * CLUSTER_SIZE, CLUSTER_SIZE) c.flush() dirty_extents = list(client.extents(base, bitmap="b0")) expected = [ # First cluster is dirty data. DirtyExtent( start=0 * CLUSTER_SIZE, length=1 * CLUSTER_SIZE, dirty=True, zero=False), # Second cluster is dirty zero. DirtyExtent( start=1 * CLUSTER_SIZE, length=1 * CLUSTER_SIZE, dirty=True, zero=True), # Third cluster is clean zero. DirtyExtent( start=2 * CLUSTER_SIZE, length=size - 2 * CLUSTER_SIZE, dirty=False, zero=True), ] log.debug("base image dirty extents: %s", dirty_extents) assert dirty_extents == expected dirty_extents = list(client.extents(top, bitmap="b0")) # Note: qemu-nbd reports dirty extents only for the top image, but zero # extents are read from the base image. expected = [ # First cluster is clean data, read from base image. DirtyExtent( start=0 * CLUSTER_SIZE, length=1 * CLUSTER_SIZE, dirty=False, zero=False), # Second and third clusters are read from base image. Because they are # both clean zero, they are merged. DirtyExtent( start=1 * CLUSTER_SIZE, length=2 * CLUSTER_SIZE, dirty=False, zero=True), # Forth cluster is a data extent modified in top image. DirtyExtent( start=3 * CLUSTER_SIZE, length=1 * CLUSTER_SIZE, dirty=True, zero=False), # Fifth cluster is a zero extent modifed in to image. 
DirtyExtent( start=4 * CLUSTER_SIZE, length=1 * CLUSTER_SIZE, dirty=True, zero=True), # The rest is clean zero extent. DirtyExtent( start=5 * CLUSTER_SIZE, length=size - 5 * CLUSTER_SIZE, dirty=False, zero=True), ] log.debug("top image dirty extents: %s", dirty_extents) assert dirty_extents == expected