Example 1
    def sync_disks(self, vm_id, disks_mappings, guestfs_helper, sync_index,
                   last_sync):
        MAX_PREAD_LEN = 23 << 20  # 23MB (24MB requests fail in VDDK)
        MAX_AIO_IN_FLIGHT = 2

        vm = self._find_vm_by_id(vm_id)
        # Timestamped snapshot name (assumes `import datetime` at module level).
        snapshot = self.create_snapshot(
            vm_id, name="conversion-%s" %
            datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
        self._update_change_ids(vm_id, snapshot, disks_mappings)
        self._get_changed_extents(vm_id, snapshot, disks_mappings, sync_index,
                                  last_sync)

        for dm in disks_mappings:
            logging.debug("Opening locally attached disk %s" %
                          dm["destination"]["conversion_host_path"])
            fd = os.open(dm["destination"]["conversion_host_path"],
                         os.O_WRONLY | os.O_CREAT)

            if len(dm["source"]["extents"]) == 0:
                os.close(fd)
                continue

            logging.info("Connecting the source disk %s with NBD",
                         dm["source"]["id"])
            nbd_cmd = guestfs_helper.nbd_expose_disk_cmd(dm["source"],
                                                         None,
                                                         vm_moref=vm._moId,
                                                         sync_index=sync_index)
            logging.debug("NBD Command: %s", nbd_cmd)
            nbd_handle = nbd.NBD()
            nbd_handle.add_meta_context("base:allocation")
            nbd_handle.connect_command(nbd_cmd)

            logging.info("Getting block info for disk: %s" %
                         dm["source"]["id"])
            copied = 0
            position = 0
            data_blocks = []
            for extent in dm["source"]["extents"]:
                if extent.length < 1 << 20:
                    logging.debug(
                        "Skipping block status for extent of size %d B at offset %d B"
                        % (extent.length, extent.start))
                    data_blocks.append({
                        "offset": extent.start,
                        "length": extent.length,
                        "flags": 0
                    })
                    continue

                blocks = self._get_block_status(nbd_handle, extent)
                logging.debug("Gathered block status of %d: %s" %
                              (len(blocks), blocks))
                data_blocks += [
                    x for x in blocks if not x['flags'] & nbd.STATE_HOLE
                ]

            logging.debug("Block status filtered down to %d data blocks" %
                          len(data_blocks))
            if len(data_blocks) == 0:
                logging.debug("No extents have allocated data for disk: %s" %
                              dm["source"]["id"])
                os.close(fd)
                continue

            to_copy = sum([x['length'] for x in data_blocks])
            logging.debug("Copying %d B of data for disk %s" %
                          (to_copy, dm["source"]["id"]))

            self._state.disks[dm["source"]["id"]]["syncs"].append({
                "to_copy": to_copy,
                "copied": 0
            })
            self._state.write()

            for block in data_blocks:
                logging.debug("Block at offset %s flags: %s", block["offset"],
                              block["flags"])
                if block["flags"] & nbd.STATE_ZERO:
                    logging.debug("Writing %d B of zeros to offset %d B" %
                                  (block["length"], block["offset"]))
                    # os.pwrite() needs a bytes-like object; a full block of
                    # zeroes is allocated here (could be chunked to save memory).
                    os.pwrite(fd, b"\0" * block["length"], block["offset"])
                else:
                    count = 0
                    while count < block["length"]:
                        length = min(block["length"] - count, MAX_PREAD_LEN)
                        offset = block["offset"] + count

                        logging.debug("Reading %d B from offset %d B" %
                                      (length, offset))
                        buf = nbd.Buffer(length)
                        nbd_handle.aio_pread(
                            buf, offset,
                            lambda err, fd=fd, buf=buf, offset=offset:
                            self._write_data(fd, buf, offset, err))
                        count += length

                        while nbd_handle.aio_in_flight() > MAX_AIO_IN_FLIGHT:
                            nbd_handle.poll(-1)
                        guestfs_helper.nbd_process_aio_requests(nbd_handle)

                        copied += length
                        self._state.disks[dm["source"]["id"]]["syncs"][
                            sync_index]["copied"] = copied
                        self._state.write()

            guestfs_helper.nbd_wait_for_aio_commands_to_finish(nbd_handle)

            if copied == 0:
                logging.debug("Nothing to copy for disk: %s" %
                              dm["source"]["id"])
            else:
                logging.debug("Copied %d B for disk: %s" %
                              (copied, dm["source"]["id"]))

            nbd_handle.shutdown()
            os.close(fd)

        self._remove_all_snapshots(vm_id)
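
The method above depends on two helpers that are not part of the snippet: self._get_block_status() and self._write_data(). The following is a minimal sketch of how they could look on top of libnbd's block_status() call and the "base:allocation" metadata context; the MAX_BLOCK_STATUS_LEN cap, the dict layout and the error handling are illustrative assumptions, not the original implementation.

    def _get_block_status(self, nbd_handle, extent):
        # Hypothetical helper: map one changed-area extent to a list of
        # allocation blocks shaped like {"offset": ..., "length": ...,
        # "flags": ...}, as consumed by sync_disks() above.
        MAX_BLOCK_STATUS_LEN = 2 << 30  # 2GB, assumed per-request cap

        blocks = []

        def update_blocks(metacontext, offset, extents, err):
            if metacontext != "base:allocation":
                return
            # libnbd reports extents as a flat list: length, flags, length, ...
            for length, flags in zip(extents[::2], extents[1::2]):
                blocks.append({
                    "offset": offset,
                    "length": length,
                    "flags": flags
                })
                offset += length

        offset = extent.start
        last_offset = extent.start + extent.length
        while offset < last_offset:
            length = min(last_offset - offset, MAX_BLOCK_STATUS_LEN)
            nbd_handle.block_status(length, offset, update_blocks)
            offset += length
        return blocks

    def _write_data(self, fd, buf, offset, err):
        # Hypothetical completion callback for aio_pread(): write the buffer
        # to the matching offset of the locally attached disk.
        if err.value != 0:
            logging.warning("pread at offset %d failed: %s", offset,
                            os.strerror(err.value))
            return 1
        os.pwrite(fd, buf.to_bytearray(), offset)
        return 1  # returning 1 auto-retires the completed command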
Example 2
def asynch_copy(src, dst):
    size = src.get_size()

    # This is our reading position in the source.
    soff = 0

    writes = []

    # This callback is called when any pread from the source
    # has completed.
    def read_completed(buf, offset, error):
        global bytes_read
        bytes_read += buf.size()
        wr = (buf, offset)
        writes.append(wr)
        # By returning 1 here we auto-retire the pread command.
        return 1

    # This callback is called when any pwrite to the destination
    # has completed.
    def write_completed(buf, error):
        global bytes_written
        bytes_written += buf.size()
        # By returning 1 here we auto-retire the pwrite command.
        return 1

    # The main loop which runs until we have finished reading and
    # there are no more commands in flight.
    while soff < size or src.aio_in_flight() > 0 or dst.aio_in_flight() > 0 \
          or len(writes) > 0:
        # If we're able to submit more reads from the source
        # then do so now.
        if soff < size and src.aio_in_flight() < max_reads_in_flight:
            bufsize = min(bs, size - soff)
            buf = nbd.Buffer(bufsize)
            # NB: Python lambdas capture variables late (by reference), so we
            # bind buf and soff through default arguments.
            # https://stackoverflow.com/questions/2295290
            src.aio_pread(
                buf,
                soff,
                lambda err, buf=buf, soff=soff: read_completed(buf, soff, err))
            soff += bufsize

        # If there are any write commands waiting to be issued
        # to the destination, send them now.
        for buf, offset in writes:
            # See above link about broken Python lambdas.
            dst.aio_pwrite(buf,
                           offset,
                           lambda err, buf=buf: write_completed(buf, err))
        writes = []

        poll = select.poll()

        sfd = src.aio_get_fd()
        dfd = dst.aio_get_fd()

        sevents = 0
        devents = 0
        if src.aio_get_direction() & nbd.AIO_DIRECTION_READ:
            sevents += select.POLLIN
        if src.aio_get_direction() & nbd.AIO_DIRECTION_WRITE:
            sevents += select.POLLOUT
        if dst.aio_get_direction() & nbd.AIO_DIRECTION_READ:
            devents += select.POLLIN
        if dst.aio_get_direction() & nbd.AIO_DIRECTION_WRITE:
            devents += select.POLLOUT
        poll.register(sfd, sevents)
        poll.register(dfd, devents)
        for (fd, revents) in poll.poll():
            # The direction of each handle can change since we
            # slept in the select.
            if fd == sfd and revents & select.POLLIN and \
                    src.aio_get_direction() & nbd.AIO_DIRECTION_READ:
                src.aio_notify_read()
            elif fd == sfd and revents & select.POLLOUT and \
                    src.aio_get_direction() & nbd.AIO_DIRECTION_WRITE:
                src.aio_notify_write()
            elif fd == dfd and revents & select.POLLIN and \
                    dst.aio_get_direction() & nbd.AIO_DIRECTION_READ:
                dst.aio_notify_read()
            elif fd == dfd and revents & select.POLLOUT and \
                    dst.aio_get_direction() & nbd.AIO_DIRECTION_WRITE:
                dst.aio_notify_write()
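
asynch_copy() relies on module-level settings and counters (bs, max_reads_in_flight, bytes_read, bytes_written) and on select/nbd imports, none of which appear in the snippet. A possible driver is sketched below; the values and the nbdkit command lines are assumptions chosen only to make the example self-contained.

import select
import nbd

bs = 512 * 1024            # assumed read size per aio_pread
max_reads_in_flight = 16   # assumed cap on concurrent source reads
bytes_read = 0
bytes_written = 0

src = nbd.NBD()
dst = nbd.NBD()
# Any two NBD servers work; nbdkit's pattern and memory plugins are just a
# convenient self-contained choice here.
src.connect_command(["nbdkit", "-s", "--exit-with-parent",
                     "pattern", "size=1M"])
dst.connect_command(["nbdkit", "-s", "--exit-with-parent",
                     "memory", "size=1M"])

asynch_copy(src, dst)
print("read %d bytes, wrote %d bytes" % (bytes_read, bytes_written))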
Example 3
import os
import nbd

buf = bytearray(512)
buf[510] = 0x55
buf[511] = 0xAA

datafile = "510-pwrite.data"

with open(datafile, "wb") as f:
    f.truncate(512)

h = nbd.NBD()
h.connect_command(
    ["nbdkit", "-s", "--exit-with-parent", "-v", "file", datafile])

buf1 = nbd.Buffer.from_bytearray(buf)
cookie = h.aio_pwrite(buf1, 0, flags=nbd.CMD_FLAG_FUA)
while not h.aio_command_completed(cookie):
    h.poll(-1)

buf2 = nbd.Buffer(512)
cookie = h.aio_pread(buf2, 0)
while not h.aio_command_completed(cookie):
    h.poll(-1)

assert buf == buf2.to_bytearray()

with open(datafile, "rb") as f:
    content = f.read()

assert buf == content

os.unlink(datafile)
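
For comparison, the same round trip can be written with the blocking calls h.pwrite() and h.pread(), which wait for the server's reply internally, so no cookie/poll loop is needed. A minimal sketch, using nbdkit's memory plugin instead of a temporary file:

import nbd

h = nbd.NBD()
h.connect_command(["nbdkit", "-s", "--exit-with-parent", "memory", "size=512"])

data = bytearray(512)
data[510] = 0x55
data[511] = 0xAA

# pwrite() returns once the server has acknowledged the write.
h.pwrite(data, 0)
# pread() returns the requested bytes directly.
assert h.pread(512, 0) == data

h.shutdown()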
Example 4
    assert s == nbd.READ_DATA


def callback(user_data, err):
    print("in callback, user_data %d,%d" % user_data)
    if user_data[0] == 42:
        assert err.value == 0
    else:
        assert err.value == errno.EPROTO
    err.value = errno.ENOMEM
    if user_data[1] != 42:
        raise ValueError('unexpected user_data')


# First try: succeed in both callbacks
buf = nbd.Buffer(512)
cookie = h.aio_pread_structured(buf, 0, lambda *args: chunk(42, *args),
                                lambda *args: callback((42, 42), *args))
while not h.aio_command_completed(cookie):
    h.poll(-1)

buf = buf.to_bytearray()

print("%r" % buf)

assert buf == expected

# Second try: fail only during callback
buf = nbd.Buffer(512)
cookie = h.aio_pread_structured(buf, 0, lambda *args: chunk(42, *args),
                                lambda *args: callback((42, 43), *args))