Esempio n. 1
0
    def migrate(self, destHostIp):
        """Live-migrate this VM to the host at destHostIp.

        Uses a peer-to-peer, tunnelled migration over qemu+tcp; the domain
        is undefined on the source and persisted on the destination.

        :param destHostIp: IP address of the destination host
        :raises kvmagent.KvmError: if libvirt rejects the migration, or the
            local VM state does not settle within 300 seconds afterwards
        """
        destUrl = "qemu+tcp://{0}/system".format(destHostIp)
        tcpUri = "tcp://{0}".format(destHostIp)
        try:
            self.domain.migrateToURI2(
                destUrl, tcpUri, None,
                libvirt.VIR_MIGRATE_LIVE | libvirt.VIR_MIGRATE_PEER2PEER
                | libvirt.VIR_MIGRATE_UNDEFINE_SOURCE
                | libvirt.VIR_MIGRATE_PERSIST_DEST
                | libvirt.VIR_MIGRATE_TUNNELLED, None, 0)
        except libvirt.libvirtError as ex:
            logger.warn(linux.get_exception_stacktrace())
            raise kvmagent.KvmError('unable to migrate vm[uuid:%s] to %s, %s' %
                                    (self.uuid, destUrl, str(ex)))

        try:
            # wait for the local domain state to change, confirming the
            # migration has actually taken effect on this side
            if not linux.wait_callback_success(self.wait_for_state_change,
                                               callback_data=None,
                                               timeout=300):
                raise kvmagent.KvmError('timeout after 300 seconds')
        except kvmagent.KvmError as ke:
            raise ke
        except Exception as e:
            # best-effort: polling errors after a successful migrate are
            # logged but deliberately not fatal
            logger.debug(linux.get_exception_stacktrace())

        logger.debug(
            'successfully migrated vm[uuid:{0}] to dest url[{1}]'.format(
                self.uuid, destUrl))
Esempio n. 2
0
    def _parse_image_reference(self, backupStorageInstallPath):
        if not backupStorageInstallPath.startswith(self.ZSTORE_PROTOSTR):
            raise kvmagent.KvmError('unexpected backup storage install path %s' % backupStorageInstallPath)

        xs = backupStorageInstallPath[len(self.ZSTORE_PROTOSTR):].split('/')
        if len(xs) != 2:
            raise kvmagent.KvmError('unexpected backup storage install path %s' % backupStorageInstallPath)

        return xs[0], xs[1]
Esempio n. 3
0
    def stop(self, graceful=True, timeout=5, undefine=True):
        """Stop the VM: try a graceful shutdown first, force destroy as fallback.

        :param graceful: if True, attempt a guest shutdown before destroying
        :param timeout: accepted for interface compatibility; the waits in
            this method use a fixed 60s timeout and do not read it
        :param undefine: if True, also remove the libvirt domain definition
        :raises kvmagent.KvmError: if destroy or undefine time out (60s each)
        """
        def cleanup_addons():
            # delete unix socket files created for the VM's channel devices
            for chan in self.domain_xmlobject.devices.get_child_node_as_list(
                    'channel'):
                if chan.type_ == 'unix':
                    path = chan.source.path_
                    shell.call('rm -f %s' % path)

        def loop_shutdown(_):
            try:
                self.domain.shutdown()
            except:
                #domain has been shutdown
                pass

            return self.wait_for_state_change(self.VM_STATE_SHUTDOWN)

        def loop_undefine(_):
            if not undefine:
                return True

            if not self.is_alive():
                return True

            try:
                # also drop managed-save image and snapshot metadata
                self.domain.undefineFlags(
                    libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE
                    | libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA)
            except libvirt.libvirtError:
                # flags unsupported by this libvirt: fall back to plain undefine
                self.domain.undefine()

            return self.wait_for_state_change(None)

        def loop_destroy(_):
            try:
                self.domain.destroy()
            except:
                #domain has been destroyed
                pass

            return self.wait_for_state_change(self.VM_STATE_SHUTDOWN)

        do_destroy = True
        if graceful:
            if linux.wait_callback_success(loop_shutdown, None, timeout=60):
                do_destroy = False

        if do_destroy:
            if not linux.wait_callback_success(loop_destroy, None, timeout=60):
                raise kvmagent.KvmError(
                    'failed to destroy vm, timeout after 60 secs')

        cleanup_addons()
        if not linux.wait_callback_success(loop_undefine, None, timeout=60):
            raise kvmagent.KvmError(
                'failed to undefine vm, timeout after 60 secs')
Esempio n. 4
0
    def connect(self, req):
        """Validate a shared-mount-point (SMP) storage and register its id file.

        Rejects mount points whose filesystem is a local (non-shared) type,
        then, under a file lock, verifies no other SMP already owns the
        directory before writing this storage's uuid id file.

        :param req: HTTP request whose body deserializes to the connect cmd
        :returns: serialized ConnectRsp with capacity information
        :raises kvmagent.KvmError: if the mount point is missing or local
        """
        # filesystem types that indicate a local, non-shared mount
        none_shared_mount_fs_type = [
            'xfs', 'ext2', 'ext3', 'ext4', 'vfat', 'tmpfs', 'btrfs'
        ]
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        if not linux.timeout_isdir(cmd.mountPoint):
            raise kvmagent.KvmError(
                '%s is not a directory, the mount point seems not setup' %
                cmd.mountPoint)

        # ask df for the filesystem type backing the mount point
        folder_fs_type = shell.call("df -T %s|tail -1|awk '{print $2}'" %
                                    cmd.mountPoint).strip()
        if folder_fs_type in none_shared_mount_fs_type:
            raise kvmagent.KvmError(
                '%s filesystem is %s, which is not a shared mount point type.'
                % (cmd.mountPoint, folder_fs_type))

        id_dir = os.path.join(cmd.mountPoint, "zstack_smp_id_file")
        shell.call("mkdir -p %s" % id_dir)
        lock_file = os.path.join(id_dir, "uuid.lock")

        @lock.file_lock(lock_file, locker=lock.Flock())
        def check_other_smp_and_set_id_file(uuid, existUuids):
            # list uuid-shaped file names in id_dir other than our own uuid
            o = shell.ShellCmd('''\
            ls %s | grep -v %s | grep -o "[0-9a-f]\{8\}[0-9a-f]\{4\}[1-5][0-9a-f]\{3\}[89ab][0-9a-f]\{3\}[0-9a-f]\{12\}"\
            ''' % (id_dir, uuid))
            o(False)
            if o.return_code != 0:
                # grep matched nothing (or ls failed): no foreign id files
                file_uuids = []
            else:
                file_uuids = o.stdout.splitlines()

            for file_uuid in file_uuids:
                if file_uuid in existUuids:
                    raise Exception(
                        "the mount point [%s] has been occupied by other SMP[uuid:%s], Please attach this directly"
                        % (cmd.mountPoint, file_uuid))

            logger.debug("existing id files: %s" % file_uuids)
            self.id_files[uuid] = os.path.join(id_dir, uuid)

            if not os.path.exists(self.id_files[uuid]):
                # check if hosts in the same cluster mount the same path but different storages.
                rsp.isFirst = True
                for file_uuid in file_uuids:
                    # stale id files from replaced storages are removed
                    linux.rm_file_force(os.path.join(id_dir, file_uuid))
                linux.touch_file(self.id_files[uuid])
                linux.sync()

        rsp = ConnectRsp()
        check_other_smp_and_set_id_file(cmd.uuid, cmd.existUuids)

        rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(
            cmd.mountPoint)
        return jsonobject.dumps(rsp)
Esempio n. 5
0
    def connect(self, req):
        """Handle a connect request: validate the mount point and report capacity.

        :param req: HTTP request whose body deserializes to the connect cmd
        :returns: serialized AgentRsp with total/available capacity
        :raises kvmagent.KvmError: if the mount point is missing or its
            filesystem is a local (non-shared) type
        """
        non_shared_fs_types = ['xfs', 'ext2', 'ext3', 'ext4', 'vfat', 'tmpfs', 'btrfs']

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        self.mount_point = cmd.mountPoint
        if not os.path.isdir(self.mount_point):
            raise kvmagent.KvmError('%s is not a directory, the mount point seems not setup' % self.mount_point)

        # ask df for the filesystem type backing the mount point
        fs_type = shell.call("df -T %s|tail -1|awk '{print $2}'" % self.mount_point).strip()
        if fs_type in non_shared_fs_types:
            raise kvmagent.KvmError('%s filesystem is %s, which is not a shared mount point type.' % (self.mount_point, fs_type))

        rsp = AgentRsp()
        rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity()
        return jsonobject.dumps(rsp)
Esempio n. 6
0
    def _wait_for_block_job(self,
                            disk_path,
                            abort_on_error=False,
                            wait_for_job_clean=False):
        """Wait for libvirt block job to complete.

        Libvirt may return either cur==end or an empty dict when
        the job is complete, depending on whether the job has been
        cleaned up by libvirt yet, or not.

        :param disk_path: target disk of the block job
        :param abort_on_error: raise KvmError if blockJobInfo reports failure
        :param wait_for_job_clean: treat the job as ended only once libvirt
            has removed it (empty status dict), not merely when cur == end
        :returns: True if still in progress
                  False if completed
        """

        status = self.domain.blockJobInfo(disk_path, 0)
        # blockJobInfo yields -1 on failure instead of a status dict
        if status == -1 and abort_on_error:
            raise kvmagent.KvmError(
                'libvirt error while requesting blockjob info.')

        try:
            cur = status.get('cur', 0)
            end = status.get('end', 0)
        except Exception as e:
            # status was not a dict (e.g. -1 without abort_on_error):
            # log and report the job as completed
            logger.warn(linux.get_exception_stacktrace())
            return False

        if wait_for_job_clean:
            # an empty dict means libvirt has already cleaned the job up
            job_ended = not status
        else:
            job_ended = cur == end

        return not job_ended
Esempio n. 7
0
        def make_volumes():
            # Append a <disk> element for the root volume and every data volume.
            devices = elements['devices']
            volumes = [cmd.rootVolume]
            volumes.extend(cmd.dataVolumes)
            for v in volumes:
                if v.deviceId >= len(Vm.DEVICE_LETTERS):
                    # fix: the original formatted two %s placeholders with a
                    # single non-tuple argument, which raised TypeError at
                    # runtime instead of the intended KvmError
                    err = "%s exceeds max disk limit, it's %s but only 26 allowed" % (
                        v.installPath, v.deviceId)
                    logger.warn(err)
                    raise kvmagent.KvmError(err)

                dev_letter = Vm.DEVICE_LETTERS[v.deviceId]
                # qcow2 file-backed disk, marked for external snapshots
                disk = e(devices, 'disk', None, {
                    'type': 'file',
                    'device': 'disk',
                    'snapshot': 'external'
                })
                e(disk, 'driver', None, {
                    'name': 'qemu',
                    'type': 'qcow2',
                    'cache': 'none'
                })
                e(disk, 'source', None, {'file': v.installPath})
                if use_virtio:
                    e(disk, 'target', None, {
                        'dev': 'vd%s' % dev_letter,
                        'bus': 'virtio'
                    })
                else:
                    e(disk, 'target', None, {
                        'dev': 'hd%s' % dev_letter,
                        'bus': 'ide'
                    })
Esempio n. 8
0
        def upload():
            # Push the local primary-storage file to the backup storage via scp.
            src = cmd.primaryStorageInstallPath
            if not os.path.exists(src):
                raise kvmagent.KvmError('cannot find %s' % src)

            linux.scp_upload(cmd.hostname, cmd.sshKey, src,
                             cmd.backupStorageInstallPath)
Esempio n. 9
0
 def reboot(self, timeout=60):
     """Reboot the VM by stopping it and starting the domain again.

     :param timeout: accepted for interface compatibility; the stop below
         uses its own fixed timeout and this value is not read here
     :raises kvmagent.KvmError: if libvirt fails to start the domain
     """
     # keep the domain definition (undefine=False) so it can be recreated
     self.stop(timeout=20, undefine=False)
     try:
         self.domain.createWithFlags(0)
     except libvirt.libvirtError as e:
         logger.warn(linux.get_exception_stacktrace())
         raise kvmagent.KvmError('unable to start vm[uuid:%s], %s' %
                                 (self.uuid, str(e)))
Esempio n. 10
0
 def _get_image_reference(self, primaryStorageInstallPath):
     """Read the image (name, id) reference from the JSON metadata file
     that sits beside the primary storage install path.

     :raises kvmagent.KvmError: if the metadata file cannot be read
     """
     try:
         with open(self._get_image_json_file(primaryStorageInstallPath)) as f:
             imf = jsonobject.loads(f.read())
             return imf.name, imf.id
     # fix: use the 'except ... as e' form — valid on Python 2.6+,
     # forward-compatible with Python 3, and consistent with the rest
     # of this file (the old 'except IOError, e:' is Python-2-only)
     except IOError as e:
         errmsg = '_get_image_reference {0} failed: {1}'.format(primaryStorageInstallPath, e)
         raise kvmagent.KvmError(errmsg)
Esempio n. 11
0
    def _get_qemu_version(self):
        """Return the qemu version string, e.g. '2.6.0'.

        :raises kvmagent.KvmError: if a version number cannot be located
            in the `qemu -version` output
        """
        ret = shell.call('%s -version' % kvmagent.get_qemu_path())
        words = ret.split()
        # the version number is the token right after the literal 'version';
        # enumerate avoids the redundant list rescan of words.index(w), and
        # the bounds check prevents an IndexError when 'version' is the
        # final token — we raise the descriptive KvmError below instead
        for i, w in enumerate(words):
            if w == 'version' and i + 1 < len(words):
                return words[i + 1].strip()

        raise kvmagent.KvmError('cannot get qemu version[%s]' % ret)
Esempio n. 12
0
    def _get_qemu_version(self):
        """Return the bare qemu version number.

        Compatible with both `2.6.0` and
        `2.9.0(qemu-kvm-ev-2.9.0-16.el7_4.8.1)` style output.

        :raises kvmagent.KvmError: if 'version' is absent from the output
        """
        output = shell.call('%s -version' % kvmagent.get_qemu_path())
        tokens = output.split()
        for idx, token in enumerate(tokens):
            if token == 'version':
                # the number follows 'version'; drop any '(...)' suffix
                return tokens[idx + 1].strip().split('(')[0]

        raise kvmagent.KvmError('cannot get qemu version[%s]' % output)
Esempio n. 13
0
        def rebase_all_to_active_file():
            # Flatten the entire backing chain into the active file.
            self.domain.blockRebase(disk_name, None, 0, 0)

            def rebase_done(_):
                # _wait_for_block_job returns True while still in progress
                return not self._wait_for_block_job(disk_name,
                                                    abort_on_error=True)

            if not linux.wait_callback_success(rebase_done, timeout=300):
                raise kvmagent.KvmError('live full snapshot merge failed')
Esempio n. 14
0
    def connect(self, req):
        """Handle a connect request: ensure the mount point exists, report capacity.

        :param req: HTTP request whose body deserializes to the connect cmd
        :returns: serialized AgentRsp with total/available capacity
        :raises kvmagent.KvmError: if the mount point is not a directory
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        self.mount_point = cmd.mountPoint
        if not os.path.isdir(self.mount_point):
            # fix: the message contained a %s placeholder but no format
            # argument, so the offending path never appeared in the error
            raise kvmagent.KvmError(
                '%s is not a directory, the mount point seems not setup' %
                self.mount_point)

        rsp = AgentRsp()
        rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity()
        return jsonobject.dumps(rsp)
Esempio n. 15
0
    def merge_snapshot(self, cmd):
        """Merge the snapshot chain of one of this running VM's volumes.

        For a full rebase, the entire backing chain is flattened into the
        active file via blockRebase; otherwise a segment of the chain is
        merged with a relative blockCommit.

        :param cmd: command carrying deviceId, fullRebase, srcPath, destPath
        :raises kvmagent.KvmError: on job timeout (300s) or when relative
            block commit is unsupported by the installed libvirt
        """
        target_disk, disk_name = self._get_target_disk(cmd.deviceId)

        def rebase_all_to_active_file():
            self.domain.blockRebase(disk_name, None, 0, 0)

            def wait_job(_):
                # _wait_for_block_job returns True while still in progress
                return not self._wait_for_block_job(disk_name,
                                                    abort_on_error=True)

            if not linux.wait_callback_success(wait_job, timeout=300):
                raise kvmagent.KvmError('live full snapshot merge failed')

        def has_blockcommit_relative_version():
            # feature-detect the flag; it is absent on older libvirt-python
            try:
                ver = libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
                return True
            except:
                return False

        def do_commit(base, top, flags=0):
            self.domain.blockCommit(disk_name, base, top, 0, flags)

            logger.debug('start block commit %s --> %s' % (top, base))

            def wait_job(_):
                logger.debug(
                    'merging snapshot chain is waiting for blockCommit job completion'
                )
                return not self._wait_for_block_job(disk_name,
                                                    abort_on_error=True)

            if not linux.wait_callback_success(wait_job, timeout=300):
                raise kvmagent.KvmError(
                    'live merging snapshot chain failed, timeout after 300s')

            logger.debug('end block commit %s --> %s' % (top, base))

        def commit_to_intermediate_file():
            # libvirt blockCommit is from @top to @base; however, parameters @srcPath and @destPath in cmd indicate
            # direction @base to @top. We reverse the direction here for using blockCommit
            do_commit(cmd.srcPath, cmd.destPath,
                      libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)

        if cmd.fullRebase:
            rebase_all_to_active_file()
        else:
            if has_blockcommit_relative_version():
                commit_to_intermediate_file()
            else:
                raise kvmagent.KvmError(
                    'libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE is not detected, cannot do live block commit'
                )
Esempio n. 16
0
    def _start_vm(self, cmd):
        """Create and start a VM described by a StartVmCmd.

        If a domain with the same uuid already exists it must not be
        running; a stopped leftover is destroyed before recreation.

        :param cmd: StartVmCmd with vmInstanceUuid, vmName and timeout
        :raises kvmagent.KvmError: if the VM is already running or libvirt
            fails to start it
        """
        try:
            vm = get_vm_by_uuid(cmd.vmInstanceUuid)
        except kvmagent.KvmError:
            # no such domain: this is a fresh start
            vm = None

        try:
            if vm:
                if vm.state == Vm.VM_STATE_RUNNING:
                    raise kvmagent.KvmError(
                        'vm[uuid:%s, name:%s] is already running' %
                        (cmd.vmInstanceUuid, vm.get_name()))
                else:
                    # clear the stale definition before redefining the VM
                    vm.destroy()

            vm = Vm.from_StartVmCmd(cmd)
            vm.start(cmd.timeout)
        except libvirt.libvirtError as e:
            logger.warn(linux.get_exception_stacktrace())
            raise kvmagent.KvmError(
                'unable to start vm[uuid:%s, name:%s], libvirt error: %s' %
                (cmd.vmInstanceUuid, cmd.vmName, str(e)))
Esempio n. 17
0
 def start(self, timeout=60):
     """Define the domain from self.domain_xml and start it.

     :param timeout: seconds to wait for the VM to reach the running state
     :raises kvmagent.KvmError: if the VM state does not become running
         within the timeout
     """
     # TODO: 1. enable hair_pin mode
     logger.debug('creating vm:\n%s' % self.domain_xml)
     conn = kvmagent.get_libvirt_connection()
     domain = conn.defineXML(self.domain_xml)
     self.domain = domain
     self.domain.createWithFlags(0)
     if not linux.wait_callback_success(self.wait_for_state_change,
                                        self.VM_STATE_RUNNING,
                                        timeout=timeout):
         raise kvmagent.KvmError(
             'unable to start vm[uuid:%s, name:%s], vm state is not changing to running after %s seconds'
             % (self.uuid, self.get_name(), timeout))
Esempio n. 18
0
    def _get_target_disk(self, device_id):
        target_disk = None
        disk_name = 'vd%s' % self.DEVICE_LETTERS[device_id]
        for disk in self.domain_xmlobject.devices.get_child_node_as_list(
                'disk'):
            if disk.target.dev_ == disk_name:
                target_disk = disk
                break

        if not target_disk:
            raise kvmagent.KvmError(
                'unable to find volume[%s] on vm[uuid:%s]' %
                (disk_name, self.uuid))

        return target_disk, disk_name
Esempio n. 19
0
        def take_full_snapshot():
            logger.debug('start rebasing to make a full snapshot')
            # flatten the backing chain so the active file is self-contained
            self.domain.blockRebase(disk_name, None, 0, 0)

            logger.debug('rebasing full snapshot is in processing')

            def rebase_done(_):
                logger.debug(
                    'full snapshot is waiting for blockRebase job completion')
                # _wait_for_block_job returns True while still in progress
                return not self._wait_for_block_job(disk_name,
                                                    abort_on_error=True)

            if not linux.wait_callback_success(rebase_done, timeout=300):
                raise kvmagent.KvmError('live full snapshot failed')

            # after the rebase, the snapshot itself is a plain delta snapshot
            return take_delta_snapshot()
Esempio n. 20
0
def get_vm_by_uuid(uuid, exception_if_not_existing=True):
    """Look up a Vm wrapper for the libvirt domain named by the VM uuid.

    :param uuid: VM uuid, used as the libvirt domain name
    :param exception_if_not_existing: if False, return None for a missing
        domain instead of raising
    :returns: Vm instance, or None (see above)
    :raises kvmagent.KvmError: if the domain is absent and
        exception_if_not_existing is True
    :raises libvirt.libvirtError: for any other libvirt failure
    """
    try:
        domain = kvmagent.get_libvirt_connection().lookupByName(uuid)
        vm = Vm.from_virt_domain(domain)
        return vm
    except libvirt.libvirtError as e:
        error_code = e.get_error_code()
        if error_code == libvirt.VIR_ERR_NO_DOMAIN:
            if exception_if_not_existing:
                raise kvmagent.KvmError('unable to find vm[uuid:%s]' % uuid)
            else:
                return None

        # re-raise other libvirt failures with a more descriptive message
        err = 'error happened when looking up vm[uuid:%(uuid)s], libvirt error code: %(error_code)s, %(e)s' % locals(
        )
        raise libvirt.libvirtError(err)
Esempio n. 21
0
    def detach_data_volume(self, req):
        """HTTP handler: detach a data volume from a running VM.

        :param req: HTTP request whose body deserializes to the detach cmd
        :returns: serialized DetachDataVolumeResponse; on failure the error
            is recorded in the response instead of being raised
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = DetachDataVolumeResponse()
        try:
            volume = cmd.volume
            vm = get_vm_by_uuid(cmd.vmInstanceUuid)
            if vm.state != Vm.VM_STATE_RUNNING:
                # fix: the message wrongly read 'detach volume ... to vm'
                raise kvmagent.KvmError(
                    'unable to detach volume[%s] from vm[uuid:%s], vm must be running'
                    % (volume.installPath, vm.uuid))
            vm.detach_data_volume(volume)
        except kvmagent.KvmError as e:
            logger.warn(linux.get_exception_stacktrace())
            rsp.error = str(e)
            rsp.success = False

        return jsonobject.dumps(rsp)
Esempio n. 22
0
        def do_commit(base, top, flags=0):
            # Commit @top down into @base, then poll until the job completes.
            self.domain.blockCommit(disk_name, base, top, 0, flags)

            logger.debug('start block commit %s --> %s' % (top, base))

            def wait_job(_):
                logger.debug(
                    'merging snapshot chain is waiting for blockCommit job completion'
                )
                # _wait_for_block_job returns True while still in progress
                return not self._wait_for_block_job(disk_name,
                                                    abort_on_error=True)

            if not linux.wait_callback_success(wait_job, timeout=300):
                raise kvmagent.KvmError(
                    'live merging snapshot chain failed, timeout after 300s')

            logger.debug('end block commit %s --> %s' % (top, base))
Esempio n. 23
0
        def take_delta_snapshot():
            # Build a <domainsnapshot> document describing an external,
            # qcow2, disk-only snapshot of this volume.
            snapshot = etree.Element('domainsnapshot')
            disks = e(snapshot, 'disks')
            d = e(disks,
                  'disk',
                  None,
                  attrib={
                      'name': disk_name,
                      'snapshot': 'external',
                      'type': 'file'
                  })
            e(d, 'source', None, attrib={'file': install_path})
            e(d, 'driver', None, attrib={'type': 'qcow2'})

            xml = etree.tostring(snapshot)
            logger.debug(
                'creating snapshot for vm[uuid:{0}] volume[id:{1}]:\n{2}'.
                format(self.uuid, device_id, xml))
            # NO_METADATA: libvirt will not track the snapshot itself
            snap_flags = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA
            QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE

            try:
                # first try with guest filesystem quiescing enabled
                self.domain.snapshotCreateXML(xml, snap_flags | QUIESCE)
                return previous_install_path, install_path
            except libvirt.libvirtError:
                logger.debug(
                    'unable to create quiesced VM snapshot, attempting again with quiescing disabled'
                )

            # retry without quiesce; failure here is fatal
            try:
                self.domain.snapshotCreateXML(xml, snap_flags)
                return previous_install_path, install_path
            except libvirt.libvirtError as ex:
                logger.warn(linux.get_exception_stacktrace())
                raise kvmagent.KvmError(
                    'unable to take snapshot of vm[uuid:{0}] volume[id:{1}], {2}'
                    .format(self.uuid, device_id, str(ex)))
Esempio n. 24
0
    def detach_data_volume(self, volume):
        """Detach a data volume from this VM.

        The libvirt detach is retried up to three times because detaching
        shortly after VM creation is known to fail sporadically.

        :param volume: volume object with deviceId and installPath
        :raises kvmagent.KvmError: if the disk cannot be found, or the
            detach does not complete in time
        """
        assert volume.deviceId != 0, 'how can root volume gets detached???'
        # find the <disk> node whose target dev matches this device id
        target_disk = None
        disk_name = 'vd%s' % self.DEVICE_LETTERS[volume.deviceId]
        for disk in self.domain_xmlobject.devices.get_child_node_as_list(
                'disk'):
            if disk.target.dev_ == disk_name:
                target_disk = disk
                break

        if not target_disk:
            raise kvmagent.KvmError(
                'unable to find data volume[%s] on vm[uuid:%s]' %
                (disk_name, self.uuid))

        xmlstr = target_disk.dump()
        logger.debug('detaching volume from vm[uuid:%s]:\n%s' %
                     (self.uuid, xmlstr))
        try:

            # libvirt has a bug that if detaching volume just after vm created, it likely fails. So we retry three time here
            def detach(error_out):
                # NOTE(review): with error_out=False a libvirtError makes this
                # return True, which the caller reports as success — confirm
                # that treating a retry failure as success is intended
                try:
                    self.domain.detachDeviceFlags(
                        xmlstr, libvirt.VIR_DOMAIN_AFFECT_LIVE
                        | libvirt.VIR_DOMAIN_AFFECT_CONFIG)
                except libvirt.libvirtError as e:
                    if error_out:
                        raise e
                    return True

                def wait_for_detach(_):
                    # detach is done once the volume path no longer appears
                    # in a fresh dump of the domain XML
                    me = get_vm_by_uuid(self.uuid)
                    for disk in me.domain_xmlobject.devices.get_child_node_as_list(
                            'disk'):
                        if disk.source.file_ == volume.installPath:
                            logger.debug(
                                'volume[%s] is still in process of detaching, wait it'
                                % volume.installPath)
                            return False
                    return True

                # presumably 5s timeout at 1s intervals — three attempts
                # match the '15s' in the timeout error below
                return linux.wait_callback_success(wait_for_detach, None, 5, 1)

            if detach(True):
                logger.debug(
                    'successfully detached volume[deviceId:%s, installPath:%s] from vm[uuid:%s]'
                    % (volume.deviceId, volume.installPath, self.uuid))
                return
            if detach(False):
                logger.debug(
                    'successfully detached volume[deviceId:%s, installPath:%s] from vm[uuid:%s]'
                    % (volume.deviceId, volume.installPath, self.uuid))
                return
            if detach(False):
                logger.debug(
                    'successfully detached volume[deviceId:%s, installPath:%s] from vm[uuid:%s]'
                    % (volume.deviceId, volume.installPath, self.uuid))
                return

            raise kvmagent.KvmError(
                'libvirt fails to detach volume[deviceId:%s, installPath:%s] from vm[uuid:%s] in 15s, timeout'
                % (volume.deviceId, volume.installPath, self.uuid))

        except libvirt.libvirtError as ex:
            vm = get_vm_by_uuid(self.uuid)
            logger.warn('vm dump: %s' % vm.domain_xml)
            logger.warn(linux.get_exception_stacktrace())
            raise kvmagent.KvmError(
                'unable to detach volume[%s] from vm[uuid:%s], %s' %
                (volume.installPath, self.uuid, str(ex)))
Esempio n. 25
0
    def take_volume_snapshot(self, req):
        """HTTP handler: take a full or delta snapshot of a VM volume.

        If the volume has no VM, or the VM is stopped, the snapshot is made
        offline with qemu-img; a running VM delegates to
        Vm.take_volume_snapshot.

        :param req: HTTP request whose body deserializes to the snapshot cmd
        :returns: serialized TakeSnapshotResponse; failures are recorded in
            the response instead of being raised
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = TakeSnapshotResponse()

        def makedir_if_need(new_path):
            # ensure the parent directory of new_path exists
            dirname = os.path.dirname(new_path)
            if not os.path.exists(dirname):
                os.makedirs(dirname, 0755)

        def take_full_snapshot_by_qemu_img_convert(previous_install_path,
                                                   install_path):
            # offline full snapshot: collapse the chain into a template,
            # then clone a fresh top volume from it
            makedir_if_need(install_path)
            linux.qcow2_create_template(previous_install_path, install_path)
            new_volume_path = os.path.join(
                os.path.dirname(install_path),
                '{0}.qcow2'.format(uuidhelper.uuid()))
            makedir_if_need(new_volume_path)
            linux.qcow2_clone(install_path, new_volume_path)
            return install_path, new_volume_path

        def take_delta_snapshot_by_qemu_img_convert(previous_install_path,
                                                    install_path):
            # offline delta snapshot: new top volume backed by the old file
            new_volume_path = os.path.join(
                os.path.dirname(install_path),
                '{0}.qcow2'.format(uuidhelper.uuid()))
            makedir_if_need(new_volume_path)
            linux.qcow2_clone(previous_install_path, new_volume_path)
            return previous_install_path, new_volume_path

        try:
            if not cmd.vmUuid:
                # volume is not attached to any VM: snapshot offline
                if cmd.fullSnapshot:
                    rsp.snapshotInstallPath, rsp.newVolumeInstallPath = take_full_snapshot_by_qemu_img_convert(
                        cmd.volumeInstallPath, cmd.installPath)
                else:
                    rsp.snapshotInstallPath, rsp.newVolumeInstallPath = take_delta_snapshot_by_qemu_img_convert(
                        cmd.volumeInstallPath, cmd.installPath)

            else:
                vm = get_vm_by_uuid(cmd.vmUuid,
                                    exception_if_not_existing=False)

                if vm and vm.state != vm.VM_STATE_RUNNING and vm.state != vm.VM_STATE_SHUTDOWN:
                    raise kvmagent.KvmError(
                        'unable to take snapshot on vm[uuid:{0}] volume[id:{1}], because vm is not Running or Stopped, current state is {2}'
                        .format(vm.uuid, cmd.deviceId, vm.state))

                if vm and vm.state == vm.VM_STATE_RUNNING:
                    rsp.snapshotInstallPath, rsp.newVolumeInstallPath = vm.take_volume_snapshot(
                        cmd.deviceId, cmd.installPath, cmd.fullSnapshot)
                else:
                    # VM stopped or undefined: fall back to offline snapshot
                    if cmd.fullSnapshot:
                        rsp.snapshotInstallPath, rsp.newVolumeInstallPath = take_full_snapshot_by_qemu_img_convert(
                            cmd.volumeInstallPath, cmd.installPath)
                    else:
                        rsp.snapshotInstallPath, rsp.newVolumeInstallPath = take_delta_snapshot_by_qemu_img_convert(
                            cmd.volumeInstallPath, cmd.installPath)

                if cmd.fullSnapshot:
                    logger.debug(
                        'took full snapshot on vm[uuid:{0}] volume[id:{1}], snapshot path:{2}, new volulme path:{3}'
                        .format(cmd.vmUuid, cmd.deviceId,
                                rsp.snapshotInstallPath,
                                rsp.newVolumeInstallPath))
                else:
                    logger.debug(
                        'took delta snapshot on vm[uuid:{0}] volume[id:{1}], snapshot path:{2}, new volulme path:{3}'
                        .format(cmd.vmUuid, cmd.deviceId,
                                rsp.snapshotInstallPath,
                                rsp.newVolumeInstallPath))

            rsp.size = os.path.getsize(rsp.snapshotInstallPath)
        except kvmagent.KvmError as e:
            logger.warn(linux.get_exception_stacktrace())
            rsp.error = str(e)
            rsp.success = False

        return jsonobject.dumps(rsp)
Esempio n. 26
0
    def attach_data_volume(self, volume):
        """Attach a qcow2 data volume to this VM as a virtio disk.

        The libvirt attach is retried up to three times because attaching
        shortly after VM creation is known to fail sporadically.

        :param volume: volume object with deviceId and installPath
        :raises kvmagent.KvmError: if the device id exceeds the limit or
            the attach does not complete in time
        """
        if volume.deviceId >= len(self.DEVICE_LETTERS):
            err = "vm[uuid:%s] exceeds max disk limit, device id[%s], but only 24 allowed" % (
                self.uuid, volume.deviceId)
            logger.warn(err)
            raise kvmagent.KvmError(err)

        # build the <disk> XML fragment for the hot-plug
        disk = etree.Element('disk', attrib={'type': 'file', 'device': 'disk'})
        e(disk, 'driver', None, {
            'name': 'qemu',
            'type': 'qcow2',
            'cache': 'none'
        })
        e(disk, 'source', None, {'file': volume.installPath})
        e(disk, 'target', None, {
            'dev': 'vd%s' % self.DEVICE_LETTERS[volume.deviceId],
            'bus': 'virtio'
        })

        xml = etree.tostring(disk)
        logger.debug('attaching volume[%s] to vm[uuid:%s]:\n%s' %
                     (volume.installPath, self.uuid, xml))
        try:
            # libvirt has a bug that if attaching volume just after vm created, it likely fails. So we retry three time here
            def attach(error_out):
                # NOTE(review): with error_out=False a libvirtError makes this
                # return True, which the caller reports as success — confirm
                # that treating a retry failure as success is intended
                try:
                    self.domain.attachDeviceFlags(
                        xml, libvirt.VIR_DOMAIN_AFFECT_LIVE
                        | libvirt.VIR_DOMAIN_AFFECT_CONFIG)
                except libvirt.libvirtError as e:
                    if error_out:
                        raise e
                    return True

                def wait_for_attach(_):
                    # attach is done once the volume path shows up in a
                    # fresh dump of the domain XML
                    me = get_vm_by_uuid(self.uuid)
                    for disk in me.domain_xmlobject.devices.get_child_node_as_list(
                            'disk'):
                        if disk.source.file_ == volume.installPath:
                            return True

                    logger.debug(
                        'volume[%s] is still in process of attaching, wait it'
                        % volume.installPath)
                    return False

                # presumably 5s timeout at 1s intervals — three attempts
                # match the '15s' in the timeout error below
                return linux.wait_callback_success(wait_for_attach, None, 5, 1)

            if attach(True):
                logger.debug(
                    'successfully attached volume[deviceId:%s, installPath:%s] to vm[uuid:%s]'
                    % (volume.deviceId, volume.installPath, self.uuid))
                return
            if attach(False):
                logger.debug(
                    'successfully attached volume[deviceId:%s, installPath:%s] to vm[uuid:%s]'
                    % (volume.deviceId, volume.installPath, self.uuid))
                return
            if attach(False):
                logger.debug(
                    'successfully attached volume[deviceId:%s, installPath:%s] to vm[uuid:%s]'
                    % (volume.deviceId, volume.installPath, self.uuid))
                return

            raise kvmagent.KvmError(
                'failed to attach volume[deviceId:%s, installPath:%s] to vm[uuid:%s] in 15s, timeout'
                % (volume.deviceId, volume.installPath, self.uuid))

        except libvirt.libvirtError as ex:
            logger.warn(linux.get_exception_stacktrace())
            raise kvmagent.KvmError(
                'unable to attach volume[%s] to vm[uuid:%s], %s' %
                (volume.installPath, self.uuid, str(ex)))