Example #1
    def _regular_run(self):
        self.log.debug("Starting migration source thread")
        self._recovery = False
        self._update_outgoing_limit()
        try:
            startTime = time.time()
            # Guest agent API version must be updated before _srcDomXML
            # is created to have the version in _srcDomXML metadata.
            self._vm.update_guest_agent_api_version()
            machineParams = self._setupRemoteMachineParams()
            self._setupVdsConnection()
            self._prepareGuest()

            while not self._started:
                try:
                    self.log.info("Migration semaphore: acquiring")
                    with SourceThread.ongoingMigrations:
                        self.log.info("Migration semaphore: acquired")
                        timeout = config.getint(
                            'vars', 'guest_lifecycle_event_reply_timeout')
                        if self.hibernating:
                            self._vm.guestAgent.events.before_hibernation(
                                wait_timeout=timeout)
                        elif self._enableGuestEvents:
                            self._vm.guestAgent.events.before_migration(
                                wait_timeout=timeout)
                        if self._migrationCanceledEvt.is_set():
                            self._raiseAbortError()
                        self.log.debug(
                            "migration semaphore acquired "
                            "after %d seconds",
                            time.time() - startTime)
                        self._startUnderlyingMigration(time.time(),
                                                       machineParams)
                        self._finishSuccessfully(machineParams)
                except libvirt.libvirtError as e:
                    if e.get_error_code() == libvirt.VIR_ERR_OPERATION_ABORTED:
                        self.status = response.error(
                            'migCancelErr', message='Migration canceled')
                    # This error occurs when hypervisor cannot start
                    # the migration. For example, when a domain with the same
                    # name already exists on the destination.
                    elif e.get_error_code() == \
                            libvirt.VIR_ERR_OPERATION_FAILED:
                        self.status = response.error('migOperationErr',
                                                     message=e.get_str2())
                    raise
                except MigrationLimitExceeded:
                    retry_timeout = config.getint('vars',
                                                  'migration_retry_timeout')
                    self.log.debug(
                        "Migration destination busy. Initiating "
                        "retry in %d seconds.", retry_timeout)
                    self._migrationCanceledEvt.wait(retry_timeout)
        except MigrationDestinationSetupError as e:
            self._recover(str(e))
            # we know what happened, no need to dump hollow stack trace
        except Exception as e:
            self._recover(str(e))
            self.log.exception("Failed to migrate")
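Note the retry loop above: _regular_run() keeps trying to take the ongoingMigrations semaphore until the migration starts. MigrationLimitExceeded only delays the next attempt by migration_retry_timeout (or less, if the cancel event fires first), while VIR_ERR_OPERATION_ABORTED and VIR_ERR_OPERATION_FAILED are translated to the migCancelErr and migOperationErr responses before the exception is re-raised.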
Example #2
def unregister(uuids):
    try:
        uuids = [str(uuid.UUID(s)) for s in uuids]
    except ValueError as e:
        logging.warning("Attempt to unregister invalid uuid %s: %s" %
                        (uuids, e))
        return response.error("secretBadRequestErr")

    con = libvirtconnection.get()
    try:
        for sec_uuid in uuids:
            logging.info("Unregistering secret %r", sec_uuid)
            try:
                virsecret = con.secretLookupByUUIDString(sec_uuid)
            except libvirt.libvirtError as e:
                if e.get_error_code() != libvirt.VIR_ERR_NO_SECRET:
                    raise
                logging.debug("No such secret %r", sec_uuid)
            else:
                virsecret.undefine()
    except libvirt.libvirtError as e:
        logging.error("Could not unregister secrets: %s", e)
        return response.error("secretUnregisterErr")

    return response.success()
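A detail worth noting in unregister(): VIR_ERR_NO_SECRET is swallowed, so removing an already-absent secret is treated as a no-op and the call still returns success(); any other libvirtError escapes to the outer handler and is reported as secretUnregisterErr.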
Example #3
    def test_register_libvirt_error(self):
        def fail(xml):
            raise vmfakecon.Error(libvirt.VIR_ERR_INTERNAL_ERROR)

        self.connection.secretDefineXML = fail
        res = secret.register([make_secret()])
        self.assertEqual(res, response.error("secretRegisterErr"))
Example #4
    def _recover(self, message):
        if not response.is_error(self.status):
            self.status = response.error('migrateErr')
        self.log.error(message)
        if not self.hibernating and self._destServer is not None:
            if self._vm.post_copy == PostCopyPhase.RUNNING:
                # We can't recover a VM after a failed post-copy migration.
                # And the destination takes care of the situation itself.
                self._vm.handle_failed_post_copy(clean_vm=True)
                return
            try:
                self._destServer.destroy(self._vm.id)
            except Exception:
                self.log.exception("Failed to destroy remote VM")
        # if the guest was stopped before migration, we need to cont it
        if self.hibernating:
            self._vm.cont(ignoreStatus=True)
            if self._enableGuestEvents:
                self._vm.guestAgent.events.after_hibernation_failure()
        elif self._enableGuestEvents:
            self._vm.guestAgent.events.after_migration_failure()
        # either way, migration has finished
        self._failed = True
        if self._recovery:
            self._vm.set_last_status(vmstatus.UP, vmstatus.MIGRATION_SOURCE)
            self._recovery = False
        else:
            self._vm.lastStatus = vmstatus.UP
        self._started = False
        self._vm.send_status_event()
Example #5
def test_diskreplicatefinish_transient_disk():
    src_drive = make_drive(src_drive_conf,
                           storage.DRIVE_SHARED_TYPE.TRANSIENT)
    _vm = FakeVm([src_drive])
    result = _vm.diskReplicateFinish(src_drive_conf, dst_drive_conf)

    assert result == response.error("transientErr")
Example #6
    def test_unregister_libvirt_error(self):
        def fail(uuid):
            raise vmfakecon.Error(libvirt.VIR_ERR_INTERNAL_ERROR)

        self.connection.secretLookupByUUIDString = fail
        res = secret.unregister([str(uuid.uuid4())])
        self.assertEqual(res, response.error("secretUnregisterErr"))
Example #7
File: jobs.py Project: xin49/vdsm
def delete(job_id):
    try:
        _delete(job_id)
    except ClientError as e:
        logging.info('Cannot delete job, error: %s', e)
        return response.error(e.name)
    return response.success()
Example #8
    def _recover(self, message):
        if not response.is_error(self.status):
            self.status = response.error('migrateErr')
        self.log.error(message)
        if not self.hibernating and self._destServer is not None:
            if self._vm.post_copy == PostCopyPhase.RUNNING:
                # We can't recover a VM after a failed post-copy migration.
                # And the destination takes care of the situation itself.
                self._vm.handle_failed_post_copy(clean_vm=True)
                return
            try:
                self._destServer.destroy(self._vm.id)
            except Exception:
                self.log.exception("Failed to destroy remote VM")
        # if the guest was stopped before migration, we need to cont it
        if self.hibernating:
            self._vm.cont(ignoreStatus=True)
            if self._enableGuestEvents:
                self._vm.guestAgent.events.after_hibernation_failure()
        elif self._enableGuestEvents:
            self._vm.guestAgent.events.after_migration_failure()
        # either way, migration has finished
        if self._recovery:
            self._vm.set_last_status(vmstatus.UP, vmstatus.MIGRATION_SOURCE)
            self._recovery = False
        else:
            self._vm.lastStatus = vmstatus.UP
        self._started = False
        self._vm.send_status_event()
Example #9
File: jobs.py Project: xin49/vdsm
def abort(job_id):
    try:
        job = get(job_id)
        job.abort()
    except ClientError as e:
        logging.info('Cannot abort job, error: %s', e)
        return response.error(e.name)
    return response.success()
Example #10
    def test_error(self):
        NAME = 'noVM'  # no special meaning, any error is fine
        res = response.error(NAME)

        template = errCode[NAME]
        self.assertEqual(res["status"]["code"], template["status"]["code"])
        self.assertEqual(res["status"]["message"],
                         template["status"]["message"])
Example #11
    def test_error_with_message(self):
        NAME = 'noVM'  # no special meaning, any error is fine
        MESSAGE = 'we want a specific message here'
        res = response.error(NAME, MESSAGE)

        template = errCode[NAME]
        self.assertEqual(res["status"]["code"], template["status"]["code"])
        self.assertEqual(res["status"]["message"], MESSAGE)
Example #12
    def _regular_run(self):
        self.log.debug("Starting migration source thread")
        self._recovery = False
        self._update_outgoing_limit()
        try:
            startTime = time.time()
            machineParams = self._setupRemoteMachineParams()
            self._setupVdsConnection()
            self._prepareGuest()

            while not self._started:
                try:
                    self.log.info("Migration semaphore: acquiring")
                    with SourceThread.ongoingMigrations:
                        self.log.info("Migration semaphore: acquired")
                        timeout = config.getint(
                            'vars', 'guest_lifecycle_event_reply_timeout')
                        if self.hibernating:
                            self._vm.guestAgent.events.before_hibernation(
                                wait_timeout=timeout)
                        elif self._enableGuestEvents:
                            self._vm.guestAgent.events.before_migration(
                                wait_timeout=timeout)
                        if self._migrationCanceledEvt.is_set():
                            self._raiseAbortError()
                        self.log.debug(
                            "migration semaphore acquired "
                            "after %d seconds",
                            time.time() - startTime)
                        migrationParams = {
                            'dst': self._dst,
                            'mode': self._mode,
                            'method': METHOD_ONLINE,
                            'dstparams': self._dstparams,
                            'dstqemu': self._dstqemu,
                        }
                        self._startUnderlyingMigration(time.time(),
                                                       migrationParams,
                                                       machineParams)
                        self._finishSuccessfully(machineParams)
                except libvirt.libvirtError as e:
                    if e.get_error_code() == libvirt.VIR_ERR_OPERATION_ABORTED:
                        self.status = response.error(
                            'migCancelErr', message='Migration canceled')
                    raise
                except MigrationLimitExceeded:
                    retry_timeout = config.getint('vars',
                                                  'migration_retry_timeout')
                    self.log.debug(
                        "Migration destination busy. Initiating "
                        "retry in %d seconds.", retry_timeout)
                    self._migrationCanceledEvt.wait(retry_timeout)
        except MigrationDestinationSetupError as e:
            self._recover(str(e))
            # we know what happened, no need to dump hollow stack trace
        except Exception as e:
            self._recover(str(e))
            self.log.exception("Failed to migrate")
Example #13
def test_diskreplicatefinish_job_not_found():
    src_drive = make_drive(src_drive_conf)
    _vm = FakeVm([src_drive])

    # Passing an empty dict so 'cur' and 'end' will not be found
    _vm._dom = FakeDomain({})
    result = _vm.diskReplicateFinish(src_drive_conf, dst_drive_conf)

    assert result == response.error("replicaErr")
Example #14
def test_diskreplicatefinish_job_not_finished():
    _vm = FakeVm([make_drive(src_drive_conf)])
    _vm._dom = FakeDomain({'cur': 0, 'end': 1})
    result = _vm.diskReplicateFinish(src_drive_conf, dst_drive_conf)

    assert result == response.error("unavail")

    # if pivot was not called the monitor should not have been disabled
    assert not _vm.drive_monitor.was_disabled
Example #15
    def teardownImage(self, domainId, poolId, imageId):
        if imageId == TEARDOWN_ERROR_IMAGE_ID:
            return response.error('teardownError')

        imagepath = _vol_path(self._image_path_base, domainId, poolId, imageId)
        resultpath = _vol_path(self._image_path_base, domainId, poolId,
                               imageId, ext='.res')
        os.rename(imagepath, resultpath)
        return response.success()
Example #16
def test_active_merge_storage_unavailable(monkeypatch):
    monkeypatch.setattr(CleanupThread, "WAIT_INTERVAL", 0.01)

    config = Config('active-merge')
    sd_id = config.values["drive"]["domainID"]
    img_id = config.values["drive"]["imageID"]
    merge_params = config.values["merge_params"]
    job_id = merge_params["jobUUID"]
    base_id = merge_params["baseVolUUID"]
    top_id = merge_params["topVolUUID"]

    vm = RunningVM(config)

    simulate_base_needs_extend(vm, sd_id, img_id, top_id, base_id)

    with monkeypatch.context() as ctx:
        # Simulate unavailable storage.
        fail = lambda *args, **kwargs: response.error("unavail")
        ctx.setattr(vm.cif.irs, "imageSyncVolumeChain", fail)

        vm.merge(**merge_params)

        simulate_volume_extension(vm, base_id)

        # Simulate the libvirt active block commit job becoming ready.
        block_job = vm._dom.block_jobs["sda"]
        block_job["cur"] = block_job["end"]

        # Simulate completion of the commit job - libvirt updates the xml.
        vm._dom.xml = config.xmls["02-commit-ready.xml"]

        assert parse_jobs(vm)[job_id]['state'] == Job.COMMIT

        # Trigger cleanup and pivot attempt which fails - resource unavailable.
        vm.query_jobs()

        # Wait until the first cleanup completes.
        if not vm._drive_merger.wait_for_cleanup(TIMEOUT):
            raise RuntimeError("Timeout waiting for cleanup")

        assert parse_jobs(vm)[job_id]['state'] == Job.CLEANUP

        # Verify volume monitor is enabled after the failure.
        assert vm.volume_monitor.enabled

    # Verify cleanup thread switched to FAILED.
    ct = vm._drive_merger._cleanup_threads.get(job_id)
    assert ct.state == CleanupThread.FAILED

    # Next query_jobs() call will start a new cleanup thread.
    vm.query_jobs()
    ct = vm._drive_merger._cleanup_threads.get(job_id)
    assert ct.state == CleanupThread.TRYING
Example #17
File: v2v.py Project: minqf/vdsm
def get_external_vm_names(uri, username, password):
    try:
        conn = libvirtconnection.open_connection(uri=uri,
                                                 username=username,
                                                 passwd=password)
    except libvirt.libvirtError as e:
        logging.exception('error connecting to hypervisor')
        return response.error('V2VConnection', str(e))

    with closing(conn):
        vms = [vm.name() for vm in _list_domains(conn)]
        return response.success(vmNames=vms)
Example #18
def get_external_vm_names(uri, username, password):
    try:
        conn = libvirtconnection.open_connection(uri=uri,
                                                 username=username,
                                                 passwd=password)
    except libvirt.libvirtError as e:
        logging.error('error connecting to hypervisor: %r', e.message)
        return response.error('V2VConnection', e.message)

    with closing(conn):
        vms = [vm.name() for vm in _list_domains(conn)]
        return response.success(vmNames=vms)
Example #19
def test_blockjobabort_failed(monkeypatch):
    def raising_blockjobabort():
        raise Exception('blockJobAbort failed')

    src_drive = make_drive(src_drive_conf)
    dst_drive = make_drive(dst_drive_conf)

    _vm = FakeVm([src_drive, dst_drive])
    _vm._dom = FakeDomain({'cur': 1, 'end': 1})

    monkeypatch.setattr(FakeDomain, 'blockJobAbort', raising_blockjobabort)
    result = _vm.diskReplicateFinish(src_drive_conf, dst_drive_conf)

    assert result == response.error("changeDisk")
Example #20
def register(secrets, clear=False):
    try:
        secrets = [Secret(params) for params in secrets]
    except ValueError as e:
        logging.warning("Attempt to register invalid secret: %s", e)
        return response.error("secretBadRequestErr")

    con = libvirtconnection.get()
    try:
        for secret in secrets:
            logging.info("Registering secret %s", secret)
            secret.register(con)
        if clear:
            uuids = frozenset(sec.uuid for sec in secrets)
            for virsecret in con.listAllSecrets():
                if (virsecret.UUIDString() not in uuids
                        and _is_ovirt_secret(virsecret)):
                    virsecret.undefine()
    except libvirt.libvirtError as e:
        logging.error("Could not register secret %s: %s", secret, e)
        return response.error("secretRegisterErr")

    return response.success()
Example #21
    def testSetNumberOfVcpusFailed(self, virt_error, vdsm_error,
                                   error_message):
        def _fail(*args):
            raise_libvirt_error(virt_error, error_message)

        with MonkeyPatchScope([(hooks, 'before_set_num_of_cpus', lambda: None)
                               ]):
            with fake.VM() as testvm:
                dom = fake.Domain()
                dom.setVcpusFlags = _fail
                testvm._dom = dom

                res = testvm.setNumberOfCpus(4)  # random value

                assert res == response.error(vdsm_error)
Example #22
    def testSetNumberOfVcpusFailed(self, virt_error, vdsm_error,
                                   error_message):
        def _fail(*args):
            raise_libvirt_error(virt_error, error_message)

        with MonkeyPatchScope([(hooks, 'before_set_num_of_cpus',
                                lambda: None)]):
            with fake.VM() as testvm:
                dom = fake.Domain()
                dom.setVcpusFlags = _fail
                testvm._dom = dom

                res = testvm.setNumberOfCpus(4)  # random value

                self.assertEqual(res, response.error(vdsm_error))
Example #23
    def start(self):
        # are there any available methods for power-down?
        if self.chain.callbacks:
            # flag for successful power-down event detection
            # this flag is common for both shutdown and reboot workflows
            # because we want to exit the CallbackChain in case either
            # of them happens
            self.event.clear()

            self.chain.start()
            return response.success(message=self.returnMsg)
        else:
            # No tools, no ACPI
            return response.error(
                'exist',
                message='VM without ACPI or active oVirt guest agent. '
                'Try Forced Shutdown.')
Example #24
    def _recover(self, message):
        if not response.is_error(self.status):
            self.status = response.error('migrateErr')
        self.log.error(message)
        if not self.hibernating and self._destServer is not None:
            try:
                self._destServer.destroy(self._vm.id)
            except Exception:
                self.log.exception("Failed to destroy remote VM")
        # if the guest was stopped before migration, we need to cont it
        if self.hibernating:
            self._vm.cont(ignoreStatus=True)
            if self._enableGuestEvents:
                self._vm.guestAgent.events.after_hibernation_failure()
        elif self._enableGuestEvents:
            self._vm.guestAgent.events.after_migration_failure()
        # either way, migration has finished
        self._vm.lastStatus = vmstatus.UP
        self._vm.send_status_event()
Example #25
    def testUpdateDeviceGraphicsFailed(self):
        with fake.VM(devices=self.GRAPHIC_DEVICES) as testvm:
            message = 'fake timeout while setting ticket'
            device = 'spice'
            domXml = '''
                <devices>
                    <graphics type="%s" port="5900" />
                </devices>''' % device

            def _fail(*args):
                raise virdomain.TimeoutError(defmsg=message)

            domain = fake.Domain(domXml)
            domain.updateDeviceFlags = _fail
            testvm._dom = domain

            res = self._updateGraphicsDevice(testvm, device,
                                             _GRAPHICS_DEVICE_PARAMS)

            self.assertEqual(res, response.error('ticketErr', message))
Example #26
    def test_delete_unknown_job(self):
        self.assertEqual(response.error(jobs.NoSuchJob.name),
                         jobs.delete('foo'))
Example #27
    def _not_appropriatable(self, guid, vmid):
        # any error is actually fine
        return response.error('unexpected')
Example #28
    def test_is_specific_error(self, actual_err, expected_err):
        match = actual_err == expected_err
        self.assertEqual(match, response.is_error(response.error(actual_err),
                                                  err=expected_err))
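test_is_specific_error pins down the two-argument behaviour of is_error(). A minimal sketch of those semantics, inferred from the test rather than taken from vdsm's response.py (errCode as in the sketch after Example #10):

from vdsm.common.define import errCode  # assumed path, as above

def is_error_like(res, err=None):
    # With no err, any nonzero status code counts as an error; with an
    # error name, the code must match that name's entry in errCode.
    code = res["status"]["code"]
    if err is None:
        return code != 0
    return code == errCode[err]["status"]["code"]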
Example #29
def _createVm_fails(*args, **kwargs):
    return response.error('noVM')
Example #30
    def test_abort_not_supported(self):
        job = jobs.Job(str(uuid.uuid4()))
        job._status = jobs.STATUS.RUNNING
        jobs.add(job)
        self.assertEqual(response.error(jobs.AbortNotSupported.name),
                         jobs.abort(job.id))
Example #31
def test_diskreplicatefinish_job_not_finished():
    _vm = FakeVm([make_drive(src_drive_conf)])
    _vm._dom = FakeDomain({'cur': 0, 'end': 1})
    result = _vm.diskReplicateFinish(src_drive_conf, dst_drive_conf)

    assert result == response.error("unavail")
Example #32
    def test_register_validation(self):
        res = secret.register([{"invalid": "secret"}])
        self.assertEqual(res, response.error("secretBadRequestErr"))
Example #33
    def snapshot(self):
        """Live snapshot command"""
        def norm_snap_drive_params(drive):
            """Normalize snapshot parameters"""

            if "baseVolumeID" in drive:
                base_drv = {
                    "device": "disk",
                    "domainID": drive["domainID"],
                    "imageID": drive["imageID"],
                    "volumeID": drive["baseVolumeID"]
                }
                target_drv = base_drv.copy()
                target_drv["volumeID"] = drive["volumeID"]

            elif "baseGUID" in drive:
                base_drv = {"GUID": drive["baseGUID"]}
                target_drv = {"GUID": drive["GUID"]}

            elif "baseUUID" in drive:
                base_drv = {"UUID": drive["baseUUID"]}
                target_drv = {"UUID": drive["UUID"]}

            else:
                base_drv, target_drv = (None, None)

            return base_drv, target_drv

        def rollback_drives(new_drives):
            """Rollback the prepared volumes for the snapshot"""

            for vm_dev_name, drive in new_drives.items():
                try:
                    self.vm.cif.teardownVolumePath(drive)
                except Exception:
                    self.vm.log.exception("Unable to teardown drive: %s",
                                          vm_dev_name)

        def memory_snapshot(memory_volume_path):
            """Libvirt snapshot XML"""

            return vmxml.Element('memory',
                                 snapshot='external',
                                 file=memory_volume_path)

        def vm_conf_for_memory_snapshot():
            """Returns the needed vm configuration with the memory snapshot"""

            return {
                'restoreFromSnapshot': True,
                '_srcDomXML': self.vm.migratable_domain_xml(),
                'elapsedTimeOffset': time.time() - self.vm.start_time
            }

        snap = vmxml.Element('domainsnapshot')
        disks = vmxml.Element('disks')
        new_drives = {}
        vm_drives = {}

        for drive in self.snap_drives:
            base_drv, tget_drv = norm_snap_drive_params(drive)

            try:
                self.vm.findDriveByUUIDs(tget_drv)
            except LookupError:
                # The vm is not already using the requested volume for the
                # snapshot, continuing.
                pass
            else:
                # The snapshot volume is the current one, skipping
                self.vm.log.debug("The volume is already in use: %s", tget_drv)
                continue  # Next drive

            try:
                vm_drive = self.vm.findDriveByUUIDs(base_drv)
            except LookupError:
                # The volume we want to snapshot doesn't exist
                self.vm.log.error("The base volume doesn't exist: %s",
                                  base_drv)
                return response.error('snapshotErr')

            if vm_drive.hasVolumeLeases:
                self.vm.log.error('disk %s has volume leases', vm_drive.name)
                return response.error('noimpl')

            if vm_drive.transientDisk:
                self.vm.log.error('disk %s is a transient disk', vm_drive.name)
                return response.error('transientErr')

            vm_dev_name = vm_drive.name

            new_drives[vm_dev_name] = tget_drv.copy()
            new_drives[vm_dev_name]["type"] = "disk"
            new_drives[vm_dev_name]["diskType"] = vm_drive.diskType
            new_drives[vm_dev_name]["poolID"] = vm_drive.poolID
            new_drives[vm_dev_name]["name"] = vm_dev_name
            new_drives[vm_dev_name]["format"] = "cow"

            # We need to keep track of the drive object because it keeps
            # the original data and is used to generate the snapshot element.
            # We keep the old volume ID so we can clear the block threshold.
            vm_drives[vm_dev_name] = (vm_drive, base_drv["volumeID"])

        prepared_drives = {}

        for vm_dev_name, vm_device in new_drives.items():
            # Add the device before requesting to prepare it, as we want to
            # be sure to tear it down even when prepareVolumePath fails with
            # some unknown issue that leaves the volume active.
            prepared_drives[vm_dev_name] = vm_device
            try:
                new_drives[vm_dev_name]["path"] = \
                    self.vm.cif.prepareVolumePath(new_drives[vm_dev_name])
            except Exception:
                self.vm.log.exception(
                    'unable to prepare the volume path for '
                    'disk %s', vm_dev_name)
                rollback_drives(prepared_drives)
                return response.error('snapshotErr')

            drive, _ = vm_drives[vm_dev_name]
            snapelem = drive.get_snapshot_xml(vm_device)
            disks.appendChild(snapelem)

        snap.appendChild(disks)

        snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT
                      | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA)

        if self.memory_params:
            # Save the needed vm configuration
            # TODO: this, as other places that use pickle.dump
            # directly to files, should be done with outOfProcess
            vm_conf_vol = self.memory_params['dstparams']
            vm_conf_vol_path = self.vm.cif.prepareVolumePath(vm_conf_vol)
            try:
                with open(vm_conf_vol_path, "rb+") as f:
                    vm_conf = vm_conf_for_memory_snapshot()
                    # protocol=2 is needed for clusters < 4.4
                    # (for Python 2 host compatibility)
                    data = pickle.dumps(vm_conf, protocol=2)

                    # Ensure that the volume is aligned; qemu-img may segfault
                    # when converting unaligned images.
                    # https://bugzilla.redhat.com/1649788
                    aligned_length = utils.round(len(data), 4096)
                    data = data.ljust(aligned_length, b"\0")

                    f.write(data)
                    f.flush()
                    os.fsync(f.fileno())
            finally:
                self.vm.cif.teardownVolumePath(vm_conf_vol)

            # Adding the memory volume to the snapshot xml
            memory_vol = self.memory_params['dst']
            memory_vol_path = self.vm.cif.prepareVolumePath(memory_vol)
            snap.appendChild(memory_snapshot(memory_vol_path))
        else:
            memory_vol = memory_vol_path = None
            snap_flags |= libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY

        snapxml = xmlutils.tostring(snap)
        # TODO: this is debug information. For 3.6.x we still need to
        # see the XML even with 'info' as default level.
        self.vm.log.info("%s", snapxml)

        self._snapshot_job['memoryVolPath'] = memory_vol_path
        self._snapshot_job['memoryVol'] = memory_vol
        self._snapshot_job['newDrives'] = new_drives
        vm_drives_serialized = {}
        for k, v in vm_drives.items():
            vm_drives_serialized[k] = [xmlutils.tostring(v[0].getXML()), v[1]]
        self._snapshot_job['vmDrives'] = vm_drives_serialized
        self.vm.update_snapshot_metadata(self._snapshot_job)

        # We need to stop the drive monitoring for two reasons: to prevent
        # spurious libvirt errors about missing drive paths (since we're
        # changing them), and to avoid triggering a drive extension for the
        # new volume with the apparent size of the old one (the apparentsize
        # is updated as the last step in updateDriveParameters).
        self.vm.drive_monitor.disable()

        try:
            if self.should_freeze:
                self.vm.freeze()
            try:
                self.vm.log.info(
                    "Taking a live snapshot (drives=%s,"
                    "memory=%s)",
                    ', '.join(drive["name"] for drive in new_drives.values()),
                    self.memory_params is not None)
                self.vm.run_dom_snapshot(snapxml, snap_flags)
                self.vm.log.info("Completed live snapshot")
            except libvirt.libvirtError:
                self.vm.log.exception("Unable to take snapshot")
                if self.should_freeze:
                    self.vm.thaw()
                return response.error('snapshotErr')
        except:
            # In case the VM was shutdown in the middle of the snapshot
            # operation we keep doing the finalizing and reporting the failure.
            self._finalize_vm(memory_vol)
            res = False
        else:
            res = self.teardown(memory_vol_path, memory_vol, new_drives,
                                vm_drives)
        if not res:
            raise RuntimeError("Failed to execute snapshot, "
                               "considering the operation as failure")
Example #34
def test_lookup_error():
    _vm = FakeVm()
    result = _vm.diskReplicateFinish(src_drive_conf, dst_drive_conf)

    assert result == response.error('imageErr')
Example #35
def test_has_volume_leases():
    _vm = FakeVm([make_drive(lease_drive_conf)])
    result = _vm.diskReplicateFinish(lease_drive_conf, dst_drive_conf)
    assert result == response.error('noimpl')
Example #36
    def test_delete_active_job(self, status):
        job = TestingJob(status)
        jobs.add(job)
        self.assertEqual(response.error(jobs.JobNotDone.name),
                         jobs.delete(job.id))
Example #37
    def test_unregister_validation(self):
        res = secret.unregister(["this-is-not-a-uuid"])
        self.assertEqual(res, response.error("secretBadRequestErr"))
Example #38
    def test_abort_from_invalid_state(self, status, err):
        job = TestingJob(status)
        jobs.add(job)
        res = jobs.abort(job.id)
        self.assertEqual(response.error(err), res)
Example #39
    def test_abort_unknown_job(self):
        self.assertEqual(response.error(jobs.NoSuchJob.name),
                         jobs.abort('foo'))