def _startUnderlyingMigration(self, startTime):
    if self.hibernating:
        hooks.before_vm_hibernate(self._vm._dom.XMLDesc(0), self._vm.conf)
        try:
            self._vm._vmStats.pause()
            fname = self._vm.cif.prepareVolumePath(self._dst)
            try:
                self._vm._dom.save(fname)
            finally:
                self._vm.cif.teardownVolumePath(self._dst)
        except Exception:
            self._vm._vmStats.cont()
            raise
    else:
        for dev in self._vm._customDevices():
            hooks.before_device_migrate_source(
                dev._deviceXML, self._vm.conf, dev.custom)
        hooks.before_vm_migrate_source(self._vm._dom.XMLDesc(0),
                                       self._vm.conf)

        # Do not measure the time spent for creating the VM on the
        # destination. In some cases some expensive operations can cause
        # the migration to get cancelled right after the transfer started.
        destCreateStartTime = time.time()
        result = self._destServer.migrationCreate(self._machineParams)
        destCreationTime = time.time() - destCreateStartTime
        startTime += destCreationTime
        self.log.info('Creation of destination VM took: %d seconds',
                      destCreationTime)

        if result['status']['code']:
            self.status = result
            raise RuntimeError('migration destination error: ' +
                               result['status']['message'])

        if config.getboolean('vars', 'ssl'):
            transport = 'tls'
        else:
            transport = 'tcp'
        duri = 'qemu+%s://%s/system' % (transport, self.remoteHost)

        if self._vm.conf['_migrationParams']['dstqemu']:
            muri = 'tcp://%s' % \
                self._vm.conf['_migrationParams']['dstqemu']
        else:
            muri = 'tcp://%s' % self.remoteHost
        self._vm.log.info('starting migration to %s '
                          'with miguri %s', duri, muri)

        downtimeThread = DowntimeThread(self._vm, int(self._downtime))
        self._monitorThread = MonitorThread(self._vm, startTime)
        with utils.running(downtimeThread):
            with utils.running(self._monitorThread):
                # we need to support python 2.6, so two nested with-s.
                self._perform_migration(duri, muri)

        self.log.info("migration took %d seconds to complete",
                      (time.time() - startTime) + destCreationTime)

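# Note (illustrative sketch, not part of the original source): the nested
# with-blocks in the variant above rely on utils.running() acting as a
# start/stop context manager. A minimal sketch of the assumed semantics,
# which also explains why the older variants below start and stop the
# monitor thread by hand in a try/finally:

from contextlib import contextmanager


@contextmanager
def running(runnable):
    # Start the runnable (e.g. DowntimeThread, MonitorThread) on entry and
    # make sure it is stopped on exit, even if the migration raises.
    runnable.start()
    try:
        yield runnable
    finally:
        runnable.stop()
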
def _startUnderlyingMigration(self, startTime):
    if self.hibernating:
        hooks.before_vm_hibernate(self._vm._dom.XMLDesc(0), self._vm.conf)
        try:
            self._vm._vmStats.pause()
            fname = self._vm.cif.prepareVolumePath(self._dst)
            try:
                self._vm._dom.save(fname)
            finally:
                self._vm.cif.teardownVolumePath(self._dst)
        except Exception:
            self._vm._vmStats.cont()
            raise
    else:
        for dev in self._vm._customDevices():
            hooks.before_device_migrate_source(
                dev._deviceXML, self._vm.conf, dev.custom)
        hooks.before_vm_migrate_source(self._vm._dom.XMLDesc(0),
                                       self._vm.conf)

        response = self.destServer.migrationCreate(self._machineParams)
        if response['status']['code']:
            self.status = response
            raise RuntimeError('migration destination error: ' +
                               response['status']['message'])

        if config.getboolean('vars', 'ssl'):
            transport = 'tls'
        else:
            transport = 'tcp'
        duri = 'qemu+%s://%s/system' % (transport, self.remoteHost)

        if self._vm.conf['_migrationParams']['dstqemu']:
            muri = 'tcp://%s' % \
                self._vm.conf['_migrationParams']['dstqemu']
        else:
            muri = 'tcp://%s' % self.remoteHost
        self._vm.log.debug('starting migration to %s '
                           'with miguri %s', duri, muri)

        t = DowntimeThread(self._vm, int(self._downtime))
        self._monitorThread = MonitorThread(self._vm, startTime)
        with utils.running(self._monitorThread):
            try:
                self._perform_migration(duri, muri)
            finally:
                t.cancel()

def _startUnderlyingMigration(self, startTime):
    if self._mode == 'file':
        hooks.before_vm_hibernate(self._vm._dom.XMLDesc(0), self._vm.conf)
        try:
            self._vm._vmStats.pause()
            fname = self._vm.cif.prepareVolumePath(self._dst)
            try:
                self._vm._dom.save(fname)
            finally:
                self._vm.cif.teardownVolumePath(self._dst)
        except Exception:
            self._vm._vmStats.cont()
            raise
    else:
        for dev in self._vm._customDevices():
            hooks.before_device_migrate_source(
                dev._deviceXML, self._vm.conf, dev.custom)
        hooks.before_vm_migrate_source(self._vm._dom.XMLDesc(0),
                                       self._vm.conf)

        response = self.destServer.migrationCreate(self._machineParams)
        if response['status']['code']:
            self.status = response
            raise RuntimeError('migration destination error: ' +
                               response['status']['message'])

        if config.getboolean('vars', 'ssl'):
            transport = 'tls'
        else:
            transport = 'tcp'
        duri = 'qemu+%s://%s/system' % (transport, self.remoteHost)

        if self._vm.conf['_migrationParams']['dstqemu']:
            muri = 'tcp://%s' % \
                self._vm.conf['_migrationParams']['dstqemu']
        else:
            muri = 'tcp://%s' % self.remoteHost
        self._vm.log.debug('starting migration to %s '
                           'with miguri %s', duri, muri)

        t = DowntimeThread(self._vm, int(self._downtime))

        self._monitorThread = MonitorThread(self._vm, startTime)
        self._monitorThread.start()

        try:
            if self._vm.hasSpice and self._vm.conf.get('clientIp'):
                SPICE_MIGRATION_HANDOVER_TIME = 120
                self._vm._reviveTicket(SPICE_MIGRATION_HANDOVER_TIME)

            maxBandwidth = config.getint('vars', 'migration_max_bandwidth')
            # FIXME: there still a race here with libvirt,
            # if we call stop() and libvirt migrateToURI2 didn't start
            # we may return migration stop but it will start at libvirt
            # side
            self._preparingMigrationEvt = False
            if not self._migrationCanceledEvt:
                self._vm._dom.migrateToURI2(
                    duri, muri, None,
                    libvirt.VIR_MIGRATE_LIVE |
                    libvirt.VIR_MIGRATE_PEER2PEER |
                    (libvirt.VIR_MIGRATE_TUNNELLED if
                        self._tunneled else 0) |
                    (libvirt.VIR_MIGRATE_ABORT_ON_ERROR if
                        self._abortOnError else 0),
                    None, maxBandwidth)
            else:
                self._raiseAbortError()
        finally:
            t.cancel()
            self._monitorThread.stop()

def _startUnderlyingMigration(self, startTime):
    if self._mode == 'file':
        hooks.before_vm_hibernate(self._vm._dom.XMLDesc(0), self._vm.conf)
        try:
            self._vm._vmStats.pause()
            fname = self._vm.cif.prepareVolumePath(self._dst)
            try:
                self._vm._dom.save(fname)
            finally:
                self._vm.cif.teardownVolumePath(self._dst)
        except Exception:
            self._vm._vmStats.cont()
            raise
    else:
        for dev in self._vm._customDevices():
            hooks.before_device_migrate_source(
                dev._deviceXML, self._vm.conf, dev.custom)
        hooks.before_vm_migrate_source(self._vm._dom.XMLDesc(0),
                                       self._vm.conf)

        response = self.destServer.migrationCreate(self._machineParams)
        if response['status']['code']:
            self.status = response
            raise RuntimeError('migration destination error: ' +
                               response['status']['message'])

        if config.getboolean('vars', 'ssl'):
            transport = 'tls'
        else:
            transport = 'tcp'
        duri = 'qemu+%s://%s/system' % (transport, self.remoteHost)

        if self._vm.conf['_migrationParams']['dstqemu']:
            muri = 'tcp://%s' % \
                self._vm.conf['_migrationParams']['dstqemu']
        else:
            muri = 'tcp://%s' % self.remoteHost
        self._vm.log.debug('starting migration to %s '
                           'with miguri %s', duri, muri)

        t = DowntimeThread(self._vm, int(self._downtime))

        if MonitorThread._MIGRATION_MONITOR_INTERVAL:
            self._monitorThread = MonitorThread(self._vm, startTime)
            self._monitorThread.start()

        try:
            if ('qxl' in self._vm.conf['display'] and
                    self._vm.conf.get('clientIp')):
                SPICE_MIGRATION_HANDOVER_TIME = 120
                self._vm._reviveTicket(SPICE_MIGRATION_HANDOVER_TIME)

            maxBandwidth = config.getint('vars', 'migration_max_bandwidth')
            # FIXME: there still a race here with libvirt,
            # if we call stop() and libvirt migrateToURI2 didn't start
            # we may return migration stop but it will start at libvirt
            # side
            self._preparingMigrationEvt = False
            if not self._migrationCanceledEvt:
                self._vm._dom.migrateToURI2(
                    duri, muri, None,
                    libvirt.VIR_MIGRATE_LIVE |
                    libvirt.VIR_MIGRATE_PEER2PEER |
                    (libvirt.VIR_MIGRATE_TUNNELLED if
                        self._tunneled else 0) |
                    (libvirt.VIR_MIGRATE_ABORT_ON_ERROR if
                        self._abortOnError else 0),
                    None, maxBandwidth)
            else:
                self._raiseAbortError()
        finally:
            t.cancel()
            if MonitorThread._MIGRATION_MONITOR_INTERVAL:
                self._monitorThread.stop()

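# Note (illustrative sketch, not part of the original source): the flag
# expression passed to migrateToURI2() in the two variants above combines
# libvirt migration flags, with the tunnelled and abort-on-error bits set
# conditionally. An equivalent helper written out step by step; the name
# build_migration_flags is hypothetical and only used for illustration:

import libvirt


def build_migration_flags(tunneled, abort_on_error):
    # Always request a live, peer-to-peer migration.
    flags = libvirt.VIR_MIGRATE_LIVE | libvirt.VIR_MIGRATE_PEER2PEER
    if tunneled:
        # Tunnel the migration stream through the libvirtd connection.
        flags |= libvirt.VIR_MIGRATE_TUNNELLED
    if abort_on_error:
        # Abort the migration if an I/O error is hit during the transfer.
        flags |= libvirt.VIR_MIGRATE_ABORT_ON_ERROR
    return flags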