def _startUnderlyingMigration(self, startTime, migrationParams, machineParams):
    """Start the actual VM hibernation or migration.

    For hibernation the VM state is simply saved to ``self._dst``.
    Otherwise the destination VM is created via the destination server,
    the libvirt destination URI (duri) and migration data URI (muri)
    are computed, a MonitorThread is prepared, and the transfer is run
    either with a convergence schedule or with the downtime thread.

    :param startTime: migration start timestamp; shifted forward here so
        destination-VM creation time is excluded from transfer timing.
    :param migrationParams: dict of migration options; only 'dstqemu'
        (optional address for the migration data connection) is read.
    :param machineParams: VM definition passed to migrationCreate() on
        the destination server.
    :raises MigrationLimitExceeded: destination replied 'migrateLimit'.
    :raises MigrationDestinationSetupError: any other destination error.
    """
    if self.hibernating:
        self._started = True
        self._vm.hibernate(self._dst)
    else:
        self._vm.prepare_migration()
        # Do not measure the time spent for creating the VM on the
        # destination. In some cases some expensive operations can cause
        # the migration to get cancelled right after the transfer
        # started.
        destCreateStartTime = time.time()
        result = self._destServer.migrationCreate(machineParams,
                                                  self._incomingLimit)
        destCreationTime = time.time() - destCreateStartTime
        startTime += destCreationTime
        self.log.info('Creation of destination VM took: %d seconds',
                      destCreationTime)
        if response.is_error(result):
            # Keep the destination's error response as our status so it
            # is reported upward, then raise the matching exception.
            self.status = result
            if response.is_error(result, 'migrateLimit'):
                raise MigrationLimitExceeded()
            else:
                raise MigrationDestinationSetupError(
                    'migration destination error: ' +
                    result['status']['message'])
        self._started = True
        # Destination URI transport follows the host's SSL setting.
        if config.getboolean('vars', 'ssl'):
            transport = 'tls'
        else:
            transport = 'tcp'
        duri = 'qemu+{}://{}/system'.format(
            transport, normalize_literal_addr(self.remoteHost))
        # The migration data connection goes to 'dstqemu' when given
        # (e.g. a dedicated migration network address), otherwise to
        # the standard remote host.
        dstqemu = migrationParams['dstqemu']
        if dstqemu:
            muri = 'tcp://{}'.format(
                normalize_literal_addr(dstqemu))
        else:
            muri = 'tcp://{}'.format(
                normalize_literal_addr(self.remoteHost))
        self._vm.log.info('starting migration to %s '
                          'with miguri %s', duri, muri)
        self._monitorThread = MonitorThread(
            self._vm, startTime, self._convergence_schedule,
            self._use_convergence_schedule)
        if self._use_convergence_schedule:
            self._perform_with_conv_schedule(duri, muri)
        else:
            self._perform_with_downtime_thread(duri, muri)
        self.log.info("migration took %d seconds to complete",
                      (time.time() - startTime) + destCreationTime)
def _startUnderlyingMigration(self, startTime, migrationParams, machineParams):
    """Start the actual VM hibernation or migration.

    For hibernation the VM state is simply saved to ``self._dst``.
    Otherwise the destination VM is created via the destination server,
    the libvirt destination URI (duri) and migration data URI (muri)
    are computed, a MonitorThread is prepared, and the transfer is run
    either with a convergence schedule or with the downtime thread.

    :param startTime: migration start timestamp; shifted forward here so
        destination-VM creation time is excluded from transfer timing.
    :param migrationParams: dict of migration options; only 'dstqemu'
        (optional address for the migration data connection) is read.
    :param machineParams: VM definition passed to migrationCreate() on
        the destination server.
    :raises MigrationLimitExceeded: destination replied 'migrateLimit'.
    :raises MigrationDestinationSetupError: any other destination error.
    """
    if self.hibernating:
        self._started = True
        self._vm.hibernate(self._dst)
    else:
        self._vm.prepare_migration()
        # Do not measure the time spent for creating the VM on the
        # destination. In some cases some expensive operations can cause
        # the migration to get cancelled right after the transfer
        # started.
        destCreateStartTime = time.time()
        result = self._destServer.migrationCreate(machineParams,
                                                  self._incomingLimit)
        destCreationTime = time.time() - destCreateStartTime
        startTime += destCreationTime
        self.log.info('Creation of destination VM took: %d seconds',
                      destCreationTime)
        if response.is_error(result):
            # Keep the destination's error response as our status so it
            # is reported upward, then raise the matching exception.
            self.status = result
            if response.is_error(result, 'migrateLimit'):
                raise MigrationLimitExceeded()
            else:
                raise MigrationDestinationSetupError(
                    'migration destination error: ' +
                    result['status']['message'])
        self._started = True
        # Destination URI transport follows the host's SSL setting.
        if config.getboolean('vars', 'ssl'):
            transport = 'tls'
        else:
            transport = 'tcp'
        duri = 'qemu+{}://{}/system'.format(
            transport, normalize_literal_addr(self.remoteHost))
        # The migration data connection goes to 'dstqemu' when given
        # (e.g. a dedicated migration network address), otherwise to
        # the standard remote host.
        dstqemu = migrationParams['dstqemu']
        if dstqemu:
            muri = 'tcp://{}'.format(
                normalize_literal_addr(dstqemu))
        else:
            muri = 'tcp://{}'.format(
                normalize_literal_addr(self.remoteHost))
        self._vm.log.info('starting migration to %s '
                          'with miguri %s', duri, muri)
        self._monitorThread = MonitorThread(
            self._vm, startTime, self._convergence_schedule,
            self._use_convergence_schedule)
        if self._use_convergence_schedule:
            self._perform_with_conv_schedule(duri, muri)
        else:
            self._perform_with_downtime_thread(duri, muri)
        self.log.info("migration took %d seconds to complete",
                      (time.time() - startTime) + destCreationTime)
def _migration_params(self, muri):
    """Assemble the libvirt migration-parameter dict.

    Bandwidth is always included; the migration URI only for
    non-tunneled migrations; a graphics URI when a console address is
    configured; a TLS destination host for encrypted migrations; and a
    patched destination XML when a legacy payload path is pending.

    :param muri: migration data URI used for non-tunneled migrations.
    :return: dict keyed by libvirt VIR_MIGRATE_PARAM_* constants.
    """
    params = {libvirt.VIR_MIGRATE_PARAM_BANDWIDTH: self._maxBandwidth}

    if not self.tunneled:
        params[libvirt.VIR_MIGRATE_PARAM_URI] = str(muri)

    if self._consoleAddress:
        if self._vm.hasSpice:
            proto = 'spice'
        else:
            proto = 'vnc'
        params[libvirt.VIR_MIGRATE_PARAM_GRAPHICS_URI] = str(
            '%s://%s' % (proto, self._consoleAddress))

    if self._encrypted:
        # Verify the remote certificate against the standard host name
        # or IP address, not the migration destination, which may be an
        # address (e.g. from a migration network) that is not present
        # in the certificate.
        params[libvirt.VIR_MIGRATE_PARAM_TLS_DESTINATION] = \
            normalize_literal_addr(self.remoteHost)

    # REQUIRED_FOR: destination Vdsm < 4.3
    if self._legacy_payload_path is not None:
        device_alias, payload_file = self._legacy_payload_path
        tree = xmlutils.fromstring(self._vm.migratable_domain_xml())
        src = tree.find(".//alias[@name='%s']/../source" % (device_alias, ))
        src.set('file', payload_file)
        dest_xml = xmlutils.tostring(tree)
        self._vm.log.debug("Migrating domain XML: %s", dest_xml)
        params[libvirt.VIR_MIGRATE_PARAM_DEST_XML] = dest_xml

    return params
def _migration_params(self, muri):
    """Build the libvirt migration-parameter dict.

    Optionally includes bandwidth, parallel connection count, the
    migration URI (non-tunneled only), a graphics URI, the TLS
    certificate host name, and always a destination domain XML with
    legacy payload paths and CPU pinning adjusted.

    :param muri: migration data URI used for non-tunneled migrations.
    :return: dict keyed by libvirt VIR_MIGRATE_PARAM_* constants.
    """
    params = {}
    if self._maxBandwidth:
        params[libvirt.VIR_MIGRATE_PARAM_BANDWIDTH] = self._maxBandwidth
    if self._parallel is not None:
        params[libvirt.VIR_MIGRATE_PARAM_PARALLEL_CONNECTIONS] = \
            self._parallel
    if not self.tunneled:
        params[libvirt.VIR_MIGRATE_PARAM_URI] = str(muri)
    if self._consoleAddress:
        graphics = 'spice' if self._vm.hasSpice else 'vnc'
        params[libvirt.VIR_MIGRATE_PARAM_GRAPHICS_URI] = str(
            '%s://%s' % (graphics, self._consoleAddress)
        )
    if self._encrypted:
        # Use the standard host name or IP address when checking
        # the remote certificate. Not the migration destination,
        # which may be e.g. an IP address from a migration
        # network, not present in the certificate.
        params[libvirt.VIR_MIGRATE_PARAM_TLS_DESTINATION] = \
            normalize_literal_addr(self.remoteHost)
    xml = self._vm.migratable_domain_xml()
    # REQUIRED_FOR: destination Vdsm < 4.3
    if self._legacy_payload_path is not None:
        alias, path = self._legacy_payload_path
        dom = xmlutils.fromstring(xml)
        source = dom.find(".//alias[@name='%s']/../source" % (alias,))
        source.set('file', path)
        xml = xmlutils.tostring(dom)
    # Remove & replace CPU pinning added by VDSM
    dom = xmlutils.fromstring(xml)
    cputune = dom.find('cputune')
    if cputune is not None:
        for vcpu in vmxml.find_all(cputune, 'vcpupin'):
            vcpu_id = int(vcpu.get('vcpu'))
            # Keep pins the user made manually under the manual CPU
            # policy; drop everything else so the destination can
            # re-pin.
            if (self._vm.cpu_policy() == cpumanagement.CPU_POLICY_MANUAL
                    and vcpu_id in self._vm.manually_pinned_cpus()):
                continue
            cputune.remove(vcpu)
    if self._destination_cpusets is not None:
        if cputune is None:
            # BUGFIX: this used to be
            #   cputune = xml.etree.ElementTree.Element('cputune')
            # but the local variable 'xml' is a *string* (the domain
            # XML) shadowing the stdlib 'xml' package, so that line
            # raised AttributeError. Build the element via xmlutils,
            # which is already in scope.
            cputune = xmlutils.fromstring('<cputune/>')
            dom.append(cputune)
        for vcpupin in self._destination_cpusets:
            cputune.append(vcpupin)
    xml = xmlutils.tostring(dom)
    self._vm.log.debug("Migrating domain XML: %s", xml)
    params[libvirt.VIR_MIGRATE_PARAM_DEST_XML] = xml
    return params
def _migration_params(self, muri):
    """Build the parameter dict handed to libvirt for this migration.

    Optionally includes bandwidth, parallel connection count, the
    migration URI (non-tunneled only), a graphics URI, and the TLS
    certificate host name; always includes a destination domain XML
    with legacy payload paths, CPU pinning, and NUMA node sets
    adjusted for the destination host.

    :param muri: migration data URI used for non-tunneled migrations.
    :return: dict keyed by libvirt VIR_MIGRATE_PARAM_* constants.
    """
    params = {}
    if self._maxBandwidth:
        params[libvirt.VIR_MIGRATE_PARAM_BANDWIDTH] = self._maxBandwidth
    if self._parallel is not None:
        params[libvirt.VIR_MIGRATE_PARAM_PARALLEL_CONNECTIONS] = \
            self._parallel
    if not self.tunneled:
        params[libvirt.VIR_MIGRATE_PARAM_URI] = str(muri)
    if self._consoleAddress:
        if self._vm.hasSpice:
            proto = 'spice'
        else:
            proto = 'vnc'
        params[libvirt.VIR_MIGRATE_PARAM_GRAPHICS_URI] = str(
            '%s://%s' % (proto, self._consoleAddress))
    if self._encrypted:
        # Verify the remote certificate against the standard host name
        # or IP address, not the migration destination, which may be an
        # address (e.g. from a migration network) that is not present
        # in the certificate.
        params[libvirt.VIR_MIGRATE_PARAM_TLS_DESTINATION] = \
            normalize_literal_addr(self.remoteHost)

    tree = xmlutils.fromstring(self._vm.migratable_domain_xml())

    # REQUIRED_FOR: destination Vdsm < 4.3
    if self._legacy_payload_path is not None:
        device_alias, payload_file = self._legacy_payload_path
        src = tree.find(".//alias[@name='%s']/../source" % (device_alias, ))
        src.set('file', payload_file)

    # Remove & replace CPU pinning added by VDSM
    tree = cpumanagement.replace_cpu_pinning(self._vm, tree,
                                             self._destination_cpusets)

    if self._destination_numa_nodesets is not None:
        numatune = tree.find('numatune')
        if numatune is not None:
            for node in vmxml.find_all(numatune, 'memnode'):
                cell = int(node.get('cellid'))
                if 0 <= cell < len(self._destination_numa_nodesets):
                    node.set('nodeset',
                             self._destination_numa_nodesets[cell])

    domxml = xmlutils.tostring(tree)
    self._vm.log.debug("Migrating domain XML: %s", domxml)
    params[libvirt.VIR_MIGRATE_PARAM_DEST_XML] = domxml
    return params
def _startUnderlyingMigration(self, startTime, machineParams):
    """Start the actual VM hibernation or migration.

    For hibernation the VM state is simply saved to ``self._dst``.
    Otherwise the destination VM is created via the destination server,
    legacy payload paths are resolved for old destinations, the libvirt
    destination URI (duri) and migration data URI (muri) are computed,
    and the transfer runs under a convergence schedule.

    :param startTime: migration start timestamp; shifted forward here so
        destination-VM creation time is excluded from transfer timing.
    :param machineParams: VM definition passed to migrationCreate() on
        the destination server.
    :raises MigrationLimitExceeded: destination replied 'migrateLimit'.
    :raises MigrationDestinationSetupError: any other destination error.
    """
    if self.hibernating:
        self._started = True
        self._vm.hibernate(self._dst)
    else:
        self._vm.prepare_migration()
        # Do not measure the time spent for creating the VM on the
        # destination. In some cases some expensive operations can cause
        # the migration to get cancelled right after the transfer
        # started.
        destCreateStartTime = time.time()
        result = self._destServer.migrationCreate(machineParams,
                                                  self._incomingLimit)
        destCreationTime = time.time() - destCreateStartTime
        startTime += destCreationTime
        self.log.info('Creation of destination VM took: %d seconds',
                      destCreationTime)
        if response.is_error(result):
            # Keep the destination's error response as our status so it
            # is reported upward, then raise the matching exception.
            self.status = result
            if response.is_error(result, 'migrateLimit'):
                raise MigrationLimitExceeded()
            else:
                raise MigrationDestinationSetupError(
                    'migration destination error: ' +
                    result['status']['message'])
        self._started = True
        # REQUIRED_FOR: destination Vdsm < 4.3
        if not self._vm.min_cluster_version(4, 3):
            payload_drives = self._vm.payload_drives()
            if payload_drives:
                # Currently, only a single payload device may be present
                payload_alias = payload_drives[0].alias
                result = self._destServer.fullList(
                    vmList=(self._vm.id, ))
                vm_list = result.get('items')
                remote_devices = vm_list[0].get('devices')
                if remote_devices is not None:
                    # Find the path the destination assigned to the
                    # payload device with the matching alias, if any.
                    payload_path = next(
                        (d['path'] for d in remote_devices
                         if d.get('alias') == payload_alias),
                        None)
                    if payload_path is not None:
                        self._legacy_payload_path = \
                            (payload_alias, payload_path)
        # Destination URI transport follows the host's SSL setting.
        if config.getboolean('vars', 'ssl'):
            transport = 'tls'
        else:
            transport = 'tcp'
        duri = 'qemu+{}://{}/system'.format(
            transport, normalize_literal_addr(self.remoteHost))
        if self._encrypted:
            # TODO: Stop using host names here and set the host
            # name based certificate verification parameter once
            # the corresponding functionality is available in
            # libvirt, see https://bugzilla.redhat.com/1754533
            #
            # When an encrypted migration is requested, we must
            # use the host name (stored in 'dst') rather than the
            # IP address (stored in 'dstqemu') in order to match
            # the target certificate. That means that encrypted
            # migrations are incompatible with setups that require
            # an IP address to identify the host properly, such as
            # when a separate migration network should be used or
            # when using IPv4/IPv6 dual stack configurations.
            dstqemu = self.remoteHost
        else:
            dstqemu = self._dstqemu
        if dstqemu:
            muri = 'tcp://{}'.format(normalize_literal_addr(dstqemu))
        else:
            muri = 'tcp://{}'.format(
                normalize_literal_addr(self.remoteHost))
        self._vm.log.info('starting migration to %s '
                          'with miguri %s', duri, muri)
        self._monitorThread = MonitorThread(self._vm, startTime,
                                            self._convergence_schedule)
        self._perform_with_conv_schedule(duri, muri)
        self.log.info("migration took %d seconds to complete",
                      (time.time() - startTime) + destCreationTime)
def _startUnderlyingMigration(self, startTime, machineParams):
    """Start the actual VM hibernation or migration.

    For hibernation the VM state is simply saved to ``self._dst``.
    Otherwise the destination VM is created via the destination server,
    legacy payload paths are resolved for old destinations, the libvirt
    destination URI (duri) and migration data URI (muri) are computed,
    and the transfer runs under a convergence schedule.  Progress is
    tracked through the State machine via _switch_state().

    :param startTime: migration start timestamp; shifted forward here so
        destination-VM creation time is excluded from transfer timing.
    :param machineParams: VM definition passed to migrationCreate() on
        the destination server.
    :raises MigrationLimitExceeded: destination replied 'migrateLimit'.
    :raises MigrationDestinationSetupError: any other destination error.
    """
    if self.hibernating:
        self._switch_state(State.STARTED)
        self._vm.hibernate(self._dst)
    else:
        self._vm.prepare_migration()
        self._switch_state(State.PREPARED)
        # Do not measure the time spent for creating the VM on the
        # destination. In some cases some expensive operations can cause
        # the migration to get cancelled right after the transfer
        # started.
        destCreateStartTime = time.time()
        result = self._destServer.migrationCreate(machineParams,
                                                  self._incomingLimit)
        destCreationTime = time.time() - destCreateStartTime
        startTime += destCreationTime
        self.log.info('Creation of destination VM took: %d seconds',
                      destCreationTime)
        if response.is_error(result):
            # Keep the destination's error response as our status so it
            # is reported upward, then raise the matching exception.
            self.status = result
            if response.is_error(result, 'migrateLimit'):
                raise MigrationLimitExceeded()
            else:
                raise MigrationDestinationSetupError(
                    'migration destination error: ' +
                    result['status']['message'])
        self._switch_state(State.STARTED)
        # REQUIRED_FOR: destination Vdsm < 4.3
        if not self._vm.min_cluster_version(4, 3):
            payload_drives = self._vm.payload_drives()
            if payload_drives:
                # Currently, only a single payload device may be present
                payload_alias = payload_drives[0].alias
                result = self._destServer.fullList(
                    vmList=(self._vm.id, ))
                vm_list = result.get('items')
                remote_devices = vm_list[0].get('devices')
                if remote_devices is not None:
                    # Find the path the destination assigned to the
                    # payload device with the matching alias, if any.
                    payload_path = next(
                        (d['path'] for d in remote_devices
                         if d.get('alias') == payload_alias),
                        None)
                    if payload_path is not None:
                        self._legacy_payload_path = \
                            (payload_alias, payload_path)
        # Destination URI transport follows the host's SSL setting.
        if config.getboolean('vars', 'ssl'):
            transport = 'tls'
        else:
            transport = 'tcp'
        duri = 'qemu+{}://{}/system'.format(
            transport, normalize_literal_addr(self.remoteHost))
        # The migration data connection goes to '_dstqemu' when set
        # (e.g. a dedicated migration network address), otherwise to
        # the standard remote host.
        dstqemu = self._dstqemu
        if dstqemu:
            muri = 'tcp://{}'.format(normalize_literal_addr(dstqemu))
        else:
            muri = 'tcp://{}'.format(
                normalize_literal_addr(self.remoteHost))
        self._vm.log.info('starting migration to %s '
                          'with miguri %s', duri, muri)
        self._monitorThread = MonitorThread(self._vm, startTime,
                                            self._convergence_schedule)
        self._perform_with_conv_schedule(duri, muri)
        self.log.info("migration took %d seconds to complete",
                      (time.time() - startTime) + destCreationTime)
def test_literal_ipv6_already_literal(self):
    """An already-bracketed IPv6 literal must pass through unchanged."""
    normalized = ipaddress.normalize_literal_addr('[2001::1]')
    self.assertEqual('[2001::1]', normalized)
def test_literal_namedhost(self):
    """A plain host name must pass through unchanged."""
    normalized = ipaddress.normalize_literal_addr('namedhost')
    self.assertEqual('namedhost', normalized)
def test_literal_ipv4_addr(self):
    """An IPv4 address must pass through unchanged (no brackets)."""
    normalized = ipaddress.normalize_literal_addr('1.2.3.4')
    self.assertEqual('1.2.3.4', normalized)