def __init__(self, vm, dst='', dstparams='',
             mode=MODE_REMOTE, method=METHOD_ONLINE,
             tunneled=False, dstqemu='', abortOnError=False,
             consoleAddress=None, compressed=False,
             autoConverge=False, **kwargs):
    self.log = vm.log
    self._vm = vm
    self._dst = dst
    self._mode = mode
    if method != METHOD_ONLINE:
        self.log.warning(
            'migration method %s is deprecated, forced to "online"',
            method)
    self._dstparams = dstparams
    self._machineParams = {}
    self._tunneled = utils.tobool(tunneled)
    self._abortOnError = utils.tobool(abortOnError)
    self._consoleAddress = consoleAddress
    self._dstqemu = dstqemu
    self._downtime = kwargs.get('downtime') or \
        config.get('vars', 'migration_downtime')
    self._autoConverge = autoConverge
    self._compressed = compressed
    self.status = {
        'status': {
            'code': 0,
            'message': 'Migration in progress'}}
    self._progress = 0
    threading.Thread.__init__(self)
    self._preparingMigrationEvt = True
    self._migrationCanceledEvt = False
    self._monitorThread = None
def __init__(self, vm, dst='', dstparams='',
             mode=MODE_REMOTE, method=METHOD_ONLINE,
             tunneled=False, dstqemu='', abortOnError=False,
             compressed=False, autoConverge=False, **kwargs):
    self.log = vm.log
    self._vm = vm
    self._dst = dst
    self._mode = mode
    if method != METHOD_ONLINE:
        self.log.warning(
            'migration method %s is deprecated, forced to "online"',
            method)
    self._dstparams = dstparams
    self._machineParams = {}
    self._tunneled = utils.tobool(tunneled)
    self._abortOnError = utils.tobool(abortOnError)
    self._dstqemu = dstqemu
    self._downtime = kwargs.get('downtime') or \
        config.get('vars', 'migration_downtime')
    self._autoConverge = autoConverge
    self._compressed = compressed
    self.status = {
        'status': {
            'code': 0,
            'message': 'Migration in progress'}}
    self._progress = 0
    threading.Thread.__init__(self)
    self._preparingMigrationEvt = True
    self._migrationCanceledEvt = False
    self._monitorThread = None
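# All of the snippets in this collection rely on utils.tobool() to coerce
# values that may arrive over the API as strings ('true'/'false'), numbers,
# booleans or None. A minimal sketch of the assumed semantics follows; it is
# illustrative only, and the real vdsm helper may differ in detail.
def _tobool_sketch(s):
    try:
        if s is None:
            return False
        if type(s) is bool:
            return s
        if s.lower() == 'true':
            return True
        return bool(int(s))
    except (AttributeError, TypeError, ValueError):
        # anything unparsable is treated as False
        return False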
def _add_network(network, configurator, nameservers,
                 vlan=None, bonding=None, nic=None, ipaddr=None,
                 netmask=None, mtu=None, gateway=None, dhcpv6=None,
                 ipv6addr=None, ipv6gateway=None, ipv6autoconf=None,
                 bridged=True, _netinfo=None, hostQos=None,
                 defaultRoute=None, blockingdhcp=False, **options):
    if _netinfo is None:
        _netinfo = CachingNetInfo()
    if dhcpv6 is not None:
        dhcpv6 = utils.tobool(dhcpv6)
    if ipv6autoconf is not None:
        ipv6autoconf = utils.tobool(ipv6autoconf)

    if network == '':
        raise ConfigNetworkError(ne.ERR_BAD_BRIDGE,
                                 'Empty network names are not valid')

    logging.debug('Validating network...')
    if network in _netinfo.networks:
        raise ConfigNetworkError(
            ne.ERR_USED_BRIDGE, 'Network already exists (%s)' % (network,))
    if bonding:
        _validate_inter_network_compatibility(_netinfo, vlan, bonding)
    elif nic:
        _validate_inter_network_compatibility(_netinfo, vlan, nic)

    logging.info('Adding network %s with vlan=%s, bonding=%s, nic=%s, '
                 'mtu=%s, bridged=%s, defaultRoute=%s, options=%s',
                 network, vlan, bonding, nic, mtu, bridged,
                 defaultRoute, options)

    bootproto = options.pop('bootproto', None)

    net_ent = _objectivize_network(
        bridge=network if bridged else None, vlan_id=vlan, bonding=bonding,
        nic=nic, mtu=mtu, ipaddr=ipaddr, netmask=netmask, gateway=gateway,
        bootproto=bootproto, dhcpv6=dhcpv6, blockingdhcp=blockingdhcp,
        ipv6addr=ipv6addr, ipv6gateway=ipv6gateway,
        ipv6autoconf=ipv6autoconf, defaultRoute=defaultRoute,
        nameservers=nameservers, _netinfo=_netinfo,
        configurator=configurator, opts=options)

    if bridged and network in _netinfo.bridges:
        # The bridge already exists, update the configured entity to one level
        # below and update the mtu of the bridge.
        # The mtu is updated in the bridge configuration and on all the tap
        # devices attached to it (for the VMs).
        # (expecting the bridge running mtu to be updated by the kernel when
        # the device attached under it has its mtu updated)
        logging.info('Bridge %s already exists.', network)
        net_ent_to_configure = net_ent.port
        _update_mtu_for_an_existing_bridge(network, configurator, mtu)
    else:
        net_ent_to_configure = net_ent

    if net_ent_to_configure is not None:
        logging.info('Configuring device %s', net_ent_to_configure)
        net_ent_to_configure.configure(**options)
    configurator.configureLibvirtNetwork(network, net_ent)
    if hostQos is not None:
        configurator.configureQoS(hostQos, net_ent)
def addNetwork(network, vlan=None, bonding=None, nics=None, ipaddr=None,
               netmask=None, prefix=None, mtu=None, gateway=None,
               ipv6addr=None, ipv6gateway=None, force=False,
               configurator=None, bondingOptions=None, bridged=True,
               _netinfo=None, qosInbound=None, qosOutbound=None, **options):
    nics = nics or ()
    if _netinfo is None:
        _netinfo = netinfo.NetInfo()
    bridged = utils.tobool(bridged)
    vlan = _vlanToInternalRepresentation(vlan)

    if mtu:
        mtu = int(mtu)

    if prefix:
        if netmask:
            raise ConfigNetworkError(ne.ERR_BAD_PARAMS,
                                     'Both PREFIX and NETMASK supplied')
        else:
            try:
                netmask = netinfo.prefix2netmask(int(prefix))
            except ValueError as ve:
                raise ConfigNetworkError(ne.ERR_BAD_ADDR,
                                         "Bad prefix: %s" % ve)

    if not utils.tobool(force):
        logging.debug('validating network...')
        if network in _netinfo.networks:
            raise ConfigNetworkError(ne.ERR_USED_BRIDGE,
                                     'Network already exists')
        if bonding:
            _validateInterNetworkCompatibility(_netinfo, vlan, bonding,
                                               bridged)
        else:
            for nic in nics:
                _validateInterNetworkCompatibility(_netinfo, vlan, nic,
                                                   bridged)

    logging.info("Adding network %s with vlan=%s, bonding=%s, nics=%s,"
                 " bondingOptions=%s, mtu=%s, bridged=%s, options=%s",
                 network, vlan, bonding, nics, bondingOptions, mtu,
                 bridged, options)

    if configurator is None:
        configurator = Ifcfg()

    bootproto = options.pop('bootproto', None)

    defaultRoute = network == constants.MANAGEMENT_NETWORK

    netEnt = objectivizeNetwork(network if bridged else None, vlan, bonding,
                                bondingOptions, nics, mtu, ipaddr, netmask,
                                gateway, bootproto, ipv6addr, ipv6gateway,
                                _netinfo=_netinfo, configurator=configurator,
                                defaultRoute=defaultRoute, **options)

    netEnt.configure(**options)
    configurator.configureLibvirtNetwork(network, netEnt,
                                         qosInbound=qosInbound,
                                         qosOutbound=qosOutbound)
def getXML(self):
    """
    Create domxml for a graphics framebuffer.

    <graphics type='spice' port='5900' tlsPort='5901' autoport='yes'
              listen='0' keymap='en-us'
              passwdValidTo='1970-01-01T00:00:01'>
      <listen type='address' address='0'/>
      <clipboard copypaste='no'/>
    </graphics>
    OR
    <graphics type='vnc' port='5900' autoport='yes' listen='0'
              keymap='en-us' passwdValidTo='1970-01-01T00:00:01'>
      <listen type='address' address='0'/>
    </graphics>
    """
    graphicsAttrs = {
        'type': self.device,
        'port': self.port,
        'autoport': 'yes'}

    if self.device == 'spice':
        graphicsAttrs['tlsPort'] = self.tlsPort

    if not utils.tobool(self.specParams.get('disableTicketing', False)):
        graphicsAttrs['passwd'] = '*****'
        graphicsAttrs['passwdValidTo'] = '1970-01-01T00:00:01'

    if 'keyMap' in self.specParams:
        graphicsAttrs['keymap'] = self.specParams['keyMap']

    graphics = vmxml.Element('graphics', **graphicsAttrs)

    if not utils.tobool(self.specParams.get('copyPasteEnable', True)):
        clipboard = vmxml.Element('clipboard', copypaste='no')
        graphics.appendChild(clipboard)

    if not utils.tobool(self.specParams.get('fileTransferEnable', True)):
        filetransfer = vmxml.Element('filetransfer', enable='no')
        graphics.appendChild(filetransfer)

    if (self.device == 'spice' and
            'spiceSecureChannels' in self.specParams):
        for chan in self._getSpiceChannels():
            graphics.appendChildWithArgs('channel', name=chan,
                                         mode='secure')

    if self.specParams.get('displayNetwork'):
        graphics.appendChildWithArgs(
            'listen', type='network',
            network=netinfo.LIBVIRT_NET_PREFIX +
            self.specParams.get('displayNetwork'))
    else:
        graphics.setAttrs(listen='0')

    return graphics
def appendFeatures(self):
    """
    Add machine features to domain xml.

    Currently only
        <features>
            <acpi/>
        </features>

    for hyperv:
        <features>
            <acpi/>
            <hyperv>
                <relaxed state='on'/>
            </hyperv>
        </features>
    """
    if (utils.tobool(self.conf.get('acpiEnable', 'true')) or
            utils.tobool(self.conf.get('hypervEnable', 'false'))):
        features = self.dom.appendChildWithArgs('features')

    if utils.tobool(self.conf.get('acpiEnable', 'true')):
        features.appendChildWithArgs('acpi')

    if utils.tobool(self.conf.get('hypervEnable', 'false')):
        hyperv = Element('hyperv')
        features.appendChild(hyperv)

        hyperv.appendChildWithArgs('relaxed', state='on')
def __init__(self, vm, dst='', dstparams='',
             mode=MODE_REMOTE, method=METHOD_ONLINE,
             tunneled=False, dstqemu='', abortOnError=False,
             consoleAddress=None, compressed=False,
             autoConverge=False, **kwargs):
    self.log = vm.log
    self._vm = vm
    self._dst = dst
    self._mode = mode
    if method != METHOD_ONLINE:
        self.log.warning(
            'migration method %s is deprecated, forced to "online"',
            method)
    self._dstparams = dstparams
    self._machineParams = {}
    self._tunneled = utils.tobool(tunneled)
    self._abortOnError = utils.tobool(abortOnError)
    self._consoleAddress = consoleAddress
    self._dstqemu = dstqemu
    self._downtime = kwargs.get('downtime') or \
        config.get('vars', 'migration_downtime')
    self._maxBandwidth = int(
        kwargs.get('maxBandwidth') or
        config.getint('vars', 'migration_max_bandwidth')
    )
    self._autoConverge = autoConverge
    self._compressed = compressed
    self.status = {
        'status': {
            'code': 0,
            'message': 'Migration in progress'}}
    self._progress = 0
    threading.Thread.__init__(self)
    self._preparingMigrationEvt = True
    self._migrationCanceledEvt = False
    self._monitorThread = None
    self._destServer = None

    progress_timeout = config.getint('vars', 'migration_progress_timeout')
    self._convergence_schedule = {
        'init': [],
        'stalling': [
            {
                'limit': progress_timeout,
                'action': {
                    'name': CONVERGENCE_SCHEDULE_SET_ABORT,
                    'params': []
                }
            }
        ]
    }
    self._use_convergence_schedule = False
    if 'convergenceSchedule' in kwargs:
        self._convergence_schedule = kwargs.get('convergenceSchedule')
        self._use_convergence_schedule = True
        self.log.debug('convergence schedule set to: %s',
                       str(self._convergence_schedule))
def __init__(self, vm, dst='', dstparams='',
             mode=MODE_REMOTE, method=METHOD_ONLINE,
             tunneled=False, dstqemu='', abortOnError=False,
             consoleAddress=None, compressed=False,
             autoConverge=False, **kwargs):
    self.log = vm.log
    self._vm = vm
    self._dst = dst
    self._mode = mode
    if method != METHOD_ONLINE:
        self.log.warning(
            'migration method %s is deprecated, forced to "online"',
            method)
    self._dstparams = dstparams
    self._enableGuestEvents = kwargs.get('enableGuestEvents', False)
    self._machineParams = {}
    # TODO: utils.tobool shouldn't be used in this constructor, the
    # conversions should be handled properly in the API layer
    self._tunneled = utils.tobool(tunneled)
    self._abortOnError = utils.tobool(abortOnError)
    self._consoleAddress = consoleAddress
    self._dstqemu = dstqemu
    self._downtime = kwargs.get('downtime') or \
        config.get('vars', 'migration_downtime')
    self._maxBandwidth = int(
        kwargs.get('maxBandwidth') or
        config.getint('vars', 'migration_max_bandwidth'))
    self._autoConverge = utils.tobool(autoConverge)
    self._compressed = utils.tobool(compressed)
    self._incomingLimit = kwargs.get('incomingLimit')
    self._outgoingLimit = kwargs.get('outgoingLimit')
    self.status = {
        'status': {
            'code': 0,
            'message': 'Migration in progress'
        }
    }
    self._progress = 0
    self._thread = concurrent.thread(self.run,
                                     name='migsrc/' + self._vm.id[:8])
    self._preparingMigrationEvt = True
    self._migrationCanceledEvt = threading.Event()
    self._monitorThread = None
    self._destServer = None

    self._convergence_schedule = {'init': [], 'stalling': []}
    self._use_convergence_schedule = False
    if 'convergenceSchedule' in kwargs:
        self._convergence_schedule = kwargs.get('convergenceSchedule')
        self._use_convergence_schedule = True
        self.log.debug('convergence schedule set to: %s',
                       str(self._convergence_schedule))
def getXML(self):
    """
    Create domxml for a graphics framebuffer.

    <graphics type='spice' port='5900' tlsPort='5901' autoport='yes'
              listen='0' keymap='en-us'
              passwdValidTo='1970-01-01T00:00:01'>
      <listen type='address' address='0'/>
      <clipboard copypaste='no'/>
    </graphics>
    OR
    <graphics type='vnc' port='5900' autoport='yes' listen='0'
              keymap='en-us' passwdValidTo='1970-01-01T00:00:01'>
      <listen type='address' address='0'/>
    </graphics>
    """
    graphicsAttrs = {
        'type': self.device,
        'port': self.port,
        'autoport': 'yes'}

    if self.device == 'spice':
        graphicsAttrs['tlsPort'] = self.tlsPort

    if not utils.tobool(self.specParams.get('disableTicketing', False)):
        graphicsAttrs['passwd'] = '*****'
        graphicsAttrs['passwdValidTo'] = '1970-01-01T00:00:01'

    if 'keyMap' in self.specParams:
        graphicsAttrs['keymap'] = self.specParams['keyMap']

    graphics = vmxml.Element('graphics', **graphicsAttrs)

    if not utils.tobool(self.specParams.get('copyPasteEnable', True)):
        clipboard = vmxml.Element('clipboard', copypaste='no')
        graphics.appendChild(clipboard)

    if not utils.tobool(self.specParams.get('fileTransferEnable', True)):
        filetransfer = vmxml.Element('filetransfer', enable='no')
        graphics.appendChild(filetransfer)

    if (self.device == 'spice' and
            'spiceSecureChannels' in self.specParams):
        for chan in self._getSpiceChannels():
            graphics.appendChildWithArgs('channel', name=chan,
                                         mode='secure')

    if self.specParams.get('displayNetwork'):
        graphics.appendChildWithArgs(
            'listen', type='network',
            network=netinfo.LIBVIRT_NET_PREFIX +
            self.specParams.get('displayNetwork'))
    else:
        graphics.setAttrs(listen='0')

    return graphics
def addNetwork(network, vlan=None, bonding=None, nics=None, ipaddr=None,
               netmask=None, prefix=None, mtu=None, gateway=None, force=False,
               configurator=None, bondingOptions=None, bridged=True,
               _netinfo=None, qosInbound=None, qosOutbound=None, **options):
    nics = nics or ()
    if _netinfo is None:
        _netinfo = netinfo.NetInfo()
    bridged = utils.tobool(bridged)

    if mtu:
        mtu = int(mtu)

    if prefix:
        if netmask:
            raise ConfigNetworkError(ne.ERR_BAD_PARAMS,
                                     'Both PREFIX and NETMASK supplied')
        else:
            try:
                netmask = netinfo.prefix2netmask(int(prefix))
            except ValueError as ve:
                raise ConfigNetworkError(ne.ERR_BAD_ADDR,
                                         "Bad prefix: %s" % ve)

    if not utils.tobool(force):
        logging.debug('validating network...')
        if network in _netinfo.networks:
            raise ConfigNetworkError(ne.ERR_USED_BRIDGE,
                                     'Network already exists')
        if bonding:
            _validateInterNetworkCompatibility(_netinfo, vlan, bonding,
                                               bridged)
        else:
            for nic in nics:
                _validateInterNetworkCompatibility(_netinfo, vlan, nic,
                                                   bridged)

    logging.info("Adding network %s with vlan=%s, bonding=%s, nics=%s,"
                 " bondingOptions=%s, mtu=%s, bridged=%s, options=%s",
                 network, vlan, bonding, nics, bondingOptions, mtu,
                 bridged, options)

    if configurator is None:
        configurator = Ifcfg()

    bootproto = options.pop('bootproto', None)

    netEnt = objectivizeNetwork(network if bridged else None, vlan, bonding,
                                bondingOptions, nics, mtu, ipaddr, netmask,
                                gateway, bootproto, _netinfo, configurator,
                                **options)

    # libvirt net addition must be done before creation so that on dhcp ifup
    # the dhcp hook will already see the network as belonging to vdsm.
    configurator.configureLibvirtNetwork(network, netEnt,
                                         qosInbound=qosInbound,
                                         qosOutbound=qosOutbound)
    netEnt.configure(**options)
def __init__(self, vm, dst='', dstparams='',
             mode=MODE_REMOTE, method=METHOD_ONLINE,
             tunneled=False, dstqemu='', abortOnError=False,
             consoleAddress=None, compressed=False,
             autoConverge=False, **kwargs):
    self.log = vm.log
    self._vm = vm
    self._dst = dst
    self._mode = mode
    if method != METHOD_ONLINE:
        self.log.warning(
            'migration method %s is deprecated, forced to "online"',
            method)
    self._dstparams = dstparams
    self._enableGuestEvents = kwargs.get('enableGuestEvents', False)
    self._machineParams = {}
    # TODO: utils.tobool shouldn't be used in this constructor, the
    # conversions should be handled properly in the API layer
    self._tunneled = utils.tobool(tunneled)
    self._abortOnError = utils.tobool(abortOnError)
    self._consoleAddress = consoleAddress
    self._dstqemu = dstqemu
    self._downtime = kwargs.get('downtime') or \
        config.get('vars', 'migration_downtime')
    self._maxBandwidth = int(
        kwargs.get('maxBandwidth') or
        config.getint('vars', 'migration_max_bandwidth')
    )
    self._autoConverge = utils.tobool(autoConverge)
    self._compressed = utils.tobool(compressed)
    self._incomingLimit = kwargs.get('incomingLimit')
    self._outgoingLimit = kwargs.get('outgoingLimit')
    self.status = {
        'status': {
            'code': 0,
            'message': 'Migration in progress'}}
    self._progress = 0
    self._thread = concurrent.thread(
        self.run, name='migsrc/' + self._vm.id[:8])
    self._preparingMigrationEvt = True
    self._migrationCanceledEvt = threading.Event()
    self._monitorThread = None
    self._destServer = None

    self._convergence_schedule = {
        'init': [],
        'stalling': []
    }
    self._use_convergence_schedule = False
    if 'convergenceSchedule' in kwargs:
        self._convergence_schedule = kwargs.get('convergenceSchedule')
        self._use_convergence_schedule = True
        self.log.debug('convergence schedule set to: %s',
                       str(self._convergence_schedule))
def delNetwork(network, force=False, configWriter=None, **options):
    _netinfo = NetInfo()

    validateBridgeName(network)
    if network not in networks().keys():
        raise ConfigNetworkError(ne.ERR_BAD_BRIDGE,
                                 "Cannot delete network %r: It doesn't exist"
                                 % network)

    nics, vlan, bonding = _netinfo.getNicsVlanAndBondingForNetwork(network)
    bridged = networks()[network]['bridged']

    logging.info("Removing network %s with vlan=%s, bonding=%s, nics=%s. "
                 "options=%s" % (network, vlan, bonding, nics, options))

    if not utils.tobool(force):
        if bonding:
            validateBondingName(bonding)
            if set(nics) != set(_netinfo.bondings[bonding]["slaves"]):
                raise ConfigNetworkError(
                    ne.ERR_BAD_NIC,
                    'delNetwork: %s are not all nics enslaved to %s'
                    % (nics, bonding))
        if vlan:
            # assertVlan(vlan)
            validateVlanId(vlan)
        if bridged:
            assertBridgeClean(network, vlan, bonding, nics)

    if configWriter is None:
        configWriter = ConfigWriter()

    if not utils.tobool(options.get('skipLibvirt', False)):
        removeLibvirtNetwork(network)

    if bridged:
        configWriter.setNewMtu(network)

    if network and bridged:
        ifdown(network)
        subprocess.call([constants.EXT_BRCTL, 'delbr', network])
        configWriter.removeBridge(network)

    if vlan:
        vlandev = (bonding or nics[0]) + '.' + vlan
        ifdown(vlandev)
        subprocess.call([constants.EXT_VCONFIG, 'rem', vlandev],
                        stderr=subprocess.PIPE)
        configWriter.removeVlan(vlan, bonding or nics[0])

    if bonding:
        if not bridged or not bondingOtherUsers(network, vlan, bonding):
            ifdown(bonding)
        if not bridged or not bondingOtherUsers(network, vlan, bonding):
            configWriter.removeBonding(bonding)

    for nic in nics:
        if not bridged or not nicOtherUsers(network, vlan, bonding, nic):
            ifdown(nic)
        if bridged and nicOtherUsers(network, vlan, bonding, nic):
            continue
        configWriter.removeNic(nic)
def check(options):
    if utils.tobool(options.get('connectivityCheck', True)):
        logging.debug('Checking connectivity...')
        if not _client_seen(_get_connectivity_timeout(options)):
            logging.info('Connectivity check failed, rolling back')
            raise ConfigNetworkError(ne.ERR_LOST_CONNECTION,
                                     'connectivity check failed')
def _getDriverXML(self):
    driver = vmxml.Element('driver')
    driverAttrs = {'name': 'qemu'}

    if self.blockDev:
        driverAttrs['io'] = 'native'
    else:
        driverAttrs['io'] = 'threads'

    if self.format == 'cow':
        driverAttrs['type'] = 'qcow2'
    elif self.format:
        driverAttrs['type'] = 'raw'

    if hasattr(self, 'specParams') and (
            'pinToIoThread' in self.specParams):
        driverAttrs['iothread'] = str(self.specParams['pinToIoThread'])

    driverAttrs['cache'] = self.cache

    if (self.propagateErrors == 'on' or
            utils.tobool(self.propagateErrors)):
        driverAttrs['error_policy'] = 'enospace'
    else:
        driverAttrs['error_policy'] = 'stop'

    driver.setAttrs(**driverAttrs)
    return driver
def _canonicalize_switch_type_bond(data):
    options = data.get('options', '')
    ovs = utils.rget(bonding.parse_bond_options(options), ('custom', 'ovs'))
    if utils.tobool(ovs):
        data['switch'] = 'ovs'
    elif 'switch' not in data:
        data['switch'] = 'legacy'
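# Illustrative usage of the canonicalization above. The bond attributes are
# hypothetical, and the exact option-string syntax accepted by
# bonding.parse_bond_options() ('custom=ovs=true' here) is an assumption,
# not something taken from this code.
bond_attrs = {'options': 'mode=4 custom=ovs=true'}
_canonicalize_switch_type_bond(bond_attrs)
assert bond_attrs['switch'] == 'ovs'

bond_attrs = {'options': 'mode=4'}
_canonicalize_switch_type_bond(bond_attrs)
assert bond_attrs['switch'] == 'legacy'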
def getXML(self):
    """
    Create domxml for a host device.

    <devices>
        <hostdev mode='subsystem' type='pci' managed='no'>
            <source>
                <address domain='0x0000' bus='0x06' slot='0x02'
                         function='0x0'/>
            </source>
            <boot order='1'/>
        </hostdev>
    </devices>
    """
    if (CAPABILITY_TO_XML_ATTR[
            self._deviceParams['capability']] == 'pci' and
            utils.tobool(self.specParams.get('iommuPlaceholder', False))):
        raise core.SkipDevice

    hostdev = self.createXmlElem(hwclass.HOSTDEV, None)
    hostdev.setAttrs(
        managed='no', mode='subsystem',
        type=CAPABILITY_TO_XML_ATTR[self._deviceParams['capability']])
    source = hostdev.appendChildWithArgs('source')

    source.appendChildWithArgs('address', **self.hostAddress)

    if hasattr(self, 'bootOrder'):
        hostdev.appendChildWithArgs('boot', order=self.bootOrder)

    if hasattr(self, 'address'):
        hostdev.appendChildWithArgs('address', **self.address)

    return hostdev
def _canonicalize_ip_default_route(data):
    if 'defaultRoute' not in data:
        data['defaultRoute'] = False

    custom_default_route = utils.rget(data, ('custom', 'default_route'))
    if custom_default_route is not None:
        data['defaultRoute'] = utils.tobool(custom_default_route)
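# Illustrative usage of _canonicalize_ip_default_route(); the attribute
# values are hypothetical. A string 'true' arriving via the legacy custom
# property is normalized into the boolean defaultRoute flag, and a missing
# flag defaults to False.
net_attrs = {'custom': {'default_route': 'true'}}
_canonicalize_ip_default_route(net_attrs)
assert net_attrs['defaultRoute']

net_attrs = {}
_canonicalize_ip_default_route(net_attrs)
assert not net_attrs['defaultRoute']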
def _delNetwork(network, vlan=None, bonding=None, nics=None, force=False,
                configurator=None, implicitBonding=True, _netinfo=None,
                keep_bridge=False, **options):
    if _netinfo is None:
        _netinfo = netinfo.NetInfo()

    if configurator is None:
        configurator = ConfiguratorClass()

    if network not in _netinfo.networks:
        logging.info("Network %r: doesn't exist in libvirt database",
                     network)
        vlan = _vlanToInternalRepresentation(vlan)
        _delNonVdsmNetwork(network, vlan, bonding, nics, _netinfo,
                           configurator)
        return

    nics, vlan, bonding = _netinfo.getNicsVlanAndBondingForNetwork(network)

    bridged = _netinfo.networks[network]['bridged']

    logging.info("Removing network %s with vlan=%s, bonding=%s, nics=%s, "
                 "keep_bridge=%s options=%s", network, vlan, bonding, nics,
                 keep_bridge, options)

    if not utils.tobool(force):
        _validateDelNetwork(network, vlan, bonding, nics, bridged, _netinfo)

    net_ent = _objectivizeNetwork(bridge=network if bridged else None,
                                  vlan=vlan, bonding=bonding, nics=nics,
                                  _netinfo=_netinfo,
                                  configurator=configurator,
                                  implicitBonding=implicitBonding)
    net_ent.ip.bootproto = ('dhcp' if _netinfo.networks[network]['dhcpv4']
                            else 'none')

    if bridged and keep_bridge:
        # we now leave the bridge intact but delete everything underneath it
        net_ent_to_remove = net_ent.port
        if net_ent_to_remove is not None:
            # the configurator will not allow us to remove a bridge interface
            # (be it vlan, bond or nic) unless it is not used anymore. Since
            # we are interested to leave the bridge here, we have to
            # disconnect it from the device so that the configurator will
            # allow its removal.
            _disconnect_bridge_port(net_ent.name, net_ent_to_remove.name)
    else:
        net_ent_to_remove = net_ent

    # We must first remove the libvirt network and then the network entity.
    # Otherwise if we first remove the network entity while the libvirt
    # network is still up, the network entity (In some flows) thinks that
    # it still has users and thus does not allow its removal
    configurator.removeLibvirtNetwork(network)

    if net_ent_to_remove is not None:
        logging.info('Removing network entity %s', net_ent_to_remove)
        net_ent_to_remove.remove()
    # We must remove the QoS last so that no devices nor networks mark the
    # QoS as used
    backing_device = hierarchy_backing_device(net_ent)
    if (backing_device is not None and
            os.path.exists(netinfo.NET_PATH + '/' + backing_device.name)):
        configurator.removeQoS(net_ent)
def _top_dev(network, attrs):
    if utils.tobool(attrs.get('bridged')):
        return network
    # bridgeless
    nics, vlan, _, bonding = netinfo.cache.NetInfo(
        netswitch.netinfo()).getNicsVlanAndBondingForNetwork(network)
    return vlan or bonding or nics[0]
def _check_connectivity(networks, bondings, options, logger):
    if utils.tobool(options.get('connectivityCheck', True)):
        logger.debug('Checking connectivity...')
        if not _clientSeen(_get_connectivity_timeout(options)):
            logger.info('Connectivity check failed, rolling back')
            raise ConfigNetworkError(ne.ERR_LOST_CONNECTION,
                                     'connectivity check failed')
def _getDriverXML(drive):
    driver = vmxml.Element('driver')
    driverAttrs = {'name': 'qemu'}

    if drive['diskType'] == DISK_TYPE.BLOCK:
        driverAttrs['io'] = 'native'
    else:
        driverAttrs['io'] = 'threads'

    if drive['format'] == 'cow':
        driverAttrs['type'] = 'qcow2'
    elif drive['format']:
        driverAttrs['type'] = 'raw'

    try:
        driverAttrs['iothread'] = str(drive['specParams']['pinToIoThread'])
    except KeyError:
        pass

    driverAttrs['cache'] = drive['cache']

    if (drive['propagateErrors'] == 'on' or
            utils.tobool(drive['propagateErrors'])):
        driverAttrs['error_policy'] = 'enospace'
    else:
        driverAttrs['error_policy'] = 'stop'

    driver.setAttrs(**driverAttrs)
    return driver
def appendClock(self):
    """
    Add <clock> element to domain:

    <clock offset="variable" adjustment="-3600">
        <timer name="rtc" tickpolicy="catchup">
    </clock>

    for hyperv:
    <clock offset="variable" adjustment="-3600">
        <timer name="hypervclock" tickpolicy="catchup">
    </clock>
    """
    if utils.tobool(self.conf.get('hypervEnable', 'false')):
        clockName = 'hypervclock'
    else:
        clockName = 'rtc'

    m = Element('clock', offset='variable',
                adjustment=str(self.conf.get('timeOffset', 0)))
    m.appendChildWithArgs('timer', name=clockName, tickpolicy='catchup')
    m.appendChildWithArgs('timer', name='pit', tickpolicy='delay')

    if self.arch == caps.Architecture.X86_64:
        m.appendChildWithArgs('timer', name='hpet', present='no')

    self.dom.appendChild(m)
def __init__(self, vm, delay, message, timeout, force, event):
    """
    :param vm:      Vm undergoing power-down action
    :param delay:   Graceful timeout for the user to close his applications
                    (in seconds). During this time no action is taken.
    :param message: Message to show the user.
    :param timeout: Timeout for each power-down method (guestAgent, acpi)
                    until it is considered unsuccessful and the callback
                    chain should try another alternative.
    :param force:   Use forceful power-down if all graceful methods fail?
    :param event:   Event object used to detect successful power-down.
    """
    self.vm = vm
    self.chain = utils.CallbackChain()
    self.delay = delay
    self.message = message
    self.timeout = timeout
    self.event = event

    # first try agent
    if vm.guestAgent.isResponsive():
        self.chain.addCallback(self.guestAgentCallback)

    # then acpi if enabled
    if utils.tobool(vm.conf.get('acpiEnable', 'true')):
        self.chain.addCallback(self.acpiCallback)

    if force:
        self.chain.addCallback(self.forceCallback)
def _top_dev(network, attrs):
    if utils.tobool(attrs.get('bridged')):
        return network
    # bridgeless
    nics, vlan, _, bonding = netinfo.cache.CachingNetInfo().\
        getNicsVlanAndBondingForNetwork(network)
    return vlan or bonding or nics[0]
def __init__(self, vm, delay, message, timeout, force, event):
    """
    :param vm:      Vm undergoing power-down action
    :param delay:   Graceful timeout for the user to close his applications
                    (in seconds). During this time no action is taken.
    :param message: Message to show the user.
    :param timeout: Timeout for each power-down method (guestAgent, acpi)
                    until it is considered unsuccessful and the callback
                    chain should try another alternative.
    :param force:   Use forceful power-down if all graceful methods fail?
    :param event:   Event object used to detect successful power-down.
    """
    self.vm = vm
    self.chain = utils.CallbackChain()
    self.delay = delay
    self.message = message
    self.timeout = timeout
    self.event = event

    # first try agent
    if vm.guestAgent and vm.guestAgent.isResponsive():
        self.chain.addCallback(self.guestAgentCallback)

    # then acpi if enabled
    if utils.tobool(vm.conf.get('acpiEnable', 'true')):
        self.chain.addCallback(self.acpiCallback)

    if force:
        self.chain.addCallback(self.forceCallback)
def getXML(self):
    """
    Create domxml for a host device.

    <devices>
        <hostdev mode='subsystem' type='pci' managed='no'>
            <source>
                <address domain='0x0000' bus='0x06' slot='0x02'
                         function='0x0'/>
            </source>
            <boot order='1'/>
        </hostdev>
    </devices>
    """
    if utils.tobool(self.specParams.get('iommuPlaceholder', False)):
        raise core.SkipDevice

    hostdev = self.createXmlElem(hwclass.HOSTDEV, None)
    hostdev.setAttrs(managed='no', mode='subsystem', type='pci')
    source = hostdev.appendChildWithArgs('source')

    source.appendChildWithArgs('address', **self.hostAddress)

    if hasattr(self, 'bootOrder'):
        hostdev.appendChildWithArgs('boot', order=self.bootOrder)

    if hasattr(self, 'address'):
        hostdev.appendChildWithArgs('address', **self.address)

    return hostdev
def appendClock(self):
    """
    Add <clock> element to domain:

    <clock offset="variable" adjustment="-3600">
        <timer name="rtc" tickpolicy="catchup">
    </clock>

    for hyperv:
    <clock offset="variable" adjustment="-3600">
        <timer name="hypervclock" present="yes">
        <timer name="rtc" tickpolicy="catchup">
    </clock>
    """
    m = Element('clock', offset='variable',
                adjustment=str(self.conf.get('timeOffset', 0)))
    if utils.tobool(self.conf.get('hypervEnable', 'false')):
        m.appendChildWithArgs('timer', name='hypervclock', present='yes')
    m.appendChildWithArgs('timer', name='rtc', tickpolicy='catchup')
    m.appendChildWithArgs('timer', name='pit', tickpolicy='delay')

    if cpuarch.is_x86(self.arch):
        m.appendChildWithArgs('timer', name='hpet', present='no')

    self.dom.appendChild(m)
def editNetwork(oldBridge, newBridge, vlan=None, bonding=None, nics=None,
                **options):
    configurator = Ifcfg()
    try:
        delNetwork(oldBridge, configurator=configurator, **options)
        addNetwork(newBridge, vlan=vlan, bonding=bonding, nics=nics,
                   configurator=configurator, **options)
    except:
        configurator.rollback()
        raise
    if utils.tobool(options.get('connectivityCheck', False)):
        if not clientSeen(int(options.get('connectivityTimeout',
                                          CONNECTIVITY_TIMEOUT_DEFAULT))):
            delNetwork(newBridge, force=True)
            configurator.rollback()
            return define.errCode['noConPeer']['status']['code']
def _delNetwork(network, vlan=None, bonding=None, nics=None, force=False,
                configurator=None, implicitBonding=True, _netinfo=None,
                keep_bridge=False, **options):
    if _netinfo is None:
        _netinfo = netinfo.NetInfo()

    if configurator is None:
        configurator = ConfiguratorClass()

    if network not in _netinfo.networks:
        logging.info("Network %r: doesn't exist in libvirt database",
                     network)
        vlan = _vlanToInternalRepresentation(vlan)
        _delNonVdsmNetwork(network, vlan, bonding, nics, _netinfo,
                           configurator)
        return

    nics, vlan, bonding = _netinfo.getNicsVlanAndBondingForNetwork(network)

    bridged = _netinfo.networks[network]['bridged']

    logging.info("Removing network %s with vlan=%s, bonding=%s, nics=%s, "
                 "keep_bridge=%s options=%s", network, vlan, bonding, nics,
                 keep_bridge, options)

    if not utils.tobool(force):
        _validateDelNetwork(network, vlan, bonding, nics, bridged, _netinfo)

    net_ent = _objectivizeNetwork(bridge=network if bridged else None,
                                  vlan=vlan, bonding=bonding, nics=nics,
                                  _netinfo=_netinfo,
                                  configurator=configurator,
                                  implicitBonding=implicitBonding)
    net_ent.ipv4.bootproto = (
        'dhcp' if _netinfo.networks[network]['dhcpv4'] else 'none')

    if bridged and keep_bridge:
        # we now leave the bridge intact but delete everything underneath it
        net_ent_to_remove = net_ent.port
        if net_ent_to_remove is not None:
            # the configurator will not allow us to remove a bridge interface
            # (be it vlan, bond or nic) unless it is not used anymore. Since
            # we are interested to leave the bridge here, we have to
            # disconnect it from the device so that the configurator will
            # allow its removal.
            _disconnect_bridge_port(net_ent_to_remove.name)
    else:
        net_ent_to_remove = net_ent

    # We must first remove the libvirt network and then the network entity.
    # Otherwise if we first remove the network entity while the libvirt
    # network is still up, the network entity (In some flows) thinks that
    # it still has users and thus does not allow its removal
    configurator.removeLibvirtNetwork(network)

    if net_ent_to_remove is not None:
        logging.info('Removing network entity %s', net_ent_to_remove)
        net_ent_to_remove.remove()
    # We must remove the QoS last so that no devices nor networks mark the
    # QoS as used
    backing_device = hierarchy_backing_device(net_ent)
    if (backing_device is not None and
            os.path.exists(netinfo.NET_PATH + '/' + backing_device.name)):
        configurator.removeQoS(net_ent)
def __init__(self, conf, log, arch):
    """
    Create the skeleton of a libvirt domain xml

    <domain type="kvm">
        <name>vmName</name>
        <uuid>9ffe28b6-6134-4b1e-8804-1185f49c436f</uuid>
        <memory>262144</memory>
        <currentMemory>262144</currentMemory>
        <vcpu current='smp'>160</vcpu>
        <devices>
        </devices>
    </domain>
    """
    self.conf = conf
    self.log = log

    self.arch = arch

    self.doc = xml.dom.minidom.Document()

    if utils.tobool(self.conf.get('kvmEnable', 'true')):
        domainType = 'kvm'
    else:
        domainType = 'qemu'

    domainAttrs = {'type': domainType}

    # Hack around libvirt issue BZ#988070, this is going to be removed as
    # soon as the domain XML format supports the specification of USB
    # keyboards
    if self.arch == caps.Architecture.PPC64:
        domainAttrs['xmlns:qemu'] = \
            'http://libvirt.org/schemas/domain/qemu/1.0'

    self.dom = Element('domain', **domainAttrs)
    self.doc.appendChild(self.dom)

    self.dom.appendChildWithArgs('name', text=self.conf['vmName'])
    self.dom.appendChildWithArgs('uuid', text=self.conf['vmId'])
    if 'numOfIoThreads' in self.conf:
        self.dom.appendChildWithArgs('iothreads',
                                     text=str(self.conf['numOfIoThreads']))
    memSizeKB = str(int(self.conf.get('memSize', '256')) * 1024)
    self.dom.appendChildWithArgs('memory', text=memSizeKB)
    self.dom.appendChildWithArgs('currentMemory', text=memSizeKB)
    if 'maxMemSize' in self.conf:
        maxMemSizeKB = str(int(self.conf['maxMemSize']) * 1024)
        maxMemSlots = str(self.conf.get('maxMemSlots', '16'))
        self.dom.appendChildWithArgs('maxMemory', text=maxMemSizeKB,
                                     slots=maxMemSlots)
    vcpu = self.dom.appendChildWithArgs('vcpu', text=self._getMaxVCpus())
    vcpu.setAttrs(**{'current': self._getSmp()})

    self._devices = Element('devices')
    self.dom.appendChild(self._devices)

    self.appendMetadata()
def appendClock(self):
    """
    Add <clock> element to domain:

    <clock offset="variable" adjustment="-3600">
        <timer name="rtc" tickpolicy="catchup">
    </clock>

    for hyperv:
    <clock offset="variable" adjustment="-3600">
        <timer name="hypervclock" present="yes">
        <timer name="rtc" tickpolicy="catchup">
    </clock>
    """
    m = Element('clock', offset='variable',
                adjustment=str(self.conf.get('timeOffset', 0)))
    if utils.tobool(self.conf.get('hypervEnable', 'false')):
        m.appendChildWithArgs('timer', name='hypervclock', present='yes')
    m.appendChildWithArgs('timer', name='rtc', tickpolicy='catchup')
    m.appendChildWithArgs('timer', name='pit', tickpolicy='delay')

    if self.arch == caps.Architecture.X86_64:
        m.appendChildWithArgs('timer', name='hpet', present='no')

    self.dom.appendChild(m)
def editNetwork(oldBridge, newBridge, vlan=None, bonding=None, nics=None,
                **options):
    with ConfiguratorClass() as configurator:
        _delNetwork(oldBridge, configurator=configurator, **options)
        _addNetwork(newBridge, vlan=vlan, bonding=bonding, nics=nics,
                    configurator=configurator, **options)
        if utils.tobool(options.get("connectivityCheck", False)):
            if not clientSeen(_get_connectivity_timeout(options)):
                _delNetwork(newBridge, force=True)
                raise ConfigNetworkError(ne.ERR_LOST_CONNECTION,
                                         "connectivity check failed")
def test_create_fix_param_kvmEnable(self):
    vmParams = {
        'vmId': self.uuid,
        'memSize': 8 * 1024,
        'vmType': 'kvm',
    }
    res = self.vm.create(vmParams)
    self.assertFalse(response.is_error(res))
    self.assertTrue(utils.tobool(vmParams.get('kvmEnable')))
def appendOs(self, use_serial_console=False):
    """
    Add <os> element to domain:

    <os>
        <type arch="x86_64" machine="pc">hvm</type>
        <boot dev="cdrom"/>
        <kernel>/tmp/vmlinuz-2.6.18</kernel>
        <initrd>/tmp/initrd-2.6.18.img</initrd>
        <cmdline>ARGs 1</cmdline>
        <smbios mode="sysinfo"/>
    </os>

    If 'use_serial_console' is true and we are on x86, use the console:

    <os>
        ...
        <bios useserial="yes"/>
    </os>
    """
    oselem = Element('os')
    self.dom.appendChild(oselem)

    DEFAULT_MACHINES = {cpuarch.X86_64: 'pc',
                        cpuarch.PPC64: 'pseries',
                        cpuarch.PPC64LE: 'pseries'}

    machine = self.conf.get('emulatedMachine', DEFAULT_MACHINES[self.arch])

    oselem.appendChildWithArgs('type', text='hvm', arch=self.arch,
                               machine=machine)

    qemu2libvirtBoot = {'a': 'fd', 'c': 'hd', 'd': 'cdrom', 'n': 'network'}
    for c in self.conf.get('boot', ''):
        oselem.appendChildWithArgs('boot', dev=qemu2libvirtBoot[c])

    if self.conf.get('initrd'):
        oselem.appendChildWithArgs('initrd', text=self.conf['initrd'])

    if self.conf.get('kernel'):
        oselem.appendChildWithArgs('kernel', text=self.conf['kernel'])

    if self.conf.get('kernelArgs'):
        oselem.appendChildWithArgs('cmdline', text=self.conf['kernelArgs'])

    if cpuarch.is_x86(self.arch):
        oselem.appendChildWithArgs('smbios', mode='sysinfo')

    if utils.tobool(self.conf.get('bootMenuEnable', False)):
        oselem.appendChildWithArgs('bootmenu', enable='yes',
                                   timeout=str(_BOOT_MENU_TIMEOUT))

    if use_serial_console and cpuarch.is_x86(self.arch):
        oselem.appendChildWithArgs('bios', useserial='yes')
def _check_connectivity(connectivity_check_networks, networks, bondings,
                        options, logger):
    if utils.tobool(options.get("connectivityCheck", True)):
        logger.debug("Checking connectivity...")
        if not clientSeen(_get_connectivity_timeout(options)):
            logger.info("Connectivity check failed, rolling back")
            for network in connectivity_check_networks:
                # If the new added network was created on top of
                # existing bond, we need to keep the bond on rollback
                # flow, else we will break the new created bond.
                _delNetwork(network, force=True,
                            implicitBonding=networks[network].get(
                                "bonding") in bondings)
            raise ConfigNetworkError(ne.ERR_LOST_CONNECTION,
                                     "connectivity check failed")
def __init__(self, *args, **kwargs):
    super(Console, self).__init__(*args, **kwargs)
    if not hasattr(self, 'specParams'):
        self.specParams = {}

    if utils.tobool(self.specParams.get('enableSocket', False)):
        self._path = os.path.join(
            constants.P_OVIRT_VMCONSOLES,
            self.conf['vmId'] + self.CONSOLE_EXTENSION)
    else:
        self._path = None
def editNetwork(oldBridge, newBridge, vlan=None, bonding=None, nics=None,
                **options):
    with ConfiguratorClass() as configurator:
        _delNetwork(oldBridge, configurator=configurator, **options)
        _addNetwork(newBridge, vlan=vlan, bonding=bonding, nics=nics,
                    configurator=configurator, **options)
        if utils.tobool(options.get('connectivityCheck', False)):
            if not clientSeen(_get_connectivity_timeout(options)):
                _delNetwork(newBridge, bypassValidation=True)
                raise ConfigNetworkError(ne.ERR_LOST_CONNECTION,
                                         'connectivity check failed')
def __init__(self, *args, **kwargs):
    super(Console, self).__init__(*args, **kwargs)
    if not hasattr(self, 'specParams'):
        self.specParams = {}

    if utils.tobool(self.specParams.get('enableSocket', False)):
        self._path = os.path.join(
            constants.P_OVIRT_VMCONSOLES,
            self.conf['vmId'] + self.CONSOLE_EXTENSION
        )
    else:
        self._path = None
def editNetwork(oldBridge, newBridge, vlan=None, bonding=None, nics=None,
                **options):
    with ConfiguratorClass() as configurator:
        delNetwork(oldBridge, configurator=configurator, **options)
        addNetwork(newBridge, vlan=vlan, bonding=bonding, nics=nics,
                   configurator=configurator, **options)
        if utils.tobool(options.get('connectivityCheck', False)):
            if not clientSeen(int(options.get('connectivityTimeout',
                                              CONNECTIVITY_TIMEOUT_DEFAULT))):
                delNetwork(newBridge, force=True)
                raise ConfigNetworkError(ne.ERR_LOST_CONNECTION,
                                         'connectivity check failed')
def __init__(self, conf, log, arch):
    """
    Create the skeleton of a libvirt domain xml

    <domain type="kvm">
        <name>vmName</name>
        <uuid>9ffe28b6-6134-4b1e-8804-1185f49c436f</uuid>
        <memory>262144</memory>
        <currentMemory>262144</currentMemory>
        <vcpu current='smp'>160</vcpu>
        <devices>
        </devices>
    </domain>
    """
    self.conf = conf
    self.log = log

    self.arch = arch

    self.doc = xml.dom.minidom.Document()

    if utils.tobool(self.conf.get('kvmEnable', 'true')):
        domainType = 'kvm'
    else:
        domainType = 'qemu'

    domainAttrs = {'type': domainType}

    self.dom = Element('domain', **domainAttrs)
    self.doc.appendChild(self.dom)

    self.dom.appendChildWithArgs('name', text=self.conf['vmName'])
    self.dom.appendChildWithArgs('uuid', text=self.conf['vmId'])
    if 'numOfIoThreads' in self.conf:
        self.dom.appendChildWithArgs('iothreads',
                                     text=str(self.conf['numOfIoThreads']))
    memSizeKB = str(int(self.conf.get('memSize', '256')) * 1024)
    self.dom.appendChildWithArgs('memory', text=memSizeKB)
    self.dom.appendChildWithArgs('currentMemory', text=memSizeKB)
    if 'maxMemSize' in self.conf:
        maxMemSizeKB = str(int(self.conf['maxMemSize']) * 1024)
        maxMemSlots = str(self.conf.get('maxMemSlots', '16'))
        self.dom.appendChildWithArgs('maxMemory', text=maxMemSizeKB,
                                     slots=maxMemSlots)
    vcpu = self.dom.appendChildWithArgs('vcpu', text=self._getMaxVCpus())
    vcpu.setAttrs(**{'current': self._getSmp()})

    self._devices = Element('devices')
    self.dom.appendChild(self._devices)

    self.appendMetadata()
def delNetwork(network, vlan=None, bonding=None, nics=None, force=False,
               configurator=None, implicitBonding=True, _netinfo=None,
               **options):
    if _netinfo is None:
        _netinfo = netinfo.NetInfo()

    if configurator is None:
        configurator = Ifcfg()

    if network not in _netinfo.networks:
        logging.info("Network %r: doesn't exist in libvirt database",
                     network)
        _delNonVdsmNetwork(network, vlan, bonding, nics, _netinfo,
                           configurator)
        return

    nics, vlan, bonding = _netinfo.getNicsVlanAndBondingForNetwork(network)

    bridged = _netinfo.networks[network]['bridged']

    logging.info("Removing network %s with vlan=%s, bonding=%s, nics=%s, "
                 "options=%s" % (network, vlan, bonding, nics, options))

    if not utils.tobool(force):
        _validateDelNetwork(network, vlan, bonding, nics, bridged, _netinfo)

    netEnt = objectivizeNetwork(bridge=network if bridged else None,
                                vlan=vlan, bonding=bonding, nics=nics,
                                _netinfo=_netinfo, configurator=configurator,
                                implicitBonding=implicitBonding)
    netEnt.ip.bootproto = netinfo.getBootProtocol(netEnt.name)

    # We must first remove the libvirt network and then the network entity.
    # Otherwise if we first remove the network entity while the libvirt
    # network is still up, the network entity (In some flows) thinks that
    # it still has users and thus does not allow its removal
    configurator.removeLibvirtNetwork(network)
    netEnt.remove()

    # We need to gather NetInfo again to refresh networks info from libvirt.
    # The deleted bridge should never be up at this stage.
    _netinfo = netinfo.NetInfo()
    if network in _netinfo.networks:
        raise ConfigNetworkError(
            ne.ERR_USED_BRIDGE,
            'delNetwork: bridge %s still exists' % network)
def editNetwork(oldBridge, newBridge, vlan=None, bonding=None, nics=None,
                **options):
    configWriter = ConfigWriter()
    try:
        delNetwork(oldBridge, configWriter=configWriter, **options)
        addNetwork(newBridge, vlan=vlan, bonding=bonding, nics=nics,
                   configWriter=configWriter, **options)
    except:
        configWriter.restoreAtomicBackup()
        raise
    if utils.tobool(options.get('connectivityCheck', False)):
        if not clientSeen(int(options.get('connectivityTimeout',
                                          CONNECTIVITY_TIMEOUT_DEFAULT))):
            delNetwork(newBridge, force=True)
            configWriter.restoreAtomicBackup()
            return define.errCode['noConPeer']['status']['code']
def appendFeatures(self):
    """
    Add machine features to domain xml.

    Currently only
        <features>
            <acpi/>
        </features>

    for hyperv:
        <features>
            <acpi/>
            <hyperv>
                <relaxed state='on'/>
            </hyperv>
        </features>
    """
    if (utils.tobool(self.conf.get('acpiEnable', 'true')) or
            utils.tobool(self.conf.get('hypervEnable', 'false'))):
        features = self.dom.appendChildWithArgs('features')

    if utils.tobool(self.conf.get('acpiEnable', 'true')):
        features.appendChildWithArgs('acpi')

    if utils.tobool(self.conf.get('hypervEnable', 'false')):
        hyperv = Element('hyperv')
        features.appendChild(hyperv)

        hyperv.appendChildWithArgs('relaxed', state='on')
        # turns off an internal Windows watchdog, and by doing so avoids
        # some high load BSODs.
        hyperv.appendChildWithArgs('vapic', state='on')

        # magic number taken from recommendations. References:
        # https://bugzilla.redhat.com/show_bug.cgi?id=1083529#c10
        # https://bugzilla.redhat.com/show_bug.cgi?id=1053846#c0
        hyperv.appendChildWithArgs(
            'spinlocks', state='on', retries='8191')
def appendInput(self):
    """
    Add input device.

    <input bus="ps2" type="mouse"/>
    """
    if utils.tobool(self.conf.get('tabletEnable')):
        inputAttrs = {'type': 'tablet', 'bus': 'usb'}
    elif self.arch == caps.Architecture.X86_64:
        inputAttrs = {'type': 'mouse', 'bus': 'ps2'}
    else:
        inputAttrs = {'type': 'mouse', 'bus': 'usb'}

    self._devices.appendChildWithArgs('input', **inputAttrs)
def __init__(self, conf, log, arch):
    """
    Create the skeleton of a libvirt domain xml

    <domain type="kvm">
        <name>vmName</name>
        <uuid>9ffe28b6-6134-4b1e-8804-1185f49c436f</uuid>
        <memory>262144</memory>
        <currentMemory>262144</currentMemory>
        <vcpu current='smp'>160</vcpu>
        <devices>
        </devices>
    </domain>
    """
    self.conf = conf
    self.log = log

    self.arch = arch

    self.doc = xml.dom.minidom.Document()

    if utils.tobool(self.conf.get('kvmEnable', 'true')):
        domainType = 'kvm'
    else:
        domainType = 'qemu'

    domainAttrs = {'type': domainType}

    # Hack around libvirt issue BZ#988070, this is going to be removed as
    # soon as the domain XML format supports the specification of USB
    # keyboards
    if self.arch == caps.Architecture.PPC64:
        domainAttrs['xmlns:qemu'] = \
            'http://libvirt.org/schemas/domain/qemu/1.0'

    self.dom = Element('domain', **domainAttrs)
    self.doc.appendChild(self.dom)

    self.dom.appendChildWithArgs('name', text=self.conf['vmName'])
    self.dom.appendChildWithArgs('uuid', text=self.conf['vmId'])
    memSizeKB = str(int(self.conf.get('memSize', '256')) * 1024)
    self.dom.appendChildWithArgs('memory', text=memSizeKB)
    self.dom.appendChildWithArgs('currentMemory', text=memSizeKB)
    vcpu = self.dom.appendChildWithArgs('vcpu', text=self._getMaxVCpus())
    vcpu.setAttrs(**{'current': self._getSmp()})

    self._devices = Element('devices')
    self.dom.appendChild(self._devices)
def _check_connectivity(connectivity_check_networks, networks, bondings,
                        options, logger):
    if utils.tobool(options.get('connectivityCheck', True)):
        logger.debug('Checking connectivity...')
        if not clientSeen(_get_connectivity_timeout(options)):
            logger.info('Connectivity check failed, rolling back')
            for network in connectivity_check_networks:
                # If the new added network was created on top of
                # existing bond, we need to keep the bond on rollback
                # flow, else we will break the new created bond.
                _delNetwork(network, bypassValidation=True,
                            implicitBonding=networks[network].get(
                                'bonding') in bondings)
            raise ConfigNetworkError(ne.ERR_LOST_CONNECTION,
                                     'connectivity check failed')
def _canonize_networks(nets):
    """
    Given networks configuration, explicitly add missing defaults.

    :param nets: The network configuration
    """
    for attrs in six.itervalues(nets):
        # If net is marked for removal, normalize the mark to boolean and
        # ignore all other attributes canonization.
        if 'remove' in attrs:
            attrs['remove'] = utils.tobool(attrs['remove'])
            if attrs['remove']:
                continue

        attrs['mtu'] = int(attrs['mtu']) if 'mtu' in attrs else (
            mtus.DEFAULT_MTU)
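# Illustrative input/output for _canonize_networks(); the network names and
# values are hypothetical. One net is marked for removal, one carries an
# explicit MTU, and one falls back to mtus.DEFAULT_MTU.
nets = {
    'net-removed': {'remove': 'true'},
    'net-jumbo': {'mtu': '9000'},
    'net-plain': {},
}
_canonize_networks(nets)
# nets is now:
#   {'net-removed': {'remove': True},
#    'net-jumbo': {'mtu': 9000},
#    'net-plain': {'mtu': mtus.DEFAULT_MTU}}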
def __init__(self, vm, dst='', dstparams='',
             mode='remote', method='online', tunneled=False,
             dstqemu='', abortOnError=False, **kwargs):
    self.log = vm.log
    self._vm = vm
    self._dst = dst
    self._mode = mode
    self._method = method
    self._dstparams = dstparams
    self._machineParams = {}
    self._tunneled = utils.tobool(tunneled)
    self._abortOnError = utils.tobool(abortOnError)
    self._dstqemu = dstqemu
    self._downtime = kwargs.get('downtime') or \
        config.get('vars', 'migration_downtime')
    self.status = {
        'status': {
            'code': 0,
            'message': 'Migration in progress'},
        'progress': 0}
    threading.Thread.__init__(self)
    self._preparingMigrationEvt = True
    self._migrationCanceledEvt = False
    self._monitorThread = None
def appendOs(self):
    """
    Add <os> element to domain:

    <os>
        <type arch="x86_64" machine="pc">hvm</type>
        <boot dev="cdrom"/>
        <kernel>/tmp/vmlinuz-2.6.18</kernel>
        <initrd>/tmp/initrd-2.6.18.img</initrd>
        <cmdline>ARGs 1</cmdline>
        <smbios mode="sysinfo"/>
    </os>
    """
    oselem = Element('os')
    self.dom.appendChild(oselem)

    DEFAULT_MACHINES = {
        caps.Architecture.X86_64: 'pc',
        caps.Architecture.PPC64: 'pseries',
        caps.Architecture.PPC64LE: 'pseries'
    }

    machine = self.conf.get('emulatedMachine', DEFAULT_MACHINES[self.arch])

    oselem.appendChildWithArgs('type', text='hvm', arch=self.arch,
                               machine=machine)

    qemu2libvirtBoot = {'a': 'fd', 'c': 'hd', 'd': 'cdrom', 'n': 'network'}
    for c in self.conf.get('boot', ''):
        oselem.appendChildWithArgs('boot', dev=qemu2libvirtBoot[c])

    if self.conf.get('initrd'):
        oselem.appendChildWithArgs('initrd', text=self.conf['initrd'])

    if self.conf.get('kernel'):
        oselem.appendChildWithArgs('kernel', text=self.conf['kernel'])

    if self.conf.get('kernelArgs'):
        oselem.appendChildWithArgs('cmdline', text=self.conf['kernelArgs'])

    if self.arch == caps.Architecture.X86_64:
        oselem.appendChildWithArgs('smbios', mode='sysinfo')

    if utils.tobool(self.conf.get('bootMenuEnable', False)):
        oselem.appendChildWithArgs('bootmenu', enable='yes')
def getXML(self):
    """
    Create domxml for disk/cdrom/floppy.

    <disk type='file' device='disk' snapshot='no'>
        <driver name='qemu' type='qcow2' cache='none'/>
        <source file='/path/to/image'/>
        <target dev='hda' bus='ide'/>
        <serial>54-a672-23e5b495a9ea</serial>
    </disk>
    """
    self._validate()
    diskelem = self.createXmlElem('disk', self.diskType,
                                  ['device', 'address', 'sgio'])
    diskelem.setAttrs(snapshot='no')

    diskelem.appendChild(_getSourceXML(self))

    if self.diskType == DISK_TYPE.NETWORK and hasattr(self, 'auth'):
        diskelem.appendChild(self._getAuthXML())

    diskelem.appendChild(self._getTargetXML())

    if self.extSharedState == DRIVE_SHARED_TYPE.SHARED:
        diskelem.appendChildWithArgs('shareable')

    if hasattr(self, 'readonly') and utils.tobool(self.readonly):
        diskelem.appendChildWithArgs('readonly')
    elif self.device == 'floppy' and not hasattr(self, 'readonly'):
        # floppies are used only internally for sysprep, so
        # they are readonly unless explicitly stated otherwise
        diskelem.appendChildWithArgs('readonly')

    if hasattr(self, 'serial') and self.device != 'lun':
        diskelem.appendChildWithArgs('serial', text=self.serial)

    if hasattr(self, 'bootOrder'):
        diskelem.appendChildWithArgs('boot', order=self.bootOrder)

    if self.device == 'disk' or self.device == 'lun':
        diskelem.appendChild(_getDriverXML(self))

    if hasattr(self, 'specParams') and 'ioTune' in self.specParams:
        diskelem.appendChild(self._getIotuneXML())

    return diskelem
def delNetwork(network, vlan=None, bonding=None, nics=None, force=False,
               configurator=None, implicitBonding=True, _netinfo=None,
               **options):
    if _netinfo is None:
        _netinfo = netinfo.NetInfo()

    if configurator is None:
        configurator = ConfiguratorClass()

    if network not in _netinfo.networks:
        logging.info("Network %r: doesn't exist in libvirt database",
                     network)
        vlan = _vlanToInternalRepresentation(vlan)
        _delNonVdsmNetwork(network, vlan, bonding, nics, _netinfo,
                           configurator)
        return

    nics, vlan, bonding = _netinfo.getNicsVlanAndBondingForNetwork(network)

    bridged = _netinfo.networks[network]['bridged']

    logging.info("Removing network %s with vlan=%s, bonding=%s, nics=%s, "
                 "options=%s" % (network, vlan, bonding, nics, options))

    if not utils.tobool(force):
        _validateDelNetwork(network, vlan, bonding, nics, bridged, _netinfo)

    netEnt = objectivizeNetwork(bridge=network if bridged else None,
                                vlan=vlan, bonding=bonding, nics=nics,
                                _netinfo=_netinfo, configurator=configurator,
                                implicitBonding=implicitBonding)
    netEnt.ip.bootproto = netinfo.getBootProtocol(netEnt.name)

    # We must first remove the libvirt network and then the network entity.
    # Otherwise if we first remove the network entity while the libvirt
    # network is still up, the network entity (In some flows) thinks that
    # it still has users and thus does not allow its removal
    configurator.removeLibvirtNetwork(network)
    netEnt.remove()