def __init__(self, vm, dst='', dstparams='', mode=MODE_REMOTE,
             method=METHOD_ONLINE, tunneled=False, dstqemu='',
             abortOnError=False, consoleAddress=None, compressed=False,
             autoConverge=False, recovery=False, **kwargs):
    """Prepare (but do not start) a migration of ``vm`` towards ``dst``.

    :param vm: the Vm instance to migrate; its logger is reused here.
    :param dst: destination host connection string.
    :param dstparams: opaque parameters forwarded to the destination.
    :param mode: migration mode (e.g. MODE_REMOTE).
    :param method: deprecated; any value other than METHOD_ONLINE only
        logs a warning and is otherwise ignored.
    :param tunneled: tunnel migration traffic through libvirt.
    :param dstqemu: optional alternate destination address for qemu traffic.
    :param abortOnError: abort the migration on I/O errors.
    :param consoleAddress: optional console address override.
    :param compressed: request compressed migration.
    :param autoConverge: request auto-converging migration.
    :param recovery: True when re-attaching to an in-flight migration.
    :param kwargs: optional extras: enableGuestEvents, downtime,
        maxBandwidth, incomingLimit, outgoingLimit, convergenceSchedule.
    """
    self.log = vm.log
    self._vm = vm
    self._dst = dst
    self._mode = mode
    if method != METHOD_ONLINE:
        self.log.warning(
            'migration method %s is deprecated, forced to "online"',
            method)
    self._dstparams = dstparams
    self._enableGuestEvents = kwargs.get('enableGuestEvents', False)
    self._machineParams = {}
    # TODO: conv.tobool shouldn't be used in this constructor, the
    # conversions should be handled properly in the API layer
    self._tunneled = conv.tobool(tunneled)
    self._abortOnError = conv.tobool(abortOnError)
    self._consoleAddress = consoleAddress
    self._dstqemu = dstqemu
    # config values serve as fallbacks when the caller gave no limits
    self._downtime = kwargs.get('downtime') or \
        config.get('vars', 'migration_downtime')
    self._maxBandwidth = int(
        kwargs.get('maxBandwidth') or
        config.getint('vars', 'migration_max_bandwidth')
    )
    self._autoConverge = conv.tobool(autoConverge)
    self._compressed = conv.tobool(compressed)
    self._incomingLimit = kwargs.get('incomingLimit')
    self._outgoingLimit = kwargs.get('outgoingLimit')
    self.status = {
        'status': {
            'code': 0,
            'message': 'Migration in progress'}}
    # we need to guard against concurrent updates only
    self._lock = threading.Lock()
    self._progress = 0
    # worker thread is created here but started later by the caller
    self._thread = concurrent.thread(
        self.run, name='migsrc/' + self._vm.id[:8])
    self._preparingMigrationEvt = True
    self._migrationCanceledEvt = threading.Event()
    self._monitorThread = None
    self._destServer = None
    self._convergence_schedule = {
        'init': [],
        'stalling': []
    }
    self._use_convergence_schedule = False
    if 'convergenceSchedule' in kwargs:
        self._convergence_schedule = kwargs.get('convergenceSchedule')
        self._use_convergence_schedule = True
        self.log.debug('convergence schedule set to: %s',
                       str(self._convergence_schedule))
    self._started = False
    self._failed = False
    self._recovery = recovery
def __init__(self, vm, dst='', dstparams='', mode=MODE_REMOTE,
             method=METHOD_ONLINE, tunneled=False, dstqemu='',
             abortOnError=False, consoleAddress=None, compressed=False,
             autoConverge=False, recovery=False, **kwargs):
    """Prepare (but do not start) a migration of ``vm`` towards ``dst``.

    ``method`` is deprecated: anything but METHOD_ONLINE only logs a
    warning.  Optional ``kwargs``: enableGuestEvents, downtime,
    maxBandwidth, incomingLimit, outgoingLimit, convergenceSchedule.
    ``recovery`` is True when re-attaching to an in-flight migration.
    """
    self.log = vm.log
    self._vm = vm
    self._dst = dst
    self._mode = mode
    if method != METHOD_ONLINE:
        self.log.warning(
            'migration method %s is deprecated, forced to "online"',
            method)
    self._dstparams = dstparams
    self._enableGuestEvents = kwargs.get('enableGuestEvents', False)
    self._machineParams = {}
    # TODO: conv.tobool shouldn't be used in this constructor, the
    # conversions should be handled properly in the API layer
    self._tunneled = conv.tobool(tunneled)
    self._abortOnError = conv.tobool(abortOnError)
    self._consoleAddress = consoleAddress
    self._dstqemu = dstqemu
    # config values serve as fallbacks when the caller gave no limits
    self._downtime = kwargs.get('downtime') or \
        config.get('vars', 'migration_downtime')
    self._maxBandwidth = int(
        kwargs.get('maxBandwidth') or
        config.getint('vars', 'migration_max_bandwidth')
    )
    self._autoConverge = conv.tobool(autoConverge)
    self._compressed = conv.tobool(compressed)
    self._incomingLimit = kwargs.get('incomingLimit')
    self._outgoingLimit = kwargs.get('outgoingLimit')
    self.status = {
        'status': {
            'code': 0,
            'message': 'Migration in progress'}}
    # we need to guard against concurrent updates only
    self._lock = threading.Lock()
    self._progress = 0
    # worker thread is created here but started later by the caller
    self._thread = concurrent.thread(
        self.run, name='migsrc/' + self._vm.id[:8])
    self._preparingMigrationEvt = True
    self._migrationCanceledEvt = threading.Event()
    self._monitorThread = None
    self._destServer = None
    self._convergence_schedule = {
        'init': [],
        'stalling': []
    }
    self._use_convergence_schedule = False
    if 'convergenceSchedule' in kwargs:
        self._convergence_schedule = kwargs.get('convergenceSchedule')
        self._use_convergence_schedule = True
        self.log.debug('convergence schedule set to: %s',
                       str(self._convergence_schedule))
    self._started = False
    self._recovery = recovery
def _add_network(network, configurator, _netinfo, bondattr, nameservers,
                 vlan=None, bonding=None, nic=None, ipaddr=None,
                 netmask=None, mtu=None, gateway=None, dhcpv6=None,
                 ipv6addr=None, ipv6gateway=None, ipv6autoconf=None,
                 bridged=True, hostQos=None, defaultRoute=None,
                 blockingdhcp=False, **options):
    """Add a new network to the host.

    Validates the requested name, builds the network entity object
    (bridge/vlan/bond/nic chain) via _objectivize_network() and asks the
    configurator to apply it.  Raises ConfigNetworkError for an empty
    name or an already-existing network.
    """
    # boolean-ish values may arrive as strings from the API layer
    if dhcpv6 is not None:
        dhcpv6 = tobool(dhcpv6)
    if ipv6autoconf is not None:
        ipv6autoconf = tobool(ipv6autoconf)

    if network == '':
        raise ConfigNetworkError(ne.ERR_BAD_BRIDGE,
                                 'Empty network names are not valid')
    logging.debug('Validating network...')
    if network in _netinfo.networks:
        raise ConfigNetworkError(
            ne.ERR_USED_BRIDGE, 'Network already exists (%s)' % (network,))

    logging.info('Adding network %s with vlan=%s, bonding=%s, nic=%s, '
                 'mtu=%s, bridged=%s, defaultRoute=%s, options=%s',
                 network, vlan, bonding, nic, mtu, bridged,
                 defaultRoute, options)

    bootproto = options.pop('bootproto', None)

    net_ent = _objectivize_network(
        bridge=network if bridged else None, vlan_id=vlan, bonding=bonding,
        bondattr=bondattr, nic=nic, mtu=mtu, ipaddr=ipaddr,
        netmask=netmask, gateway=gateway, bootproto=bootproto,
        dhcpv6=dhcpv6, blockingdhcp=blockingdhcp, ipv6addr=ipv6addr,
        ipv6gateway=ipv6gateway, ipv6autoconf=ipv6autoconf,
        defaultRoute=defaultRoute, nameservers=nameservers,
        _netinfo=_netinfo, configurator=configurator, opts=options)

    if bridged and network in _netinfo.bridges and configurator.owned_device(
            network):
        # The bridge already exists, update the configured entity to one level
        # below and update the mtu of the bridge.
        # The mtu is updated in the bridge configuration and on all the tap
        # devices attached to it (for the VMs).
        # (expecting the bridge running mtu to be updated by the kernel when
        # the device attached under it has its mtu updated)
        logging.info('Bridge %s already exists.', network)
        net_ent_to_configure = net_ent.port
        _update_mtu_for_an_existing_bridge(network, configurator, mtu)
    else:
        net_ent_to_configure = net_ent

    if net_ent_to_configure is not None:
        logging.info('Configuring device %s', net_ent_to_configure)
        net_ent_to_configure.configure(**options)
    if hostQos is not None:
        configurator.configureQoS(hostQos, net_ent)
def _canonicalize_ip_default_route(nets):
    """Normalize the 'defaultRoute' attribute of every requested network.

    Each net gets an explicit boolean 'defaultRoute' (honoring the
    deprecated custom/default_route property).  At most one network may
    request the default route; when a different network currently holds
    the role (per running config), it is added to the setup with the
    role cleared so the role is handed over in one transaction.
    """
    default_route_nets = []
    for net, data in _entities_to_canonicalize(nets):
        if 'defaultRoute' not in data:
            data['defaultRoute'] = False
        custom_default_route = _rget(data, ('custom', 'default_route'))
        if custom_default_route is not None:
            logging.warning('Custom property default_route is deprecated. '
                            'please use default route role.')
            data['defaultRoute'] = tobool(custom_default_route)
        if data['defaultRoute']:
            default_route_nets.append(net)

    if len(default_route_nets) > 1:
        raise ne.ConfigNetworkError(
            ne.ERR_BAD_PARAMS,
            'Only a single default route network is allowed.',
        )
    elif default_route_nets:
        existing_net_with_default_route = _net_with_default_route_from_config()
        if existing_net_with_default_route:
            netname, attrs = existing_net_with_default_route
            if netname not in nets:
                # Copy config from running and add to setup
                attrs.pop('nameservers', None)
                nets[netname] = attrs
                nets[netname]['defaultRoute'] = False
def _canonicalize_switch_type_bond(data):
    """Fill in the 'switch' attribute of a bond request.

    The deprecated custom/ovs bond option forces the 'ovs' switch;
    otherwise a missing attribute defaults to 'legacy'.
    """
    bond_options = data.get('options', '')
    ovs_requested = _rget(
        bonding.parse_bond_options(bond_options), ('custom', 'ovs'))
    if tobool(ovs_requested):
        data['switch'] = 'ovs'
    else:
        data.setdefault('switch', 'legacy')
def getXML(self):
    """
    Create domxml for a host device.

    <devices>
        <hostdev mode='subsystem' type='pci' managed='no'>
            <source>
                <address domain='0x0000' bus='0x06' slot='0x02'
                         function='0x0'/>
            </source>
            <boot order='1'/>
        </hostdev>
    </devices>

    :raises core.SkipDevice: for IOMMU placeholder devices, which must
        not be rendered into the domain XML.
    """
    if conv.tobool(self.specParams.get('iommuPlaceholder', False)):
        raise core.SkipDevice

    hostdev = self.createXmlElem(hwclass.HOSTDEV, None)
    hostdev.setAttrs(managed='no', mode='subsystem', type='pci')
    source = hostdev.appendChildWithArgs('source')
    # both host and guest PCI addresses are normalized to the
    # hexadecimal form libvirt expects
    source.appendChildWithArgs(
        'address', **_normalize_pci_address(**self.hostAddress))

    if hasattr(self, 'bootOrder'):
        hostdev.appendChildWithArgs('boot', order=self.bootOrder)

    if hasattr(self, 'address'):
        hostdev.appendChildWithArgs(
            'address', **_normalize_pci_address(**self.address))

    return hostdev
def _canonicalize_ip_default_route(data):
    """Normalize a single network's 'defaultRoute' attribute.

    Defaults to False when absent; the deprecated custom/default_route
    property, when present, overrides it (converted to bool).
    """
    data.setdefault('defaultRoute', False)
    legacy_value = _rget(data, ('custom', 'default_route'))
    if legacy_value is not None:
        data['defaultRoute'] = tobool(legacy_value)
def getXML(self):
    """
    Create domxml for a host device.

    <devices>
        <hostdev mode='subsystem' type='pci' managed='no'>
            <source>
                <address domain='0x0000' bus='0x06' slot='0x02'
                         function='0x0'/>
            </source>
            <boot order='1'/>
        </hostdev>
    </devices>

    :raises core.SkipDevice: for IOMMU placeholder devices, which must
        not be rendered into the domain XML.
    """
    if conv.tobool(self.specParams.get('iommuPlaceholder', False)):
        raise core.SkipDevice

    hostdev = self.createXmlElem(hwclass.HOSTDEV, None)
    hostdev.setAttrs(managed='no', mode='subsystem', type='pci')
    source = hostdev.appendChildWithArgs('source')
    # addresses are passed through as-is (no hex normalization here)
    source.appendChildWithArgs('address', **self.hostAddress)

    if hasattr(self, 'bootOrder'):
        hostdev.appendChildWithArgs('boot', order=self.bootOrder)

    if hasattr(self, 'address'):
        hostdev.appendChildWithArgs('address', **self.address)

    return hostdev
def __init__(self, vm, delay, message, timeout, force, event):
    """
    :param vm:      Vm undergoing power-down action
    :param delay:   Graceful timeout for the user to close his applications
                    (in seconds). During this time no action is taken.
    :param message: Message to show the user.
    :param timeout: Timeout for each power-down method (guestAgent, acpi)
                    until it is considered unsuccessful and the callback
                    chain should try another alternative.
    :param force:   Use forceful power-down if all graceful methods fail?
    :param event:   Event object used to detect successful power-down.
    """
    self.vm = vm
    self.chain = utils.CallbackChain()
    self.delay = delay
    self.message = message
    self.timeout = timeout
    self.event = event

    # the order of the callbacks defines the fallback order:
    # first try agent
    if vm.guestAgent.isResponsive():
        self.chain.addCallback(self.guestAgentCallback)
    # then acpi if enabled
    if conv.tobool(vm.conf.get('acpiEnable', 'true')):
        self.chain.addCallback(self.acpiCallback)
    # finally, forceful power-down as a last resort if requested
    if force:
        self.chain.addCallback(self.forceCallback)
def _canonicalize_ip_default_route(nets):
    """Normalize the 'defaultRoute' attribute of every requested network.

    Each net gets an explicit boolean 'defaultRoute' (honoring the
    deprecated custom/default_route property).  At most one network may
    request the default route; when a different network currently holds
    the role (per running config), it is added to the setup with the
    role cleared so the role is handed over in one transaction.
    """
    default_route_nets = []
    for net, data in _entities_to_canonicalize(nets):
        if 'defaultRoute' not in data:
            data['defaultRoute'] = False
        custom_default_route = _rget(data, ('custom', 'default_route'))
        if custom_default_route is not None:
            logging.warning('Custom property default_route is deprecated. '
                            'please use default route role.')
            data['defaultRoute'] = tobool(custom_default_route)
        if data['defaultRoute']:
            default_route_nets.append(net)

    if len(default_route_nets) > 1:
        raise ne.ConfigNetworkError(
            ne.ERR_BAD_PARAMS,
            'Only a single default route network is allowed.')
    elif default_route_nets:
        existing_net_with_default_route = _net_with_default_route_from_config()
        if existing_net_with_default_route:
            netname, attrs = existing_net_with_default_route
            if netname not in nets:
                # Copy config from running and add to setup
                attrs.pop('nameservers', None)
                nets[netname] = attrs
                nets[netname]['defaultRoute'] = False
def _getDriverXML(drive):
    """Build the <driver> element for a disk device dict."""
    attrs = {'name': 'qemu'}

    # native AIO for block devices, thread pool otherwise
    attrs['io'] = ('native' if drive['diskType'] == DISK_TYPE.BLOCK
                   else 'threads')

    disk_format = drive['format']
    if disk_format == 'cow':
        attrs['type'] = 'qcow2'
    elif disk_format:
        attrs['type'] = 'raw'

    if drive.get('discard'):
        attrs['discard'] = 'unmap'

    try:
        attrs['iothread'] = str(drive['specParams']['pinToIoThread'])
    except KeyError:
        # no pinning requested (or no specParams at all)
        pass

    attrs['cache'] = drive['cache']

    propagate = drive['propagateErrors']
    if propagate == 'on' or conv.tobool(propagate):
        attrs['error_policy'] = 'enospace'
    else:
        attrs['error_policy'] = 'stop'

    element = vmxml.Element('driver')
    element.setAttrs(**attrs)
    return element
def appendClock(self):
    """
    Add <clock> element to domain:

    <clock offset="variable" adjustment="-3600">
        <timer name="rtc" tickpolicy="catchup"/>
    </clock>

    for hyperv (on x86):
    <clock offset="variable" adjustment="-3600">
        <timer name="hypervclock" present="yes"/>
        <timer name="rtc" tickpolicy="catchup"/>
        <timer name="hpet" present="no"/>
    </clock>
    """
    m = vmxml.Element('clock', offset='variable',
                      adjustment=str(self.conf.get('timeOffset', 0)))
    # the hypervclock timer is only added for Windows guests that
    # requested hyperv enlightenments
    if conv.tobool(self.conf.get('hypervEnable', 'false')):
        m.appendChildWithArgs('timer', name='hypervclock', present='yes')
    m.appendChildWithArgs('timer', name='rtc', tickpolicy='catchup')
    m.appendChildWithArgs('timer', name='pit', tickpolicy='delay')
    if cpuarch.is_x86(self.arch):
        # hpet is explicitly disabled on x86 only
        m.appendChildWithArgs('timer', name='hpet', present='no')
    self.dom.appendChild(m)
def check(options):
    """Verify that a client reconnected after the network change.

    Honors the 'connectivityCheck' option (default enabled); raises
    ConfigNetworkError(ERR_LOST_CONNECTION) when no client was seen
    within the configured timeout, triggering a rollback.
    """
    if not tobool(options.get('connectivityCheck', True)):
        return
    logging.debug('Checking connectivity...')
    timeout = _get_connectivity_timeout(options)
    if not _client_seen(timeout):
        logging.info('Connectivity check failed, rolling back')
        raise ConfigNetworkError(ne.ERR_LOST_CONNECTION,
                                 'connectivity check failed')
def __init__(self, log, **kwargs):
    """Initialize a Drive device from engine-supplied parameters.

    :param log: logger passed to the base device class.
    :param kwargs: engine device parameters; notable keys: serial,
        imageID, device, reqsize, truesize, apparentsize, discard,
        readonly.
    """
    if not kwargs.get('serial'):
        # Fall back to (the last 20 chars of) the image UUID.
        # BUG FIX: the original sliced the string *literal*
        # ('imageID'[-20:] == 'imageID', only 7 chars long), so the
        # intended truncation of the value never happened.
        self.serial = (kwargs.get('imageID') or '')[-20:]
    self._lock = threading.Lock()
    self._path = None
    self._diskType = None
    # device needs to be initialized in prior
    # cause diskType setter uses self.device
    # in diskType validation
    self.device = kwargs.get('device', 'disk')
    super(Drive, self).__init__(log, **kwargs)
    if not hasattr(self, 'vm_custom'):
        self.vm_custom = {}
    self._monitorable = True
    self._threshold_state = BLOCK_THRESHOLD.UNSET
    # Keep sizes as int
    self.reqsize = int(kwargs.get('reqsize', '0'))  # Backward compatible
    self.truesize = int(kwargs.get('truesize', '0'))
    self.apparentsize = int(kwargs.get('apparentsize', '0'))
    self.name = drivename.make(self.iface, self.index)
    self.cache = config.get('vars', 'qemu_drive_cache')
    self.discard = kwargs.get('discard', False)
    # Engine can send 'true' and 'false' as strings
    # floppies are used only internally for sysprep, so
    # they are readonly unless explicitely stated otherwise
    self.readonly = conv.tobool(
        kwargs.get('readonly', self.device == 'floppy'))
    # Used for chunked drives or drives replicating to chunked replica.
    self.blockinfo = None
    self._customize()
    self._setExtSharedState()
def _canonicalize_ip_default_route(data):
    """Normalize a single network's 'defaultRoute' attribute.

    Defaults to False when absent; the deprecated custom/default_route
    property, when present, overrides it (converted to bool) after
    logging a deprecation warning.
    """
    data.setdefault('defaultRoute', False)
    legacy_value = _rget(data, ('custom', 'default_route'))
    if legacy_value is not None:
        logging.warning('Custom property default_route is deprecated. '
                        'please use default route role.')
        data['defaultRoute'] = tobool(legacy_value)
def test_create_fix_param_kvmEnable(self):
    """Verify Vm.create() fixes up a missing 'kvmEnable' parameter.

    The test passes a params dict without 'kvmEnable' and checks that
    create() succeeded and — presumably by mutating vmParams in place —
    left a truthy 'kvmEnable' value behind; TODO confirm the in-place
    mutation against the Vm.create() implementation.
    """
    vmParams = {
        'vmId': self.uuid,
        'memSize': 8 * 1024,
        'vmType': 'kvm',
    }
    res = self.vm.create(vmParams)
    self.assertFalse(response.is_error(res))
    self.assertTrue(conv.tobool(vmParams.get('kvmEnable')))
def appendOs(self, use_serial_console=False):
    """
    Add <os> element to domain:

    <os>
        <type arch="x86_64" machine="pc">hvm</type>
        <boot dev="cdrom"/>
        <kernel>/tmp/vmlinuz-2.6.18</kernel>
        <initrd>/tmp/initrd-2.6.18.img</initrd>
        <cmdline>ARGs 1</cmdline>
        <smbios mode="sysinfo"/>
    </os>

    If 'use_serial_console' is true and we are on x86, use the console:

    <os>
        ...
        <bios useserial="yes"/>
    </os>
    """
    oselem = vmxml.Element('os')
    self.dom.appendChild(oselem)

    machine = self.conf.get('emulatedMachine',
                            _DEFAULT_MACHINES[self.arch])
    oselem.appendChildWithArgs('type', text='hvm', arch=self.arch,
                               machine=machine)

    # map the qemu single-letter boot order to libvirt device names
    qemu2libvirtBoot = {'a': 'fd', 'c': 'hd', 'd': 'cdrom', 'n': 'network'}
    for c in self.conf.get('boot', ''):
        oselem.appendChildWithArgs('boot', dev=qemu2libvirtBoot[c])

    if self.conf.get('initrd'):
        oselem.appendChildWithArgs('initrd', text=self.conf['initrd'])

    if self.conf.get('kernel'):
        oselem.appendChildWithArgs('kernel', text=self.conf['kernel'])

    if self.conf.get('kernelArgs'):
        oselem.appendChildWithArgs('cmdline', text=self.conf['kernelArgs'])

    if cpuarch.is_x86(self.arch):
        oselem.appendChildWithArgs('smbios', mode='sysinfo')

    if conv.tobool(self.conf.get('bootMenuEnable', False)):
        oselem.appendChildWithArgs('bootmenu', enable='yes',
                                   timeout=str(_BOOT_MENU_TIMEOUT))

    if use_serial_console and cpuarch.is_x86(self.arch):
        oselem.appendChildWithArgs('bios', useserial='yes')
def __init__(self, *args, **kwargs):
    """Initialize the console device.

    When the 'enableSocket' spec param is set, the console is backed
    by a UNIX socket under the ovirt vmconsoles directory; otherwise
    no socket path is used.
    """
    super(Console, self).__init__(*args, **kwargs)
    if not hasattr(self, 'specParams'):
        self.specParams = {}
    use_socket = conv.tobool(self.specParams.get('enableSocket', False))
    self._path = None
    if use_socket:
        socket_name = self.vmid + self.CONSOLE_EXTENSION
        self._path = os.path.join(constants.P_OVIRT_VMCONSOLES,
                                  socket_name)
def reattach_detachable(device_name, pci_reattach=True):
    """Give a detached host device back to the host.

    Reattaches an assignable PCI device to its host driver (when
    pci_reattach is True); raises UnsuitableSCSIDevice for a SCSI
    device lacking a udev path.  Other capabilities are a no-op.
    """
    libvirt_device, params = _get_device_ref_and_params(device_name)
    capability = CAPABILITY_TO_XML_ATTR[params['capability']]

    if capability == 'pci':
        if conv.tobool(params['is_assignable']) and pci_reattach:
            libvirt_device.reAttach()
    elif capability == 'scsi' and 'udev_path' not in params:
        raise UnsuitableSCSIDevice
def reattach_detachable(device_name, pci_reattach=True):
    """Give a detached host device back to the host.

    Reattaches an assignable PCI device to its host driver (when
    pci_reattach is True); raises UnsuitableSCSIDevice for a SCSI
    device lacking a udev path.  Other capabilities are a no-op.
    """
    libvirt_device, device_params = _get_device_ref_and_params(device_name)
    cap = CAPABILITY_TO_XML_ATTR[device_params['capability']]

    if cap == 'pci' and conv.tobool(device_params['is_assignable']):
        if pci_reattach:
            libvirt_device.reAttach()
        return
    if cap == 'scsi' and 'udev_path' not in device_params:
        raise UnsuitableSCSIDevice
def detach_detachable(device_name):
    """Detach a host device so it can be passed through to a VM.

    Detaches an assignable PCI device from its host driver; raises
    UnsuitableSCSIDevice for a SCSI device lacking a udev path.
    Returns the device parameter dict in every case.
    """
    libvirt_device, device_params = _get_device_ref_and_params(device_name)
    capability = CAPABILITY_TO_XML_ATTR[device_params['capability']]

    if capability == 'scsi':
        if 'udev_path' not in device_params:
            raise UnsuitableSCSIDevice
    elif capability == 'pci' and conv.tobool(device_params['is_assignable']):
        libvirt_device.detachFlags(None)

    return device_params
def detach_detachable(device_name):
    """Detach a host device so it can be passed through to a VM.

    Detaches an assignable PCI device from its host driver; raises
    UnsuitableSCSIDevice for a SCSI device lacking a udev path.
    Returns the device parameter dict in every case.
    """
    dev, params = _get_device_ref_and_params(device_name)
    cap = CAPABILITY_TO_XML_ATTR[params['capability']]

    if cap == 'pci':
        if conv.tobool(params['is_assignable']):
            dev.detachFlags(None)
    elif cap == 'scsi':
        if 'udev_path' not in params:
            raise UnsuitableSCSIDevice

    return params
def __init__(self, *args, **kwargs):
    """Initialize the console device.

    The socket path is only set when the 'enableSocket' spec param is
    truthy; otherwise the console has no backing UNIX socket.
    """
    super(Console, self).__init__(*args, **kwargs)
    if not hasattr(self, 'specParams'):
        self.specParams = {}
    enabled = self.specParams.get('enableSocket', False)
    self._path = (
        os.path.join(constants.P_OVIRT_VMCONSOLES,
                     self.vmid + self.CONSOLE_EXTENSION)
        if conv.tobool(enabled)
        else None
    )
def _elem_to_keyvalue(elem): key = elem.tag value = elem.text data_type = elem.attrib.get('type') if data_type is not None: if data_type == 'bool': value = conv.tobool(value) elif data_type == 'int': value = int(value) elif data_type == 'float': value = float(value) # elif data_type == 'str': do nothing return key, value
def appendFeatures(self):
    """
    Add machine features to domain xml.

    Currently only
    <features>
        <acpi/>
    <features/>

    for hyperv:
    <features>
        <acpi/>
        <hyperv>
            <relaxed state='on'/>
        </hyperv>
    <features/>
    """
    # the <features> element is only emitted when at least one feature
    # is enabled
    if (conv.tobool(self.conf.get('acpiEnable', 'true')) or
            conv.tobool(self.conf.get('hypervEnable', 'false'))):
        features = self.dom.appendChildWithArgs('features')

    if conv.tobool(self.conf.get('acpiEnable', 'true')):
        features.appendChildWithArgs('acpi')

    if conv.tobool(self.conf.get('hypervEnable', 'false')):
        hyperv = vmxml.Element('hyperv')
        features.appendChild(hyperv)

        hyperv.appendChildWithArgs('relaxed', state='on')
        # turns off an internal Windows watchdog, and by doing so avoids
        # some high load BSODs.
        hyperv.appendChildWithArgs('vapic', state='on')
        # magic number taken from recomendations. References:
        # https://bugzilla.redhat.com/show_bug.cgi?id=1083529#c10
        # https://bugzilla.redhat.com/show_bug.cgi?id=1053846#c0
        hyperv.appendChildWithArgs(
            'spinlocks', state='on', retries='8191')
def appendInput(self):
    """
    Add input device.

    <input bus="ps2" type="mouse"/>
    """
    if conv.tobool(self.conf.get('tabletEnable')):
        attrs = {'type': 'tablet', 'bus': 'usb'}
    else:
        # ps2 mouse on x86, usb mouse everywhere else
        bus = 'ps2' if cpuarch.is_x86(self.arch) else 'usb'
        attrs = {'type': 'mouse', 'bus': bus}

    self._devices.appendChildWithArgs('input', **attrs)
def __init__(self, conf, log, arch):
    """
    Create the skeleton of a libvirt domain xml

    <domain type="kvm">
        <name>vmName</name>
        <uuid>9ffe28b6-6134-4b1e-8804-1185f49c436f</uuid>
        <memory>262144</memory>
        <currentMemory>262144</currentMemory>
        <vcpu current='smp'>160</vcpu>
        <devices>
        </devices>
    </domain>
    """
    self.conf = conf
    self.log = log
    self.arch = arch

    # fall back to plain qemu (TCG) emulation when KVM is disabled
    if conv.tobool(self.conf.get('kvmEnable', 'true')):
        domainType = 'kvm'
    else:
        domainType = 'qemu'
    domainAttrs = {'type': domainType}
    self.dom = vmxml.Element('domain', **domainAttrs)

    self.dom.appendChildWithArgs('name', text=self.conf['vmName'])
    self.dom.appendChildWithArgs('uuid', text=self.conf['vmId'])
    if 'numOfIoThreads' in self.conf:
        self.dom.appendChildWithArgs('iothreads',
                                     text=str(self.conf['numOfIoThreads']))
    # memSize is scaled by 1024 before being written — presumably MiB
    # to the KiB libvirt expects; TODO confirm with callers
    memSizeKB = str(int(self.conf.get('memSize', '256')) * 1024)
    self.dom.appendChildWithArgs('memory', text=memSizeKB)
    self.dom.appendChildWithArgs('currentMemory', text=memSizeKB)
    if 'maxMemSize' in self.conf:
        maxMemSizeKB = str(int(self.conf['maxMemSize']) * 1024)
        maxMemSlots = str(self.conf.get('maxMemSlots', '16'))
        self.dom.appendChildWithArgs('maxMemory', text=maxMemSizeKB,
                                     slots=maxMemSlots)
    vcpu = self.dom.appendChildWithArgs('vcpu', text=self._getMaxVCpus())
    vcpu.setAttrs(**{'current': self._getSmp()})

    self._devices = vmxml.Element('devices')
    self.dom.appendChild(self._devices)
    self.appendMetadata()
def getXML(self):
    """
    Create domxml for disk/cdrom/floppy.

    <disk type='file' device='disk' snapshot='no'>
        <driver name='qemu' type='qcow2' cache='none'/>
        <source file='/path/to/image'/>
        <target dev='hda' bus='ide'/>
        <serial>54-a672-23e5b495a9ea</serial>
    </disk>
    """
    self._validate()
    diskelem = self.createXmlElem('disk', self.diskType,
                                  ['device', 'address', 'sgio'])
    diskelem.setAttrs(snapshot='no')

    diskelem.appendChild(_getSourceXML(self))

    if self.diskType == DISK_TYPE.NETWORK and hasattr(self, 'auth'):
        diskelem.appendChild(self._getAuthXML())

    diskelem.appendChild(self._getTargetXML())

    if self.extSharedState == DRIVE_SHARED_TYPE.SHARED:
        diskelem.appendChildWithArgs('shareable')

    if hasattr(self, 'readonly') and conv.tobool(self.readonly):
        diskelem.appendChildWithArgs('readonly')
    elif self.device == 'floppy' and not hasattr(self, 'readonly'):
        # floppies are used only internally for sysprep, so
        # they are readonly unless explicitely stated otherwise
        diskelem.appendChildWithArgs('readonly')

    # LUN devices carry no serial element
    if getattr(self, 'serial', False) and self.device != 'lun':
        diskelem.appendChildWithArgs('serial', text=self.serial)

    if hasattr(self, 'bootOrder'):
        diskelem.appendChildWithArgs('boot', order=self.bootOrder)

    # only real disks and LUNs get a <driver> element
    if self.device == 'disk' or self.device == 'lun':
        diskelem.appendChild(_getDriverXML(self))

    if self.iotune:
        diskelem.appendChild(self._getIotuneXML())

    return diskelem
def _get_unified_persistence_ifcfg():
    """Generate the set of ifcfg files that result of the current unified
    persistent networks.

    Returns a set of ifcfg-, rule- and route- file paths for every
    device (nic, bond, vlan, bridge) described by the persistent
    configuration.
    """
    persistent_config = PersistentConfig()
    if not persistent_config:
        return set()

    IFCFG_PATH = NET_CONF_PREF + '%s'
    RULE_PATH = os.path.join(NET_CONF_DIR, 'rule-%s')
    ROUTE_PATH = os.path.join(NET_CONF_DIR, 'route-%s')
    ifcfgs = set()

    # FIX: dict.iteritems() is Python 2 only; items() iterates the same
    # pairs and also works on Python 3 (matching the ported variant of
    # this function elsewhere in the tree).
    for bonding, bonding_attr in persistent_config.bonds.items():
        bond_nics = set(bonding_attr.get('nics', []))
        ifcfgs.add(IFCFG_PATH % bonding)
        for nic in bond_nics:
            ifcfgs.add(IFCFG_PATH % nic)

    for network, network_attr in persistent_config.networks.items():
        top_level_device = None

        nic = network_attr.get('nic')
        if nic:
            ifcfgs.add(IFCFG_PATH % nic)
            top_level_device = nic

        network_bonding = network_attr.get('bonding', None)
        if network_bonding:
            top_level_device = network_bonding

        vlan_id = network_attr.get('vlan')
        if vlan_id is not None:
            underlying_device = network_bonding or network_attr.get('nic', '')
            vlan_device = '.'.join([underlying_device, str(vlan_id)])
            top_level_device = vlan_device
            ifcfgs.add(IFCFG_PATH % vlan_device)

        if tobool(network_attr.get('bridged', True)):
            ifcfgs.add(IFCFG_PATH % network)
            top_level_device = network

        ifcfgs.add(RULE_PATH % top_level_device)
        ifcfgs.add(ROUTE_PATH % top_level_device)

    return ifcfgs
def _get_unified_persistence_ifcfg():
    """generate the set of ifcfg files that result of the current unified
    persistent networks

    Returns a set of ifcfg-, rule- and route- file paths for every
    device (nic, bond, vlan, bridge) described by the persistent
    configuration.
    """
    persistent_config = PersistentConfig()
    if not persistent_config:
        return set()

    IFCFG_PATH = NET_CONF_PREF + '%s'
    RULE_PATH = os.path.join(NET_CONF_DIR, 'rule-%s')
    ROUTE_PATH = os.path.join(NET_CONF_DIR, 'route-%s')
    ifcfgs = set()

    for bonding, bonding_attr in six.viewitems(persistent_config.bonds):
        bond_nics = set(bonding_attr.get('nics', []))
        ifcfgs.add(IFCFG_PATH % bonding)
        for nic in bond_nics:
            ifcfgs.add(IFCFG_PATH % nic)

    for network, network_attr in six.viewitems(persistent_config.networks):
        # track the device the network's rules/routes are attached to:
        # nic < bond < vlan < bridge (last one wins)
        top_level_device = None

        nic = network_attr.get('nic')
        if nic:
            ifcfgs.add(IFCFG_PATH % nic)
            top_level_device = nic

        network_bonding = network_attr.get('bonding', None)
        if network_bonding:
            top_level_device = network_bonding

        vlan_id = network_attr.get('vlan')
        if vlan_id is not None:
            underlying_device = network_bonding or network_attr.get('nic', '')
            vlan_device = '.'.join([underlying_device, str(vlan_id)])
            top_level_device = vlan_device
            ifcfgs.add(IFCFG_PATH % vlan_device)

        if tobool(network_attr.get('bridged', True)):
            ifcfgs.add(IFCFG_PATH % network)
            top_level_device = network

        ifcfgs.add(RULE_PATH % top_level_device)
        ifcfgs.add(ROUTE_PATH % top_level_device)

    return ifcfgs
def __init__(self, log, **kwargs):
    """Initialize a Drive device from engine-supplied parameters.

    :param log: logger passed to the base device class.
    :param kwargs: engine device parameters; notable keys: serial,
        imageID, device, reqsize, truesize, apparentsize, discard,
        readonly.
    """
    if not kwargs.get('serial'):
        # Fall back to (the last 20 chars of) the image UUID.
        # BUG FIX: the original sliced the string *literal*
        # ('imageID'[-20:] == 'imageID', only 7 chars long), so the
        # intended truncation of the value never happened.
        self.serial = (kwargs.get('imageID') or '')[-20:]
    self._lock = threading.Lock()
    self._monitor_lock = threading.Lock()
    self._path = None
    self._diskType = None
    # device needs to be initialized in prior
    # cause diskType setter uses self.device
    # in diskType validation
    self.device = kwargs.get('device', 'disk')
    # Must be initialized for getXML.
    self.propagateErrors = 'off'
    self._iotune = {}
    self.managed = False
    self.scratch_disk = None
    self.managed_reservation = False
    super(Drive, self).__init__(log, **kwargs)
    if not hasattr(self, 'vm_custom'):
        self.vm_custom = {}
    self._monitorable = True
    self.threshold_state = BLOCK_THRESHOLD.UNSET
    self.extend_time = 0.0  # Distant past.
    # Keep sizes as int
    self.reqsize = int(kwargs.get('reqsize', '0'))  # Backward compatible
    self.truesize = int(kwargs.get('truesize', '0'))
    self.apparentsize = int(kwargs.get('apparentsize', '0'))
    self.name = drivename.make(self.iface, self.index)
    if not hasattr(self, 'cache'):
        self._set_cache()
    self.discard = kwargs.get('discard', False)
    # Engine can send 'true' and 'false' as strings
    # floppies are used only internally for sysprep, so
    # they are readonly unless explicitely stated otherwise
    self.readonly = conv.tobool(
        kwargs.get('readonly', self.device == 'floppy'))
    # Used for chunked drives or drives replicating to chunked replica.
    self._block_info = None
    self._setExtSharedState()
def reattach_detachable(device_name, pci_reattach=True):
    """Give a previously detached host device back to the host.

    Undoes detach_detachable(): reattaches an assignable PCI device to
    its host driver (when pci_reattach is True) and asks supervdsm to
    revoke the permissions granted for PCI/USB/SCSI passthrough.

    :raises UnsuitableSCSIDevice: for a SCSI device with no udev path.
    """
    libvirt_device, device_params = _get_device_ref_and_params(device_name)
    capability = CAPABILITY_TO_XML_ATTR[device_params['capability']]

    if capability == 'pci' and conv.tobool(device_params['is_assignable']):
        if pci_reattach:
            libvirt_device.reAttach()
        # the IOMMU group ownership is dropped even without reattach
        supervdsm.getProxy().rmAppropriateIommuGroup(
            device_params['iommu_group'])
    elif capability == 'usb':
        supervdsm.getProxy().rmAppropriateUSBDevice(
            device_params['address']['bus'],
            device_params['address']['device'])
    elif capability == 'scsi':
        if 'udev_path' not in device_params:
            raise UnsuitableSCSIDevice
        supervdsm.getProxy().rmAppropriateSCSIDevice(
            device_name, device_params['udev_path'])
def reattach_detachable(device_name, pci_reattach=True):
    """Give a previously detached host device back to the host.

    Undoes detach_detachable(): reattaches an assignable PCI device to
    its host driver (when pci_reattach is True) and asks supervdsm to
    revoke the permissions granted for PCI/USB/SCSI passthrough.

    :raises UnsuitableSCSIDevice: for a SCSI device with no udev path.
    """
    libvirt_device, device_params = _get_device_ref_and_params(device_name)
    capability = CAPABILITY_TO_XML_ATTR[device_params['capability']]

    if capability == 'pci' and conv.tobool(
            device_params['is_assignable']):
        if pci_reattach:
            libvirt_device.reAttach()
        # the IOMMU group ownership is dropped even without reattach
        supervdsm.getProxy().rmAppropriateIommuGroup(
            device_params['iommu_group'])
    elif capability == 'usb':
        supervdsm.getProxy().rmAppropriateUSBDevice(
            device_params['address']['bus'],
            device_params['address']['device'])
    elif capability == 'scsi':
        if 'udev_path' not in device_params:
            raise UnsuitableSCSIDevice
        supervdsm.getProxy().rmAppropriateSCSIDevice(
            device_name, device_params['udev_path'])
def _elem_to_keyvalue(elem): key = elem.tag value = elem.text data_type = elem.attrib.get('type') if data_type is None: # no data_type -> fallback to string if value is None: value = '' else: if value is None: if data_type == 'str': value = '' else: raise ValueError() if data_type == 'bool': value = conv.tobool(value) elif data_type == 'int': value = int(value) elif data_type == 'float': value = float(value) return key, value
def detach_detachable(device_name):
    """Detach a host device so it can be passed through to a VM.

    Detaches an assignable PCI device from its host driver and asks
    supervdsm to grant the permissions needed for PCI/USB/SCSI
    passthrough.  Returns the device parameter dict in every case.

    :raises NoIOMMUSupportException: for a PCI device with no IOMMU group.
    :raises UnsuitableSCSIDevice: for a SCSI device with no udev path.
    """
    libvirt_device, device_params = _get_device_ref_and_params(device_name)
    capability = CAPABILITY_TO_XML_ATTR[device_params['capability']]

    if capability == 'pci' and conv.tobool(device_params['is_assignable']):
        try:
            iommu_group = device_params['iommu_group']
        except KeyError:
            raise NoIOMMUSupportException('hostdev passthrough without iommu')
        # permissions are granted before the actual detach
        supervdsm.getProxy().appropriateIommuGroup(iommu_group)
        libvirt_device.detachFlags(None)
    elif capability == 'usb':
        supervdsm.getProxy().appropriateUSBDevice(
            device_params['address']['bus'],
            device_params['address']['device'])
    elif capability == 'scsi':
        if 'udev_path' not in device_params:
            raise UnsuitableSCSIDevice
        supervdsm.getProxy().appropriateSCSIDevice(device_name,
                                                   device_params['udev_path'])

    return device_params
def _elem_to_keyvalue(elem): key = elem.tag value = elem.text data_type = elem.attrib.get('type') if data_type is None: # no data_type -> fallback to string if value is None: value = '' else: if value is None: if data_type == 'str': value = '' else: raise ValueError( 'unknown type hint for %r (%s): %r' % ( key, elem.attrib, value)) if data_type == 'bool': value = conv.tobool(value) elif data_type == 'int': value = int(value) elif data_type == 'float': value = float(value) return key, value
def __init__(self, log, **kwargs):
    """Initialize a Drive device from engine-supplied parameters.

    :param log: logger passed to the base device class.
    :param kwargs: engine device parameters; notable keys: serial,
        imageID, device, reqsize, truesize, apparentsize, discard,
        readonly.
    """
    if not kwargs.get('serial'):
        # Fall back to (the last 20 chars of) the image UUID.
        # BUG FIX: the original sliced the string *literal*
        # ('imageID'[-20:] == 'imageID', only 7 chars long), so the
        # intended truncation of the value never happened.
        self.serial = (kwargs.get('imageID') or '')[-20:]
    self._lock = threading.Lock()
    self._path = None
    self._diskType = None
    # device needs to be initialized in prior
    # cause diskType setter uses self.device
    # in diskType validation
    self.device = kwargs.get('device', 'disk')
    # Must be initialized for getXML.
    self.propagateErrors = 'off'
    self._iotune = {}
    super(Drive, self).__init__(log, **kwargs)
    if not hasattr(self, 'vm_custom'):
        self.vm_custom = {}
    self._monitorable = True
    self._threshold_state = BLOCK_THRESHOLD.UNSET
    # Keep sizes as int
    self.reqsize = int(kwargs.get('reqsize', '0'))  # Backward compatible
    self.truesize = int(kwargs.get('truesize', '0'))
    self.apparentsize = int(kwargs.get('apparentsize', '0'))
    self.name = drivename.make(self.iface, self.index)
    if not hasattr(self, 'cache'):
        self._set_cache()
    self.discard = kwargs.get('discard', False)
    # Engine can send 'true' and 'false' as strings
    # floppies are used only internally for sysprep, so
    # they are readonly unless explicitely stated otherwise
    self.readonly = conv.tobool(
        kwargs.get('readonly', self.device == 'floppy'))
    # Used for chunked drives or drives replicating to chunked replica.
    self.blockinfo = None
    self._setExtSharedState()
def main(domain, event, phase, stdin=sys.stdin, stdout=sys.stdout):
    """libvirt hook entry point for the OVS migration hook.

    Reads the domain XML from ``stdin``, runs _process_domxml() over it
    and writes the (possibly modified) XML to ``stdout``.  Exits
    silently (status 0) when the hook is disabled in config or the
    event is neither a migration nor a restore.
    """
    if not tobool(config.get('vars', 'migration_ovs_hook_enabled')):
        sys.exit(0)
    if event not in ('migrate', 'restore'):
        sys.exit(0)

    with _logging(_DEBUG_MODE) as log:
        if log:
            print('\nHook input args are:', domain, event, phase, file=log)

        tree = ET.parse(stdin)

        try:
            _process_domxml(tree)
        except:
            # deliberately broad: log the traceback for debugging, then
            # re-raise so libvirt sees the hook failure
            traceback.print_exc(file=log)
            raise

        tree.write(stdout)

        if log:
            tree.write(log)
            print('\nEnd of hook', file=log)
def detach_detachable(device_name):
    """Detach a host device so it can be passed through to a VM.

    Detaches an assignable PCI device from its host driver and asks
    supervdsm to grant the permissions needed for PCI/USB/SCSI
    passthrough.  Returns the device parameter dict in every case.

    :raises NoIOMMUSupportException: for a PCI device with no IOMMU group.
    :raises UnsuitableSCSIDevice: for a SCSI device with no udev path.
    """
    libvirt_device, device_params = _get_device_ref_and_params(device_name)
    capability = CAPABILITY_TO_XML_ATTR[device_params['capability']]

    if capability == 'pci' and conv.tobool(
            device_params['is_assignable']):
        try:
            iommu_group = device_params['iommu_group']
        except KeyError:
            raise NoIOMMUSupportException('hostdev passthrough without iommu')
        # permissions are granted before the actual detach
        supervdsm.getProxy().appropriateIommuGroup(iommu_group)
        libvirt_device.detachFlags(None)
    elif capability == 'usb':
        supervdsm.getProxy().appropriateUSBDevice(
            device_params['address']['bus'],
            device_params['address']['device'])
    elif capability == 'scsi':
        if 'udev_path' not in device_params:
            raise UnsuitableSCSIDevice
        supervdsm.getProxy().appropriateSCSIDevice(device_name,
                                                   device_params['udev_path'])

    return device_params
def main(domain, event, phase, stdin=sys.stdin, stdout=sys.stdout, *args):
    """
    Libvirt hook entry point for migration/restore domxml rewriting.

    Exits silently when the hook is disabled or the event is irrelevant;
    otherwise rewrites the domain XML read from *stdin* and emits it on
    *stdout*, optionally mirroring input/output to a debug log.
    """
    hook_enabled = tobool(config.get('vars', 'migration_ovs_hook_enabled'))
    if not hook_enabled or event not in ('migrate', 'restore'):
        sys.exit(0)

    with _logging(_DEBUG_MODE) as log:
        if log:
            print('\nHook input args are:', domain, event, phase, file=log)

        tree = ET.parse(stdin)
        try:
            _process_domxml(tree)
        except:  # deliberately broad: record the traceback, then re-raise
            traceback.print_exc(file=log)
            raise

        tree.write(stdout)
        if log:
            tree.write(log)
            print('\nEnd of hook', file=log)
def _canonicalize_remove(data):
    """
    Normalize the optional 'remove' flag of *data* to a bool in place.

    Returns the normalized flag, or False when no 'remove' key exists.
    """
    if 'remove' not in data:
        return False
    data['remove'] = tobool(data['remove'])
    return data['remove']
def __init__(self, vm, dst='', dstparams='', mode=MODE_REMOTE,
             method=METHOD_ONLINE, tunneled=False, dstqemu='',
             abortOnError=False, consoleAddress=None, compressed=False,
             autoConverge=False, recovery=False, encrypted=False,
             **kwargs):
    """
    Prepare a source-side migration job for *vm*.

    This constructor only records parameters, computes the libvirt
    migration flags and builds the worker thread object that will
    execute run(); the migration work itself happens in that thread.

    NOTE(review): ``method`` is accepted for API compatibility but is
    not referenced anywhere in this body.
    """
    self.log = vm.log
    self._vm = vm
    self._dst = dst
    self._mode = mode
    self._dstparams = dstparams
    self._enableGuestEvents = kwargs.get('enableGuestEvents', False)
    # TODO: conv.tobool shouldn't be used in this constructor, the
    # conversions should be handled properly in the API layer
    self._consoleAddress = consoleAddress
    self._dstqemu = dstqemu
    self._encrypted = encrypted
    # Explicit maxBandwidth wins; otherwise fall back to the configured
    # default.  int() also accepts string values coming from the API.
    self._maxBandwidth = int(
        kwargs.get('maxBandwidth') or
        config.getint('vars', 'migration_max_bandwidth'))
    self._incomingLimit = kwargs.get('incomingLimit')
    self._outgoingLimit = kwargs.get('outgoingLimit')
    # Initial status reported to callers while the migration runs.
    self.status = {
        'status': {
            'code': 0,
            'message': 'Migration in progress'
        }
    }
    # we need to guard against concurrent updates only
    self._lock = threading.Lock()
    self._progress = 0
    self._thread = concurrent.thread(
        self.run, name='migsrc/' + self._vm.id[:8])
    self._preparingMigrationEvt = True
    self._migrationCanceledEvt = threading.Event()
    self._monitorThread = None
    self._destServer = None
    self._legacy_payload_path = None
    if 'convergenceSchedule' in kwargs:
        self._convergence_schedule = kwargs['convergenceSchedule']
    else:
        # Needed for Engine < 4.3 or when legacy migration is used
        # as a supposedly rare fallback in Engine >= 4.3.
        self._convergence_schedule = \
            self._legacy_convergence_schedule(kwargs.get('downtime'))
        self.log.info(
            'using a computed convergence schedule for '
            'a legacy migration: %s', self._convergence_schedule)
    self.log.debug('convergence schedule set to: %s',
                   str(self._convergence_schedule))
    self._started = False
    self._failed = False
    self._recovery = recovery
    # Normalize possibly-string booleans from the API layer before
    # computing the libvirt migration flags (see TODO above).
    tunneled = conv.tobool(tunneled)
    abortOnError = conv.tobool(abortOnError)
    compressed = conv.tobool(compressed)
    autoConverge = conv.tobool(autoConverge)
    self._migration_flags = self._calculate_migration_flags(
        tunneled, abortOnError, compressed, autoConverge, encrypted)
def _objectivize_network(bridge=None, vlan=None, vlan_id=None, bonding=None,
                         bondattr=None, nic=None, mtu=None, ipaddr=None,
                         netmask=None, gateway=None, bootproto=None,
                         ipv6addr=None, ipv6gateway=None, ipv6autoconf=None,
                         dhcpv6=None, defaultRoute=None, nameservers=None,
                         _netinfo=None, configurator=None, blockingdhcp=None,
                         opts=None):
    """
    Constructs an object hierarchy that describes the network configuration
    that is passed in the parameters.

    :param bridge: name of the bridge.
    :param vlan: vlan device name.
    :param vlan_id: vlan tag id.
    :param bonding: name of the bond.
    :param bondattr: bond attributes if defined.
    :param nic: name of the nic.
    :param mtu: the desired network maximum transmission unit.
    :param ipaddr: IPv4 address in dotted decimal format.
    :param netmask: IPv4 mask in dotted decimal format.
    :param gateway: IPv4 address in dotted decimal format.
    :param bootproto: protocol for getting IP config for the net, e.g., 'dhcp'
    :param ipv6addr: IPv6 address in format address[/prefixlen].
    :param ipv6gateway: IPv6 address in format address[/prefixlen].
    :param ipv6autoconf: whether to use IPv6's stateless autoconfiguration.
    :param dhcpv6: whether to use DHCPv6.
    :param nameservers: a list of DNS servers.
    :param _netinfo: network information snapshot.
    :param configurator: instance to use to apply the network configuration.
    :param blockingdhcp: whether to acquire dhcp IP config in a synced manner.
    :param defaultRoute: Should this network's gateway be set in the main
                         routing table?
    :param opts: misc options received by the callee, e.g., {'stp': True}.
                 this function can modify the dictionary.
    :returns: the top object of the hierarchy.
    """
    # Default the collaborators that were not supplied by the caller.
    if _netinfo is None:
        _netinfo = CachingNetInfo()
    if configurator is None:
        configurator = Ifcfg(_netinfo)
    if opts is None:
        opts = {}
    if bootproto == 'none':
        bootproto = None

    # Build the device stack bottom-up: bond/nic -> vlan -> bridge.
    top_net_dev = None
    if bonding:
        if bondattr is None:
            bondattr = {}
        top_net_dev = Bond.objectivize(
            bonding, configurator, options=bondattr.get('options'),
            nics=bondattr.get('nics'), hwaddr=bondattr.get('hwaddr'),
            mtu=mtu, _netinfo=_netinfo,
            on_removal_just_detach_from_network=True)
    elif nic:
        # A nic already enslaved to a bond cannot be used directly.
        bond = _netinfo.getBondingForNic(nic)
        if bond:
            raise ConfigNetworkError(ne.ERR_USED_NIC, 'Nic %s already '
                                     'enslaved to %s' % (nic, bond))
        top_net_dev = Nic(nic, configurator, mtu=mtu, _netinfo=_netinfo)
    if vlan is not None:
        # Explicit vlan_id wins; otherwise look the tag up in the snapshot.
        tag = _netinfo.vlans[vlan]['vlanid'] if vlan_id is None else vlan_id
        top_net_dev = Vlan(top_net_dev, tag, configurator, mtu=mtu, name=vlan)
    elif vlan_id is not None:
        top_net_dev = Vlan(top_net_dev, vlan_id, configurator, mtu=mtu)
    if bridge is not None:
        top_net_dev = Bridge(
            bridge, configurator, port=top_net_dev, mtu=mtu,
            stp=opts.get('stp', None))
        # Inherit DUID from the port's possibly still active DHCP lease so the
        # bridge gets the same IP address. (BZ#1219429)
        if top_net_dev.port and bootproto == 'dhcp':
            top_net_dev.duid_source = top_net_dev.port.name
    if top_net_dev is None:
        raise ConfigNetworkError(ne.ERR_BAD_PARAMS, 'Network defined without '
                                 'devices.')
    # Attach the IP configuration to whatever ended up on top of the stack.
    top_net_dev.ipv4 = IPv4(ipaddr, netmask, gateway, defaultRoute, bootproto)
    top_net_dev.ipv6 = IPv6(
        ipv6addr, ipv6gateway, defaultRoute, ipv6autoconf, dhcpv6)
    # During rollback DHCP acquisition is always synchronous.
    top_net_dev.blockingdhcp = (configurator._inRollback or
                                tobool(blockingdhcp))
    top_net_dev.nameservers = nameservers
    return top_net_dev
def getXML(self):
    """
    Create domxml for a graphics framebuffer.

    <graphics type='spice' port='5900' tlsPort='5901' autoport='yes'
              listen='0' keymap='en-us'
              passwdValidTo='1970-01-01T00:00:01'>
      <listen type='address' address='0'/>
      <clipboard copypaste='no'/>
    </graphics>
    OR
    <graphics type='vnc' port='5900' autoport='yes' listen='0'
              keymap='en-us' passwdValidTo='1970-01-01T00:00:01'>
      <listen type='address' address='0'/>
    </graphics>
    """
    graphicsAttrs = {
        'type': self.device,
        'port': self.port,
        'autoport': 'yes',
    }
    if config.getboolean('vars', 'ssl'):
        graphicsAttrs['defaultMode'] = 'secure'
    # the default, 'any', has automatic fallback to
    # insecure mode, so works with ssl off.
    if self.device == 'spice':
        graphicsAttrs['tlsPort'] = self.tlsPort
    self._setPasswd(graphicsAttrs)
    if 'keyMap' in self.specParams:
        graphicsAttrs['keymap'] = self.specParams['keyMap']
    graphics = vmxml.Element('graphics', **graphicsAttrs)
    # copy/paste and file transfer are enabled by default; emit the
    # disabling child elements only when explicitly turned off.
    if not conv.tobool(self.specParams.get('copyPasteEnable', True)):
        clipboard = vmxml.Element('clipboard', copypaste='no')
        graphics.appendChild(clipboard)
    if not conv.tobool(
            self.specParams.get('fileTransferEnable', True)):
        filetransfer = vmxml.Element('filetransfer', enable='no')
        graphics.appendChild(filetransfer)
    # This list could be dropped in 4.1. We should keep only
    # the default mode, which is both simpler and safer.
    if (self.device == 'spice' and
            'spiceSecureChannels' in self.specParams):
        for chan in self._getSpiceChannels():
            graphics.appendChildWithArgs('channel', name=chan,
                                         mode='secure')
    # For the listen type IP to be used, the display network must be OVS.
    # We assume that the cluster in which the host operates is OVS enabled
    # and all other hosts in the cluster have the migration hook installed.
    # The migration hook is responsible to convert ip to net and vice versa
    # NOTE(review): direct indexing assumes 'displayNetwork' is always
    # present in specParams (a missing key raises KeyError) -- confirm
    # against the callers that build specParams.
    display_network = self.specParams['displayNetwork']
    display_ip = self.specParams.get('displayIp', '0')
    if (display_network and display_ip != '0' and
            supervdsm.getProxy().ovs_bridge(display_network)):
        graphics.appendChildWithArgs(
            'listen', type='address', address=display_ip)
    elif display_network:
        graphics.appendChildWithArgs(
            'listen', type='network',
            network=libvirtnetwork.netname_o2l(display_network))
    else:
        graphics.setAttrs(listen='0')
    return graphics
def getXML(self):
    """
    Create domxml for a graphics framebuffer.

    <graphics type='spice' port='5900' tlsPort='5901' autoport='yes'
              listen='0' keymap='en-us'
              passwdValidTo='1970-01-01T00:00:01'>
      <listen type='address' address='0'/>
      <clipboard copypaste='no'/>
    </graphics>
    OR
    <graphics type='vnc' port='5900' autoport='yes' listen='0'
              keymap='en-us' passwdValidTo='1970-01-01T00:00:01'>
      <listen type='address' address='0'/>
    </graphics>
    """
    attrs = {
        'type': self.device,
        'port': self.port,
        'autoport': 'yes',
    }
    # the default, 'any', has automatic fallback to insecure mode,
    # so it works with ssl off.
    if config.getboolean('vars', 'ssl'):
        attrs['defaultMode'] = 'secure'
    if self.device == 'spice':
        attrs['tlsPort'] = self.tlsPort
    self._setPasswd(attrs)
    if 'keyMap' in self.specParams:
        attrs['keymap'] = self.specParams['keyMap']

    graphics = vmxml.Element('graphics', **attrs)

    # copy/paste and file transfer default to enabled; only emit the
    # disabling child elements when explicitly turned off.
    copy_paste_enabled = conv.tobool(
        self.specParams.get('copyPasteEnable', True))
    if not copy_paste_enabled:
        graphics.appendChild(vmxml.Element('clipboard', copypaste='no'))
    file_transfer_enabled = conv.tobool(
        self.specParams.get('fileTransferEnable', True))
    if not file_transfer_enabled:
        graphics.appendChild(vmxml.Element('filetransfer', enable='no'))

    # This list could be dropped in 4.1. We should keep only
    # the default mode, which is both simpler and safer.
    if self.device == 'spice' and 'spiceSecureChannels' in self.specParams:
        for channel_name in self._getSpiceChannels():
            graphics.appendChildWithArgs('channel', name=channel_name,
                                         mode='secure')

    display_network = self.specParams.get('displayNetwork')
    if display_network:
        graphics.appendChildWithArgs(
            'listen', type='network',
            network=libvirtnetwork.netname_o2l(display_network))
    else:
        graphics.setAttrs(listen='0')
    return graphics
def _set_blocking_dhcp(networks):
    """
    Mark every network that acquires its IP config via DHCP (v4 or v6)
    as requiring synchronous (blocking) DHCP acquisition.
    """
    for net_attrs in six.itervalues(networks):
        uses_dhcpv4 = net_attrs.get('bootproto') == 'dhcp'
        if uses_dhcpv4 or tobool(net_attrs.get('dhcpv6')):
            net_attrs['blockingdhcp'] = True
def _canonicalize_switch_type_net(data):
    """
    Default the network's 'switch' type in place: the legacy custom
    'ovs' flag forces 'ovs', otherwise an absent value becomes 'legacy'.
    """
    ovs_requested = tobool(_rget(data, ('custom', 'ovs')))
    if ovs_requested:
        data['switch'] = 'ovs'
    else:
        data.setdefault('switch', 'legacy')
def __init__(self, vm, dst='', dstparams='', mode=MODE_REMOTE,
             method=METHOD_ONLINE, tunneled=False, dstqemu='',
             abortOnError=False, consoleAddress=None, compressed=False,
             autoConverge=False, recovery=False, encrypted=False,
             cpusets=None, parallel=None, numaNodesets=None, **kwargs):
    """
    Prepare a source-side migration job for *vm*.

    Only records parameters, computes the libvirt migration flags and
    builds the worker thread object that will execute run(); the
    migration work itself happens in that thread.

    NOTE(review): ``method`` is accepted for API compatibility but is
    not referenced anywhere in this body.
    """
    self.log = vm.log
    self._vm = vm
    self._dom = DomainAdapter(self._vm)
    self._dst = dst
    self._mode = mode
    self._dstparams = dstparams
    self._enableGuestEvents = kwargs.get('enableGuestEvents', False)
    # TODO: conv.tobool shouldn't be used in this constructor, the
    # conversions should be handled properly in the API layer
    self._consoleAddress = consoleAddress
    self._dstqemu = dstqemu
    self._encrypted = encrypted
    # The sentinel value means "parallel connections disabled"; store
    # None instead so later checks are a simple "is not None".
    if parallel == self._PARALLEL_CONNECTIONS_DISABLED_VALUE:
        parallel = None
    self._parallel = parallel
    # Explicit maxBandwidth wins; otherwise fall back to the configured
    # default.  int() also accepts string values coming from the API.
    self._maxBandwidth = int(
        kwargs.get('maxBandwidth') or
        config.getint('vars', 'migration_max_bandwidth'))
    self._incomingLimit = kwargs.get('incomingLimit')
    self._outgoingLimit = kwargs.get('outgoingLimit')
    # Initial status reported to callers while the migration runs.
    self.status = {
        'status': {
            'code': 0,
            'message': 'Migration in progress'
        }
    }
    # we need to guard against concurrent updates only
    self._lock = threading.Lock()
    self._progress = 0
    self._thread = concurrent.thread(
        self.run, name='migsrc/' + self._vm.id[:8])
    self._preparingMigrationEvt = True
    self._migrationCanceledEvt = threading.Event()
    self._monitorThread = None
    self._destServer = None
    self._legacy_payload_path = None
    if 'convergenceSchedule' in kwargs:
        self._convergence_schedule = kwargs['convergenceSchedule']
    else:
        # Needed for Engine < 4.3 or when legacy migration is used
        # as a supposedly rare fallback in Engine >= 4.3.
        self._convergence_schedule = \
            self._legacy_convergence_schedule(kwargs.get('downtime'))
        self.log.info(
            'using a computed convergence schedule for '
            'a legacy migration: %s', self._convergence_schedule)
    self.log.debug('convergence schedule set to: %s',
                   str(self._convergence_schedule))
    # When recovering an already-running migration we start in STARTED.
    self._state = State.STARTED if recovery else State.INITIALIZED
    self._recovery = recovery
    # Normalize possibly-string booleans from the API layer before
    # computing the libvirt migration flags (see TODO above).
    tunneled = conv.tobool(tunneled)
    abortOnError = conv.tobool(abortOnError)
    compressed = conv.tobool(compressed)
    autoConverge = conv.tobool(autoConverge)
    parallel = self._parallel is not None
    self._migration_flags = self._calculate_migration_flags(
        tunneled, abortOnError, compressed, autoConverge, encrypted,
        parallel)
    # True if destination host supports disk refresh. Initialized before
    # the first extend of the disk during migration if finished.
    self._supports_disk_refresh = None
    self._destination_cpusets = cpusets
    self._destination_numa_nodesets = numaNodesets
def _canonicalize_bridged(data):
    """
    Normalize the 'bridged' flag of *data* in place: coerce a supplied
    value to bool, or default to True when the key is absent.
    """
    data['bridged'] = tobool(data['bridged']) if 'bridged' in data else True
def __init__(self, engine_fqdn, engine_https_port=None, fingerprint=None,
             ssh_port=None, ssh_user=None, check_fqdn=True,
             vdsm_port=None, node_address=None, vdsm_uuid=None,
             node_name=None):
    """
    Attributes:

    engine_fqdn       - Engine FQDN or IP address
    engine_https_port - Engine https port
    fingeprint        - Fingerprint to be validated
    ssh_user          - SSH user that will establish the connection
                        from Engine
    ssh_port          - Port of ssh daemon is running
    check_fqdn        - Validate Engine FQDN against CA (True or False)
                        Default is TRUE
    vdsm_port         - VDSM listen port
    node_address      - Specify node address or FQDN
    node_name         - Specify node name
    vdsm_uuid         - Provide host UUID to be used instead vdsm.utils.
                        Useful for hosts with blank or buggy DMI
    """
    self.logger = self._set_logger()
    self.logger.debug("=======================================")
    self.logger.debug("Registering the node")
    self.logger.debug("=======================================")
    self.logger.debug("Received the following attributes:")
    self.engine_fqdn = engine_fqdn
    self.logger.debug("Engine FQDN: {fqdn}".format(fqdn=self.engine_fqdn))
    # Default URL has no explicit port; it is rebuilt below when a
    # custom https port is supplied.
    self.engine_url = "https://{e}".format(e=engine_fqdn)
    if engine_https_port is None:
        self.engine_port = "443"
    else:
        self.engine_port = engine_https_port
        self.engine_url = "https://{e}:{p}".format(e=self.engine_fqdn,
                                                   p=self.engine_port)
    self.logger.debug("Engine URL: {url}".format(url=self.engine_url))
    self.logger.debug("Engine https port: {hp}".format(
        hp=self.engine_port))
    # check_fqdn may arrive as a string from the CLI; normalize to bool.
    if check_fqdn is None:
        self.check_fqdn = True
    else:
        self.check_fqdn = tobool(check_fqdn)
    self.logger.debug("Check FQDN: {v}".format(v=self.check_fqdn))
    self.fprint = fingerprint
    self.logger.debug("Fingerprint: {fp}".format(fp=self.fprint))
    self.node_address = node_address
    self.logger.debug("Node address: {nf}".format(nf=self.node_address))
    self.node_name = node_name
    self.logger.debug("Node name: {na}".format(na=self.node_name))
    # Fall back to the current OS user when no SSH user is given.
    if ssh_user is None:
        self.ssh_user = getpass.getuser()
    else:
        self.ssh_user = ssh_user
    self.logger.debug("SSH User: {su}".format(su=self.ssh_user))
    if ssh_port is None:
        self.ssh_port = "22"
    else:
        self.ssh_port = ssh_port
    self.logger.debug("SSH Port: {sp}".format(sp=self.ssh_port))
    if vdsm_port is None:
        self.vdsm_port = "54321"
    else:
        self.vdsm_port = vdsm_port
    self.logger.debug("VDSM Port: {sp}".format(sp=self.vdsm_port))
    self.vdsm_uuid = vdsm_uuid
    self.logger.debug("VDSM UUID: {uuid_provided}".format(
        uuid_provided=self.vdsm_uuid))
    self.ca_dir = "/etc/pki/ovirt-engine/"
    self.ca_engine = "{d}{f}".format(d=self.ca_dir, f="ca.pem")
    self.logger.debug("Engine CA: {ca}".format(ca=self.ca_engine))