def setRpFilterIfNeeded(netIfaceName, hostname, loose_mode):
    """
    Set rp_filter to loose or strict mode if there's no session using the
    netIfaceName device and it's not the device used by the OS to reach the
    'hostname'.
    Loose mode is needed to allow multiple iSCSI connections in a multiple
    NIC per subnet configuration. Strict mode is needed to avoid the
    security breach where an untrusted VM can DoS the host by sending it
    packets with spoofed random sources.

    Arguments:
        netIfaceName: the device used by the iSCSI session
        hostname: the portal hostname of the iSCSI target
        loose_mode: boolean
    """
    if netIfaceName is None:
        log.info("iSCSI iface.net_ifacename not provided. Skipping.")
        return

    sessions = _sessionsUsingNetiface(netIfaceName)

    if not any(sessions) and netIfaceName != getRouteDeviceTo(hostname):
        if loose_mode:
            log.info("Setting loose mode rp_filter for device %r." %
                     netIfaceName)
            supervdsm.getProxy().set_rp_filter_loose(netIfaceName)
        else:
            log.info("Setting strict mode rp_filter for device %r." %
                     netIfaceName)
            supervdsm.getProxy().set_rp_filter_strict(netIfaceName)
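# For context, a minimal sketch of what the privileged set_rp_filter_*
# helpers presumably do. This is an assumption, not vdsm's actual
# implementation: the kernel exposes per-device reverse-path filtering
# via sysctl, where 0 disables it, 1 is strict and 2 is loose mode (see
# the kernel's ip-sysctl documentation).
RP_FILTER_SYSCTL = "/proc/sys/net/ipv4/conf/%s/rp_filter"


def _set_rp_filter(dev, value):
    with open(RP_FILTER_SYSCTL % dev, "w") as f:
        f.write(str(value))


def set_rp_filter_loose(dev):
    # 2 = loose: accept a packet if its source is reachable via any device
    _set_rp_filter(dev, 2)


def set_rp_filter_strict(dev):
    # 1 = strict: drop a packet if the best return path is not this device
    _set_rp_filter(dev, 1)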
def addNetwork(self, bridge, vlan=None, bond=None, nics=None, options={}):
    """Add a new network to this vds.

    Network topology is bridge--[vlan--][bond--]nics. vlan (a number) and
    bond are optional - pass the empty string to discard them.
    """
    self._translateOptionsToNew(options)
    if not self._cif._networkSemaphore.acquire(blocking=False):
        self.log.warn('concurrent network verb already executing')
        return errCode['unavail']
    try:
        self._cif._netConfigDirty = True
        if vlan:
            options['vlan'] = vlan
        if bond:
            options['bonding'] = bond
        if nics:
            options['nics'] = list(nics)
        try:
            supervdsm.getProxy().addNetwork(bridge, options)
        except configNetwork.ConfigNetworkError as e:
            self.log.error(e.message, exc_info=True)
            return {'status': {'code': e.errCode, 'message': e.message}}
        return {'status': doneCode}
    finally:
        self._cif._networkSemaphore.release()
def delNetwork(self, bridge, vlan=None, bond=None, nics=None, options={}):
    """Delete a network from this vds."""
    self._translateOptionsToNew(options)
    if not self._cif._networkSemaphore.acquire(blocking=False):
        self.log.warn('concurrent network verb already executing')
        return errCode['unavail']
    try:
        if vlan or bond or nics:
            # Backwards compatibility
            self.log.warn('Specifying vlan, bond or nics to delNetwork '
                          'is deprecated')
            _netinfo = netinfo.NetInfo()
            try:
                if bond:
                    configNetwork.validateBondingName(bond)
                if vlan:
                    configNetwork.validateVlanId(vlan)
                if nics and bond and \
                        set(nics) != set(_netinfo.bondings[bond]["slaves"]):
                    self.log.error('delNetwork: not all nics specified '
                                   'are enslaved (%s != %s)' %
                                   (nics,
                                    _netinfo.bondings[bond]["slaves"]))
                    raise configNetwork.ConfigNetworkError(
                        configNetwork.ne.ERR_BAD_NIC,
                        "not all nics are enslaved")
            except configNetwork.ConfigNetworkError as e:
                self.log.error(e.message, exc_info=True)
                return {'status': {'code': e.errCode,
                                   'message': e.message}}

        self._cif._netConfigDirty = True
        try:
            supervdsm.getProxy().delNetwork(bridge, options)
        except configNetwork.ConfigNetworkError as e:
            self.log.error(e.message, exc_info=True)
            return {'status': {'code': e.errCode, 'message': e.message}}
        return {'status': doneCode}
    finally:
        self._cif._networkSemaphore.release()
def reattach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)

    if CAPABILITY_TO_XML_ATTR[device_params['capability']] == 'pci':
        iommu_group = device_params['iommu_group']
        supervdsm.getProxy().rmAppropriateIommuGroup(iommu_group)

    libvirt_device.reAttach()
def reattach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)
    iommu_group = device_params['iommu_group']

    if CAPABILITY_TO_XML_ATTR[device_params['capability']] == 'pci':
        supervdsm.getProxy().rmAppropriateIommuGroup(iommu_group)

    libvirt_device.reAttach()
def detach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)

    if CAPABILITY_TO_XML_ATTR[device_params['capability']] == 'pci':
        iommu_group = device_params['iommu_group']
        supervdsm.getProxy().appropriateIommuGroup(iommu_group)

    libvirt_device.detachFlags(None)
    return device_params
def reattach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)

    if CAPABILITY_TO_XML_ATTR[device_params['capability']] == 'pci':
        try:
            iommu_group = device_params['iommu_group']
        except KeyError:
            raise NoIOMMUSupportException
        supervdsm.getProxy().rmAppropriateIommuGroup(iommu_group)

    libvirt_device.reAttach()
def detach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)
    iommu_group = device_params['iommu_group']

    if CAPABILITY_TO_XML_ATTR[device_params['capability']] == 'pci':
        supervdsm.getProxy().appropriateIommuGroup(iommu_group)

    libvirt_device.detachFlags(None)
    return device_params
def setSafeNetworkConfig(self):
    """Declare current network configuration as 'safe'"""
    if not self._cif._networkSemaphore.acquire(blocking=False):
        self.log.warn('concurrent network verb already executing')
        return errCode['unavail']
    try:
        self._cif._netConfigDirty = False
        supervdsm.getProxy().setSafeNetworkConfig()
        return {'status': doneCode}
    finally:
        self._cif._networkSemaphore.release()
def rescan():
    """
    Rescan HBAs, discovering new devices.
    """
    log.debug("Starting scan")
    try:
        supervdsm.getProxy().hbaRescan()
    except Error as e:
        log.error("Scan failed: %s", e)
    else:
        log.debug("Scan finished")
def reattach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)
    iommu_group = device_params['iommu_group']

    if device_params['capability'] in _UDEV_REQUIRING_CAPS:
        supervdsm.getProxy().rmAppropriateIommuGroup(iommu_group)
        supervdsm.getProxy().udevTrigger(iommu_group)

    if device_params['capability'] in _DETACH_REQUIRING_CAPS:
        libvirt_device.reAttach()
def detach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)

    if CAPABILITY_TO_XML_ATTR[device_params['capability']] == 'pci':
        try:
            iommu_group = device_params['iommu_group']
        except KeyError:
            raise NoIOMMUSupportException('hostdev passthrough without iommu')
        supervdsm.getProxy().appropriateIommuGroup(iommu_group)

    libvirt_device.detachFlags(None)
    return device_params
def validateDirAccess(dirPath):
    try:
        getProcPool().fileUtils.validateAccess(dirPath)
        supervdsm.getProxy().validateAccess(
            constants.QEMU_PROCESS_USER,
            (constants.DISKIMAGE_GROUP, constants.METADATA_GROUP),
            dirPath,
            (os.R_OK | os.X_OK))
    except OSError as e:
        if e.errno == errno.EACCES:
            raise se.StorageServerAccessPermissionError(dirPath)
        raise

    return True
def rescan():
    """
    Forces the multipath daemon to rescan the list of available devices and
    refresh the mapping table. New devices can be found under /dev/mapper.

    Should only be called from hsm._rescanDevices()
    """
    # First rescan iSCSI and FCP connections
    iscsi.rescan()
    supervdsm.getProxy().hbaRescan()

    # Now let the multipath daemon pick up new devices
    misc.execCmd([constants.EXT_MULTIPATH], sudo=True)
def rescan():
    """
    Forces the multipath daemon to rescan the list of available devices and
    refresh the mapping table. New devices can be found under /dev/mapper.

    Should only be called from hsm._rescanDevices()
    """
    # First ask iSCSI to rescan all its sessions
    iscsi.rescan()
    supervdsm.getProxy().forceIScsiScan()

    # Now let the multipath daemon pick up new devices
    misc.execCmd([constants.EXT_MULTIPATH])
def reattach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)
    capability = CAPABILITY_TO_XML_ATTR[device_params['capability']]

    if capability == 'pci':
        try:
            iommu_group = device_params['iommu_group']
        except KeyError:
            raise NoIOMMUSupportException
        supervdsm.getProxy().rmAppropriateIommuGroup(iommu_group)
    elif capability == 'usb':
        supervdsm.getProxy().rmAppropriateUSBDevice(
            device_params['address']['bus'],
            device_params['address']['device'])

    libvirt_device.reAttach()
def main():
    setup_nets_config = hooking.read_json()
    in_rollback = setup_nets_config['request']['options'].get('_inRollback')

    if in_rollback:
        log('Configuration failed with _inRollback=True.')
    else:
        log('Configuration failed. At this point, non-OVS rollback should '
            'be done. Executing OVS rollback.')
        supervdsm.getProxy().setupNetworks(
            {}, {},
            {'connectivityCheck': False,
             '_inRollback': True,
             '_inOVSRollback': True})
def setupNetworks(self, networks={}, bondings={}, options={}):
    """Add, edit or remove networks and bondings on this vds."""
    self._translateOptionsToNew(options)
    if not self._cif._networkSemaphore.acquire(blocking=False):
        self.log.warn('concurrent network verb already executing')
        return errCode['unavail']
    try:
        self._cif._netConfigDirty = True
        try:
            supervdsm.getProxy().setupNetworks(networks, bondings, options)
        except configNetwork.ConfigNetworkError as e:
            self.log.error(e.message, exc_info=True)
            return {'status': {'code': e.errCode, 'message': e.message}}
        return {'status': doneCode}
    finally:
        self._cif._networkSemaphore.release()
def doUnmountMaster(cls, masterdir):
    """
    Unmount the master metadata file system. Should be called only by SPM.
    """
    # Kill any processes holding the mount point (per fuser) and validate
    # that the umount succeeded
    cls.__handleStuckUmount(masterdir)
    try:
        masterMount = mount.getMountFromTarget(masterdir)
    except OSError as ex:
        if ex.errno == errno.ENOENT:
            return
        raise

    if masterMount.isMounted():
        # Try umount, take 1
        try:
            masterMount.umount()
        except mount.MountError:
            # umount failed, try to kill the processes holding the
            # mount point
            svdsmp = svdsm.getProxy()
            pids = svdsmp.fuser(masterMount.fs_file, mountPoint=True)

            # It was unmounted while I was checking, no need to do anything
            if not masterMount.isMounted():
                return

            if len(pids) == 0:
                cls.log.warn("Unmount failed because of errors that fuser "
                             "can't solve")
            else:
                for pid in pids:
                    try:
                        cls.log.debug("Trying to kill pid %d", pid)
                        os.kill(pid, signal.SIGKILL)
                    except OSError as e:
                        if e.errno == errno.ESRCH:  # No such process
                            pass
                        elif e.errno == errno.EPERM:  # Op. not permitted
                            cls.log.warn("Could not kill pid %d because "
                                         "operation was not permitted", pid)
                        else:
                            cls.log.warn("Could not kill pid %d because of "
                                         "an unexpected error", pid,
                                         exc_info=True)
                    except Exception:
                        cls.log.warn("Could not kill pid %d because of an "
                                     "unexpected error", pid, exc_info=True)

            # Try umount, take 2
            try:
                masterMount.umount()
            except mount.MountError:
                pass

            if masterMount.isMounted():
                # We failed to umount the masterFS.
                # Forcibly rebooting the SPM host would be safer. ???
                raise se.StorageDomainMasterUnmountError(masterdir, 1)
def detach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)
    capability = CAPABILITY_TO_XML_ATTR[device_params['capability']]

    if capability == 'pci':
        try:
            iommu_group = device_params['iommu_group']
        except KeyError:
            raise NoIOMMUSupportException('hostdev passthrough without iommu')
        supervdsm.getProxy().appropriateIommuGroup(iommu_group)
    elif capability == 'usb':
        supervdsm.getProxy().appropriateUSBDevice(
            device_params['address']['bus'],
            device_params['address']['device'])

    libvirt_device.detachFlags(None)
    return device_params
def _resize_if_needed(guid):
    name = devicemapper.getDmId(guid)
    slaves = [(slave, getDeviceSize(slave))
              for slave in devicemapper.getSlaves(name)]

    if len(slaves) == 0:
        log.warning("Map %r has no slaves" % guid)
        return False

    if len(set(size for slave, size in slaves)) != 1:
        raise Error("Map %r slaves size differ %s" % (guid, slaves))

    map_size = getDeviceSize(name)
    slave_size = slaves[0][1]
    if map_size == slave_size:
        return False

    log.info("Resizing map %r (map_size=%d, slave_size=%d)",
             guid, map_size, slave_size)
    supervdsm.getProxy().resizeMap(name)
    return True
def getVmNumaNodeRuntimeInfo(vm):
    """
    Collect the runtime pinning of each VM NUMA node to host NUMA nodes.

    Host numa node topology:
    'numaNodes': {'<nodeIndex>': {'cpus': [int], 'totalMemory': 'str'},
                  ...}
    This tells us which host numa node each physical cpu core belongs to.

    Vm numa node configuration:
    'guestNumaNodes': [{'cpus': 'str', 'memory': 'str'}, ...]
    This tells us which vm numa node each vcpu belongs to.

    Vcpu runtime pinning to physical cpu core information:
    ([(0, 1, 19590000000L, 1), (1, 1, 10710000000L, 1)],
     [(True, True, True, True), (True, True, True, True)])
    The first list in the tuple above describes, for each vcpu (item 0 of
    an entry), the physical cpu core it is currently pinned to (item 3).

    The mapping between vcpu and pid comes from
    /var/run/libvirt/qemu/<vmName>.xml.
    The host numa nodes backing each vcpu's (pid's) memory come from
    /proc/<vm_pid>/<vcpu_pid>/numa_maps.

    From all the above we can calculate the host numa nodes each vm numa
    node is pinned to at runtime. The output is a map like:
    '<vm numa node index>': [<host numa node index>, ...]
    """
    vmNumaNodeRuntimeMap = {}

    vcpu_to_pcpu = _get_mapping_vcpu_to_pcpu(_get_vcpu_positioning(vm))
    if vcpu_to_pcpu:
        vm_numa_placement = defaultdict(set)

        vcpu_to_pnode = supervdsm.getProxy().getVcpuNumaMemoryMapping(
            vm.conf['vmName'].encode('utf-8'))
        pcpu_to_pnode = _get_mapping_pcpu_to_pnode()
        vcpu_to_vnode = _get_mapping_vcpu_to_vnode(vm)

        for vcpu_id, pcpu_id in vcpu_to_pcpu.iteritems():
            vnode_index = str(vcpu_to_vnode[vcpu_id])
            vm_numa_placement[vnode_index].add(pcpu_to_pnode[pcpu_id])
            vm_numa_placement[vnode_index].update(
                vcpu_to_pnode.get(vcpu_id, ()))

        vmNumaNodeRuntimeMap = dict((k, list(v)) for k, v in
                                    vm_numa_placement.iteritems())

    return vmNumaNodeRuntimeMap
def getVmNumaNodeRuntimeInfo(vm):
    """
    Collect the runtime pinning of each VM NUMA node to host NUMA nodes.

    Host numa node topology:
    'numaNodes': {'<nodeIndex>': {'cpus': [int], 'totalMemory': 'str'},
                  ...}
    This tells us which host numa node each physical cpu core belongs to.

    Vm numa node configuration:
    'guestNumaNodes': [{'cpus': 'str', 'memory': 'str'}, ...]
    This tells us which vm numa node each vcpu belongs to.

    Vcpu runtime pinning to physical cpu core information:
    ([(0, 1, 19590000000L, 1), (1, 1, 10710000000L, 1)],
     [(True, True, True, True), (True, True, True, True)])
    The first list in the tuple above describes, for each vcpu (item 0 of
    an entry), the physical cpu core it is currently pinned to (item 3).

    The mapping between vcpu and pid comes from
    /var/run/libvirt/qemu/<vmName>.xml.
    The host numa nodes backing each vcpu's (pid's) memory come from
    /proc/<vm_pid>/<vcpu_pid>/numa_maps.

    From all the above we can calculate the host numa nodes each vm numa
    node is pinned to at runtime. The output is a map like:
    '<vm numa node index>': [<host numa node index>, ...]
    """
    vmNumaNodeRuntimeMap = {}

    if 'guestNumaNodes' in vm.conf:
        vcpu_to_pcpu = _get_mapping_vcpu_to_pcpu(vm)
        if vcpu_to_pcpu:
            vm_numa_placement = defaultdict(set)

            vcpu_to_pnode = supervdsm.getProxy().getVcpuNumaMemoryMapping(
                vm.conf['vmName'].encode('utf-8'))
            pcpu_to_pnode = _get_mapping_pcpu_to_pnode()
            vcpu_to_vnode = _get_mapping_vcpu_to_vnode(vm)

            for vcpu_id, pcpu_id in vcpu_to_pcpu.iteritems():
                vnode_index = str(vcpu_to_vnode[vcpu_id])
                vm_numa_placement[vnode_index].add(pcpu_to_pnode[pcpu_id])
                vm_numa_placement[vnode_index].update(
                    vcpu_to_pnode.get(vcpu_id, ()))

            vmNumaNodeRuntimeMap = dict(
                (k, list(v)) for k, v in vm_numa_placement.iteritems())

    return vmNumaNodeRuntimeMap
def getVmNumaNodeRuntimeInfo(vm):
    """
    Collect the runtime pinning of each VM NUMA node to host NUMA nodes.

    Host numa node topology:
    'numaNodes': {'<nodeIndex>': {'cpus': [int], 'totalMemory': 'str'},
                  ...}
    This tells us which host numa node each physical cpu core belongs to.

    Vm numa node configuration:
    'guestNumaNodes': [{'cpus': 'str', 'memory': 'str'}, ...]
    This tells us which vm numa node each vcpu belongs to.

    Vcpu runtime pinning to physical cpu core information:
    ([(0, 1, 19590000000L, 1), (1, 1, 10710000000L, 1)],
     [(True, True, True, True), (True, True, True, True)])
    The first list in the tuple above describes, for each vcpu (item 0 of
    an entry), the physical cpu core it is currently pinned to (item 3).

    The mapping between vcpu and pid comes from
    /var/run/libvirt/qemu/<vmName>.xml.
    The host numa nodes backing each vcpu's (pid's) memory come from
    /proc/<vm_pid>/<vcpu_pid>/numa_maps.

    From all the above we can calculate the host numa nodes each vm numa
    node is pinned to at runtime. The output is a map like:
    '<vm numa node index>': [<host numa node index>, ...]
    """
    vmNumaNodeRuntimeMap = {}

    if 'guestNumaNodes' in vm.conf:
        vCpuRuntimePinMap = _getVcpuRuntimePinMap(vm)
        if vCpuRuntimePinMap:
            vmName = vm.conf['vmName'].encode('utf-8')
            vCpuMemoryMapping = \
                supervdsm.getProxy().getVcpuNumaMemoryMapping(vmName)
            pNodesCpusMap = _getHostNumaNodesCpuMap()
            vNodesCpusMap = _getVmNumaNodesCpuMap(vm)

            for vCpu, pCpu in vCpuRuntimePinMap.iteritems():
                vNodeIndex = str(vNodesCpusMap[vCpu])
                if vNodeIndex not in vmNumaNodeRuntimeMap:
                    vmNumaNodeRuntimeMap[vNodeIndex] = []
                vmNumaNodeRuntimeMap[vNodeIndex].append(pNodesCpusMap[pCpu])
                if vCpu in vCpuMemoryMapping:
                    vmNumaNodeRuntimeMap[vNodeIndex].extend(
                        vCpuMemoryMapping[vCpu])

            vmNumaNodeRuntimeMap = dict(
                (k, list(set(v)))
                for k, v in vmNumaNodeRuntimeMap.iteritems())

    return vmNumaNodeRuntimeMap
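# A small worked example of the calculation above, with invented topology:
# a VM with two numa nodes, vcpus 0-1 on vnode 0 and vcpus 2-3 on vnode 1,
# running on a host whose pcpus 0-1 sit on node 0 and pcpus 2-3 on node 1.
vNodesCpusMap = {0: 0, 1: 0, 2: 1, 3: 1}      # vcpu -> vm numa node
pNodesCpusMap = {0: 0, 1: 0, 2: 1, 3: 1}      # pcpu -> host numa node
vCpuRuntimePinMap = {0: 0, 1: 1, 2: 1, 3: 2}  # vcpu -> pcpu at runtime
# vnode '0': vcpus 0 and 1 run on pcpus 0 and 1, both on host node 0.
# vnode '1': vcpu 2 runs on pcpu 1 (host node 0), vcpu 3 on pcpu 2 (node 1).
# Ignoring the per-vcpu memory mappings, the function would return:
#   {'0': [0], '1': [0, 1]}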
def editNetwork(self, oldBridge, newBridge, vlan=None, bond=None,
                nics=None, options={}):
    """Add a new network to this vds, replacing an old one."""
    self._translateOptionsToNew(options)
    if not self._cif._networkSemaphore.acquire(blocking=False):
        self.log.warn('concurrent network verb already executing')
        return errCode['unavail']
    try:
        if vlan:
            options['vlan'] = vlan
        if bond:
            options['bonding'] = bond
        if nics:
            options['nics'] = list(nics)
        self._cif._netConfigDirty = True

        try:
            supervdsm.getProxy().editNetwork(oldBridge, newBridge, options)
        except configNetwork.ConfigNetworkError as e:
            self.log.error(e.message, exc_info=True)
            return {'status': {'code': e.errCode, 'message': e.message}}
        return {'status': doneCode}
    finally:
        self._cif._networkSemaphore.release()
def _prepareVolumePathFromPayload(self, vmId, device, payload):
    """
    param vmId: VM UUID or None
    param device: either 'floppy' or 'cdrom'
    param payload: a dict formed like this:
        {'volId': 'volume id',   # volId is optional
         'file': {'filename': 'content', ...}}
    """
    funcs = {'cdrom': 'mkIsoFs', 'floppy': 'mkFloppyFs'}
    if device not in funcs:
        raise vm.VolumeError("Unsupported 'device': %s" % device)
    func = getattr(supervdsm.getProxy(), funcs[device])
    return func(vmId, payload['file'], payload.get('volId'))
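# Hypothetical usage of the helper above; the payload layout follows the
# docstring, but the caller context, vmId, filenames and contents are all
# made up for illustration:
payload = {
    'volId': 'config-2',                         # optional
    'file': {'meta-data': 'instance-id: vm-01\n',
             'user-data': '#cloud-config\n'},
}
# Builds an ISO image from the given files via supervdsm's mkIsoFs and
# returns its path, ready to be attached as a cdrom drive.
iso_path = clientIF._prepareVolumePathFromPayload(vmId, 'cdrom', payload)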
def getVmVolumeInfo(self):
    """
    Send info to represent Gluster volume as a network block device
    """
    rpath = sdCache.produce(self.sdUUID).getRemotePath()
    rpath_list = rpath.rsplit(":", 1)
    volfileServer = rpath_list[0]
    volname = rpath_list[1]

    # Volume transport to Libvirt transport mapping
    VOLUME_TRANS_MAP = {'TCP': 'tcp', 'RDMA': 'rdma'}

    # Extract the volume's transport using the gluster cli
    svdsmProxy = svdsm.getProxy()
    try:
        volInfo = svdsmProxy.glusterVolumeInfo(volname, volfileServer)
        volTrans = VOLUME_TRANS_MAP[volInfo[volname]['transportType'][0]]
    except GlusterException:
        # In case of issues with finding the transport type, default to tcp
        self.log.warning("Unable to find transport type for GlusterFS"
                         " volume %s. GlusterFS server = %s."
                         " Defaulting to tcp",
                         volname, volfileServer, exc_info=True)
        volTrans = VOLUME_TRANS_MAP['TCP']

    # Use default port
    volPort = "0"

    imgFilePath = self.getVolumePath()
    imgFilePath_list = imgFilePath.rsplit("/")
    # Extract path to the image, relative to the gluster mount
    imgFileRelPath = "/".join(imgFilePath_list[-4:])
    glusterPath = volname + '/' + imgFileRelPath

    return {'volType': VmVolumeInfo.TYPE_NETWORK,
            'path': glusterPath,
            'protocol': 'gluster',
            'volPort': volPort,
            'volTransport': volTrans,
            'volfileServer': volfileServer}
def isEnabled():
    """
    Check the multipath daemon configuration. The configuration file
    /etc/multipath.conf should contain a private tag in the form
    "RHEV REVISION X.Y" for this check to succeed.
    If the tag above is followed by the tag "RHEV PRIVATE", the
    configuration should be preserved at all cost.
    """
    if os.path.exists(MPATH_CONF):
        first = second = ''
        svdsm = supervdsm.getProxy()
        mpathconf = svdsm.readMultipathConf()
        try:
            first = mpathconf[0]
            second = mpathconf[1]
        except IndexError:
            pass

        if MPATH_CONF_PRIVATE_TAG in second:
            log.info("Manual override for multipath.conf detected - "
                     "preserving current configuration")
            if MPATH_CONF_TAG not in first:
                log.warning("This manual override for multipath.conf "
                            "was based on a downrevved template. "
                            "You are strongly advised to "
                            "contact your support representatives")
            return True

        if MPATH_CONF_TAG in first:
            log.debug("Current revision of multipath.conf detected, "
                      "preserving")
            return True

        for tag in OLD_TAGS:
            if tag in first:
                log.info("Downrev multipath.conf detected, upgrade required")
                return False

    log.debug("multipath Defaulting to False")
    return False
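# To make the tag scheme concrete: the check above inspects only the first
# two lines of /etc/multipath.conf. A manually overridden file based on the
# current template would start like this (revision number illustrative):
#
#     # RHEV REVISION 1.1
#     # RHEV PRIVATE
#
# Only the first tag is needed for isEnabled() to return True; the second
# marks the configuration as hand-edited and to be preserved at all cost.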
def getVmVolumeInfo(self):
    """
    Send info to represent Gluster volume as a network block device
    """
    rpath = sdCache.produce(self.sdUUID).getRealPath()
    volfileServer, volname = rpath.rsplit(":", 1)
    volname = volname.strip('/')

    # Volume transport to Libvirt transport mapping
    VOLUME_TRANS_MAP = {'TCP': 'tcp', 'RDMA': 'rdma'}

    # Extract the volume's transport using the gluster cli
    svdsmProxy = svdsm.getProxy()
    try:
        volInfo = svdsmProxy.glusterVolumeInfo(volname, volfileServer)
        volTrans = VOLUME_TRANS_MAP[volInfo[volname]['transportType'][0]]
    except GlusterException:
        # In case of issues with finding the transport type, default to tcp
        self.log.warning("Unable to find transport type for GlusterFS"
                         " volume %s. GlusterFS server = %s."
                         " Defaulting to tcp",
                         volname, volfileServer, exc_info=True)
        volTrans = VOLUME_TRANS_MAP['TCP']

    # Use default port
    volPort = "0"

    imgFilePath = self.getVolumePath()
    imgFilePath_list = imgFilePath.rsplit("/")
    # Extract path to the image, relative to the gluster mount
    imgFileRelPath = "/".join(imgFilePath_list[-4:])
    glusterPath = volname + '/' + imgFileRelPath

    return {'volType': VmVolumeInfo.TYPE_NETWORK,
            'path': glusterPath,
            'protocol': 'gluster',
            'volPort': volPort,
            'volTransport': volTrans,
            'volfileServer': volfileServer}
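# Tracing the string handling above with hypothetical values:
rpath = "gfs1.example.com:/data"               # from getRealPath()
volfileServer, volname = rpath.rsplit(":", 1)  # 'gfs1.example.com', '/data'
volname = volname.strip('/')                   # 'data'
# If getVolumePath() returned a path ending in (components invented):
#   .../<sdUUID>/images/<imgUUID>/<volUUID>
# then the last four components give:
#   imgFileRelPath = '<sdUUID>/images/<imgUUID>/<volUUID>'
#   glusterPath    = 'data/<sdUUID>/images/<imgUUID>/<volUUID>'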
def _prepare_socket(self):
    supervdsm.getProxy().prepareVmChannel(self._socketName)
def __init__(self, cif, log):
    self.cif = cif
    self.log = log
    self.svdsmProxy = svdsm.getProxy()
def restoreNetConfig(self):
    supervdsm.getProxy().restoreNetworks()
def prepareVolumePath(self, drive, vmId=None):
    if type(drive) is dict:
        # PDIV drive format
        if drive['device'] == 'disk' and vm.isVdsmImage(drive):
            res = self.irs.prepareImage(
                drive['domainID'], drive['poolID'],
                drive['imageID'], drive['volumeID'])

            if res['status']['code']:
                raise vm.VolumeError(drive)

            volPath = res['path']
            # The order of imgVolumesInfo is not guaranteed
            drive['volumeChain'] = res['imgVolumesInfo']
            drive['volumeInfo'] = res['info']

        # GUID drive format
        elif "GUID" in drive:
            res = self.irs.getDevicesVisibility([drive["GUID"]])
            if not res["visible"][drive["GUID"]]:
                raise vm.VolumeError(drive)

            res = self.irs.appropriateDevice(drive["GUID"], vmId)
            if res['status']['code']:
                raise vm.VolumeError(drive)

            volPath = os.path.join("/dev/mapper", drive["GUID"])

        # UUID drive format
        elif "UUID" in drive:
            volPath = self._getUUIDSpecPath(drive["UUID"])

        # leave path == '' for empty cdrom and floppy drives ...
        elif (drive['device'] in ('cdrom', 'floppy') and
                'specParams' in drive and
                # next line can be removed in future, when < 3.3 engine
                # is not supported
                drive['specParams'].get('path', '') == '' and
                drive.get('path', '') == '' and
                'vmPayload' not in drive['specParams']):
            volPath = ''

        # ... or load the drive from vmPayload:
        elif drive['device'] in ('cdrom', 'floppy') and \
                'specParams' in drive and \
                'vmPayload' in drive['specParams']:
            '''
            vmPayload is a key in specParams
            'vmPayload': {'volId': 'volume id',   # volId is optional
                          'file': {'filename': 'content', ...}}
            '''
            mkFsNames = {'cdrom': 'mkIsoFs', 'floppy': 'mkFloppyFs'}
            try:
                mkFsFunction = getattr(supervdsm.getProxy(),
                                       mkFsNames[drive['device']])
            except AttributeError:
                raise vm.VolumeError("Unsupported 'device': %s in drive: %s"
                                     % (drive['device'], drive))
            else:
                files = drive['specParams']['vmPayload']['file']
                volId = drive['specParams']['vmPayload'].get('volId')
                volPath = mkFsFunction(vmId, files, volId)

        elif "path" in drive:
            volPath = drive['path']

        else:
            raise vm.VolumeError(drive)

    # For BC sake: None as argument
    elif not drive:
        volPath = drive

    # For BC sake: path as a string.
    elif os.path.exists(drive):
        volPath = drive

    else:
        raise vm.VolumeError(drive)

    self.log.info("prepared volume path: %s", volPath)
    return volPath
def getPathsStatus():
    return getProxy().getPathsStatus()
def pathListIter(filterGuids=None):
    filteringOn = filterGuids is not None
    filterLen = len(filterGuids) if filteringOn else -1
    devsFound = 0

    knownSessions = {}

    svdsm = supervdsm.getProxy()
    pathStatuses = devicemapper.getPathsStatus()

    for dmId, guid in getMPDevsIter():
        if devsFound == filterLen:
            break

        if filteringOn and guid not in filterGuids:
            continue

        devsFound += 1

        devInfo = {
            "guid": guid,
            "dm": dmId,
            "capacity": str(getDeviceSize(dmId)),
            "serial": svdsm.getScsiSerial(dmId),
            "paths": [],
            "connections": [],
            "devtypes": [],
            "devtype": "",
            "vendor": "",
            "product": "",
            "fwrev": "",
            "logicalblocksize": "",
            "physicalblocksize": "",
        }

        for slave in devicemapper.getSlaves(dmId):
            if not devicemapper.isBlockDevice(slave):
                log.warning("No such physdev '%s' is ignored" % slave)
                continue

            if not devInfo["vendor"]:
                try:
                    devInfo["vendor"] = getVendor(slave)
                except Exception:
                    log.warn("Problem getting vendor from device `%s`",
                             slave, exc_info=True)

            if not devInfo["product"]:
                try:
                    devInfo["product"] = getModel(slave)
                except Exception:
                    log.warn("Problem getting model name from device `%s`",
                             slave, exc_info=True)

            if not devInfo["fwrev"]:
                try:
                    devInfo["fwrev"] = getFwRev(slave)
                except Exception:
                    log.warn("Problem getting fwrev from device `%s`",
                             slave, exc_info=True)

            if (not devInfo["logicalblocksize"] or
                    not devInfo["physicalblocksize"]):
                try:
                    logBlkSize, phyBlkSize = getDeviceBlockSizes(slave)
                    devInfo["logicalblocksize"] = str(logBlkSize)
                    devInfo["physicalblocksize"] = str(phyBlkSize)
                except Exception:
                    log.warn("Problem getting blocksize from device `%s`",
                             slave, exc_info=True)

            pathInfo = {}
            pathInfo["physdev"] = slave
            pathInfo["state"] = pathStatuses.get(slave, "failed")

            try:
                hbtl = getHBTL(slave)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    log.warn("Device has no hbtl: %s", slave)
                    pathInfo["lun"] = 0
                else:
                    log.error("Error: %s while trying to get hbtl of "
                              "device: %s", str(e.message), slave)
                    raise
            else:
                pathInfo["lun"] = hbtl.lun

            if iscsi.devIsiSCSI(slave):
                devInfo["devtypes"].append(DEV_ISCSI)
                pathInfo["type"] = DEV_ISCSI
                sessionID = iscsi.getiScsiSession(slave)
                if sessionID not in knownSessions:
                    # FIXME: This entire part is for BC. It should be moved
                    # to hsm and not preserved for new APIs. New APIs should
                    # keep numeric types and sane field names.
                    sess = iscsi.getSessionInfo(sessionID)
                    sessionInfo = {
                        "connection": sess.target.portal.hostname,
                        "port": str(sess.target.portal.port),
                        "iqn": sess.target.iqn,
                        "portal": str(sess.target.tpgt),
                        "initiatorname": sess.iface.name,
                    }

                    # Note that credentials must be sent back in order for
                    # the engine to tell vdsm how to reconnect later
                    if sess.credentials:
                        cred = sess.credentials
                        sessionInfo['user'] = cred.username
                        sessionInfo['password'] = cred.password

                    knownSessions[sessionID] = sessionInfo
                devInfo["connections"].append(knownSessions[sessionID])
            else:
                devInfo["devtypes"].append(DEV_FCP)
                pathInfo["type"] = DEV_FCP

            if devInfo["devtype"] == "":
                devInfo["devtype"] = pathInfo["type"]
            elif (devInfo["devtype"] != DEV_MIXED and
                    devInfo["devtype"] != pathInfo["type"]):
                devInfo["devtype"] = DEV_MIXED

            devInfo["paths"].append(pathInfo)

        yield devInfo
def pathListIter(filterGuids=None):
    filteringOn = filterGuids is not None
    filterLen = len(filterGuids) if filteringOn else -1
    devsFound = 0

    knownSessions = {}

    svdsm = supervdsm.getProxy()
    pathStatuses = devicemapper.getPathsStatus()

    for dmId, guid in getMPDevsIter():
        if devsFound == filterLen:
            break

        if filteringOn and guid not in filterGuids:
            continue

        devsFound += 1

        devInfo = {
            "guid": guid,
            "dm": dmId,
            "capacity": str(getDeviceSize(dmId)),
            "serial": svdsm.getScsiSerial(dmId),
            "paths": [],
            "connections": [],
            "devtypes": [],
            "devtype": "",
            "vendor": "",
            "product": "",
            "fwrev": "",
            "logicalblocksize": "",
            "physicalblocksize": "",
        }

        for slave in devicemapper.getSlaves(dmId):
            if not devicemapper.isBlockDevice(slave):
                log.warning("No such physdev '%s' is ignored" % slave)
                continue

            if not devInfo["vendor"]:
                try:
                    devInfo["vendor"] = getVendor(slave)
                except Exception:
                    log.warn("Problem getting vendor from device `%s`",
                             slave, exc_info=True)

            if not devInfo["product"]:
                try:
                    devInfo["product"] = getModel(slave)
                except Exception:
                    log.warn("Problem getting model name from device `%s`",
                             slave, exc_info=True)

            if not devInfo["fwrev"]:
                try:
                    devInfo["fwrev"] = getFwRev(slave)
                except Exception:
                    log.warn("Problem getting fwrev from device `%s`",
                             slave, exc_info=True)

            if (not devInfo["logicalblocksize"] or
                    not devInfo["physicalblocksize"]):
                try:
                    logBlkSize, phyBlkSize = getDeviceBlockSizes(slave)
                    devInfo["logicalblocksize"] = str(logBlkSize)
                    devInfo["physicalblocksize"] = str(phyBlkSize)
                except Exception:
                    log.warn("Problem getting blocksize from device `%s`",
                             slave, exc_info=True)

            pathInfo = {}
            pathInfo["physdev"] = slave
            pathInfo["state"] = pathStatuses.get(slave, "failed")
            try:
                pathInfo["hbtl"] = getHBTL(slave)
            except Exception:
                log.warn("Problem getting hbtl from device `%s`",
                         slave, exc_info=True)
            pathInfo["devnum"] = DeviceNumber(*devicemapper.getDevNum(slave))

            if iscsi.devIsiSCSI(slave):
                devInfo["devtypes"].append(DEV_ISCSI)
                pathInfo["type"] = DEV_ISCSI
                sessionID = iscsi.getiScsiSession(slave)
                if sessionID not in knownSessions:
                    knownSessions[sessionID] = svdsm.getdeviSCSIinfo(slave)
                devInfo["connections"].append(knownSessions[sessionID])
            else:
                devInfo["devtypes"].append(DEV_FCP)
                pathInfo["type"] = DEV_FCP

            if devInfo["devtype"] == "":
                devInfo["devtype"] = pathInfo["type"]
            elif (devInfo["devtype"] != DEV_MIXED and
                    devInfo["devtype"] != pathInfo["type"]):
                devInfo["devtype"] = DEV_MIXED

            devInfo["paths"].append(pathInfo)

        yield devInfo
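# For reference, an abbreviated sketch of one devInfo mapping the generator
# above yields; every value here is invented:
devInfo = {
    "guid": "36001405abcdef0123456789a",
    "dm": "dm-3",
    "capacity": "107374182400",
    "serial": "SLIO-ORG_disk1_abcdef01",
    "devtype": DEV_ISCSI,          # DEV_MIXED if paths disagree on type
    "devtypes": [DEV_ISCSI, DEV_ISCSI],
    "vendor": "LIO-ORG",
    "product": "disk1",
    "fwrev": "4.0",
    "logicalblocksize": "512",
    "physicalblocksize": "512",
    "connections": [],             # one entry per distinct iSCSI session
    "paths": [{"physdev": "sdb",
               "state": "active",
               "type": DEV_ISCSI,
               "hbtl": None,       # an HBTL object in practice
               "devnum": None}],   # a DeviceNumber in practice
}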
def prepare(self):
    if self._path:
        supervdsm.getProxy().prepareVmChannel(
            self._path, constants.OVIRT_VMCONSOLE_GROUP)
def prepareVolumePath(self, drive, vmId=None):
    if type(drive) is dict:
        # PDIV drive format
        if drive['device'] == 'disk' and vm.isVdsmImage(drive):
            res = self.irs.prepareImage(drive['domainID'],
                                        drive['poolID'],
                                        drive['imageID'],
                                        drive['volumeID'])
            if res['status']['code']:
                raise vm.VolumeError(drive)

            volPath = res['path']
            drive['volumeChain'] = res['chain']
            drive['volumeInfo'] = res['info']

        # GUID drive format
        elif "GUID" in drive:
            visible = self.irs.scanDevicesVisibility([drive["GUID"]])
            if visible[drive["GUID"]] is False:
                self.log.error("GUID: %s is not visible", drive["GUID"])
                raise vm.VolumeError(drive)

            volPath = os.path.join("/dev/mapper", drive["GUID"])
            res = self.irs.appropriateDevice(drive["GUID"], vmId)
            if res['status']['code']:
                self.log.error("Change ownership on device %s failed",
                               drive["GUID"])
                raise vm.VolumeError(drive)

        # UUID drive format
        elif "UUID" in drive:
            volPath = self._getUUIDSpecPath(drive["UUID"])

        # leave path == '' for empty cdrom and floppy drives ...
        elif drive['device'] in ('cdrom', 'floppy') and \
                'specParams' in drive and \
                'path' in drive['specParams'] and \
                drive['specParams']['path'] == '':
            volPath = ''

        # ... or load the drive from vmPayload:
        elif drive['device'] in ('cdrom', 'floppy') and \
                'specParams' in drive and \
                'vmPayload' in drive['specParams']:
            '''
            vmPayload is a key in specParams
            'vmPayload': {'volId': 'volume id',   # volId is optional
                          'file': {'filename': 'content', ...}}
            '''
            mkFsNames = {'cdrom': 'mkIsoFs', 'floppy': 'mkFloppyFs'}
            try:
                mkFsFunction = getattr(supervdsm.getProxy(),
                                       mkFsNames[drive['device']])
            except AttributeError:
                raise vm.VolumeError("Unsupported 'device': %s in drive: %s"
                                     % (drive['device'], drive))
            else:
                files = drive['specParams']['vmPayload']['file']
                volId = drive['specParams']['vmPayload'].get('volId')
                volPath = mkFsFunction(vmId, files, volId)

        elif "path" in drive:
            volPath = drive['path']

        else:
            raise vm.VolumeError(drive)

    # For BC sake: None as argument
    elif not drive:
        volPath = drive

    # For BC sake: path as a string.
    elif os.path.exists(drive):
        volPath = drive

    else:
        raise vm.VolumeError(drive)

    self.log.info("prepared volume path: %s", volPath)
    return volPath
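# Illustrative drive arguments for each branch of prepareVolumePath above;
# all identifiers are made up and abbreviated:
pdiv_drive = {'device': 'disk', 'domainID': 'sd-uuid', 'poolID': 'sp-uuid',
              'imageID': 'img-uuid', 'volumeID': 'vol-uuid'}
guid_drive = {'device': 'disk', 'GUID': '36001405f0123456789abcdef'}
payload_cdrom = {'device': 'cdrom',
                 'specParams': {'vmPayload': {
                     'file': {'payload.txt': 'content'}}}}
empty_cdrom = {'device': 'cdrom', 'specParams': {'path': ''}}  # volPath = ''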
def removeMapping(deviceName):
    return getProxy().removeDeviceMapping(deviceName)
def getSessionInfo(sessionID):
    return supervdsm.getProxy().readSessionInfo(sessionID)
def change_numvfs(device_name, numvfs):
    supervdsm.getProxy().changeNumvfs(_name_to_pci_path(device_name), numvfs)
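# A sketch of what the privileged changeNumvfs helper presumably does,
# assuming it uses the kernel's standard SR-IOV sysfs interface (this is
# an assumption about the implementation, not vdsm's actual code):
def change_numvfs_sketch(pci_path, numvfs):
    sysfs = "/sys/bus/pci/devices/%s/sriov_numvfs" % pci_path
    # The kernel rejects changing a non-zero vf count directly, so reset
    # to 0 before writing the new value.
    with open(sysfs, "w") as f:
        f.write("0")
    with open(sysfs, "w") as f:
        f.write(str(numvfs))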
def testPingCall(self):
    self.dropPrivileges()
    proxy = supervdsm.getProxy()
    self.assertTrue(proxy.ping())
def ksmTune(self, tuningParams):
    # When MOM is launched by vdsm, it runs without root privileges,
    # so we need to resort to supervdsm to set the KSM parameters.
    superVdsm = supervdsm.getProxy()
    superVdsm.ksmTune(tuningParams)