def lookup(self, vmid, dev_name):
    dom = VMModel.get_vm(vmid, self.conn)
    xmlstr = dom.XMLDesc(0)
    root = objectify.fromstring(xmlstr)
    try:
        hostdev = root.devices.hostdev
    except AttributeError:
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})

    dev_model = DeviceModel(conn=self.conn)
    for e in hostdev:
        deduced_name = DeviceModel.deduce_dev_name(e, self.conn)
        if deduced_name == dev_name:
            dev_info = dev_model.lookup(dev_name)
            return {'name': dev_name,
                    'type': e.attrib['type'],
                    'product': dev_info.get('product', None),
                    'vendor': dev_info.get('vendor', None)}

    raise NotFoundError('KCHVMHDEV0001E',
                        {'vmid': vmid, 'dev_name': dev_name})
def delete(self, vmid, dev_name):
    dom = VMModel.get_vm(vmid, self.conn)
    xmlstr = dom.XMLDesc(0)
    root = objectify.fromstring(xmlstr)
    try:
        hostdev = root.devices.hostdev
    except AttributeError:
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})

    pci_devs = [(DeviceModel.deduce_dev_name(e, self.conn), e)
                for e in hostdev if e.attrib['type'] == 'pci']

    dev_model = DeviceModel(conn=self.conn)
    dev_info = dev_model.lookup(dev_name)
    is_3D_device = dev_model.is_device_3D_controller(dev_info)
    if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        raise InvalidOperation('KCHVMHDEV0006E',
                               {'name': dev_info['name']})

    for e in hostdev:
        if DeviceModel.deduce_dev_name(e, self.conn) == dev_name:
            xmlstr = etree.tostring(e)
            dom.detachDeviceFlags(
                xmlstr, get_vm_config_flag(dom, mode='all'))
            if e.attrib['type'] == 'pci':
                self._delete_affected_pci_devices(dom, dev_name, pci_devs)
            if is_3D_device:
                devsmodel = VMHostDevsModel(conn=self.conn)
                devsmodel.update_mmio_guest(vmid, False)
            break
    else:
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})
def delete(self, vmid, dev_name):
    dom = VMModel.get_vm(vmid, self.conn)
    xmlstr = dom.XMLDesc(0)
    root = objectify.fromstring(xmlstr)
    try:
        hostdev = root.devices.hostdev
    except AttributeError:
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})

    pci_devs = [(DeviceModel.deduce_dev_name(e, self.conn), e)
                for e in hostdev if e.attrib['type'] == 'pci']

    for e in hostdev:
        if DeviceModel.deduce_dev_name(e, self.conn) == dev_name:
            xmlstr = etree.tostring(e)
            dom.detachDeviceFlags(
                xmlstr, get_vm_config_flag(dom, mode='all'))
            if e.attrib['type'] == 'pci':
                self._delete_affected_pci_devices(dom, dev_name, pci_devs)
            break
    else:
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})
def delete(self, vmid, dev_name):
    dom = VMModel.get_vm(vmid, self.conn)
    xmlstr = dom.XMLDesc(0)
    root = objectify.fromstring(xmlstr)
    try:
        hostdev = root.devices.hostdev
    except AttributeError:
        raise NotFoundError('KCHVMHDEV0001E', {
            'vmid': vmid,
            'dev_name': dev_name
        })

    pci_devs = [(DeviceModel.deduce_dev_name(e, self.conn), e)
                for e in hostdev if e.attrib['type'] == 'pci']

    for e in hostdev:
        if DeviceModel.deduce_dev_name(e, self.conn) == dev_name:
            xmlstr = etree.tostring(e)
            dom.detachDeviceFlags(xmlstr,
                                  get_vm_config_flag(dom, mode='all'))
            if e.attrib['type'] == 'pci':
                self._delete_affected_pci_devices(dom, dev_name, pci_devs)
            break
    else:
        raise NotFoundError('KCHVMHDEV0001E', {
            'vmid': vmid,
            'dev_name': dev_name
        })
def _detach_device(self, cb, params):
    cb('Detaching device')
    self._cb = cb
    vmid = params['vmid']
    dev_name = params['dev_name']
    dom = params['dom']
    hostdev = params['hostdev']
    lock = params['lock']

    with lock:
        pci_devs = [(DeviceModel.deduce_dev_name(e, self.conn), e)
                    for e in hostdev if e.attrib['type'] == 'pci']

        dev_info = self.dev_model.lookup(dev_name)
        is_3D_device = self.dev_model.is_device_3D_controller(dev_info)
        if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != "shutoff":
            raise InvalidOperation('KCHVMHDEV0006E',
                                   {'name': dev_info['name']})

        # check for multifunction and detach all functions together
        try:
            multi = self._unplug_multifunction_pci(dom, hostdev, dev_name)
        except libvirt.libvirtError:
            multi = False

        # successfully detached all functions: finish operation
        if multi:
            if is_3D_device:
                devsmodel = VMHostDevsModel(conn=self.conn)
                devsmodel.update_mmio_guest(vmid, False)

            if DOM_STATE_MAP[dom.info()[0]] == "shutoff":
                cb('OK', True)
            return

        # detach each function individually
        for e in hostdev:
            if DeviceModel.deduce_dev_name(e, self.conn) == dev_name:
                xmlstr = etree.tostring(e)
                dom.detachDeviceFlags(
                    xmlstr, get_vm_config_flag(dom, mode='all'))
                if e.attrib['type'] == 'pci':
                    self._delete_affected_pci_devices(dom, dev_name,
                                                      pci_devs)
                if is_3D_device:
                    devsmodel = VMHostDevsModel(conn=self.conn)
                    devsmodel.update_mmio_guest(vmid, False)
                break
        else:
            msg = WokMessage('KCHVMHDEV0001E',
                             {'vmid': vmid, 'dev_name': dev_name})
            cb(msg.get_text(), False)
            raise NotFoundError('KCHVMHDEV0001E',
                                {'vmid': vmid, 'dev_name': dev_name})

        if DOM_STATE_MAP[dom.info()[0]] == "shutoff":
            cb('OK', True)
def __init__(self, **kargs):
    self.conn = kargs['conn']
    self.objstore = kargs['objstore']
    self.events = kargs['eventsloop']
    self.task = TaskModel(**kargs)
    self.devs_model = DevicesModel(**kargs)
    self.dev_model = DeviceModel(**kargs)
    self._cb = None
    self.events.registerDetachDevicesEvent(
        self.conn, self._event_devices, self)
def __init__(self, **kargs):
    self.conn = kargs['conn']
    self.objstore = kargs['objstore']
    self.scanner = Scanner(self._clean_scan)
    self.scanner.delete()
    self.caps = CapabilitiesModel(**kargs)
    self.device = DeviceModel(**kargs)

    if self.conn.get() is not None:
        if self.conn.isQemuURI():
            self._check_default_pools()
def _detach_device(self, cb, params):
    cb('Detaching device')
    self._cb = cb
    vmid = params['vmid']
    dev_name = params['dev_name']
    dom = params['dom']
    hostdev = params['hostdev']
    lock = params['lock']

    with lock:
        pci_devs = [(DeviceModel.deduce_dev_name(e, self.conn), e)
                    for e in hostdev if e.attrib['type'] == 'pci']

        dev_info = self.dev_model.lookup(dev_name)
        is_3D_device = self.dev_model.is_device_3D_controller(dev_info)
        if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != "shutoff":
            raise InvalidOperation('KCHVMHDEV0006E',
                                   {'name': dev_info['name']})

        if self._hotunplug_multifunction_pci(dom, hostdev, dev_name):
            if is_3D_device:
                devsmodel = VMHostDevsModel(conn=self.conn)
                devsmodel.update_mmio_guest(vmid, False)

            if DOM_STATE_MAP[dom.info()[0]] == "shutoff":
                cb('OK', True)
            return

        for e in hostdev:
            if DeviceModel.deduce_dev_name(e, self.conn) == dev_name:
                xmlstr = etree.tostring(e)
                dom.detachDeviceFlags(xmlstr,
                                      get_vm_config_flag(dom, mode='all'))
                if e.attrib['type'] == 'pci':
                    self._delete_affected_pci_devices(
                        dom, dev_name, pci_devs)
                if is_3D_device:
                    devsmodel = VMHostDevsModel(conn=self.conn)
                    devsmodel.update_mmio_guest(vmid, False)
                break
        else:
            msg = WokMessage('KCHVMHDEV0001E', {
                'vmid': vmid,
                'dev_name': dev_name
            })
            cb(msg.get_text(), False)
            raise NotFoundError('KCHVMHDEV0001E', {
                'vmid': vmid,
                'dev_name': dev_name
            })

        if DOM_STATE_MAP[dom.info()[0]] == "shutoff":
            cb('OK', True)
def _delete_affected_pci_devices(self, dom, dev_name, pci_devs):
    dev_model = DeviceModel(conn=self.conn)
    try:
        dev_model.lookup(dev_name)
    except NotFoundError:
        return

    affected_names = set(
        DevicesModel(conn=self.conn).get_list(
            _passthrough_affected_by=dev_name))

    for pci_name, e in pci_devs:
        if pci_name in affected_names:
            xmlstr = etree.tostring(e)
            dom.detachDeviceFlags(xmlstr,
                                  get_vm_config_flag(dom, mode='all'))
def lookup(self, vmid, dev_name):
    dom = VMModel.get_vm(vmid, self.conn)
    xmlstr = dom.XMLDesc(0)
    root = objectify.fromstring(xmlstr)
    try:
        hostdev = root.devices.hostdev
    except AttributeError:
        raise NotFoundError('KCHVMHDEV0001E', {
            'vmid': vmid,
            'dev_name': dev_name})

    for e in hostdev:
        deduced_name = DeviceModel.deduce_dev_name(e, self.conn)
        if deduced_name == dev_name:
            dev_info = self.dev_model.lookup(dev_name)
            return {
                'name': dev_name,
                'type': e.attrib['type'],
                'product': dev_info.get('product', None),
                'vendor': dev_info.get('vendor', None),
                'multifunction': dev_info.get('multifunction', None),
                'vga3d': dev_info.get('vga3d', None),
            }

    raise NotFoundError('KCHVMHDEV0001E', {
        'vmid': vmid,
        'dev_name': dev_name})
def _delete_affected_pci_devices(self, dom, dev_name, pci_devs):
    dev_model = DeviceModel(conn=self.conn)
    try:
        dev_model.lookup(dev_name)
    except NotFoundError:
        return

    affected_names = set(
        DevicesModel(
            conn=self.conn).get_list(_passthrough_affected_by=dev_name))

    for pci_name, e in pci_devs:
        if pci_name in affected_names:
            xmlstr = etree.tostring(e)
            dom.detachDeviceFlags(
                xmlstr, get_vm_config_flag(dom, mode='all'))
def create(self, vmid, params):
    dev_name = params['name']
    self._passthrough_device_validate(dev_name)
    dev_info = DeviceModel(conn=self.conn).lookup(dev_name)

    if dev_info['device_type'] == 'pci':
        taskid = add_task(
            u'/plugins/kimchi/vms/%s/hostdevs/'
            % VMModel.get_vm(vmid, self.conn).name(),
            self._attach_pci_device, self.objstore,
            {'vmid': vmid, 'dev_info': dev_info})
        return self.task.lookup(taskid)

    with RollbackContext() as rollback:
        try:
            dev = self.conn.get().nodeDeviceLookupByName(dev_name)
            dev.dettach()
        except Exception:
            raise OperationFailed('KCHVMHDEV0005E', {'name': dev_name})
        else:
            rollback.prependDefer(dev.reAttach)

        rollback.commitAll()

    taskid = add_task(
        u'/plugins/kimchi/vms/%s/hostdevs/'
        % VMModel.get_vm(vmid, self.conn).name(),
        '_attach_%s_device' % dev_info['device_type'], self.objstore,
        {'vmid': vmid, 'dev_info': dev_info})
    return self.task.lookup(taskid)
def _detach_device(self, cb, params):
    cb('Detaching device')
    self._cb = cb
    vmid = params['vmid']
    dev_name = params['dev_name']
    dom = params['dom']
    hostdev = params['hostdev']
    lock = params['lock']

    with lock:
        pci_devs = {
            DeviceModel.deduce_dev_name(e, self.conn): e
            for e in hostdev
            if e.attrib['type'] == 'pci'
        }

        dev_info = self.dev_model.lookup(dev_name)
        is_3D_device = self.dev_model.is_device_3D_controller(dev_info)
        if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != 'shutoff':
            raise InvalidOperation(
                'KCHVMHDEV0006E', {'name': dev_info['name']})

        if not pci_devs.get(dev_name):
            raise NotFoundError(
                'KCHVMHDEV0001E', {'vmid': vmid, 'dev_name': dev_name}
            )

        dev_name_elem = pci_devs[dev_name]
        self._managed = dev_name_elem.get('managed', 'no') == 'yes'

        # check for multifunction and detach all functions together
        try:
            multi = self.unplug_multifunction_pci(
                dom, hostdev, dev_name_elem)
        except libvirt.libvirtError:
            multi = False

        # successfully detached all functions: finish operation
        if multi:
            if is_3D_device:
                devsmodel = VMHostDevsModel(conn=self.conn)
                devsmodel.update_mmio_guest(vmid, False)

            if DOM_STATE_MAP[dom.info()[0]] == 'shutoff':
                cb('OK', True)
            return

        # detach individually
        xmlstr = etree.tostring(dev_name_elem)
        dom.detachDeviceFlags(xmlstr, get_vm_config_flag(dom, mode='all'))
        if dev_name_elem.attrib['type'] == 'pci':
            self._delete_affected_pci_devices(dom, dev_name, pci_devs)

        if is_3D_device:
            devsmodel = VMHostDevsModel(conn=self.conn)
            devsmodel.update_mmio_guest(vmid, False)

        if DOM_STATE_MAP[dom.info()[0]] == 'shutoff':
            cb('OK', True)
def _detach_device(self, cb, params):
    cb('Detaching device.')
    vmid = params['vmid']
    dev_name = params['dev_name']
    dom = params['dom']
    hostdev = params['hostdev']

    pci_devs = [(DeviceModel.deduce_dev_name(e, self.conn), e)
                for e in hostdev if e.attrib['type'] == 'pci']

    dev_model = DeviceModel(conn=self.conn)
    dev_info = dev_model.lookup(dev_name)
    is_3D_device = dev_model.is_device_3D_controller(dev_info)
    if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        raise InvalidOperation('KCHVMHDEV0006E',
                               {'name': dev_info['name']})

    if self._hotunplug_multifunction_pci(dom, hostdev, dev_name):
        if is_3D_device:
            cb('Updating MMIO from VM...')
            devsmodel = VMHostDevsModel(conn=self.conn)
            devsmodel.update_mmio_guest(vmid, False)

        cb('OK', True)
        return

    for e in hostdev:
        if DeviceModel.deduce_dev_name(e, self.conn) == dev_name:
            xmlstr = etree.tostring(e)
            cb('Detaching device from VM...')
            dom.detachDeviceFlags(
                xmlstr, get_vm_config_flag(dom, mode='all'))
            if e.attrib['type'] == 'pci':
                cb('Deleting affected PCI devices...')
                self._delete_affected_pci_devices(dom, dev_name, pci_devs)
            if is_3D_device:
                cb('Updating MMIO from VM...')
                devsmodel = VMHostDevsModel(conn=self.conn)
                devsmodel.update_mmio_guest(vmid, False)
            break
    else:
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})

    cb('OK', True)
def get_list(self, vmid):
    dom = VMModel.get_vm(vmid, self.conn)
    xmlstr = dom.XMLDesc(0)
    root = objectify.fromstring(xmlstr)
    try:
        hostdev = root.devices.hostdev
    except AttributeError:
        return []

    return [DeviceModel.deduce_dev_name(e, self.conn) for e in hostdev]
def _count_3D_devices_attached(self, dom):
    counter = 0
    root = objectify.fromstring(dom.XMLDesc(0))
    try:
        hostdev = root.devices.hostdev
    except AttributeError:
        return counter

    for device in hostdev:
        if device.attrib['type'] != 'pci':
            continue

        name = DeviceModel.deduce_dev_name(device, self.conn)
        info = DeviceModel(conn=self.conn).lookup(name)
        if 'vga3d' in info and info['vga3d']:
            counter += 1

    return counter
def _count_3D_devices_attached(self, dom):
    counter = 0
    root = objectify.fromstring(dom.XMLDesc(0))
    try:
        hostdev = root.devices.hostdev
    except AttributeError:
        return counter

    for device in hostdev:
        if device.attrib['type'] != 'pci':
            continue

        name = DeviceModel.deduce_dev_name(device, self.conn)
        info = self.dev_model.lookup(name)
        if 'vga3d' in info and info['vga3d']:
            counter += 1

    return counter
def _available_slot(self, dom):
    xmlstr = dom.XMLDesc(0)
    root = objectify.fromstring(xmlstr)
    slots = []
    try:
        devices = root.devices
        slots = [DeviceModel._toint(dev.attrib['slot'])
                 for dev in devices.findall('.//address')
                 if 'slot' in dev.attrib]
    except AttributeError:
        return 1

    slots = sorted(slots)

    # initialize before the loop so an empty slot list returns 1 instead of
    # raising UnboundLocalError
    free = 0
    for free, slot in enumerate(slots, start=1):
        if free < slot:
            return free

    return free + 1
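# Viewed in isolation, the slot search above is a first-gap scan over the
# sorted list of occupied slot numbers. A minimal standalone sketch follows;
# the helper name first_free_slot is hypothetical, and the real method
# derives the integers from the guest XML via DeviceModel._toint.
def first_free_slot(used_slots):
    slots = sorted(used_slots)
    free = 0
    for free, slot in enumerate(slots, start=1):
        if free < slot:
            # gap before this occupied slot: reuse it
            return free
    # no gap found: take the slot right after the highest occupied one
    return free + 1

# Examples: first_free_slot([1, 2, 4]) == 3, first_free_slot([]) == 1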
def create(self, vmid, params):
    dev_name = params['name']
    self._passthrough_device_validate(dev_name)
    dev_info = DeviceModel(conn=self.conn).lookup(dev_name)

    if dev_info['device_type'] == 'pci':
        return self._attach_pci_device(vmid, dev_info)

    with RollbackContext() as rollback:
        try:
            dev = self.conn.get().nodeDeviceLookupByName(dev_name)
            dev.dettach()
        except Exception:
            raise OperationFailed('KCHVMHDEV0005E', {'name': dev_name})
        else:
            rollback.prependDefer(dev.reAttach)

        attach_device = getattr(
            self, '_attach_%s_device' % dev_info['device_type'])
        info = attach_device(vmid, dev_info)
        rollback.commitAll()

    return info
def delete(self, vmid, dev_name):
    dom = VMModel.get_vm(vmid, self.conn)
    xmlstr = dom.XMLDesc(0)
    root = objectify.fromstring(xmlstr)
    try:
        hostdev = root.devices.hostdev
    except AttributeError:
        raise NotFoundError('KCHVMHDEV0001E', {
            'vmid': vmid,
            'dev_name': dev_name
        })

    pci_devs = [(DeviceModel.deduce_dev_name(e, self.conn), e)
                for e in hostdev if e.attrib['type'] == 'pci']

    dev_model = DeviceModel(conn=self.conn)
    dev_info = dev_model.lookup(dev_name)
    is_3D_device = dev_model.is_device_3D_controller(dev_info)
    if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        raise InvalidOperation('KCHVMHDEV0006E',
                               {'name': dev_info['name']})

    for e in hostdev:
        if DeviceModel.deduce_dev_name(e, self.conn) == dev_name:
            xmlstr = etree.tostring(e)
            dom.detachDeviceFlags(xmlstr,
                                  get_vm_config_flag(dom, mode='all'))
            if e.attrib['type'] == 'pci':
                self._delete_affected_pci_devices(dom, dev_name, pci_devs)
            if is_3D_device:
                devsmodel = VMHostDevsModel(conn=self.conn)
                devsmodel.update_mmio_guest(vmid, False)
            break
    else:
        raise NotFoundError('KCHVMHDEV0001E', {
            'vmid': vmid,
            'dev_name': dev_name
        })
def _detach_device(self, cb, params):
    cb('Detaching device.')
    vmid = params['vmid']
    dev_name = params['dev_name']
    dom = params['dom']
    hostdev = params['hostdev']

    pci_devs = [(DeviceModel.deduce_dev_name(e, self.conn), e)
                for e in hostdev if e.attrib['type'] == 'pci']

    dev_model = DeviceModel(conn=self.conn)
    dev_info = dev_model.lookup(dev_name)
    is_3D_device = dev_model.is_device_3D_controller(dev_info)
    if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        raise InvalidOperation('KCHVMHDEV0006E',
                               {'name': dev_info['name']})

    for e in hostdev:
        if DeviceModel.deduce_dev_name(e, self.conn) == dev_name:
            xmlstr = etree.tostring(e)
            cb('Detaching device from VM...')
            dom.detachDeviceFlags(xmlstr,
                                  get_vm_config_flag(dom, mode='all'))
            if e.attrib['type'] == 'pci':
                cb('Deleting affected PCI devices...')
                self._delete_affected_pci_devices(dom, dev_name, pci_devs)
            if is_3D_device:
                cb('Updating MMIO from VM...')
                devsmodel = VMHostDevsModel(conn=self.conn)
                devsmodel.update_mmio_guest(vmid, False)
            break
    else:
        raise NotFoundError('KCHVMHDEV0001E', {
            'vmid': vmid,
            'dev_name': dev_name
        })

    cb('OK', True)
class VMHostDevsModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] self.objstore = kargs['objstore'] self.events = kargs['eventsloop'] self.caps = CapabilitiesModel(**kargs) self.devs_model = DevicesModel(**kargs) self.dev_model = DeviceModel(**kargs) self.task = TaskModel(**kargs) self._cb = None self.events.registerAttachDevicesEvent( self.conn, self._event_devices, self) def get_list(self, vmid): dom = VMModel.get_vm(vmid, self.conn) xmlstr = dom.XMLDesc(0) root = objectify.fromstring(xmlstr) try: hostdev = root.devices.hostdev except AttributeError: return [] return [DeviceModel.deduce_dev_name(e, self.conn) for e in hostdev] def _passthrough_device_validate(self, dev_name): eligible_dev_names = self.devs_model.get_list(_passthrough='true') if dev_name not in eligible_dev_names: raise InvalidParameter('KCHVMHDEV0002E', {'dev_name': dev_name}) def _event_devices(self, conn, dom, alias, opaque): """ Callback to handle add/remove devices event """ if opaque._cb is None: wok_log.error('opaque must be valid') return wok_log.info('Device %s added successfuly' % alias) opaque._cb('OK', True) def create(self, vmid, params): dev_name = params['name'] dev_info = self.dev_model.lookup(dev_name) if dev_info['device_type'] == 'pci': taskid = AsyncTask( u'/plugins/kimchi/vms/%s/hostdevs/' % VMModel.get_vm(vmid, self.conn).name(), self._attach_pci_device, {'vmid': vmid, 'dev_info': dev_info, 'lock': threading.RLock()}, ).id return self.task.lookup(taskid) with RollbackContext() as rollback: try: dev = self.conn.get().nodeDeviceLookupByName(dev_name) dev.dettach() except Exception: raise OperationFailed('KCHVMHDEV0005E', {'name': dev_name}) else: rollback.prependDefer(dev.reAttach) rollback.commitAll() taskid = AsyncTask( u'/plugins/kimchi/vms/%s/hostdevs/' % VMModel.get_vm(vmid, self.conn).name(), '_attach_%s_device' % dev_info['device_type'], {'vmid': vmid, 'dev_info': dev_info, 'lock': threading.RLock()}, ).id return self.task.lookup(taskid) def _get_pci_devices_xml(self, pci_infos, slot, driver): hostdevs = '' # all devices included in the xml will be sorted in reverse (the # function 0 will be the last one) and will include the guest # address details for dev_info in sorted(pci_infos, key=itemgetter('function'), reverse=True): dev_info['detach_driver'] = driver hostdevs += self._get_pci_device_xml(dev_info, slot, True) return '<devices>%s</devices>' % hostdevs def have_usb_controller(self, vmid): dom = VMModel.get_vm(vmid, self.conn) root = objectify.fromstring(dom.XMLDesc(0)) try: controllers = root.devices.controller except AttributeError: return False for controller in controllers: if 'model' not in controller.attrib: continue if ( controller.attrib['type'] == 'usb' and controller.attrib['model'] in USB_MODELS_PCI_HOTPLUG ): return True return False def _get_pci_device_xml(self, dev_info, slot, is_multifunction): if 'detach_driver' not in dev_info: dev_info['detach_driver'] = 'kvm' source = E.source( E.address( domain=str(dev_info['domain']), bus=str(dev_info['bus']), slot=str(dev_info['slot']), function=str(dev_info['function']), ) ) driver = E.driver(name=dev_info['detach_driver']) if is_multifunction: if dev_info['function'] == 0: multi = E.address( type='pci', domain='0', bus='0', slot=str(slot), function=str(dev_info['function']), multifunction='on', ) else: multi = E.address( type='pci', domain='0', bus='0', slot=str(slot), function=str(dev_info['function']), ) host_dev = E.hostdev( source, driver, multi, mode='subsystem', type='pci', managed='yes' ) else: host_dev = 
E.hostdev( source, driver, mode='subsystem', type='pci', managed='yes' ) return etree.tostring(host_dev) @staticmethod def _validate_pci_passthrough_env(): # Linux kernel < 3.5 doesn't provide /sys/kernel/iommu_groups if os.path.isdir('/sys/kernel/iommu_groups'): if not glob.glob('/sys/kernel/iommu_groups/*'): raise InvalidOperation('KCHVMHDEV0003E') # Enable virt_use_sysfs on RHEL6 and older distributions # In recent Fedora, there is no virt_use_sysfs. out, err, rc = run_command( ['getsebool', 'virt_use_sysfs'], silent=True) if rc == 0 and out.rstrip('\n') != 'virt_use_sysfs --> on': out, err, rc = run_command( ['setsebool', '-P', 'virt_use_sysfs=on']) if rc != 0: wok_log.warning('Unable to turn on sebool virt_use_sysfs') def _available_slot(self, dom): xmlstr = dom.XMLDesc(0) root = objectify.fromstring(xmlstr) try: devices = root.devices slots = [ self.dev_model._toint(dev.attrib['slot']) for dev in devices.findall('.//address') if 'slot' in dev.attrib ] except AttributeError: return 1 slots = sorted(slots) free = 0 for free, slot in enumerate(slots, start=1): if free < slot: return free return free + 1 def _attach_pci_device(self, cb, params): cb('Attaching PCI device') self._cb = cb vmid = params['vmid'] dev_info = params['dev_info'] lock = params['lock'] try: self._passthrough_device_validate(dev_info['name']) except InvalidParameter as e: cb(str(e), False) raise with lock: try: self._validate_pci_passthrough_env() except InvalidOperation as e: cb(str(e), False) raise dom = VMModel.get_vm(vmid, self.conn) driver = 'vfio' if self.caps.kernel_vfio else 'kvm' # 'vfio' systems requires a usb controller in order to support pci # hotplug on Power. if ( driver == 'vfio' and platform.machine().startswith('ppc') and DOM_STATE_MAP[dom.info()[0]] != 'shutoff' and not self.have_usb_controller(vmid) ): msg = WokMessage('KCHVMHDEV0008E', {'vmid': vmid}) cb(msg.get_text(), False) raise InvalidOperation('KCHVMHDEV0008E', {'vmid': vmid}) # Attach all PCI devices in the same IOMMU group affected_names = self.devs_model.get_list( _passthrough_affected_by=dev_info['name'] ) passthrough_names = self.devs_model.get_list( _cap='pci', _passthrough='true' ) group_names = list(set(affected_names) & set(passthrough_names)) pci_infos = [self.dev_model.lookup( dev_name) for dev_name in group_names] pci_infos.append(dev_info) pci_infos = sorted(pci_infos, key=itemgetter('name')) # does not allow hot-plug of 3D graphic cards is_3D_device = self.dev_model.is_device_3D_controller(dev_info) if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != 'shutoff': msg = WokMessage('KCHVMHDEV0006E', {'name': dev_info['name']}) cb(msg.get_text(), False) raise InvalidOperation( 'KCHVMHDEV0006E', {'name': dev_info['name']}) # all devices in the group that is going to be attached to the vm # must be detached from the host first self._attach_all_devices(pci_infos) # when attaching a 3D graphic device it might be necessary to # increase the window size memory in order to be able to attach # more than one device to the same guest if is_3D_device: self.update_mmio_guest(vmid, True) self._attach_multifunction_devices(dom, pci_infos, driver, vmid) if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': cb('OK', True) def _attach_multifunction_devices(self, dom, pci_infos, driver, vmid): slot = 0 is_multifunction = len(pci_infos) > 1 device_flags = get_vm_config_flag(dom, mode='all') with RollbackContext() as rollback: # multifuction: try to attach all functions together within one # xml file. It requires libvirt support. 
if is_multifunction: # search for the first available slot in guest xml slot = self._available_slot(dom) xmlstr = self._get_pci_devices_xml(pci_infos, slot, driver) try: dom.attachDeviceFlags(xmlstr, device_flags) except libvirt.libvirtError: # If operation fails, we try the other way, where each # function is attached individually pass else: rollback.prependDefer( dom.detachDeviceFlags, xmlstr, device_flags ) rollback.commitAll() if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': self._cb('OK', True) return # attach each function individually (multi or single function) for pci_info in pci_infos: pci_info['detach_driver'] = driver xmlstr = self._get_pci_device_xml( pci_info, slot, is_multifunction) try: dom.attachDeviceFlags(xmlstr, device_flags) except libvirt.libvirtError: msg = WokMessage( 'KCHVMHDEV0007E', { 'device': pci_info['name'], 'vm': vmid} ) self._cb(msg.get_text(), False) wok_log.error( 'Failed to attach host device %s to VM %s: \n%s', pci_info['name'], vmid, xmlstr, ) raise rollback.prependDefer( dom.detachDeviceFlags, xmlstr, device_flags) rollback.commitAll() def _attach_all_devices(self, pci_infos): with RollbackContext() as rollback: for pci_info in pci_infos: try: dev = self.conn.get().nodeDeviceLookupByName( pci_info['name']) dev.dettach() except Exception: msg = WokMessage('KCHVMHDEV0005E', { 'name': pci_info['name']}) self._cb(msg.get_text(), False) raise OperationFailed( 'KCHVMHDEV0005E', {'name': pci_info['name']} ) else: rollback.prependDefer(dev.reAttach) rollback.commitAll() def _count_3D_devices_attached(self, dom): counter = 0 root = objectify.fromstring(dom.XMLDesc(0)) try: hostdev = root.devices.hostdev except AttributeError: return counter for device in hostdev: if device.attrib['type'] != 'pci': continue name = DeviceModel.deduce_dev_name(device, self.conn) info = self.dev_model.lookup(name) if 'vga3d' in info and info['vga3d']: counter += 1 return counter def update_mmio_guest(self, vmid, is_attaching): dom = VMModel.get_vm(vmid, self.conn) # get the number of 3D graphic cards already attached to the guest # based on this number we will decide if the memory size will be # increased or not counter = self._count_3D_devices_attached(dom) if counter == 0 and is_attaching: return size = 0 if is_attaching: # suppose this is the 3rd graphic card to be attached to the same # guest, counter will be 2+1 (2 existing + this attachment) times # 32G (0x80000000) size = hex((counter + 1) * WINDOW_SIZE_BAR) else: size = hex(counter * WINDOW_SIZE_BAR) # if the guest already has the xml file we will simply update the # value, otherwise we will add the new field new_xml = self._update_win_memory_size(dom, counter, size) if new_xml is None and is_attaching: new_xml = self._add_win_memory_size(dom, size) # update the XML if new_xml is not None: self.conn.get().defineXML(new_xml) def _update_win_memory_size(self, dom, counter, wnd_size): root = objectify.fromstring(dom.XMLDesc(0)) # look for the existing argument in <qemu:commandline> and try # to update the value (or remove if there is only one (or none) # graphic card attached. 
cmdline = root.findall('{%s}commandline' % QEMU_NAMESPACE) for line in cmdline: for arg in line.iterchildren(): if not arg.values()[0].startswith(CMDLINE_FIELD_NAME): continue if counter > 1: arg.set('value', CMDLINE_FIELD_NAME + '=' + wnd_size) else: line.remove(arg.getprevious()) line.remove(arg) return etree.tostring(root, encoding='unicode', pretty_print=True) return None def _add_win_memory_size(self, dom, wnd_size): root = objectify.fromstring(dom.XMLDesc(0)) val = CMDLINE_FIELD_NAME + '=' + wnd_size cmdline = root.find('{%s}commandline' % QEMU_NAMESPACE) # <qemu:commandline> doesn't exist, create the full commandline xml # with the required values and return if cmdline is None: args = {} args['-global'] = val root.append(etree.fromstring(get_qemucmdline_xml(args))) return etree.tostring(root, encoding='unicode', pretty_print=True) # <qemu:commandline> exists but there is no <qemu:arg value global> # so, we add those missing arguments inside the exising cmdline EM = ElementMaker(namespace=QEMU_NAMESPACE, nsmap={'qemu': QEMU_NAMESPACE}) cmdline.append(EM.arg(value='-global')) cmdline.append(EM.arg(value=val)) return etree.tostring(root, encoding='unicode', pretty_print=True) def _get_scsi_device_xml(self, dev_info): adapter = E.adapter(name=('scsi_host%s' % dev_info['host'])) address = E.address( type='scsi', bus=str(dev_info['bus']), target=str(dev_info['target']), unit=str(dev_info['lun']), ) host_dev = E.hostdev( E.source(adapter, address), mode='subsystem', type='scsi', sgio='unfiltered' ) return etree.tostring(host_dev) def _attach_scsi_device(self, cb, params): cb('Attaching SCSI device...') self._cb = cb vmid = params['vmid'] dev_info = params['dev_info'] lock = params['lock'] try: self._passthrough_device_validate(dev_info['name']) except InvalidParameter as e: cb(str(e), False) raise with lock: dom = VMModel.get_vm(vmid, self.conn) with RollbackContext() as rollback: xmlstr = self._get_scsi_device_xml(dev_info) device_flags = get_vm_config_flag(dom, mode='all') try: cb('Attaching device to VM') dom.attachDeviceFlags(xmlstr, device_flags) except libvirt.libvirtError: msg = WokMessage( 'KCHVMHDEV0007E', { 'device': dev_info['name'], 'vm': vmid} ) cb(msg.get_text(), False) wok_log.error( 'Failed to attach host device %s to VM %s: \n%s', dev_info['name'], vmid, xmlstr, ) raise rollback.prependDefer( dom.detachDeviceFlags, xmlstr, device_flags) rollback.commitAll() if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': cb('OK', True) def _get_usb_device_xml(self, dev_info): source = E.source( E.vendor(id=dev_info['vendor']['id']), E.product(id=dev_info['product']['id']), E.address(bus=str(dev_info['bus']), device=str(dev_info['device'])), startupPolicy='optional', ) host_dev = E.hostdev(source, mode='subsystem', ype='usb', managed='yes') return etree.tostring(host_dev) def _attach_usb_device(self, cb, params): cb('Attaching USB device...') self._cb = cb vmid = params['vmid'] dev_info = params['dev_info'] dom = VMModel.get_vm(vmid, self.conn) lock = params['lock'] try: self._passthrough_device_validate(dev_info['name']) except InvalidParameter as e: cb(str(e), False) raise with lock: with RollbackContext() as rollback: xmlstr = self._get_usb_device_xml(dev_info) device_flags = get_vm_config_flag(dom, mode='all') try: cb('Attaching device to VM') dom.attachDeviceFlags(xmlstr, device_flags) except libvirt.libvirtError: msg = WokMessage( 'KCHVMHDEV0007E', { 'device': dev_info['name'], 'vm': vmid} ) cb(msg.get_text(), False) wok_log.error( 'Failed to attach host device %s to VM %s: \n%s', 
dev_info['name'], vmid, xmlstr, ) raise rollback.prependDefer( dom.detachDeviceFlags, xmlstr, device_flags) rollback.commitAll() if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': cb('OK', True)
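# The MMIO window handling in update_mmio_guest above boils down to a small
# piece of arithmetic. A sketch, assuming WINDOW_SIZE_BAR = 0x80000000 (the
# 32G value mentioned in the in-code comment); the helper name
# mmio_window_value is hypothetical and CMDLINE_FIELD_NAME stays symbolic.
WINDOW_SIZE_BAR = 0x80000000


def mmio_window_value(attached_3d_cards, is_attaching):
    # one extra window when a new 3D card is being attached
    count = attached_3d_cards + 1 if is_attaching else attached_3d_cards
    return hex(count * WINDOW_SIZE_BAR)


# Two cards already attached, attaching a third:
# mmio_window_value(2, True) == '0x180000000'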
class VMHostDevModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] self.objstore = kargs['objstore'] self.events = kargs['eventsloop'] self.task = TaskModel(**kargs) self.devs_model = DevicesModel(**kargs) self.dev_model = DeviceModel(**kargs) self._cb = None self.events.registerDetachDevicesEvent( self.conn, self._event_devices, self) def lookup(self, vmid, dev_name): dom = VMModel.get_vm(vmid, self.conn) xmlstr = dom.XMLDesc(0) root = objectify.fromstring(xmlstr) try: hostdev = root.devices.hostdev except AttributeError: raise NotFoundError('KCHVMHDEV0001E', { 'vmid': vmid, 'dev_name': dev_name}) for e in hostdev: deduced_name = DeviceModel.deduce_dev_name(e, self.conn) if deduced_name == dev_name: dev_info = self.dev_model.lookup(dev_name) return { 'name': dev_name, 'type': e.attrib['type'], 'product': dev_info.get('product', None), 'vendor': dev_info.get('vendor', None), 'multifunction': dev_info.get('multifunction', None), 'vga3d': dev_info.get('vga3d', None), } raise NotFoundError('KCHVMHDEV0001E', { 'vmid': vmid, 'dev_name': dev_name}) def delete(self, vmid, dev_name): dom = VMModel.get_vm(vmid, self.conn) xmlstr = dom.XMLDesc(0) root = objectify.fromstring(xmlstr) try: hostdev = root.devices.hostdev except AttributeError: raise NotFoundError('KCHVMHDEV0001E', { 'vmid': vmid, 'dev_name': dev_name}) task_params = { 'vmid': vmid, 'dev_name': dev_name, 'dom': dom, 'hostdev': hostdev, 'lock': threading.RLock(), } task_uri = u'/plugins/kimchi/vms/%s/hostdevs/%s' % ( VMModel.get_vm(vmid, self.conn).name(), dev_name, ) taskid = AsyncTask(task_uri, self._detach_device, task_params).id return self.task.lookup(taskid) def _event_devices(self, conn, dom, alias, opaque): """ Callback to handle add/remove devices event """ if opaque._cb is None: wok_log.error('opaque must be valid') return wok_log.info('Device %s removed successfully' % alias) # Re-attach device to host if it's not managed mode if not opaque._managed: try: dev = conn.get().nodeDeviceLookupByName(alias) dev.reAttach() except libvirt.libvirtError as e: wok_log.error( 'Unable to attach device %s back to host. Error: %s', alias, str( e) ) else: wok_log.info( "Device %s was attached in 'managed' mode. " 'Skipping re-attach().' 
% alias ) opaque._cb('OK', True) def _detach_device(self, cb, params): cb('Detaching device') self._cb = cb vmid = params['vmid'] dev_name = params['dev_name'] dom = params['dom'] hostdev = params['hostdev'] lock = params['lock'] with lock: pci_devs = { DeviceModel.deduce_dev_name(e, self.conn): e for e in hostdev if e.attrib['type'] == 'pci' } dev_info = self.dev_model.lookup(dev_name) is_3D_device = self.dev_model.is_device_3D_controller(dev_info) if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != 'shutoff': raise InvalidOperation( 'KCHVMHDEV0006E', {'name': dev_info['name']}) if not pci_devs.get(dev_name): raise NotFoundError( 'KCHVMHDEV0001E', {'vmid': vmid, 'dev_name': dev_name} ) dev_name_elem = pci_devs[dev_name] self._managed = dev_name_elem.get('managed', 'no') == 'yes' # check for multifunction and detach all functions together try: multi = self.unplug_multifunction_pci( dom, hostdev, dev_name_elem) except libvirt.libvirtError: multi = False # successfully detached all functions: finish operation if multi: if is_3D_device: devsmodel = VMHostDevsModel(conn=self.conn) devsmodel.update_mmio_guest(vmid, False) if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': cb('OK', True) return # detach individually xmlstr = etree.tostring(dev_name_elem) dom.detachDeviceFlags(xmlstr, get_vm_config_flag(dom, mode='all')) if dev_name_elem.attrib['type'] == 'pci': self._delete_affected_pci_devices(dom, dev_name, pci_devs) if is_3D_device: devsmodel = VMHostDevsModel(conn=self.conn) devsmodel.update_mmio_guest(vmid, False) if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': cb('OK', True) def get_devices_same_addr(self, hostdevs, device_elem): def elem_has_valid_address(elem): if ( elem.get('type') != 'pci' or elem.address is None or elem.address.get('domain') is None or elem.address.get('bus') is None or elem.address.get('slot') is None ): return False return True if not elem_has_valid_address(device_elem): return [] devices = [] device_domain = device_elem.address.get('domain') device_bus = device_elem.address.get('bus') device_slot = device_elem.address.get('slot') for dev in hostdevs: if not elem_has_valid_address(dev): continue dev_domain = dev.address.get('domain') dev_bus = dev.address.get('bus') dev_slot = dev.address.get('slot') if ( dev_domain == device_domain and dev_bus == device_bus and dev_slot == device_slot ): devices.append(etree.tostring(dev).decode('utf-8')) return devices def is_hostdev_multifunction(self, dev_elem): if ( dev_elem.address is None or dev_elem.address.get('multifunction') is None or dev_elem.address.get('function') is None ): return False is_multi = ( dev_elem.address.get('multifunction') == 'on' and dev_elem.address.get('function') == '0x0' ) return is_multi def unplug_multifunction_pci(self, dom, hostdevs, dev_elem): if not self.is_hostdev_multifunction(dev_elem): return False devices = self.get_devices_same_addr(hostdevs, dev_elem) if len(devices) <= 1: return False devices_xml = '<devices>%s</devices>' % ''.join(devices) dom.detachDeviceFlags(devices_xml, get_vm_config_flag(dom, mode='all')) return True def _delete_affected_pci_devices(self, dom, dev_name, pci_devs): try: self.dev_model.lookup(dev_name) except NotFoundError: return affected_names = set( DevicesModel(conn=self.conn).get_list( _passthrough_affected_by=dev_name) ) for pci_name, e in pci_devs.items(): if pci_name in affected_names: xmlstr = etree.tostring(e) dom.detachDeviceFlags( xmlstr, get_vm_config_flag(dom, mode='all'))
class VMHostDevModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] self.objstore = kargs['objstore'] self.events = kargs['eventsloop'] self.task = TaskModel(**kargs) self.devs_model = DevicesModel(**kargs) self.dev_model = DeviceModel(**kargs) self._cb = None self.events.registerDetachDevicesEvent( self.conn, self._event_devices, self) def lookup(self, vmid, dev_name): dom = VMModel.get_vm(vmid, self.conn) xmlstr = dom.XMLDesc(0) root = objectify.fromstring(xmlstr) try: hostdev = root.devices.hostdev except AttributeError: raise NotFoundError('KCHVMHDEV0001E', {'vmid': vmid, 'dev_name': dev_name}) for e in hostdev: deduced_name = DeviceModel.deduce_dev_name(e, self.conn) if deduced_name == dev_name: dev_info = self.dev_model.lookup(dev_name) return {'name': dev_name, 'type': e.attrib['type'], 'product': dev_info.get('product', None), 'vendor': dev_info.get('vendor', None), 'multifunction': dev_info.get('multifunction', None), 'vga3d': dev_info.get('vga3d', None)} raise NotFoundError('KCHVMHDEV0001E', {'vmid': vmid, 'dev_name': dev_name}) def delete(self, vmid, dev_name): dom = VMModel.get_vm(vmid, self.conn) xmlstr = dom.XMLDesc(0) root = objectify.fromstring(xmlstr) try: hostdev = root.devices.hostdev except AttributeError: raise NotFoundError('KCHVMHDEV0001E', {'vmid': vmid, 'dev_name': dev_name}) task_params = {'vmid': vmid, 'dev_name': dev_name, 'dom': dom, 'hostdev': hostdev, 'lock': threading.RLock()} task_uri = u'/plugins/kimchi/vms/%s/hostdevs/%s' % \ (VMModel.get_vm(vmid, self.conn).name(), dev_name) taskid = add_task(task_uri, self._detach_device, self.objstore, task_params) return self.task.lookup(taskid) def _event_devices(self, conn, dom, alias, opaque): """ Callback to handle add/remove devices event """ if opaque._cb is None: wok_log.error('opaque must be valid') return wok_log.info("Device %s removed successfuly" % alias) opaque._cb('OK', True) def _detach_device(self, cb, params): cb('Detaching device') self._cb = cb vmid = params['vmid'] dev_name = params['dev_name'] dom = params['dom'] hostdev = params['hostdev'] lock = params['lock'] with lock: pci_devs = [(DeviceModel.deduce_dev_name(e, self.conn), e) for e in hostdev if e.attrib['type'] == 'pci'] dev_info = self.dev_model.lookup(dev_name) is_3D_device = self.dev_model.is_device_3D_controller(dev_info) if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != "shutoff": raise InvalidOperation('KCHVMHDEV0006E', {'name': dev_info['name']}) if self._hotunplug_multifunction_pci(dom, hostdev, dev_name): if is_3D_device: devsmodel = VMHostDevsModel(conn=self.conn) devsmodel.update_mmio_guest(vmid, False) if DOM_STATE_MAP[dom.info()[0]] == "shutoff": cb('OK', True) return for e in hostdev: if DeviceModel.deduce_dev_name(e, self.conn) == dev_name: xmlstr = etree.tostring(e) dom.detachDeviceFlags( xmlstr, get_vm_config_flag(dom, mode='all')) if e.attrib['type'] == 'pci': self._delete_affected_pci_devices(dom, dev_name, pci_devs) if is_3D_device: devsmodel = VMHostDevsModel(conn=self.conn) devsmodel.update_mmio_guest(vmid, False) break else: msg = WokMessage('KCHVMHDEV0001E', {'vmid': vmid, 'dev_name': dev_name}) cb(msg.get_text(), False) raise NotFoundError('KCHVMHDEV0001E', {'vmid': vmid, 'dev_name': dev_name}) if DOM_STATE_MAP[dom.info()[0]] == "shutoff": cb('OK', True) def _get_devices_same_addr(self, hostdev, domain, bus, slot): devices = [] for device in hostdev: if device.attrib['type'] != 'pci': continue address = device.source.address if int(address.attrib['domain'], 16) != domain or \ 
int(address.attrib['bus'], 16) != bus or \ int(address.attrib['slot'], 16) != slot: continue devices.append(etree.tostring(device)) return devices def _hotunplug_multifunction_pci(self, dom, hostdev, dev_name): domain, bus, slot, _ = dev_name.split('_')[1:] # get all devices attached to the guest in the same domain+bus+slot # that the one we are going to detach because they must be detached # together devices = self._get_devices_same_addr(hostdev, int(domain, 16), int(bus, 16), int(slot, 16)) if len(devices) <= 1: return False devices_xml = '<devices>%s</devices>' % ''.join(devices) dom.detachDeviceFlags(devices_xml, get_vm_config_flag(dom, mode='all')) return True def _delete_affected_pci_devices(self, dom, dev_name, pci_devs): try: self.dev_model.lookup(dev_name) except NotFoundError: return affected_names = set( DevicesModel( conn=self.conn).get_list(_passthrough_affected_by=dev_name)) for pci_name, e in pci_devs: if pci_name in affected_names: xmlstr = etree.tostring(e) dom.detachDeviceFlags( xmlstr, get_vm_config_flag(dom, mode='all'))
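# The multifunction hot-unplug above matches devices by the PCI address
# parsed out of the libvirt node-device name. A sketch of that split,
# assuming names of the form 'pci_<domain>_<bus>_<slot>_<function>' such as
# 'pci_0000_00_1f_0'; the helper name pci_addr_from_dev_name is hypothetical.
def pci_addr_from_dev_name(dev_name):
    domain, bus, slot, function = dev_name.split('_')[1:]
    return int(domain, 16), int(bus, 16), int(slot, 16), int(function, 16)


# pci_addr_from_dev_name('pci_0000_00_1f_0') == (0, 0, 31, 0)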
class StoragePoolsModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] self.objstore = kargs['objstore'] self.scanner = Scanner(self._clean_scan) self.scanner.delete() self.caps = CapabilitiesModel(**kargs) self.device = DeviceModel(**kargs) if self.conn.get() is not None: if self.conn.isQemuURI(): self._check_default_pools() def _check_default_pools(self): pools = {} # Don't create default pool if it's not # explicitly specified in template.conf if is_s390x() and 'pool' not in tmpl_defaults['disks'][0]: return default_pool = tmpl_defaults['disks'][0]['pool']['name'] default_pool = default_pool.split('/')[-1] pools[default_pool] = {} if default_pool == 'default': pools[default_pool] = {'path': '/var/lib/libvirt/images'} if config.get('kimchi', {}).get('create_iso_pool', False): pools['ISO'] = {'path': '/var/lib/kimchi/isos'} conn = self.conn.get() for pool_name in pools: error_msg = ( 'Storage pool %s does not exist or is not ' 'active. Please, check the configuration in ' '%s/template.conf to ensure it lists only valid ' 'storage.' % (pool_name, kimchiPaths.sysconf_dir) ) try: pool = conn.storagePoolLookupByName(pool_name) except libvirt.libvirtError as e: pool_path = pools[pool_name].get('path') if pool_path is None: wok_log.error( f'Fatal: Unable to find storage pool {pool_name}.') wok_log.error(f'Details: {str(e)}') raise Exception(error_msg) # Try to create the pool pool = E.pool(E.name(pool_name), type='dir') pool.append(E.target(E.path(pool_path))) xml = ET.tostring(pool) try: pool = conn.storagePoolDefineXML(xml, 0) except libvirt.libvirtError as e: wok_log.error( f'Fatal: Unable to create storage pool {pool_name}.') wok_log.error(f'Details: {str(e)}') raise Exception(error_msg) # Build and set autostart value to pool # Ignore error as the pool was already successfully created try: # Add build step to make sure target directory created # The build process may fail when the pool directory # already exists on system pool.build(libvirt.VIR_STORAGE_POOL_BUILD_NEW) pool.setAutostart(1) except Exception: pass if pool.isActive() == 0: try: pool.create(0) except libvirt.libvirtError as e: wok_log.error( f'Fatal: Unable to create storage pool {pool_name}.') wok_log.error(f'Details: {str(e)}') raise Exception(error_msg) def get_list(self): try: conn = self.conn.get() names = conn.listStoragePools() names += conn.listDefinedStoragePools() return sorted(names) except libvirt.libvirtError as e: raise OperationFailed( 'KCHPOOL0006E', {'err': e.get_error_message()}) def _check_lvm(self, name, from_vg): vgdisplay_cmd = ['vgdisplay', name] output, error, returncode = run_command(vgdisplay_cmd) # From vgdisplay error codes: # 1 error reading VGDA # 2 volume group doesn't exist # 3 not all physical volumes of volume group online # 4 volume group not found # 5 no volume groups found at all # 6 error reading VGDA from lvmtab if from_vg and returncode in [2, 4, 5]: raise InvalidOperation('KCHPOOL0038E', {'name': name}) if not from_vg and returncode not in [2, 4, 5]: raise InvalidOperation('KCHPOOL0036E', {'name': name}) def create(self, params): task_id = None conn = self.conn.get() from_vg = params.get('source', {}).get('from_vg', False) try: name = params['name'] if name == ISO_POOL_NAME: raise InvalidOperation('KCHPOOL0031E') # The user may want to create a logical pool with the same name # used before but a volume group will already exist with this name # So check the volume group does not exist to create the pool if params['type'] == 'logical': self._check_lvm(name, from_vg) if 
params['type'] == 'kimchi-iso': task_id = self._do_deep_scan(params) if params['type'] == 'scsi': adapter_name = params['source']['adapter_name'] extra_params = self.device.lookup(adapter_name) # Adds name, adapter_type, wwpn and wwnn to source information params['source'].update(extra_params) params['fc_host_support'] = self.caps.fc_host_support poolDef = StoragePoolDef.create(params) poolDef.prepare(conn) xml = poolDef.xml except KeyError as item: raise MissingParameter( 'KCHPOOL0004E', {'item': str(item), 'name': name}) if name in self.get_list(): raise InvalidOperation('KCHPOOL0001E', {'name': name}) try: if task_id: # Create transient pool for deep scan conn.storagePoolCreateXML(xml, 0) return name pool = conn.storagePoolDefineXML(xml, 0) except libvirt.libvirtError as e: wok_log.error(f'Problem creating Storage Pool: {str(e)}') raise OperationFailed( 'KCHPOOL0007E', {'name': name, 'err': e.get_error_message()} ) # Build and set autostart value to pool # Ignore error as the pool was already successfully created # The build process fails when the pool directory already exists try: if params['type'] in ['logical', 'dir', 'netfs', 'scsi']: pool.build(libvirt.VIR_STORAGE_POOL_BUILD_NEW) pool.setAutostart(1) else: pool.setAutostart(0) except Exception: pass if params['type'] == 'netfs': output, error, returncode = run_command( ['setsebool', '-P', 'virt_use_nfs=1'] ) if error or returncode: wok_log.error( 'Unable to set virt_use_nfs=1. If you use ' 'SELinux, this may prevent NFS pools from ' 'being used.' ) return name def _clean_scan(self, pool_name): try: conn = self.conn.get() pool = conn.storagePoolLookupByName(pool_name) pool.destroy() with self.objstore as session: session.delete('scanning', pool_name) except Exception as e: wok_log.debug(f'Exception {e} occurred when cleaning scan result') def _do_deep_scan(self, params): scan_params = dict(ignore_list=[]) scan_params['scan_path'] = params['path'] params['type'] = 'dir' for pool in self.get_list(): try: res = StoragePoolModel(conn=self.conn, objstore=self.objstore).lookup( pool ) if res['state'] == 'active': scan_params['ignore_list'].append(res['path']) except Exception as e: wok_log.debug(f'Exception {e} occured when get ignore path') params['path'] = self.scanner.scan_dir_prepare(params['name']) scan_params['pool_path'] = params['path'] task_id = AsyncTask( f'/plugins/kimchi/storagepools/{ISO_POOL_NAME}', self.scanner.start_scan, scan_params, ).id # Record scanning-task/storagepool mapping for future querying try: with self.objstore as session: session.store( 'scanning', params['name'], task_id, get_kimchi_version()) return task_id except Exception as e: raise OperationFailed('KCHPOOL0037E', {'err': e.message})
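# For reference, this is roughly the pool XML that _check_default_pools
# builds for the 'default' pool before calling storagePoolDefineXML. A
# self-contained sketch using lxml's stock element maker, which is assumed
# to match the plugin's own E/ET imports.
from lxml import etree as ET
from lxml.builder import E

pool = E.pool(E.name('default'), type='dir')
pool.append(E.target(E.path('/var/lib/libvirt/images')))
print(ET.tostring(pool, pretty_print=True).decode())
# <pool type="dir">
#   <name>default</name>
#   <target>
#     <path>/var/lib/libvirt/images</path>
#   </target>
# </pool>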
def _attach_pci_device(self, vmid, dev_info):
    self._validate_pci_passthrough_env()

    dom = VMModel.get_vm(vmid, self.conn)
    # Due to a libvirt limitation, we don't support live assignment of a
    # device to the vfio driver.
    driver = ('vfio' if DOM_STATE_MAP[dom.info()[0]] == "shutoff" and
              self.caps.kernel_vfio else 'kvm')

    # on PowerKVM systems it must be the vfio driver.
    distro, _, _ = platform.linux_distribution()
    if distro == 'IBM_PowerKVM':
        driver = 'vfio'

    # Attach all PCI devices in the same IOMMU group
    dev_model = DeviceModel(conn=self.conn)
    devs_model = DevicesModel(conn=self.conn)
    affected_names = devs_model.get_list(
        _passthrough_affected_by=dev_info['name'])
    passthrough_names = devs_model.get_list(
        _cap='pci', _passthrough='true')
    group_names = list(set(affected_names) & set(passthrough_names))
    pci_infos = [dev_model.lookup(dev_name) for dev_name in group_names]
    pci_infos.append(dev_info)

    is_multifunction = len(pci_infos) > 1 and \
        DOM_STATE_MAP[dom.info()[0]] == "shutoff"
    pci_infos = sorted(pci_infos, key=itemgetter('name'))

    # all devices in the group that is going to be attached to the vm
    # must be detached from the host first
    with RollbackContext() as rollback:
        for pci_info in pci_infos:
            try:
                dev = self.conn.get().nodeDeviceLookupByName(
                    pci_info['name'])
                dev.dettach()
            except Exception:
                raise OperationFailed('KCHVMHDEV0005E',
                                      {'name': pci_info['name']})
            else:
                rollback.prependDefer(dev.reAttach)

        rollback.commitAll()

    device_flags = get_vm_config_flag(dom, mode='all')

    slot = 0
    if is_multifunction:
        slot = self._available_slot(dom)

    with RollbackContext() as rollback:
        for pci_info in pci_infos:
            pci_info['detach_driver'] = driver
            xmlstr = self._get_pci_device_xml(pci_info, slot,
                                              is_multifunction)
            try:
                dom.attachDeviceFlags(xmlstr, device_flags)
            except libvirt.libvirtError:
                wok_log.error(
                    'Failed to attach host device %s to VM %s: \n%s',
                    pci_info['name'], vmid, xmlstr)
                raise

            rollback.prependDefer(dom.detachDeviceFlags, xmlstr,
                                  device_flags)

        rollback.commitAll()

    return dev_info['name']
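# The XML handed to attachDeviceFlags by the method above comes from
# _get_pci_device_xml. A sketch of the shape produced for function 0 of a
# multifunction passthrough; all address values here are illustrative, and
# the driver name depends on the vfio/kvm choice made earlier.
from lxml import etree
from lxml.builder import E

source = E.source(E.address(domain='0', bus='1', slot='0', function='0'))
driver = E.driver(name='vfio')
guest_addr = E.address(type='pci', domain='0', bus='0', slot='5',
                       function='0', multifunction='on')
hostdev = E.hostdev(source, driver, guest_addr,
                    mode='subsystem', type='pci', managed='yes')
# Prints roughly:
# <hostdev mode="subsystem" type="pci" managed="yes">
#   <source>
#     <address domain="0" bus="1" slot="0" function="0"/>
#   </source>
#   <driver name="vfio"/>
#   <address type="pci" domain="0" bus="0" slot="5" function="0" multifunction="on"/>
# </hostdev>
print(etree.tostring(hostdev, pretty_print=True).decode())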
class VMHostDevModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] self.objstore = kargs['objstore'] self.events = kargs['eventsloop'] self.task = TaskModel(**kargs) self.devs_model = DevicesModel(**kargs) self.dev_model = DeviceModel(**kargs) self._cb = None self.events.registerDetachDevicesEvent(self.conn, self._event_devices, self) def lookup(self, vmid, dev_name): dom = VMModel.get_vm(vmid, self.conn) xmlstr = dom.XMLDesc(0) root = objectify.fromstring(xmlstr) try: hostdev = root.devices.hostdev except AttributeError: raise NotFoundError('KCHVMHDEV0001E', { 'vmid': vmid, 'dev_name': dev_name }) for e in hostdev: deduced_name = DeviceModel.deduce_dev_name(e, self.conn) if deduced_name == dev_name: dev_info = self.dev_model.lookup(dev_name) return { 'name': dev_name, 'type': e.attrib['type'], 'product': dev_info.get('product', None), 'vendor': dev_info.get('vendor', None), 'multifunction': dev_info.get('multifunction', None), 'vga3d': dev_info.get('vga3d', None) } raise NotFoundError('KCHVMHDEV0001E', { 'vmid': vmid, 'dev_name': dev_name }) def delete(self, vmid, dev_name): dom = VMModel.get_vm(vmid, self.conn) xmlstr = dom.XMLDesc(0) root = objectify.fromstring(xmlstr) try: hostdev = root.devices.hostdev except AttributeError: raise NotFoundError('KCHVMHDEV0001E', { 'vmid': vmid, 'dev_name': dev_name }) task_params = { 'vmid': vmid, 'dev_name': dev_name, 'dom': dom, 'hostdev': hostdev, 'lock': threading.RLock() } task_uri = u'/plugins/kimchi/vms/%s/hostdevs/%s' % \ (VMModel.get_vm(vmid, self.conn).name(), dev_name) taskid = AsyncTask(task_uri, self._detach_device, task_params).id return self.task.lookup(taskid) def _event_devices(self, conn, dom, alias, opaque): """ Callback to handle add/remove devices event """ if opaque._cb is None: wok_log.error('opaque must be valid') return wok_log.info("Device %s removed successfully" % alias) # Re-attach device to host if it's not managed mode if not opaque._managed: try: dev = conn.get().nodeDeviceLookupByName(alias) dev.reAttach() except libvirt.libvirtError, e: wok_log.error( "Unable to attach device %s back to host. Error: %s", alias, e.message) else:
class VMHostDevsModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] self.objstore = kargs['objstore'] self.events = kargs['eventsloop'] self.caps = CapabilitiesModel(**kargs) self.devs_model = DevicesModel(**kargs) self.dev_model = DeviceModel(**kargs) self.task = TaskModel(**kargs) self._cb = None self.events.registerAttachDevicesEvent( self.conn, self._event_devices, self) def get_list(self, vmid): dom = VMModel.get_vm(vmid, self.conn) xmlstr = dom.XMLDesc(0) root = objectify.fromstring(xmlstr) try: hostdev = root.devices.hostdev except AttributeError: return [] return [DeviceModel.deduce_dev_name(e, self.conn) for e in hostdev] def _passthrough_device_validate(self, dev_name): eligible_dev_names = self.devs_model.get_list(_passthrough='true') if dev_name not in eligible_dev_names: raise InvalidParameter('KCHVMHDEV0002E', {'dev_name': dev_name}) def _event_devices(self, conn, dom, alias, opaque): """ Callback to handle add/remove devices event """ if opaque._cb is None: wok_log.error('opaque must be valid') return wok_log.info("Device %s added successfuly" % alias) opaque._cb('OK', True) def create(self, vmid, params): dev_name = params['name'] dev_info = self.dev_model.lookup(dev_name) if dev_info['device_type'] == 'pci': taskid = AsyncTask(u'/plugins/kimchi/vms/%s/hostdevs/' % VMModel.get_vm(vmid, self.conn).name(), self._attach_pci_device, {'vmid': vmid, 'dev_info': dev_info, 'lock': threading.RLock()}).id return self.task.lookup(taskid) with RollbackContext() as rollback: try: dev = self.conn.get().nodeDeviceLookupByName(dev_name) dev.dettach() except Exception: raise OperationFailed('KCHVMHDEV0005E', {'name': dev_name}) else: rollback.prependDefer(dev.reAttach) rollback.commitAll() taskid = AsyncTask(u'/plugins/kimchi/vms/%s/hostdevs/' % VMModel.get_vm(vmid, self.conn).name(), '_attach_%s_device' % dev_info['device_type'], {'vmid': vmid, 'dev_info': dev_info, 'lock': threading.RLock()}).id return self.task.lookup(taskid) def _get_pci_devices_xml(self, pci_infos, slot, driver): hostdevs = '' # all devices included in the xml will be sorted in reverse (the # function 0 will be the last one) and will include the guest # address details for dev_info in sorted(pci_infos, key=itemgetter('function'), reverse=True): dev_info['detach_driver'] = driver hostdevs += self._get_pci_device_xml(dev_info, slot, True) return '<devices>%s</devices>' % hostdevs def have_usb_controller(self, vmid): dom = VMModel.get_vm(vmid, self.conn) root = objectify.fromstring(dom.XMLDesc(0)) try: controllers = root.devices.controller except AttributeError: return False for controller in controllers: if 'model' not in controller.attrib: continue if controller.attrib['type'] == 'usb' and \ controller.attrib['model'] in USB_MODELS_PCI_HOTPLUG: return True return False def _get_pci_device_xml(self, dev_info, slot, is_multifunction): if 'detach_driver' not in dev_info: dev_info['detach_driver'] = 'kvm' source = E.source(E.address(domain=str(dev_info['domain']), bus=str(dev_info['bus']), slot=str(dev_info['slot']), function=str(dev_info['function']))) driver = E.driver(name=dev_info['detach_driver']) if is_multifunction: if dev_info['function'] == 0: multi = E.address(type='pci', domain='0', bus='0', slot=str(slot), function=str(dev_info['function']), multifunction='on') else: multi = E.address(type='pci', domain='0', bus='0', slot=str(slot), function=str(dev_info['function'])) host_dev = E.hostdev(source, driver, multi, mode='subsystem', type='pci', managed='yes') else: host_dev = E.hostdev(source, 
driver, mode='subsystem', type='pci', managed='yes') return etree.tostring(host_dev) @staticmethod def _validate_pci_passthrough_env(): # Linux kernel < 3.5 doesn't provide /sys/kernel/iommu_groups if os.path.isdir('/sys/kernel/iommu_groups'): if not glob.glob('/sys/kernel/iommu_groups/*'): raise InvalidOperation("KCHVMHDEV0003E") # Enable virt_use_sysfs on RHEL6 and older distributions # In recent Fedora, there is no virt_use_sysfs. out, err, rc = run_command(['getsebool', 'virt_use_sysfs'], silent=True) if rc == 0 and out.rstrip('\n') != "virt_use_sysfs --> on": out, err, rc = run_command(['setsebool', '-P', 'virt_use_sysfs=on']) if rc != 0: wok_log.warning("Unable to turn on sebool virt_use_sysfs")
def _available_slot(self, dom): xmlstr = dom.XMLDesc(0) root = objectify.fromstring(xmlstr) slots = [] try: devices = root.devices slots = [self.dev_model._toint(dev.attrib['slot']) for dev in devices.findall('.//address') if 'slot' in dev.attrib] except AttributeError: return 1 slots = sorted(slots) free = 0 for free, slot in enumerate(slots, start=1): if free < slot: return free return free + 1
def _attach_pci_device(self, cb, params): cb('Attaching PCI device') self._cb = cb vmid = params['vmid'] dev_info = params['dev_info'] lock = params['lock'] try: self._passthrough_device_validate(dev_info['name']) except InvalidParameter as e: cb(e.message, False) raise with lock: try: self._validate_pci_passthrough_env() except InvalidOperation as e: cb(e.message, False) raise dom = VMModel.get_vm(vmid, self.conn) driver = 'vfio' if self.caps.kernel_vfio else 'kvm' # 'vfio' systems require a USB controller in order to support PCI # hotplug on Power. if driver == 'vfio' and platform.machine().startswith('ppc') and \ DOM_STATE_MAP[dom.info()[0]] != "shutoff" and \ not self.have_usb_controller(vmid): msg = WokMessage('KCHVMHDEV0008E', {'vmid': vmid}) cb(msg.get_text(), False) raise InvalidOperation("KCHVMHDEV0008E", {'vmid': vmid}) # Attach all PCI devices in the same IOMMU group affected_names = self.devs_model.get_list( _passthrough_affected_by=dev_info['name']) passthrough_names = self.devs_model.get_list( _cap='pci', _passthrough='true') group_names = list(set(affected_names) & set(passthrough_names)) pci_infos = [self.dev_model.lookup(dev_name) for dev_name in group_names] pci_infos.append(dev_info) is_multifunction = len(pci_infos) > 1 pci_infos = sorted(pci_infos, key=itemgetter('name')) # hot-plug of 3D graphic cards is not allowed is_3D_device = self.dev_model.is_device_3D_controller(dev_info) if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != "shutoff": msg = WokMessage('KCHVMHDEV0006E', {'name': dev_info['name']}) cb(msg.get_text(), False) raise InvalidOperation('KCHVMHDEV0006E', {'name': dev_info['name']}) # all devices in the group being attached to the VM must be # detached from the host first with RollbackContext() as rollback: for pci_info in pci_infos: try: dev = self.conn.get().nodeDeviceLookupByName( pci_info['name']) dev.dettach() except Exception: msg = WokMessage('KCHVMHDEV0005E', {'name': pci_info['name']}) cb(msg.get_text(), False) raise OperationFailed('KCHVMHDEV0005E', {'name': pci_info['name']}) else: rollback.prependDefer(dev.reAttach) rollback.commitAll() device_flags = get_vm_config_flag(dom, mode='all') # when attaching a 3D graphic device it might be necessary to # increase the window size memory in order to be able to attach # more than one device to the same guest if is_3D_device: self.update_mmio_guest(vmid, True) slot = 0 if is_multifunction: # search for the first available slot in the guest XML slot = self._available_slot(dom) with RollbackContext() as rollback: # multifunction: try to attach all functions together within one # XML file. This requires libvirt support. if is_multifunction: xmlstr = self._get_pci_devices_xml(pci_infos, slot, driver) try: dom.attachDeviceFlags(xmlstr, device_flags) except libvirt.libvirtError: # If the operation fails, fall back to attaching each # function individually pass else: rollback.prependDefer(dom.detachDeviceFlags, xmlstr, device_flags) rollback.commitAll() if DOM_STATE_MAP[dom.info()[0]] == "shutoff": cb('OK', True) return # attach each function individually (multi or single function) for pci_info in pci_infos: pci_info['detach_driver'] = driver xmlstr = self._get_pci_device_xml(pci_info, slot, is_multifunction) try: dom.attachDeviceFlags(xmlstr, device_flags) except libvirt.libvirtError: msg = WokMessage('KCHVMHDEV0007E', {'device': pci_info['name'], 'vm': vmid}) cb(msg.get_text(), False) wok_log.error( 'Failed to attach host device %s to VM %s: \n%s', pci_info['name'], vmid, xmlstr) raise rollback.prependDefer(dom.detachDeviceFlags, xmlstr, device_flags) rollback.commitAll() if DOM_STATE_MAP[dom.info()[0]] == "shutoff": cb('OK', True)
def _count_3D_devices_attached(self, dom): counter = 0 root = objectify.fromstring(dom.XMLDesc(0)) try: hostdev = root.devices.hostdev except AttributeError: return counter for device in hostdev: if device.attrib['type'] != 'pci': continue name = DeviceModel.deduce_dev_name(device, self.conn) info = self.dev_model.lookup(name) if 'vga3d' in info and info['vga3d']: counter += 1 return counter
def update_mmio_guest(self, vmid, is_attaching): dom = VMModel.get_vm(vmid, self.conn) # get the number of 3D graphic cards already attached to the guest; # based on this number we decide whether the window memory size # needs to be increased counter = self._count_3D_devices_attached(dom) if counter == 0 and is_attaching: return size = 0 if is_attaching: # suppose this is the 3rd graphic card to be attached to the same # guest, counter will be 2+1 (2 existing + this attachment) times # 32G (0x800000000) size = hex((counter + 1) * WINDOW_SIZE_BAR) else: size = hex(counter * WINDOW_SIZE_BAR) # if the guest XML already has the field we simply update its # value, otherwise we add the new field new_xml = self._update_win_memory_size(dom, counter, size) if new_xml is None and is_attaching: new_xml = self._add_win_memory_size(dom, size) # update the XML if new_xml is not None: self.conn.get().defineXML(new_xml)
def _update_win_memory_size(self, dom, counter, wnd_size): root = objectify.fromstring(dom.XMLDesc(0)) # look for the existing argument in <qemu:commandline> and try # to update its value (or remove it if there is only one, or no, # graphic card attached).
cmdline = root.findall('{%s}commandline' % QEMU_NAMESPACE) for line in cmdline: for arg in line.iterchildren(): if not arg.values()[0].startswith(CMDLINE_FIELD_NAME): continue if counter > 1: arg.set('value', CMDLINE_FIELD_NAME + '=' + wnd_size) else: line.remove(arg.getprevious()) line.remove(arg) return etree.tostring(root, encoding='utf-8', pretty_print=True) return None
def _add_win_memory_size(self, dom, wnd_size): root = objectify.fromstring(dom.XMLDesc(0)) val = CMDLINE_FIELD_NAME + '=' + wnd_size cmdline = root.find('{%s}commandline' % QEMU_NAMESPACE) # <qemu:commandline> doesn't exist, create the full commandline xml # with the required values and return if cmdline is None: args = {} args['-global'] = val root.append(etree.fromstring(get_qemucmdline_xml(args))) return etree.tostring(root, encoding='utf-8', pretty_print=True) # <qemu:commandline> exists but there is no <qemu:arg value='-global'>, # so we add the missing arguments inside the existing cmdline EM = ElementMaker(namespace=QEMU_NAMESPACE, nsmap={'qemu': QEMU_NAMESPACE}) cmdline.append(EM.arg(value='-global')) cmdline.append(EM.arg(value=val)) return etree.tostring(root, encoding='utf-8', pretty_print=True)
def _get_scsi_device_xml(self, dev_info): adapter = E.adapter(name=('scsi_host%s' % dev_info['host'])) address = E.address(type='scsi', bus=str(dev_info['bus']), target=str(dev_info['target']), unit=str(dev_info['lun'])) host_dev = E.hostdev(E.source(adapter, address), mode='subsystem', type='scsi', sgio='unfiltered') return etree.tostring(host_dev)
def _attach_scsi_device(self, cb, params): cb('Attaching SCSI device...') self._cb = cb vmid = params['vmid'] dev_info = params['dev_info'] lock = params['lock'] try: self._passthrough_device_validate(dev_info['name']) except InvalidParameter as e: cb(e.message, False) raise with lock: dom = VMModel.get_vm(vmid, self.conn) with RollbackContext() as rollback: xmlstr = self._get_scsi_device_xml(dev_info) device_flags = get_vm_config_flag(dom, mode='all') try: cb('Attaching device to VM') dom.attachDeviceFlags(xmlstr, device_flags) except libvirt.libvirtError: msg = WokMessage('KCHVMHDEV0007E', {'device': dev_info['name'], 'vm': vmid}) cb(msg.get_text(), False) wok_log.error( 'Failed to attach host device %s to VM %s: \n%s', dev_info['name'], vmid, xmlstr) raise rollback.prependDefer(dom.detachDeviceFlags, xmlstr, device_flags) rollback.commitAll() if DOM_STATE_MAP[dom.info()[0]] == "shutoff": cb('OK', True)
def _get_usb_device_xml(self, dev_info): source = E.source( E.vendor(id=dev_info['vendor']['id']), E.product(id=dev_info['product']['id']), E.address(bus=str(dev_info['bus']), device=str(dev_info['device'])), startupPolicy='optional') host_dev = E.hostdev(source, mode='subsystem', type='usb', managed='yes') return etree.tostring(host_dev)
def _attach_usb_device(self, cb, params): cb('Attaching USB device...') self._cb = cb vmid = params['vmid'] dev_info = params['dev_info'] dom = VMModel.get_vm(vmid, self.conn) lock = params['lock'] try: self._passthrough_device_validate(dev_info['name']) except InvalidParameter as e: cb(e.message, False) raise with lock: with RollbackContext() as rollback: xmlstr = self._get_usb_device_xml(dev_info) device_flags = get_vm_config_flag(dom, mode='all') try: cb('Attaching device to VM') dom.attachDeviceFlags(xmlstr, device_flags) except libvirt.libvirtError: msg = WokMessage('KCHVMHDEV0007E', {'device': dev_info['name'], 'vm': vmid}) cb(msg.get_text(), False) wok_log.error( 'Failed to attach host device %s to VM %s: \n%s', dev_info['name'],
vmid, xmlstr) raise rollback.prependDefer(dom.detachDeviceFlags, xmlstr, device_flags) rollback.commitAll() if DOM_STATE_MAP[dom.info()[0]] == "shutoff": cb('OK', True)
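# The window-size handling above reduces to simple arithmetic: each attached
# 3D graphics card reserves one WINDOW_SIZE_BAR worth of MMIO window, and the
# value written into the qemu '-global' argument is the total for all cards.
# A minimal, stand-alone sketch of that computation follows; the constant
# value (32 GiB per card) is an assumption for illustration, not necessarily
# the module's real definition.
WINDOW_SIZE_BAR = 0x800000000        # assumed: 32 GiB window per 3D card

def mmio_window_value(counter, is_attaching):
    """Mirror the size computation in update_mmio_guest (illustrative only)."""
    if counter == 0 and is_attaching:
        return None  # first 3D card: no window-size override is written
    cards = counter + 1 if is_attaching else counter
    return hex(cards * WINDOW_SIZE_BAR)

# Two cards already attached, attaching a third: 3 * 32 GiB.
print(mmio_window_value(2, True))    # '0x1800000000'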
def _attach_pci_device(self, cb, params): cb('Attaching PCI device') vmid = params['vmid'] dev_info = params['dev_info'] self._validate_pci_passthrough_env() dom = VMModel.get_vm(vmid, self.conn) # Due to a libvirt limitation, we don't support live assignment of # devices to the vfio driver. driver = ('vfio' if DOM_STATE_MAP[dom.info()[0]] == "shutoff" and self.caps.kernel_vfio else 'kvm') # on PowerKVM systems it must be the vfio driver. distro, _, _ = platform.linux_distribution() if distro == 'IBM_PowerKVM': driver = 'vfio' # Attach all PCI devices in the same IOMMU group dev_model = DeviceModel(conn=self.conn) devs_model = DevicesModel(conn=self.conn) affected_names = devs_model.get_list( _passthrough_affected_by=dev_info['name']) passthrough_names = devs_model.get_list(_cap='pci', _passthrough='true') group_names = list(set(affected_names) & set(passthrough_names)) pci_infos = [dev_model.lookup(dev_name) for dev_name in group_names] pci_infos.append(dev_info) is_multifunction = len(pci_infos) > 1 pci_infos = sorted(pci_infos, key=itemgetter('name')) # hot-plug of 3D graphic cards is not allowed is_3D_device = dev_model.is_device_3D_controller(dev_info) if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != "shutoff": raise InvalidOperation('KCHVMHDEV0006E', {'name': dev_info['name']}) # all devices in the group being attached to the VM must be detached # from the host first with RollbackContext() as rollback: for pci_info in pci_infos: try: dev = self.conn.get().nodeDeviceLookupByName( pci_info['name']) dev.dettach() except Exception: raise OperationFailed('KCHVMHDEV0005E', {'name': pci_info['name']}) else: rollback.prependDefer(dev.reAttach) rollback.commitAll() device_flags = get_vm_config_flag(dom, mode='all') # when attaching a 3D graphic device it might be necessary to increase # the window size memory in order to be able to attach more than one # device to the same guest if is_3D_device: self.update_mmio_guest(vmid, True) slot = 0 if is_multifunction: # search for the first available slot in the guest XML slot = self._available_slot(dom) with RollbackContext() as rollback: # multifunction hotplug is a special case where all functions # must be attached together within one XML file; the same does # not happen with multifunction coldplug, where each function is # attached individually if DOM_STATE_MAP[dom.info()[0]] != 'shutoff' and is_multifunction: xmlstr = self._get_pci_devices_xml(pci_infos, slot, driver) try: dom.attachDeviceFlags(xmlstr, device_flags) except libvirt.libvirtError: wok_log.error( 'Failed to attach multifunction device to VM %s: \n%s', vmid, xmlstr) raise rollback.prependDefer(dom.detachDeviceFlags, xmlstr, device_flags) rollback.commitAll() cb('OK', True) return for pci_info in pci_infos: pci_info['detach_driver'] = driver cb('Reading source device XML') xmlstr = self._get_pci_device_xml(pci_info, slot, is_multifunction) try: cb('Attaching device to VM') dom.attachDeviceFlags(xmlstr, device_flags) except libvirt.libvirtError: wok_log.error( 'Failed to attach host device %s to VM %s: \n%s', pci_info['name'], vmid, xmlstr) raise rollback.prependDefer(dom.detachDeviceFlags, xmlstr, device_flags) rollback.commitAll() cb('OK', True)
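# The driver choice at the top of this variant encodes a simple rule: vfio is
# used only for cold-plug (guest shut off) on kernels that support it, except
# on IBM_PowerKVM hosts where vfio is always required. A stand-alone
# restatement of that rule, written only as an illustration; the inputs
# correspond to DOM_STATE_MAP, caps.kernel_vfio and platform.linux_distribution()
# in the code above.
def pick_passthrough_driver(vm_is_shutoff, kernel_vfio, distro):
    # libvirt limitation: no live assignment through vfio here, so vfio is
    # only chosen when the guest is shut off and the kernel supports it.
    driver = 'vfio' if vm_is_shutoff and kernel_vfio else 'kvm'
    # PowerKVM hosts must always use the vfio driver.
    if distro == 'IBM_PowerKVM':
        driver = 'vfio'
    return driver

assert pick_passthrough_driver(True, True, 'Fedora') == 'vfio'
assert pick_passthrough_driver(False, True, 'Fedora') == 'kvm'
assert pick_passthrough_driver(False, False, 'IBM_PowerKVM') == 'vfio'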
class VMHostDevModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] self.objstore = kargs['objstore'] self.events = kargs['eventsloop'] self.task = TaskModel(**kargs) self.devs_model = DevicesModel(**kargs) self.dev_model = DeviceModel(**kargs) self._cb = None self.events.registerDetachDevicesEvent(self.conn, self._event_devices, self)
def lookup(self, vmid, dev_name): dom = VMModel.get_vm(vmid, self.conn) xmlstr = dom.XMLDesc(0) root = objectify.fromstring(xmlstr) try: hostdev = root.devices.hostdev except AttributeError: raise NotFoundError('KCHVMHDEV0001E', { 'vmid': vmid, 'dev_name': dev_name }) for e in hostdev: deduced_name = DeviceModel.deduce_dev_name(e, self.conn) if deduced_name == dev_name: dev_info = self.dev_model.lookup(dev_name) return { 'name': dev_name, 'type': e.attrib['type'], 'product': dev_info.get('product', None), 'vendor': dev_info.get('vendor', None), 'multifunction': dev_info.get('multifunction', None), 'vga3d': dev_info.get('vga3d', None) } raise NotFoundError('KCHVMHDEV0001E', { 'vmid': vmid, 'dev_name': dev_name })
def delete(self, vmid, dev_name): dom = VMModel.get_vm(vmid, self.conn) xmlstr = dom.XMLDesc(0) root = objectify.fromstring(xmlstr) try: hostdev = root.devices.hostdev except AttributeError: raise NotFoundError('KCHVMHDEV0001E', { 'vmid': vmid, 'dev_name': dev_name }) task_params = { 'vmid': vmid, 'dev_name': dev_name, 'dom': dom, 'hostdev': hostdev, 'lock': threading.RLock() } task_uri = u'/plugins/kimchi/vms/%s/hostdevs/%s' % \ (VMModel.get_vm(vmid, self.conn).name(), dev_name) taskid = AsyncTask(task_uri, self._detach_device, task_params).id return self.task.lookup(taskid)
def _event_devices(self, conn, dom, alias, opaque): """ Callback to handle add/remove device events """ if opaque._cb is None: wok_log.error('opaque must be valid') return wok_log.info("Device %s removed successfully" % alias) opaque._cb('OK', True)
def _detach_device(self, cb, params): cb('Detaching device') self._cb = cb vmid = params['vmid'] dev_name = params['dev_name'] dom = params['dom'] hostdev = params['hostdev'] lock = params['lock'] with lock: pci_devs = [(DeviceModel.deduce_dev_name(e, self.conn), e) for e in hostdev if e.attrib['type'] == 'pci'] dev_info = self.dev_model.lookup(dev_name) is_3D_device = self.dev_model.is_device_3D_controller(dev_info) if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != "shutoff": raise InvalidOperation('KCHVMHDEV0006E', {'name': dev_info['name']}) # check for multifunction and detach all functions together try: multi = self._unplug_multifunction_pci(dom, hostdev, dev_name) except libvirt.libvirtError: multi = False # successfully detached all functions: finish operation if multi: if is_3D_device: devsmodel = VMHostDevsModel(conn=self.conn) devsmodel.update_mmio_guest(vmid, False) if DOM_STATE_MAP[dom.info()[0]] == "shutoff": cb('OK', True) return # detach each function individually for e in hostdev: if DeviceModel.deduce_dev_name(e, self.conn) == dev_name: xmlstr = etree.tostring(e) dom.detachDeviceFlags(xmlstr, get_vm_config_flag(dom, mode='all')) if e.attrib['type'] == 'pci': self._delete_affected_pci_devices( dom, dev_name, pci_devs) if is_3D_device: devsmodel = VMHostDevsModel(conn=self.conn) devsmodel.update_mmio_guest(vmid, False) break else: msg = WokMessage('KCHVMHDEV0001E', { 'vmid': vmid, 'dev_name': dev_name }) cb(msg.get_text(), False) raise NotFoundError('KCHVMHDEV0001E', { 'vmid': vmid, 'dev_name': dev_name }) if DOM_STATE_MAP[dom.info()[0]] == "shutoff": cb('OK', True)
def _get_devices_same_addr(self, hostdev, domain, bus, slot): devices = [] for device in hostdev: if device.attrib['type'] != 'pci': continue address = device.source.address if int(address.attrib['domain'], 16) != domain or \ int(address.attrib['bus'], 16) != bus or \ int(address.attrib['slot'], 16) != slot: continue devices.append(etree.tostring(device)) return devices
def _unplug_multifunction_pci(self, dom, hostdev, dev_name): # get all devices attached to the guest in the same domain+bus+slot # as the one we are going to detach, because they must be detached # together domain, bus, slot, _ = dev_name.split('_')[1:] devices = self._get_devices_same_addr(hostdev, int(domain, 16), int(bus, 16), int(slot, 16)) if len(devices) <= 1: return False devices_xml = '<devices>%s</devices>' % ''.join(devices) dom.detachDeviceFlags(devices_xml, get_vm_config_flag(dom, mode='all')) return True
def _delete_affected_pci_devices(self, dom, dev_name, pci_devs): try: self.dev_model.lookup(dev_name) except NotFoundError: return affected_names = set( DevicesModel(conn=self.conn).get_list( _passthrough_affected_by=dev_name)) for pci_name, e in pci_devs: if pci_name in affected_names: xmlstr = etree.tostring(e) dom.detachDeviceFlags(xmlstr, get_vm_config_flag(dom, mode='all'))
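# _unplug_multifunction_pci relies on libvirt's node-device naming scheme,
# where a PCI device name such as 'pci_0000_28_00_1' encodes domain, bus,
# slot and function as hexadecimal fields. A short sketch of that split,
# using a made-up device name:
dev_name = 'pci_0000_28_00_1'        # hypothetical example name
domain, bus, slot, function = dev_name.split('_')[1:]
print(int(domain, 16), int(bus, 16), int(slot, 16), int(function, 16))
# -> 0 40 0 1  (all functions sharing domain/bus/slot are detached together)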
class VMHostDevModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] self.objstore = kargs['objstore'] self.events = kargs['eventsloop'] self.task = TaskModel(**kargs) self.devs_model = DevicesModel(**kargs) self.dev_model = DeviceModel(**kargs) self._cb = None self.events.registerDetachDevicesEvent( self.conn, self._event_devices, self)
def lookup(self, vmid, dev_name): dom = VMModel.get_vm(vmid, self.conn) xmlstr = dom.XMLDesc(0) root = objectify.fromstring(xmlstr) try: hostdev = root.devices.hostdev except AttributeError: raise NotFoundError('KCHVMHDEV0001E', {'vmid': vmid, 'dev_name': dev_name}) for e in hostdev: deduced_name = DeviceModel.deduce_dev_name(e, self.conn) if deduced_name == dev_name: dev_info = self.dev_model.lookup(dev_name) return {'name': dev_name, 'type': e.attrib['type'], 'product': dev_info.get('product', None), 'vendor': dev_info.get('vendor', None), 'multifunction': dev_info.get('multifunction', None), 'vga3d': dev_info.get('vga3d', None)} raise NotFoundError('KCHVMHDEV0001E', {'vmid': vmid, 'dev_name': dev_name})
def delete(self, vmid, dev_name): dom = VMModel.get_vm(vmid, self.conn) xmlstr = dom.XMLDesc(0) root = objectify.fromstring(xmlstr) try: hostdev = root.devices.hostdev except AttributeError: raise NotFoundError('KCHVMHDEV0001E', {'vmid': vmid, 'dev_name': dev_name}) task_params = {'vmid': vmid, 'dev_name': dev_name, 'dom': dom, 'hostdev': hostdev, 'lock': threading.RLock()} task_uri = u'/plugins/kimchi/vms/%s/hostdevs/%s' % \ (VMModel.get_vm(vmid, self.conn).name(), dev_name) taskid = AsyncTask(task_uri, self._detach_device, task_params).id return self.task.lookup(taskid)
def _event_devices(self, conn, dom, alias, opaque): """ Callback to handle add/remove device events """ if opaque._cb is None: wok_log.error('opaque must be valid') return wok_log.info("Device %s removed successfully" % alias) # Re-attach device to host if it's not managed mode if not opaque._managed: try: dev = conn.get().nodeDeviceLookupByName(alias) dev.reAttach() except libvirt.libvirtError as e: wok_log.error( "Unable to attach device %s back to host. Error: %s", alias, e.message ) else:
class StoragePoolsModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] self.objstore = kargs['objstore'] self.scanner = Scanner(self._clean_scan) self.scanner.delete() self.caps = CapabilitiesModel(**kargs) self.device = DeviceModel(**kargs) if self.conn.get() is not None: if self.conn.isQemuURI(): self._check_default_pools() def _check_default_pools(self): pools = {} # Don't create default pool if it's not # explicitly specified in template.conf if is_s390x() and 'pool' not in tmpl_defaults['disks'][0]: return default_pool = tmpl_defaults['disks'][0]['pool']['name'] default_pool = default_pool.split('/')[-1] pools[default_pool] = {} if default_pool == 'default': pools[default_pool] = {'path': '/var/lib/libvirt/images'} if config.get('kimchi', {}).get('create_iso_pool', False): pools['ISO'] = {'path': '/var/lib/kimchi/isos'} conn = self.conn.get() for pool_name in pools: error_msg = ('Storage pool %s does not exist or is not ' 'active. Please, check the configuration in ' '%s/template.conf to ensure it lists only valid ' 'storage.' % (pool_name, kimchiPaths.sysconf_dir)) try: pool = conn.storagePoolLookupByName(pool_name) except libvirt.libvirtError as e: pool_path = pools[pool_name].get('path') if pool_path is None: wok_log.error( f'Fatal: Unable to find storage pool {pool_name}.') wok_log.error(f'Details: {str(e)}') raise Exception(error_msg) # Try to create the pool pool = E.pool(E.name(pool_name), type='dir') pool.append(E.target(E.path(pool_path))) xml = ET.tostring(pool) try: pool = conn.storagePoolDefineXML(xml, 0) except libvirt.libvirtError as e: wok_log.error( f'Fatal: Unable to create storage pool {pool_name}.') wok_log.error(f'Details: {str(e)}') raise Exception(error_msg) # Build and set autostart value to pool # Ignore error as the pool was already successfully created try: # Add build step to make sure target directory created # The build process may fail when the pool directory # already exists on system pool.build(libvirt.VIR_STORAGE_POOL_BUILD_NEW) pool.setAutostart(1) except Exception: pass if pool.isActive() == 0: try: pool.create(0) except libvirt.libvirtError as e: wok_log.error( f'Fatal: Unable to create storage pool {pool_name}.') wok_log.error(f'Details: {str(e)}') raise Exception(error_msg) def get_list(self): try: conn = self.conn.get() names = conn.listStoragePools() names += conn.listDefinedStoragePools() return sorted(names) except libvirt.libvirtError as e: raise OperationFailed('KCHPOOL0006E', {'err': e.get_error_message()}) def _check_lvm(self, name, from_vg): vgdisplay_cmd = ['vgdisplay', name] output, error, returncode = run_command(vgdisplay_cmd) # From vgdisplay error codes: # 1 error reading VGDA # 2 volume group doesn't exist # 3 not all physical volumes of volume group online # 4 volume group not found # 5 no volume groups found at all # 6 error reading VGDA from lvmtab if from_vg and returncode in [2, 4, 5]: raise InvalidOperation('KCHPOOL0038E', {'name': name}) if not from_vg and returncode not in [2, 4, 5]: raise InvalidOperation('KCHPOOL0036E', {'name': name}) def create(self, params): task_id = None conn = self.conn.get() from_vg = params.get('source', {}).get('from_vg', False) try: name = params['name'] if name == ISO_POOL_NAME: raise InvalidOperation('KCHPOOL0031E') # The user may want to create a logical pool with the same name # used before but a volume group will already exist with this name # So check the volume group does not exist to create the pool if params['type'] == 'logical': self._check_lvm(name, from_vg) if 
params['type'] == 'kimchi-iso': task_id = self._do_deep_scan(params) if params['type'] == 'scsi': adapter_name = params['source']['adapter_name'] extra_params = self.device.lookup(adapter_name) # Adds name, adapter_type, wwpn and wwnn to source information params['source'].update(extra_params) params['fc_host_support'] = self.caps.fc_host_support poolDef = StoragePoolDef.create(params) poolDef.prepare(conn) xml = poolDef.xml except KeyError as item: raise MissingParameter('KCHPOOL0004E', { 'item': str(item), 'name': name }) if name in self.get_list(): raise InvalidOperation('KCHPOOL0001E', {'name': name}) try: if task_id: # Create transient pool for deep scan conn.storagePoolCreateXML(xml, 0) return name pool = conn.storagePoolDefineXML(xml, 0) except libvirt.libvirtError as e: wok_log.error(f'Problem creating Storage Pool: {str(e)}') raise OperationFailed('KCHPOOL0007E', { 'name': name, 'err': e.get_error_message() }) # Build and set autostart value to pool # Ignore error as the pool was already successfully created # The build process fails when the pool directory already exists try: if params['type'] in ['logical', 'dir', 'netfs', 'scsi']: pool.build(libvirt.VIR_STORAGE_POOL_BUILD_NEW) pool.setAutostart(1) else: pool.setAutostart(0) except Exception: pass if params['type'] == 'netfs': output, error, returncode = run_command( ['setsebool', '-P', 'virt_use_nfs=1']) if error or returncode: wok_log.error('Unable to set virt_use_nfs=1. If you use ' 'SELinux, this may prevent NFS pools from ' 'being used.') return name
def _clean_scan(self, pool_name): try: conn = self.conn.get() pool = conn.storagePoolLookupByName(pool_name) pool.destroy() with self.objstore as session: session.delete('scanning', pool_name) except Exception as e: wok_log.debug(f'Exception {e} occurred when cleaning scan result')
def _do_deep_scan(self, params): scan_params = dict(ignore_list=[]) scan_params['scan_path'] = params['path'] params['type'] = 'dir' for pool in self.get_list(): try: res = StoragePoolModel(conn=self.conn, objstore=self.objstore).lookup(pool) if res['state'] == 'active': scan_params['ignore_list'].append(res['path']) except Exception as e: wok_log.debug(f'Exception {e} occurred when getting ignore path') params['path'] = self.scanner.scan_dir_prepare(params['name']) scan_params['pool_path'] = params['path'] task_id = AsyncTask( f'/plugins/kimchi/storagepools/{ISO_POOL_NAME}', self.scanner.start_scan, scan_params, ).id # Record scanning-task/storagepool mapping for future querying try: with self.objstore as session: session.store('scanning', params['name'], task_id, get_kimchi_version()) return task_id except Exception as e: raise OperationFailed('KCHPOOL0037E', {'err': e.message})
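# _check_default_pools (and StoragePoolDef for the other pool types) hands
# libvirt a small XML document describing the pool. A stand-alone sketch of
# the dir-type pool XML built above, with a hypothetical name and path:
from lxml import etree
from lxml.builder import E

pool_name = 'default'                    # hypothetical values; the real ones
pool_path = '/var/lib/libvirt/images'    # come from template.conf
pool = E.pool(E.name(pool_name), type='dir')
pool.append(E.target(E.path(pool_path)))
print(etree.tostring(pool, pretty_print=True).decode())
# prints a <pool type="dir"> element with <name> and <target><path> children,
# the shape _check_default_pools passes to conn.storagePoolDefineXML(xml, 0)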
def _attach_pci_device(self, vmid, dev_info): self._validate_pci_passthrough_env() dom = VMModel.get_vm(vmid, self.conn) # Due to a libvirt limitation, we don't support live assignment of # devices to the vfio driver. driver = ('vfio' if DOM_STATE_MAP[dom.info()[0]] == "shutoff" and self.caps.kernel_vfio else 'kvm') # on PowerKVM systems it must be the vfio driver. distro, _, _ = platform.linux_distribution() if distro == 'IBM_PowerKVM': driver = 'vfio' # Attach all PCI devices in the same IOMMU group dev_model = DeviceModel(conn=self.conn) devs_model = DevicesModel(conn=self.conn) affected_names = devs_model.get_list( _passthrough_affected_by=dev_info['name']) passthrough_names = devs_model.get_list( _cap='pci', _passthrough='true') group_names = list(set(affected_names) & set(passthrough_names)) pci_infos = [dev_model.lookup(dev_name) for dev_name in group_names] pci_infos.append(dev_info) is_multifunction = len(pci_infos) > 1 and \ DOM_STATE_MAP[dom.info()[0]] == "shutoff" pci_infos = sorted(pci_infos, key=itemgetter('name')) # all devices in the group being attached to the VM must be detached # from the host first with RollbackContext() as rollback: for pci_info in pci_infos: try: dev = self.conn.get().nodeDeviceLookupByName( pci_info['name']) dev.dettach() except Exception: raise OperationFailed('KCHVMHDEV0005E', {'name': pci_info['name']}) else: rollback.prependDefer(dev.reAttach) rollback.commitAll() device_flags = get_vm_config_flag(dom, mode='all') slot = 0 if is_multifunction: slot = self._available_slot(dom) with RollbackContext() as rollback: for pci_info in pci_infos: pci_info['detach_driver'] = driver xmlstr = self._get_pci_device_xml(pci_info, slot, is_multifunction) try: dom.attachDeviceFlags(xmlstr, device_flags) except libvirt.libvirtError: wok_log.error( 'Failed to attach host device %s to VM %s: \n%s', pci_info['name'], vmid, xmlstr) raise rollback.prependDefer(dom.detachDeviceFlags, xmlstr, device_flags) rollback.commitAll() return dev_info['name']
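# The _available_slot call used by these attach variants scans the guest XML
# for <address slot='...'> values already in use and returns the first free
# slot number. The core search, isolated from the XML handling and exercised
# with hypothetical slot lists:
def first_free_slot(used_slots):
    # same scan as _available_slot once the slot numbers are extracted
    slots = sorted(used_slots)
    free = 0
    for free, slot in enumerate(slots, start=1):
        if free < slot:
            return free
    return free + 1

print(first_free_slot([]))           # 1 (no hostdev addresses yet)
print(first_free_slot([1, 2, 4]))    # 3 (first gap)
print(first_free_slot([1, 2, 3]))    # 4 (append after the last used slot)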