def create(self, vm, params):
    """Attach a new network interface to a stopped VM.

    Validates the target network (for type 'network'), generates a
    unique MAC address and attaches the interface to the current
    domain config. Returns the MAC address used.
    """
    conn = self.conn.get()
    networks = conn.listNetworks() + conn.listDefinedNetworks()

    if params["type"] == "network" and params["network"] not in networks:
        raise InvalidParameter("KCHVMIF0002E",
                               {'name': vm, 'network': params["network"]})

    dom = VMModel.get_vm(vm, self.conn)
    if DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        raise InvalidOperation("KCHVMIF0003E")

    # Fix: materialize the existing MACs. The original generator was
    # exhausted by the first membership test, so every later iteration
    # of the loop compared against an empty iterator and a duplicate
    # MAC could be chosen.
    macs = set(iface.mac.get('address')
               for iface in self.get_vmifaces(vm, self.conn))

    while True:
        params['mac'] = VMIfacesModel.random_mac()
        if params['mac'] not in macs:
            break

    os_data = VMModel.vm_get_os_metadata(dom, self.caps.metadata_support)
    os_distro, os_version = os_data
    xml = get_iface_xml(params, conn.getInfo()[0], os_distro, os_version)
    dom.attachDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_CURRENT)

    return params['mac']
def create(self, vm, params):
    """Attach a new network interface to a VM and return its MAC.

    A caller-supplied MAC must be unique on the VM; otherwise a random
    unused one is generated. The change is applied to the persistent
    config and, when the VM is running, to the live domain as well.
    """
    conn = self.conn.get()
    networks = conn.listNetworks() + conn.listDefinedNetworks()
    networks = map(lambda x: x.decode('utf-8'), networks)

    if params['type'] == 'network':
        network = params.get("network")
        if network is None:
            raise MissingParameter('KCHVMIF0007E')
        if network not in networks:
            raise InvalidParameter('KCHVMIF0002E',
                                   {'name': vm, 'network': network})

    # Fix: materialize the MAC addresses. The original generator was
    # consumed by the first membership test, so subsequent checks ran
    # against an exhausted iterator and duplicates could slip through.
    macs = set(iface.mac.get('address')
               for iface in self.get_vmifaces(vm, self.conn))

    # user defined customized mac address
    if 'mac' in params and params['mac']:
        # make sure it is unique
        if params['mac'] in macs:
            raise InvalidParameter('KCHVMIF0009E',
                                   {'name': vm, 'mac': params['mac']})
    # otherwise choose a random mac address
    else:
        while True:
            params['mac'] = VMIfacesModel.random_mac()
            if params['mac'] not in macs:
                break

    dom = VMModel.get_vm(vm, self.conn)

    os_data = VMModel.vm_get_os_metadata(dom, self.caps.metadata_support)
    # NOTE(review): the unpack order (version, distro) is the reverse of
    # the sibling implementation of this method — confirm the tuple
    # order returned by vm_get_os_metadata.
    os_version, os_distro = os_data
    xml = get_iface_xml(params, conn.getInfo()[0], os_distro, os_version)

    flags = 0
    if dom.isPersistent():
        flags |= libvirt.VIR_DOMAIN_AFFECT_CONFIG
    if DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE

    dom.attachDeviceFlags(xml, flags)

    return params['mac']
def update(self, vm, mac, params):
    """Change the MAC address of an existing interface on a stopped VM.

    Returns [vm, new_mac] on success.

    Raises:
        NotFoundError    -- no interface with the given mac exists.
        InvalidOperation -- the VM is not shut off.
        MissingParameter -- params has no 'mac' key.
        InvalidParameter -- the new mac is already in use on this VM.
    """
    dom = VMModel.get_vm(vm, self.conn)
    iface = self._get_vmiface(vm, mac)

    if iface is None:
        raise NotFoundError("KCHVMIF0001E", {'name': vm, 'iface': mac})

    # cannot change mac address in a running system
    if DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        raise InvalidOperation('KCHVMIF0011E')

    # mac address is a required parameter
    if 'mac' not in params:
        raise MissingParameter('KCHVMIF0008E')

    # new mac address must be unique
    if self._get_vmiface(vm, params['mac']) is not None:
        raise InvalidParameter('KCHVMIF0009E',
                               {'name': vm, 'mac': params['mac']})

    flags = 0
    if dom.isPersistent():
        flags |= libvirt.VIR_DOMAIN_AFFECT_CONFIG

    # remove the current nic
    # NOTE(review): not transactional — if the attach below fails after
    # this detach, the NIC is lost from the config; confirm acceptable.
    xml = etree.tostring(iface)
    dom.detachDeviceFlags(xml, flags=flags)

    # add the nic with the desired mac address
    iface.mac.attrib['address'] = params['mac']
    xml = etree.tostring(iface)
    dom.attachDeviceFlags(xml, flags=flags)

    return [vm, params['mac']]
def _get_available_bus_address(self, bus_type, vm_name):
    """Return a free address dict for a new disk on the IDE controller.

    For non-IDE buses an empty dict is returned (libvirt assigns the
    address itself). Raises OperationFailed when all four IDE slots
    are taken.
    """
    if bus_type not in ['ide']:
        return dict()

    # libvirt limitation of just 1 ide controller
    # each controller have at most 2 buses and each bus 2 units.
    dom = VMModel.get_vm(vm_name, self.conn)
    free_slots = [('0', '0'), ('0', '1'), ('1', '0'), ('1', '1')]
    ctrl = '0'

    for name in self.get_list(vm_name):
        node = get_device_xml(dom, name)
        if node.target.attrib['bus'] != 'ide':
            continue
        ctrl = node.address.attrib['controller']
        slot = (node.address.attrib['bus'], node.address.attrib['unit'])
        if slot in free_slots:
            free_slots.remove(slot)

    if not free_slots:
        raise OperationFailed('KCHVMSTOR0014E', {'type': 'ide',
                                                 'limit': 4})

    bus_id, unit_id = free_slots[0]
    return dict(address={'controller': ctrl,
                         'bus': bus_id,
                         'unit': unit_id})
def lookup(self, vmid, dev_name):
    """Look up an assigned host device by name on a given VM.

    Raises NotFoundError when the VM has no host devices or none of
    them matches dev_name.
    """
    dom = VMModel.get_vm(vmid, self.conn)
    root = objectify.fromstring(dom.XMLDesc(0))

    try:
        assigned = root.devices.hostdev
    except AttributeError:
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})

    dev_model = DeviceModel(conn=self.conn)
    for node in assigned:
        if DeviceModel.deduce_dev_name(node, self.conn) != dev_name:
            continue
        info = dev_model.lookup(dev_name)
        return {'name': dev_name,
                'type': node.attrib['type'],
                'product': info.get('product', None),
                'vendor': info.get('vendor', None)}

    raise NotFoundError('KCHVMHDEV0001E',
                        {'vmid': vmid, 'dev_name': dev_name})
def delete(self, vmid, dev_name):
    """Detach the host device dev_name from VM vmid.

    For PCI devices, other affected devices in the group are handled
    via _delete_affected_pci_devices.
    """
    dom = VMModel.get_vm(vmid, self.conn)
    root = objectify.fromstring(dom.XMLDesc(0))

    try:
        assigned = root.devices.hostdev
    except AttributeError:
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})

    devs_model = VMHostDevsModel(conn=self.conn)
    pci_devs = [(devs_model._deduce_dev_name(node), node)
                for node in assigned if node.attrib['type'] == 'pci']

    for node in assigned:
        if devs_model._deduce_dev_name(node) != dev_name:
            continue
        dom.detachDeviceFlags(etree.tostring(node),
                              get_vm_config_flag(dom, mode='all'))
        if node.attrib['type'] == 'pci':
            self._delete_affected_pci_devices(dom, dev_name, pci_devs)
        break
    else:
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})
def delete(self, vmid, dev_name):
    """Remove a host device assignment from a VM by device name."""
    dom = VMModel.get_vm(vmid, self.conn)
    root = objectify.fromstring(dom.XMLDesc(0))

    try:
        hostdevs = root.devices.hostdev
    except AttributeError:
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})

    model = VMHostDevsModel(conn=self.conn)
    pci_devs = [(model._deduce_dev_name(e), e)
                for e in hostdevs if e.attrib['type'] == 'pci']

    # Locate the requested device first, then detach it.
    target = None
    for e in hostdevs:
        if model._deduce_dev_name(e) == dev_name:
            target = e
            break

    if target is None:
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})

    dom.detachDeviceFlags(etree.tostring(target),
                          get_vm_config_flag(dom, mode='all'))
    if target.attrib['type'] == 'pci':
        self._delete_affected_pci_devices(dom, dev_name, pci_devs)
def get_disk_ref_cnt(objstore, conn, path):
    """Return how many VMs reference the storage volume at 'path'.

    Reads the cached ref_cnt from the object store; if the volume is
    not tracked yet, rebuilds the count by scanning every VM's disks
    and persists the result.

    Raises:
        OperationFailed -- the count could not be read or stored.
    """
    try:
        with objstore as session:
            try:
                ref_cnt = session.get('storagevolume', path)['ref_cnt']
            except NotFoundError:
                # Volume not tracked yet (e.g. created outside kimchi):
                # rebuild the count from scratch.
                kimchi_log.info('Volume %s not found in obj store.' % path)
                ref_cnt = 0
                # try to find this volume in existing vm
                vms_list = VMsModel.get_vms(conn)
                for vm in vms_list:
                    dom = VMModel.get_vm(vm, conn)
                    storages = get_vm_disks(dom)
                    for disk in storages.keys():
                        d_info = get_vm_disk_info(dom, disk)
                        if path == d_info['path']:
                            ref_cnt = ref_cnt + 1
                try:
                    session.store('storagevolume', path,
                                  {'ref_cnt': ref_cnt})
                except Exception as e:
                    # Let the exception be raised. If we allow disks'
                    # ref_cnts to be out of sync, data corruption could
                    # occour if a disk is added to two guests
                    # unknowingly.
                    kimchi_log.error('Unable to store storage volume id in'
                                     ' objectstore due error: %s',
                                     e.message)
                    raise OperationFailed('KCHVOL0017E',
                                          {'err': e.message})
    except Exception as e:
        # This exception is going to catch errors returned by 'with',
        # specially ones generated by 'session.store'. It is outside
        # to avoid conflict with the __exit__ function of 'with'
        raise OperationFailed('KCHVOL0017E', {'err': e.message})
    return ref_cnt
def create(self, vm_name, params=None):
    """Create a snapshot with the current domain state.

    The VM must be stopped and contain only disks with format 'qcow2';
    otherwise an exception will be raised.

    Parameters:
    vm_name -- the name of the VM where the snapshot will be created.
    params -- a dict with the following values:
        "name": The snapshot name (optional). If omitted, a default
        value based on the current time will be used.

    Return:
    A Task running the operation.
    """
    # Fix: avoid the shared mutable-default-argument pitfall
    # (the old signature was 'params={}').
    params = params or {}

    vir_dom = VMModel.get_vm(vm_name, self.conn)
    if DOM_STATE_MAP[vir_dom.info()[0]] != u'shutoff':
        raise InvalidOperation('KCHSNAP0001E', {'vm': vm_name})

    # if the VM has a non-CDROM disk with type 'raw', abort.
    for storage_name in self.vmstorages.get_list(vm_name):
        storage = self.vmstorage.lookup(vm_name, storage_name)
        # renamed locals so the builtins 'type'/'format' aren't shadowed
        disk_type = storage['type']
        disk_format = storage['format']
        if disk_type != u'cdrom' and disk_format != u'qcow2':
            raise InvalidOperation('KCHSNAP0010E',
                                   {'vm': vm_name, 'format': disk_format})

    name = params.get('name', unicode(int(time.time())))
    task_params = {'vm_name': vm_name, 'name': name}
    taskid = add_task(u'/vms/%s/snapshots/%s' % (vm_name, name),
                      self._create_task, self.objstore, task_params)
    return self.task.lookup(taskid)
def create(self, vm_name, params):
    """Attach a new CD-ROM device to a VM; the VM must be shut off.

    Returns the device name used.
    """
    dom = VMModel.get_vm(vm_name, self.conn)
    if DOM_STATE_MAP[dom.info()[0]] != 'shutoff':
        raise InvalidOperation('KCHCDROM0011E')

    # Use the device name passed by the caller or pick the next free one.
    dev_name = params.get('dev', None)
    if dev_name is None:
        params['dev'] = self._get_storage_device_name(vm_name)
    elif dev_name in self.get_list(vm_name):
        raise OperationFailed('KCHCDROM0004E',
                              {'dev_name': dev_name, 'vm_name': vm_name})

    # Path will never be blank due to API.json verification.
    # There is no need to cover this case here.
    path = params['path']
    params['src_type'] = _check_cdrom_path(path)  # Check if path is an url

    # Add device to VM
    dev_xml = _get_storage_xml(params)
    try:
        conn = self.conn.get()
        dom = conn.lookupByName(vm_name)
        dom.attachDeviceFlags(dev_xml, libvirt.VIR_DOMAIN_AFFECT_CURRENT)
    except Exception as e:
        raise OperationFailed("KCHCDROM0008E", {'error': e.message})
    return params['dev']
def _get_ref_cnt(self, pool, name, path):
    """Return the reference count of volume 'name' in 'pool'.

    If the volume is not yet tracked in the object store (e.g. created
    outside kimchi), the count is rebuilt by scanning every VM's disks
    and then persisted under the key '<pool>:<name>'.
    """
    vol_id = '%s:%s' % (pool, name)
    try:
        with self.objstore as session:
            try:
                ref_cnt = session.get('storagevolume', vol_id)['ref_cnt']
            except NotFoundError:
                # Fix storage volume created outside kimchi scope
                ref_cnt = 0
                # try to find this volume in exsisted vm
                vms = VMsModel.get_vms(self.conn)
                for vm in vms:
                    dom = VMModel.get_vm(vm, self.conn)
                    storages = get_vm_disk_list(dom)
                    for disk in storages:
                        d_info = get_vm_disk(dom, disk)
                        if path == d_info['path']:
                            ref_cnt = ref_cnt + 1
                session.store('storagevolume', vol_id,
                              {'ref_cnt': ref_cnt})
    except Exception as e:
        # This exception is going to catch errors returned by 'with',
        # specially ones generated by 'session.store'. It is outside
        # to avoid conflict with the __exit__ function of 'with'
        raise OperationFailed('KCHVOL0017E', {'err': e.message})
    return ref_cnt
def update(self, vm, mac, params):
    """Update the network and/or model of the interface with this mac.

    Changes hit the persistent config and, for a running VM, the live
    domain too. Returns the (unchanged) mac.
    """
    dom = VMModel.get_vm(vm, self.conn)
    iface = self._get_vmiface(vm, mac)

    if iface is None:
        raise NotFoundError("KCHVMIF0001E", {'name': vm, 'iface': mac})

    flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG if dom.isPersistent() else 0
    if DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE

    if iface.attrib['type'] == 'network' and 'network' in params:
        iface.source.attrib['network'] = params['network']
        dom.updateDeviceFlags(etree.tostring(iface), flags=flags)

    if 'model' in params:
        iface.model.attrib["type"] = params['model']
        dom.updateDeviceFlags(etree.tostring(iface), flags=flags)

    return mac
def create(self, vm, params):
    """Attach a new interface to a stopped VM and return its MAC."""
    def random_mac():
        # 52:54:00 is the locally-administered prefix used for KVM guests.
        octets = [0x52, 0x54, 0x00,
                  random.randint(0x00, 0x7F),
                  random.randint(0x00, 0xFF),
                  random.randint(0x00, 0xFF)]
        return ":".join("%02x" % o for o in octets)

    conn = self.conn.get()
    networks = conn.listNetworks() + conn.listDefinedNetworks()

    if params["type"] == "network" and params["network"] not in networks:
        raise InvalidParameter("KCHVMIF0002E",
                               {"name": vm, "network": params["network"]})

    dom = VMModel.get_vm(vm, self.conn)
    if DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        raise InvalidOperation("KCHVMIF0003E")

    # Fix: materialize existing MACs into a set. The original generator
    # was exhausted by the first 'in' test, so subsequent uniqueness
    # checks were vacuous and a duplicate MAC could be returned.
    macs = set(iface.mac.get("address")
               for iface in self.get_vmifaces(vm, self.conn))

    mac = random_mac()
    while mac in macs:
        mac = random_mac()

    children = [E.mac(address=mac)]
    # Fix: replaced the 'cond and list.append(...)' side-effect
    # expressions with plain conditionals.
    if "network" in params.keys():
        children.append(E.source(network=params["network"]))
    if "model" in params.keys():
        children.append(E.model(type=params["model"]))
    attrib = {"type": params["type"]}

    xml = etree.tostring(E.interface(*children, **attrib))
    dom.attachDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_CURRENT)

    return mac
def _create_task(self, cb, params): """Asynchronous function which actually creates the snapshot. Parameters: cb -- a callback function to signal the Task's progress. params -- a dict with the following values: "vm_name": the name of the VM where the snapshot will be created. "name": the snapshot name. """ vm_name = params['vm_name'] name = params['name'] cb('building snapshot XML') root_elem = E.domainsnapshot() root_elem.append(E.name(name)) xml = ET.tostring(root_elem, encoding='utf-8') try: cb('fetching snapshot domain') vir_dom = VMModel.get_vm(vm_name, self.conn) cb('creating snapshot') vir_dom.snapshotCreateXML(xml, 0) except (NotFoundError, OperationFailed, libvirt.libvirtError), e: raise OperationFailed('KCHSNAP0002E', {'name': name, 'vm': vm_name, 'err': e.message})
def delete(self, vm_name, dev_name):
    """Detach a storage device from a VM and update the volume's
    used_by list in the object store.

    Raises:
        InvalidOperation -- hot-unplug attempted on a non-hotpluggable
                            bus while the VM is running.
        OperationFailed  -- the libvirt detach failed.
    """
    try:
        bus_type = self.lookup(vm_name, dev_name)['bus']
        dom = VMModel.get_vm(vm_name, self.conn)
    except NotFoundError:
        raise

    if (bus_type not in HOTPLUG_TYPE and
            DOM_STATE_MAP[dom.info()[0]] != 'shutoff'):
        raise InvalidOperation('KCHVMSTOR0011E')

    # Fix: 'used_by' must be initialized here; previously it was only
    # bound inside the 'path is not None' branch, raising NameError
    # after the try block whenever no path could be found.
    used_by = None
    try:
        disk = get_device_node(dom, dev_name)
        path = get_vm_disk_info(dom, dev_name)['path']
        if path is None or len(path) < 1:
            path = self.lookup(vm_name, dev_name)['path']

        # This has to be done before it's detached. If it wasn't
        # in the obj store, its ref count would have been updated
        # by get_disk_used_by()
        if path is not None:
            used_by = get_disk_used_by(self.objstore, self.conn, path)
        else:
            kimchi_log.error("Unable to decrement volume used_by on"
                             " delete because no path could be found.")

        dom.detachDeviceFlags(etree.tostring(disk),
                              get_vm_config_flag(dom, 'all'))
    except Exception as e:
        raise OperationFailed("KCHVMSTOR0010E", {'error': e.message})

    if used_by is not None and vm_name in used_by:
        used_by.remove(vm_name)
        set_disk_used_by(self.objstore, path, used_by)
    else:
        kimchi_log.error("Unable to update %s:%s used_by on delete."
                         % (vm_name, dev_name))
def _get_available_bus_address(self, bus_type, vm_name):
    """Pick a free (controller, bus, unit) address on the IDE controller.

    Returns {} for non-IDE buses, where libvirt assigns addresses
    itself. Raises OperationFailed when all four IDE slots are taken.
    """
    if bus_type not in ['ide']:
        return dict()

    # libvirt limitation of just 1 ide controller
    # each controller have at most 2 buses and each bus 2 units.
    dom = VMModel.get_vm(vm_name, self.conn)
    taken = set()
    controller_id = '0'

    for dev_name in self.get_list(vm_name):
        disk = get_device_node(dom, dev_name)
        if disk.target.attrib['bus'] != 'ide':
            continue
        controller_id = disk.address.attrib['controller']
        taken.add((disk.address.attrib['bus'],
                   disk.address.attrib['unit']))

    for bus_id, unit_id in [('0', '0'), ('0', '1'),
                            ('1', '0'), ('1', '1')]:
        if (bus_id, unit_id) not in taken:
            return dict(address={'controller': controller_id,
                                 'bus': bus_id,
                                 'unit': unit_id})

    raise OperationFailed('KCHVMSTOR0014E', {'type': 'ide', 'limit': 4})
def _create_task(self, cb, params):
    """Asynchronous function which actually creates the snapshot.

    Parameters:
    cb -- a callback function to signal the Task's progress.
    params -- a dict with the following values:
        "vm_name": the name of the VM where the snapshot will be
        created.
        "name": the snapshot name.
    """
    vm_name = params['vm_name']
    name = params['name']

    cb('building snapshot XML')
    root_elem = E.domainsnapshot()
    root_elem.append(E.name(name))
    xml = ET.tostring(root_elem, encoding='utf-8')

    try:
        cb('fetching snapshot domain')
        vir_dom = VMModel.get_vm(vm_name, self.conn)
        cb('creating snapshot')
        # 0 = no special snapshot-creation flags.
        vir_dom.snapshotCreateXML(xml, 0)
    except (NotFoundError, OperationFailed, libvirt.libvirtError), e:
        raise OperationFailed('KCHSNAP0002E', {
            'name': name,
            'vm': vm_name,
            'err': e.message
        })
def create(self, vm_name, params):
    """Add a CD-ROM device to a VM that is shut off and return the
    device name used."""
    dom = VMModel.get_vm(vm_name, self.conn)
    if DOM_STATE_MAP[dom.info()[0]] != 'shutoff':
        raise InvalidOperation('KCHCDROM0011E')

    # Honor a caller-chosen device name, otherwise allocate the next one.
    requested = params.get('dev', None)
    if requested is None:
        params['dev'] = self._get_storage_device_name(vm_name)
    else:
        existing = self.get_list(vm_name)
        if requested in existing:
            raise OperationFailed('KCHCDROM0004E',
                                  {'dev_name': requested,
                                   'vm_name': vm_name})

    # Path will never be blank due to API.json verification.
    # There is no need to cover this case here.
    path = params['path']
    params['src_type'] = _check_cdrom_path(path)  # Check if path is an url

    # Add device to VM
    device_xml = _get_storage_xml(params)
    try:
        conn = self.conn.get()
        dom = conn.lookupByName(vm_name)
        dom.attachDeviceFlags(device_xml,
                              libvirt.VIR_DOMAIN_AFFECT_CURRENT)
    except Exception as e:
        raise OperationFailed("KCHCDROM0008E", {'error': e.message})
    return params['dev']
def create(self, vm, params):
    """Attach a new network interface to a VM; returns the MAC used.

    A caller-supplied MAC must be unique on the VM; otherwise a random
    unused one is generated. Applied to the persistent config and, for
    a running VM, to the live domain as well.
    """
    conn = self.conn.get()
    networks = conn.listNetworks() + conn.listDefinedNetworks()
    networks = map(lambda x: x.decode('utf-8'), networks)

    if params['type'] == 'network':
        network = params.get("network")
        if network is None:
            raise MissingParameter('KCHVMIF0007E')
        if network not in networks:
            raise InvalidParameter('KCHVMIF0002E',
                                   {'name': vm, 'network': network})

    # Fix: collect MACs into a set. The original generator was exhausted
    # by the first 'in' test, so later uniqueness checks were vacuous
    # and a duplicate MAC could slip through.
    macs = set(iface.mac.get('address')
               for iface in self.get_vmifaces(vm, self.conn))

    # user defined customized mac address
    if 'mac' in params and params['mac']:
        # make sure it is unique
        if params['mac'] in macs:
            raise InvalidParameter('KCHVMIF0009E',
                                   {'name': vm, 'mac': params['mac']})
    # otherwise choose a random mac address
    else:
        while True:
            params['mac'] = VMIfacesModel.random_mac()
            if params['mac'] not in macs:
                break

    dom = VMModel.get_vm(vm, self.conn)

    os_data = VMModel.vm_get_os_metadata(dom)
    # NOTE(review): unpack order (version, distro) differs from the
    # sibling version of this method — confirm vm_get_os_metadata's
    # return order.
    os_version, os_distro = os_data
    xml = get_iface_xml(params, conn.getInfo()[0], os_distro, os_version)

    flags = 0
    if dom.isPersistent():
        flags |= libvirt.VIR_DOMAIN_AFFECT_CONFIG
    if DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE

    dom.attachDeviceFlags(xml, flags)

    return params['mac']
def _attach_pci_device(self, vmid, dev_info):
    """Assign a host PCI device (and its IOMMU group) to a VM.

    Every passthrough-capable device in the same IOMMU group is first
    detached from the host, then attached to the guest; on any failure
    the already-performed steps are rolled back. Returns the name of
    the requested device.
    """
    self._validate_pci_passthrough_env()

    dom = VMModel.get_vm(vmid, self.conn)
    # Due to libvirt limitation, we don't support live assigne device to
    # vfio driver.
    driver = ('vfio' if DOM_STATE_MAP[dom.info()[0]] == "shutoff" and
              self.caps.kernel_vfio else 'kvm')

    # on powerkvm systems it must be vfio driver.
    distro, _, _ = platform.linux_distribution()
    if distro == 'IBM_PowerKVM':
        driver = 'vfio'

    # Attach all PCI devices in the same IOMMU group
    dev_model = DeviceModel(conn=self.conn)
    devs_model = DevicesModel(conn=self.conn)
    affected_names = devs_model.get_list(
        _passthrough_affected_by=dev_info['name'])
    passthrough_names = devs_model.get_list(_cap='pci',
                                            _passthrough='true')
    group_names = list(set(affected_names) & set(passthrough_names))
    pci_infos = [dev_model.lookup(dev_name) for dev_name in group_names]
    pci_infos.append(dev_info)

    # all devices in the group that is going to be attached to the vm
    # must be detached from the host first
    with RollbackContext() as rollback:
        for pci_info in pci_infos:
            try:
                dev = self.conn.get().nodeDeviceLookupByName(
                    pci_info['name'])
                dev.dettach()
            except Exception:
                raise OperationFailed('KCHVMHDEV0005E',
                                      {'name': pci_info['name']})
            else:
                # re-attach to the host if a later step fails
                rollback.prependDefer(dev.reAttach)
        rollback.commitAll()

    device_flags = get_vm_config_flag(dom, mode='all')
    with RollbackContext() as rollback:
        for pci_info in pci_infos:
            pci_info['detach_driver'] = driver
            xmlstr = self._get_pci_device_xml(pci_info)
            try:
                dom.attachDeviceFlags(xmlstr, device_flags)
            except libvirt.libvirtError:
                kimchi_log.error(
                    'Failed to attach host device %s to VM %s: \n%s',
                    pci_info['name'], vmid, xmlstr)
                raise
            # detach from the guest again if a later attach fails
            rollback.prependDefer(dom.detachDeviceFlags, xmlstr,
                                  device_flags)
        rollback.commitAll()

    return dev_info['name']
def _attach_pci_device(self, vmid, dev_info):
    """Attach a host PCI device to VM vmid, together with all other
    passthrough-capable devices of the same IOMMU group.

    The devices are detached from the host before being handed to the
    guest; both phases use RollbackContext so partial work is undone on
    failure. Returns the requested device's name.
    """
    self._validate_pci_passthrough_env()

    dom = VMModel.get_vm(vmid, self.conn)
    # Due to libvirt limitation, we don't support live assigne device to
    # vfio driver.
    driver = ('vfio' if DOM_STATE_MAP[dom.info()[0]] == "shutoff" and
              self.caps.kernel_vfio else 'kvm')

    # on powerkvm systems it must be vfio driver.
    distro, _, _ = platform.linux_distribution()
    if distro == 'IBM_PowerKVM':
        driver = 'vfio'

    # Attach all PCI devices in the same IOMMU group
    dev_model = DeviceModel(conn=self.conn)
    devs_model = DevicesModel(conn=self.conn)
    affected_names = devs_model.get_list(
        _passthrough_affected_by=dev_info['name'])
    passthrough_names = devs_model.get_list(
        _cap='pci', _passthrough='true')
    group_names = list(set(affected_names) & set(passthrough_names))
    pci_infos = [dev_model.lookup(dev_name) for dev_name in group_names]
    pci_infos.append(dev_info)

    # all devices in the group that is going to be attached to the vm
    # must be detached from the host first
    with RollbackContext() as rollback:
        for pci_info in pci_infos:
            try:
                dev = self.conn.get().nodeDeviceLookupByName(
                    pci_info['name'])
                dev.dettach()
            except Exception:
                raise OperationFailed('KCHVMHDEV0005E',
                                      {'name': pci_info['name']})
            else:
                # undo the host detach if a later step fails
                rollback.prependDefer(dev.reAttach)
        rollback.commitAll()

    device_flags = get_vm_config_flag(dom, mode='all')
    with RollbackContext() as rollback:
        for pci_info in pci_infos:
            pci_info['detach_driver'] = driver
            xmlstr = self._get_pci_device_xml(pci_info)
            try:
                dom.attachDeviceFlags(xmlstr, device_flags)
            except libvirt.libvirtError:
                kimchi_log.error(
                    'Failed to attach host device %s to VM %s: \n%s',
                    pci_info['name'], vmid, xmlstr)
                raise
            # undo the guest attach if a later attach fails
            rollback.prependDefer(dom.detachDeviceFlags, xmlstr,
                                  device_flags)
        rollback.commitAll()

    return dev_info['name']
def revert(self, vm_name, name): try: vir_dom = VMModel.get_vm(vm_name, self.conn) vir_snap = self.get_vmsnapshot(vm_name, name) vir_dom.revertToSnapshot(vir_snap, 0) except libvirt.libvirtError, e: raise OperationFailed('KCHSNAP0009E', {'name': name, 'vm': vm_name, 'err': e.message})
def get_list(self, vm_name):
    """List target device names of all disk devices, then all cdrom
    devices, of the given VM."""
    dom = VMModel.get_vm(vm_name, self.conn)
    devices = objectify.fromstring(dom.XMLDesc(0)).devices

    names = []
    for device_kind in ('disk', 'cdrom'):
        for node in devices.xpath("./disk[@device='%s']" % device_kind):
            names.append(node.target.attrib['dev'])
    return names
def _get_device_xml(self, vm_name, dev_name):
    """Return the XML node of the disk whose target dev is dev_name,
    or None if the VM has no such disk."""
    dom = VMModel.get_vm(vm_name, self.conn)
    devices = objectify.fromstring(dom.XMLDesc(0)).devices
    matches = devices.xpath("./disk/target[@dev='%s']/.." % dev_name)
    return matches[0] if matches else None
def get_list(self, vmid):
    """Names of all host devices currently assigned to VM vmid."""
    dom = VMModel.get_vm(vmid, self.conn)
    root = objectify.fromstring(dom.XMLDesc(0))

    try:
        hostdevs = root.devices.hostdev
    except AttributeError:
        # no <hostdev> elements at all
        return []

    names = []
    for node in hostdevs:
        names.append(self._deduce_dev_name(node))
    return names
def get_list(self, vm_name): vir_dom = VMModel.get_vm(vm_name, self.conn) try: vir_snaps = vir_dom.listAllSnapshots(0) return sorted([s.getName().decode('utf-8') for s in vir_snaps], key=unicode.lower) except libvirt.libvirtError, e: raise OperationFailed('KCHSNAP0005E', {'vm': vm_name, 'err': e.message})
def delete(self, vm, mac):
    """Detach the interface with the given mac; the VM must be shut off."""
    dom = VMModel.get_vm(vm, self.conn)
    iface = self._get_vmiface(vm, mac)

    if DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        raise InvalidOperation("KCHVMIF0003E")
    if iface is None:
        raise NotFoundError("KCHVMIF0001E", {"name": vm, "iface": mac})

    iface_xml = etree.tostring(iface)
    dom.detachDeviceFlags(iface_xml, libvirt.VIR_DOMAIN_AFFECT_CURRENT)
def delete(self, vm, mac):
    """Remove the interface identified by mac from a powered-off VM."""
    dom = VMModel.get_vm(vm, self.conn)
    nic = self._get_vmiface(vm, mac)

    # detaching is only supported while the guest is shut off
    if DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        raise InvalidOperation("KCHVMIF0003E")
    if nic is None:
        raise NotFoundError("KCHVMIF0001E", {'name': vm, 'iface': mac})

    dom.detachDeviceFlags(etree.tostring(nic),
                          libvirt.VIR_DOMAIN_AFFECT_CURRENT)
def delete(self, vm_name, dev_name):
    """Detach the storage device dev_name from VM vm_name.

    Hot-unplug is only allowed for buses in HOTPLUG_TYPE; otherwise
    the VM must be shut off.
    """
    # Get storage device xml
    dom = VMModel.get_vm(vm_name, self.conn)
    try:
        bus_type = self.lookup(vm_name, dev_name)['bus']
    except NotFoundError:
        raise

    # Fix: removed a second, identical VMModel.get_vm() lookup that
    # immediately overwrote 'dom' with the same domain.
    if (bus_type not in HOTPLUG_TYPE and
            DOM_STATE_MAP[dom.info()[0]] != 'shutoff'):
        raise InvalidOperation('KCHVMSTOR0011E')

    try:
        conn = self.conn.get()
        dom = conn.lookupByName(vm_name)
        disk = get_device_xml(dom, dev_name)
        dom.detachDeviceFlags(etree.tostring(disk),
                              libvirt.VIR_DOMAIN_AFFECT_CURRENT)
    except Exception as e:
        raise OperationFailed("KCHVMSTOR0010E", {'error': e.message})
def delete(self, vm, mac):
    """Detach the NIC with the given mac; the guest must be shut off."""
    dom = VMModel.get_vm(vm, self.conn)
    nic = self._get_vmiface(vm, mac)

    if DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        raise InvalidOperation("do not support hot plugging detach "
                               "guest interface")
    if nic is None:
        raise NotFoundError('iface: "%s"' % mac)

    nic_xml = etree.tostring(nic)
    dom.detachDeviceFlags(nic_xml, libvirt.VIR_DOMAIN_AFFECT_CURRENT)
def get_list(self, vm_name): vir_dom = VMModel.get_vm(vm_name, self.conn) try: vir_snaps = vir_dom.listAllSnapshots(0) return sorted([s.getName().decode('utf-8') for s in vir_snaps], key=unicode.lower) except libvirt.libvirtError, e: raise OperationFailed('KCHSNAP0005E', { 'vm': vm_name, 'err': e.message })
def delete(self, vm_name, dev_name):
    """Detach the storage device dev_name from VM vm_name.

    Hot-unplug is only allowed for buses in HOTPLUG_TYPE; otherwise
    the VM must be shut off. The detach is applied per
    get_vm_config_flag (live and/or persistent config).
    """
    # Get storage device xml
    dom = VMModel.get_vm(vm_name, self.conn)
    try:
        bus_type = self.lookup(vm_name, dev_name)['bus']
    except NotFoundError:
        raise

    # Fix: removed a second, identical VMModel.get_vm() lookup that
    # immediately overwrote 'dom' with the same domain.
    if (bus_type not in HOTPLUG_TYPE and
            DOM_STATE_MAP[dom.info()[0]] != 'shutoff'):
        raise InvalidOperation('KCHVMSTOR0011E')

    try:
        conn = self.conn.get()
        dom = conn.lookupByName(vm_name)
        disk = get_device_xml(dom, dev_name)
        dom.detachDeviceFlags(etree.tostring(disk),
                              get_vm_config_flag(dom, 'all'))
    except Exception as e:
        raise OperationFailed("KCHVMSTOR0010E", {'error': e.message})
def lookup(self, vm_name):
    """Look up the current snapshot of vm_name.

    Returns {} when the domain has no current snapshot.

    NOTE(review): on the success path 'snap_name' is computed but the
    function falls through and implicitly returns None — it looks like
    a return of the snapshot's info (or name) is missing; confirm
    against callers.
    """
    vir_dom = VMModel.get_vm(vm_name, self.conn)

    try:
        vir_snap = vir_dom.snapshotCurrent(0)
        snap_name = vir_snap.getName().decode('utf-8')
    except libvirt.libvirtError, e:
        if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN_SNAPSHOT:
            # no current snapshot is a normal condition, not an error
            return {}
        raise OperationFailed('KCHSNAP0008E',
                              {'vm': vm_name, 'err': e.message})
def update(self, vm_name, dev_name, params):
    """Update the CD-ROM device dev_name with a new media path.

    Returns the device name.
    """
    path = params['path']
    params['src_type'] = _check_cdrom_path(path)

    dom = VMModel.get_vm(vm_name, self.conn)
    dev_info = self.lookup(vm_name, dev_name)
    dev_info.update(params)
    device_xml = _get_storage_xml(dev_info)

    try:
        dom.updateDeviceFlags(device_xml,
                              libvirt.VIR_DOMAIN_AFFECT_CURRENT)
    except Exception as e:
        raise OperationFailed("KCHCDROM0009E", {'error': e.message})
    return dev_name
def get_list(self, vm_name):
    """Target device names of all disk devices followed by all cdrom
    devices of the given VM."""
    dom = VMModel.get_vm(vm_name, self.conn)
    devices = objectify.fromstring(dom.XMLDesc(0)).devices
    disks = devices.xpath("./disk[@device='disk']")
    cdroms = devices.xpath("./disk[@device='cdrom']")
    return [node.target.attrib['dev'] for node in disks + cdroms]
def revert(self, vm_name, name): try: vir_dom = VMModel.get_vm(vm_name, self.conn) vir_snap = self.get_vmsnapshot(vm_name, name) vir_dom.revertToSnapshot(vir_snap, 0) # get vm name recorded in the snapshot and return new uri params vm_new_name = xpath_get_text(vir_snap.getXMLDesc(0), 'domain/name')[0] return [vm_new_name, name] except libvirt.libvirtError, e: raise OperationFailed('KCHSNAP0009E', {'name': name, 'vm': vm_name, 'err': e.message})
def delete(self, vm, mac):
    """Detach the interface with this mac from the persistent config
    and, when the VM is running, from the live domain too."""
    dom = VMModel.get_vm(vm, self.conn)
    iface = self._get_vmiface(vm, mac)

    if iface is None:
        raise NotFoundError("KCHVMIF0001E", {'name': vm, 'iface': mac})

    flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG if dom.isPersistent() else 0
    if DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE

    dom.detachDeviceFlags(etree.tostring(iface), flags)
def get_vmsnapshot(self, vm_name, name): vir_dom = VMModel.get_vm(vm_name, self.conn) try: return vir_dom.snapshotLookupByName(name, 0) except libvirt.libvirtError, e: code = e.get_error_code() if code == libvirt.VIR_ERR_NO_DOMAIN_SNAPSHOT: raise NotFoundError('KCHSNAP0003E', {'name': name, 'vm': vm_name}) else: raise OperationFailed('KCHSNAP0004E', {'name': name, 'vm': vm_name, 'err': e.message})
def lookup(self, vm_name):
    """Return info about the current snapshot of vm_name.

    {} is returned when there is no current snapshot.

    NOTE(review): the success path computes 'snap_name' but then falls
    off the end of the function, implicitly returning None — a return
    statement appears to be missing; verify intent.
    """
    vir_dom = VMModel.get_vm(vm_name, self.conn)

    try:
        vir_snap = vir_dom.snapshotCurrent(0)
        snap_name = vir_snap.getName().decode('utf-8')
    except libvirt.libvirtError, e:
        # "no current snapshot" is expected and not treated as failure
        if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN_SNAPSHOT:
            return {}
        raise OperationFailed('KCHSNAP0008E', {
            'vm': vm_name,
            'err': e.message
        })
def create(self, vm_name, params): dom = VMModel.get_vm(vm_name, self.conn) # Use device name passed or pick next dev_name = params.get('dev', None) if dev_name is None: params['dev'] = self._get_storage_device_name(vm_name) else: devices = self.get_list(vm_name) if dev_name in devices: raise OperationFailed( 'KCHVMSTOR0004E', {'dev_name': dev_name, 'vm_name': vm_name}) # Path will never be blank due to API.json verification. # There is no need to cover this case here. params['format'] = 'raw' if not ('vol' in params) ^ ('path' in params): raise InvalidParameter("KCHVMSTOR0017E") if params.get('vol'): try: pool = params['pool'] vol_info = StorageVolumeModel( conn=self.conn, objstore=self.objstore).lookup(pool, params['vol']) except KeyError: raise InvalidParameter("KCHVMSTOR0012E") except Exception as e: raise InvalidParameter("KCHVMSTOR0015E", {'error': e}) if vol_info['ref_cnt'] != 0: raise InvalidParameter("KCHVMSTOR0016E") params['format'] = vol_info['format'] params['path'] = vol_info['path'] params['src_type'] = _check_path(params['path']) params.setdefault( 'bus', _get_device_bus(params['type'], dom)) if (params['bus'] not in HOTPLUG_TYPE and DOM_STATE_MAP[dom.info()[0]] != 'shutoff'): raise InvalidOperation('KCHVMSTOR0011E') params.update(self._get_available_bus_address(params['bus'], vm_name)) # Add device to VM dev_xml = _get_storage_xml(params) try: conn = self.conn.get() dom = conn.lookupByName(vm_name) dom.attachDeviceFlags(dev_xml, get_vm_config_flag(dom, 'all')) except Exception as e: raise OperationFailed("KCHVMSTOR0008E", {'error': e.message}) return params['dev']
def create(self, vm_name, params):
    """Attach a new disk or cdrom device to a VM, validating the
    volume format against the device type.

    Exactly one of params['vol'] or params['path'] must be given.
    Returns the device name used.
    """
    dom = VMModel.get_vm(vm_name, self.conn)
    params['bus'] = _get_device_bus(params['type'], dom)
    self._get_storage_device_name(vm_name, params)

    # Path will never be blank due to API.json verification.
    # There is no need to cover this case here.
    params['format'] = 'raw'
    # exactly one of 'vol' / 'path' must be provided
    if not ('vol' in params) ^ ('path' in params):
        raise InvalidParameter("KCHVMSTOR0017E")
    if params.get('vol'):
        try:
            pool = params['pool']
            vol_info = StorageVolumeModel(
                conn=self.conn,
                objstore=self.objstore).lookup(pool, params['vol'])
        except KeyError:
            raise InvalidParameter("KCHVMSTOR0012E")
        except Exception as e:
            raise InvalidParameter("KCHVMSTOR0015E", {'error': e})
        # refuse a volume already referenced by some VM
        if vol_info['ref_cnt'] != 0:
            raise InvalidParameter("KCHVMSTOR0016E")

        # NOTE(review): "cdrom" maps to the *string* "iso", so the 'in'
        # test below is a substring check for cdroms (e.g. "is" would
        # pass) — confirm whether a list ["iso"] was intended.
        supported_format = {
            "disk": ["raw", "bochs", "qcow", "qcow2", "qed", "vmdk"],
            "cdrom": "iso"}
        if vol_info['format'] in supported_format[params['type']]:
            if params['type'] == 'disk':
                params['format'] = vol_info['format']
        else:
            raise InvalidParameter("KCHVMSTOR0018E",
                                   {"format": vol_info['format'],
                                    "type": params['type']})

        params['path'] = vol_info['path']
    params['src_type'] = _check_path(params['path'])
    # hot-plug only for buses in HOTPLUG_TYPE; otherwise require shutoff
    if (params['bus'] not in HOTPLUG_TYPE and
            DOM_STATE_MAP[dom.info()[0]] != 'shutoff'):
        raise InvalidOperation('KCHVMSTOR0011E')

    params.update(self._get_available_bus_address(params['bus'],
                                                  vm_name))

    # Add device to VM
    dev_xml = _get_storage_xml(params)
    try:
        conn = self.conn.get()
        dom = conn.lookupByName(vm_name)
        dom.attachDeviceFlags(dev_xml, get_vm_config_flag(dom, 'all'))
    except Exception as e:
        raise OperationFailed("KCHVMSTOR0008E", {'error': e.message})
    return params['dev']
def revert(self, vm_name, name): try: vir_dom = VMModel.get_vm(vm_name, self.conn) vir_snap = self.get_vmsnapshot(vm_name, name) vir_dom.revertToSnapshot(vir_snap, 0) # get vm name recorded in the snapshot and return new uri params vm_new_name = xpath_get_text(vir_snap.getXMLDesc(0), 'domain/name')[0] return [vm_new_name, name] except libvirt.libvirtError, e: raise OperationFailed('KCHSNAP0009E', { 'name': name, 'vm': vm_name, 'err': e.message })
def create(self, vm_name, params): dom = VMModel.get_vm(vm_name, self.conn) # Use device name passed or pick next dev_name = params.get('dev', None) if dev_name is None: params['dev'] = self._get_storage_device_name(vm_name) else: devices = self.get_list(vm_name) if dev_name in devices: raise OperationFailed('KCHVMSTOR0004E', { 'dev_name': dev_name, 'vm_name': vm_name }) # Path will never be blank due to API.json verification. # There is no need to cover this case here. params['format'] = 'raw' if params.get('vol'): try: pool = params['pool'] vol_info = StorageVolumeModel(conn=self.conn, objstore=self.objstore).lookup( pool, params['vol']) except KeyError: raise InvalidParameter("KCHVMSTOR0012E") except Exception as e: raise InvalidParameter("KCHVMSTOR0015E", {'error': e}) if vol_info['ref_cnt'] != 0: raise InvalidParameter("KCHVMSTOR0016E") params['format'] = vol_info['format'] params['path'] = vol_info['path'] params['src_type'] = _check_path(params['path']) params.setdefault('bus', _get_device_bus(params['type'], dom)) if (params['bus'] not in HOTPLUG_TYPE and DOM_STATE_MAP[dom.info()[0]] != 'shutoff'): raise InvalidOperation('KCHVMSTOR0011E') params.update(self._get_available_bus_address(params['bus'], vm_name)) # Add device to VM dev_xml = _get_storage_xml(params) try: conn = self.conn.get() dom = conn.lookupByName(vm_name) dom.attachDeviceFlags(dev_xml, libvirt.VIR_DOMAIN_AFFECT_CURRENT) except Exception as e: raise OperationFailed("KCHVMSTOR0008E", {'error': e.message}) return params['dev']
def get_vmsnapshot(self, vm_name, name):
    """Return the libvirt snapshot object *name* of VM *vm_name*.

    Raises NotFoundError when the snapshot does not exist, or
    OperationFailed on any other libvirt error.
    """
    vir_dom = VMModel.get_vm(vm_name, self.conn)
    try:
        return vir_dom.snapshotLookupByName(name, 0)
    # BUG FIX: "except E, e" is Python-2-only syntax; "except E as e"
    # works on Python 2.6+ and Python 3 alike.
    except libvirt.libvirtError as e:
        code = e.get_error_code()
        if code == libvirt.VIR_ERR_NO_DOMAIN_SNAPSHOT:
            raise NotFoundError('KCHSNAP0003E', {'name': name,
                                                'vm': vm_name})
        else:
            raise OperationFailed('KCHSNAP0004E',
                                  {'name': name, 'vm': vm_name,
                                   'err': e.message})
def update(self, vm_name, dev_name, params):
    """Change the media path of a VM's CD-ROM device.

    Only 'cdrom' devices may be updated.  Keeps the objstore disk
    reference counts in sync with the old and new media paths (best
    effort: a ref-count failure is logged, not raised).  Returns the
    device name.
    """
    old_disk_ref_cnt = None
    new_disk_ref_cnt = None

    dom = VMModel.get_vm(vm_name, self.conn)
    dev_info = self.lookup(vm_name, dev_name)
    if dev_info['type'] != 'cdrom':
        raise InvalidOperation("KCHVMSTOR0006E")

    params['path'] = check_remote_disk_path(params.get('path', ''),
                                            self.caps.qemu_stream_dns)
    old_disk_path = dev_info['path']
    new_disk_path = params['path']
    if new_disk_path != old_disk_path:
        # An empty path means a CD-ROM was empty or ejected:
        # BUG FIX: the original used "is not ''", which compares object
        # identity and only works by CPython string-interning accident;
        # "!=" performs the intended value comparison.
        if old_disk_path != '':
            old_disk_ref_cnt = get_disk_ref_cnt(self.objstore, self.conn,
                                                old_disk_path)
        if new_disk_path != '':
            new_disk_ref_cnt = get_disk_ref_cnt(self.objstore, self.conn,
                                                new_disk_path)

    dev_info.update(params)
    dev, xml = get_disk_xml(dev_info)

    try:
        dom.updateDeviceFlags(xml, get_vm_config_flag(dom, 'all'))
    except Exception as e:
        raise OperationFailed("KCHVMSTOR0009E", {'error': e.message})

    try:
        if old_disk_ref_cnt is not None and \
           old_disk_ref_cnt > 0:
            set_disk_ref_cnt(self.objstore, old_disk_path,
                             old_disk_ref_cnt - 1)
        if new_disk_ref_cnt is not None:
            set_disk_ref_cnt(self.objstore, new_disk_path,
                             new_disk_ref_cnt + 1)
    except Exception as e:
        kimchi_log.error("Unable to update dev ref_cnt on update due to"
                         " %s:" % e.message)
    return dev
def update(self, vm_name, dev_name, params):
    """Change the media path of a VM's CD-ROM device.

    Only 'cdrom' devices may be updated.  Keeps the objstore used_by
    lists in sync with the old and new media paths (best effort: a
    used_by failure is logged, not raised).  Returns the device name.
    """
    old_disk_used_by = None
    new_disk_used_by = None

    dom = VMModel.get_vm(vm_name, self.conn)
    dev_info = self.lookup(vm_name, dev_name)
    if dev_info['type'] != 'cdrom':
        raise InvalidOperation("KCHVMSTOR0006E")

    params['path'] = params.get('path', '')
    old_disk_path = dev_info['path']
    new_disk_path = params['path']
    if new_disk_path != old_disk_path:
        # An empty path means a CD-ROM was empty or ejected:
        # BUG FIX: the original used "is not ''", which compares object
        # identity and only works by CPython string-interning accident;
        # "!=" performs the intended value comparison.
        if old_disk_path != '':
            old_disk_used_by = get_disk_used_by(
                self.objstore, self.conn, old_disk_path)
        if new_disk_path != '':
            new_disk_used_by = get_disk_used_by(
                self.objstore, self.conn, new_disk_path)

    dev_info.update(params)
    dev, xml = get_disk_xml(dev_info)

    try:
        dom.updateDeviceFlags(xml, get_vm_config_flag(dom, 'all'))
    except Exception as e:
        raise OperationFailed("KCHVMSTOR0009E", {'error': e.message})

    try:
        if old_disk_used_by is not None and \
           vm_name in old_disk_used_by:
            old_disk_used_by.remove(vm_name)
            set_disk_used_by(self.objstore, old_disk_path,
                             old_disk_used_by)
        if new_disk_used_by is not None:
            new_disk_used_by.append(vm_name)
            set_disk_used_by(self.objstore, new_disk_path,
                             new_disk_used_by)
    except Exception as e:
        kimchi_log.error("Unable to update dev used_by on update due to"
                         " %s:" % e.message)
    return dev
def update(self, vm_name, dev_name, params):
    """Change the media path of a VM's CD-ROM device.

    Only 'cdrom' devices may be updated.  Keeps the objstore disk
    reference counts in sync with the old and new media paths (best
    effort: a ref-count failure is logged, not raised).  Returns the
    device name.
    """
    old_disk_ref_cnt = None
    new_disk_ref_cnt = None

    dom = VMModel.get_vm(vm_name, self.conn)
    dev_info = self.lookup(vm_name, dev_name)
    if dev_info['type'] != 'cdrom':
        raise InvalidOperation("KCHVMSTOR0006E")

    params['path'] = check_remote_disk_path(params.get('path', ''),
                                            self.caps.qemu_stream_dns)
    old_disk_path = dev_info['path']
    new_disk_path = params['path']
    if new_disk_path != old_disk_path:
        # An empty path means a CD-ROM was empty or ejected:
        # BUG FIX: the original used "is not ''", which compares object
        # identity and only works by CPython string-interning accident;
        # "!=" performs the intended value comparison.
        if old_disk_path != '':
            old_disk_ref_cnt = get_disk_ref_cnt(
                self.objstore, self.conn, old_disk_path)
        if new_disk_path != '':
            new_disk_ref_cnt = get_disk_ref_cnt(
                self.objstore, self.conn, new_disk_path)

    dev_info.update(params)
    dev, xml = get_disk_xml(dev_info)

    try:
        dom.updateDeviceFlags(xml, get_vm_config_flag(dom, 'all'))
    except Exception as e:
        raise OperationFailed("KCHVMSTOR0009E", {'error': e.message})

    try:
        if old_disk_ref_cnt is not None and \
           old_disk_ref_cnt > 0:
            set_disk_ref_cnt(self.objstore, old_disk_path,
                             old_disk_ref_cnt - 1)
        if new_disk_ref_cnt is not None:
            set_disk_ref_cnt(self.objstore, new_disk_path,
                             new_disk_ref_cnt + 1)
    except Exception as e:
        kimchi_log.error("Unable to update dev ref_cnt on update due to"
                         " %s:" % e.message)
    return dev
def update(self, vm_name, dev_name, params):
    """Change the media path of a VM's CD-ROM device.

    Only 'cdrom' devices may be updated.  Keeps the objstore used_by
    lists in sync with the old and new media paths (best effort: a
    used_by failure is logged, not raised).  Returns the device name.
    """
    old_disk_used_by = None
    new_disk_used_by = None

    dom = VMModel.get_vm(vm_name, self.conn)
    dev_info = self.lookup(vm_name, dev_name)
    if dev_info['type'] != 'cdrom':
        raise InvalidOperation("KCHVMSTOR0006E")

    params['path'] = params.get('path', '')
    old_disk_path = dev_info['path']
    new_disk_path = params['path']
    if new_disk_path != old_disk_path:
        # An empty path means a CD-ROM was empty or ejected:
        # BUG FIX: the original used "is not ''", which compares object
        # identity and only works by CPython string-interning accident;
        # "!=" performs the intended value comparison.
        if old_disk_path != '':
            old_disk_used_by = get_disk_used_by(self.objstore, self.conn,
                                                old_disk_path)
        if new_disk_path != '':
            new_disk_used_by = get_disk_used_by(self.objstore, self.conn,
                                                new_disk_path)

    dev_info.update(params)
    dev, xml = get_disk_xml(dev_info)

    try:
        dom.updateDeviceFlags(xml, get_vm_config_flag(dom, 'all'))
    except Exception as e:
        raise OperationFailed("KCHVMSTOR0009E", {'error': e.message})

    try:
        if old_disk_used_by is not None and \
           vm_name in old_disk_used_by:
            old_disk_used_by.remove(vm_name)
            set_disk_used_by(self.objstore, old_disk_path,
                             old_disk_used_by)
        if new_disk_used_by is not None:
            new_disk_used_by.append(vm_name)
            set_disk_used_by(self.objstore, new_disk_path,
                             new_disk_used_by)
    except Exception as e:
        kimchi_log.error("Unable to update dev used_by on update due to"
                         " %s:" % e.message)
    return dev
def _get_unavailable_devices(self):
    """Return the names of host devices already assigned to some VM.

    Scans every VM's XML for <hostdev> entries and deduces each
    device's name; VMs without passthrough devices are skipped.
    """
    busy_devices = []
    for vm_name in VMsModel.get_vms(self.conn):
        dom = VMModel.get_vm(vm_name, self.conn)
        root = objectify.fromstring(dom.XMLDesc(0))
        try:
            hostdevs = root.devices.hostdev
        except AttributeError:
            # This VM has no passthrough devices at all.
            continue
        busy_devices.extend(DeviceModel.deduce_dev_name(entry, self.conn)
                            for entry in hostdevs)
    return busy_devices
def update(self, vm, mac, params):
    """Update the network and/or model of interface *mac* on VM *vm*.

    Both changes are applied to the persisted configuration only.
    Returns the interface's MAC address.
    """
    dom = VMModel.get_vm(vm, self.conn)
    iface = self._get_vmiface(vm, mac)

    if iface is None:
        raise NotFoundError("KCHVMIF0001E", {'name': vm, 'iface': mac})

    # FIXME we will support to change the live VM configuration later.
    if iface.attrib['type'] == 'network' and 'network' in params:
        iface.source.attrib['network'] = params['network']
        dom.updateDeviceFlags(etree.tostring(iface),
                              flags=libvirt.VIR_DOMAIN_AFFECT_CONFIG)

    # change on the persisted VM configuration only.
    if 'model' in params and dom.isPersistent():
        iface.model.attrib["type"] = params['model']
        dom.updateDeviceFlags(etree.tostring(iface),
                              flags=libvirt.VIR_DOMAIN_AFFECT_CONFIG)

    return mac
def delete(self, vm_name, dev_name):
    """Detach the CD-ROM device *dev_name* from VM *vm_name*.

    The VM must be shut off; raises NotFoundError when the device
    does not exist.
    """
    # Get storage device xml
    disk = self._get_device_xml(vm_name, dev_name)
    if disk is None:
        raise NotFoundError("KCHCDROM0007E", {'dev_name': dev_name,
                                              'vm_name': vm_name})

    dom = VMModel.get_vm(vm_name, self.conn)
    if DOM_STATE_MAP[dom.info()[0]] != 'shutoff':
        raise InvalidOperation('KCHCDROM0011E')

    try:
        dom = self.conn.get().lookupByName(vm_name)
        dom.detachDeviceFlags(etree.tostring(disk),
                              libvirt.VIR_DOMAIN_AFFECT_CURRENT)
    except Exception as e:
        raise OperationFailed("KCHCDROM0010E", {'error': e.message})
def create(self, vm, params):
    """Attach a new network interface to VM *vm*.

    The VM must be shut off.  A random MAC address, unique among the
    VM's existing interfaces, is generated.  Returns that MAC address.
    """
    def random_mac():
        # 52:54:00 is the QEMU/KVM OUI; randomize the lower three
        # octets (the first of them capped at 0x7f, as before).
        octets = [0x52, 0x54, 0x00,
                  random.randint(0x00, 0x7f),
                  random.randint(0x00, 0xff),
                  random.randint(0x00, 0xff)]
        return ':'.join("%02x" % octet for octet in octets)

    conn = self.conn.get()
    networks = conn.listNetworks() + conn.listDefinedNetworks()

    if params["type"] == "network" and params["network"] not in networks:
        raise InvalidParameter("KCHVMIF0002E",
                               {'name': vm, 'network': params["network"]})

    dom = VMModel.get_vm(vm, self.conn)
    if DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        raise InvalidOperation("KCHVMIF0003E")

    # BUG FIX: this used to be a generator expression, which is
    # exhausted after the first membership test — a second loop
    # iteration would test against an empty iterator and could accept
    # a duplicate MAC.  Materialize a list so every "in" test sees all
    # existing addresses.
    macs = [iface.mac.get('address')
            for iface in self.get_vmifaces(vm, self.conn)]

    mac = random_mac()
    while mac in macs:
        mac = random_mac()

    # Build the <interface> element; source and model are optional.
    # (Replaces the old "cond and list.append(...)" side-effect hack
    # with plain if statements.)
    children = [E.mac(address=mac)]
    if "network" in params:
        children.append(E.source(network=params['network']))
    if "model" in params:
        children.append(E.model(type=params['model']))
    attrib = {"type": params["type"]}

    xml = etree.tostring(E.interface(*children, **attrib))
    dom.attachDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_CURRENT)

    return mac
def _attach_pci_device(self, vmid, dev_info):
    """Attach the PCI host device *dev_info* (and its whole IOMMU group)
    to VM *vmid*.

    Devices sharing an IOMMU group must be assigned together; on any
    failure, every device attached so far is detached again by the
    rollback context.  Returns the name of the requested device.
    """
    self._validate_pci_passthrough_env()

    dom = VMModel.get_vm(vmid, self.conn)
    # Due to libvirt limitation, we don't support live assignment of a
    # device to the vfio driver: vfio is only chosen when the VM is
    # shut off and the kernel supports it; otherwise fall back to kvm.
    driver = ('vfio' if DOM_STATE_MAP[dom.info()[0]] == "shutoff" and
              self.caps.kernel_vfio else 'kvm')

    # Attach all PCI devices in the same IOMMU group
    dev_model = DeviceModel(conn=self.conn)
    devs_model = DevicesModel(conn=self.conn)
    affected_names = devs_model.get_list(
        _passthrough_affected_by=dev_info['name'])
    passthrough_names = devs_model.get_list(_cap='pci',
                                            _passthrough='true')
    # Only devices that are both affected by this assignment and
    # passthrough-capable belong to the group attached below.
    group_names = list(set(affected_names) & set(passthrough_names))
    pci_infos = [dev_model.lookup(dev_name) for dev_name in group_names]
    pci_infos.append(dev_info)

    device_flags = get_vm_config_flag(dom, mode='all')

    with RollbackContext() as rollback:
        for pci_info in pci_infos:
            pci_info['detach_driver'] = driver
            xmlstr = self._get_pci_device_xml(pci_info)
            try:
                dom.attachDeviceFlags(xmlstr, device_flags)
            except libvirt.libvirtError:
                kimchi_log.error(
                    'Failed to attach host device %s to VM %s: \n%s',
                    pci_info['name'], vmid, xmlstr)
                raise
            # Register an undo for this attach, in case a later device
            # in the group fails to attach.
            rollback.prependDefer(dom.detachDeviceFlags,
                                  xmlstr, device_flags)
        # Every device attached successfully: cancel the deferred
        # detaches so the assignments stick.
        rollback.commitAll()

    return dev_info['name']
def lookup(self, vmid, dev_name):
    """Return {'name', 'type'} for the host device *dev_name* attached
    to VM *vmid*.

    Raises NotFoundError when the VM has no matching <hostdev> entry.
    """
    dom = VMModel.get_vm(vmid, self.conn)
    root = objectify.fromstring(dom.XMLDesc(0))

    try:
        hostdevs = root.devices.hostdev
    except AttributeError:
        # VM has no hostdev section at all.
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})

    devsmodel = VMHostDevsModel(conn=self.conn)
    for entry in hostdevs:
        if devsmodel._deduce_dev_name(entry) == dev_name:
            return {'name': dev_name, 'type': entry.attrib['type']}

    raise NotFoundError('KCHVMHDEV0001E',
                        {'vmid': vmid, 'dev_name': dev_name})