def delete(self, vmid, dev_name):
    """Detach host device *dev_name* from VM *vmid* asynchronously.

    Returns a Task tracking the detach operation. Raises NotFoundError
    when the domain has no <hostdev> entries at all.
    """
    dom = VMModel.get_vm(vmid, self.conn)
    xmlstr = dom.XMLDesc(0)
    root = objectify.fromstring(xmlstr)
    try:
        hostdev = root.devices.hostdev
    except AttributeError:
        raise NotFoundError('KCHVMHDEV0001E', {
            'vmid': vmid, 'dev_name': dev_name})

    task_params = {
        'vmid': vmid,
        'dev_name': dev_name,
        'dom': dom,
        'hostdev': hostdev,
        # serializes concurrent attach/detach work on this guest
        'lock': threading.RLock(),
    }
    task_uri = u'/plugins/kimchi/vms/%s/hostdevs/%s' % (
        VMModel.get_vm(vmid, self.conn).name(),
        dev_name,
    )
    taskid = AsyncTask(task_uri, self._detach_device, task_params).id
    return self.task.lookup(taskid)
def create(self, vmid, params):
    """Attach host device params['name'] to VM *vmid* asynchronously.

    PCI devices are attached directly; other device types are first
    detached from the host (with rollback on failure) and then attached
    via a per-type handler. Returns a Task tracking the operation.
    """
    dev_name = params['name']
    dev_info = self.dev_model.lookup(dev_name)

    if dev_info['device_type'] == 'pci':
        taskid = AsyncTask(u'/plugins/kimchi/vms/%s/hostdevs/' %
                           VMModel.get_vm(vmid, self.conn).name(),
                           self._attach_pci_device,
                           {'vmid': vmid, 'dev_info': dev_info,
                            'lock': threading.RLock()}).id
        return self.task.lookup(taskid)

    with RollbackContext() as rollback:
        try:
            # "dettach" is the actual (misspelled) libvirt API name
            dev = self.conn.get().nodeDeviceLookupByName(dev_name)
            dev.dettach()
        except Exception:
            raise OperationFailed('KCHVMHDEV0005E', {'name': dev_name})
        else:
            rollback.prependDefer(dev.reAttach)
        rollback.commitAll()

    # NOTE(review): the callback is passed as a *string*
    # ('_attach_<type>_device'), not a bound method -- assumes AsyncTask
    # resolves it; confirm against AsyncTask's implementation.
    taskid = AsyncTask(u'/plugins/kimchi/vms/%s/hostdevs/' %
                       VMModel.get_vm(vmid, self.conn).name(),
                       '_attach_%s_device' % dev_info['device_type'],
                       {'vmid': vmid, 'dev_info': dev_info,
                        'lock': threading.RLock()}).id
    return self.task.lookup(taskid)
def create(self, vmid, params):
    """Attach host device params['name'] to VM *vmid* asynchronously.

    PCI devices are attached directly; other device types are first
    detached from the host (with rollback on failure) and then attached
    via a per-type handler. Returns a Task tracking the operation.
    """
    dev_name = params['name']
    dev_info = self.dev_model.lookup(dev_name)

    if dev_info['device_type'] == 'pci':
        taskid = AsyncTask(
            u'/plugins/kimchi/vms/%s/hostdevs/' %
            VMModel.get_vm(vmid, self.conn).name(),
            self._attach_pci_device,
            {'vmid': vmid, 'dev_info': dev_info,
             'lock': threading.RLock()},
        ).id
        return self.task.lookup(taskid)

    with RollbackContext() as rollback:
        try:
            # "dettach" is the actual (misspelled) libvirt API name
            dev = self.conn.get().nodeDeviceLookupByName(dev_name)
            dev.dettach()
        except Exception:
            raise OperationFailed('KCHVMHDEV0005E', {'name': dev_name})
        else:
            rollback.prependDefer(dev.reAttach)
        rollback.commitAll()

    # NOTE(review): the callback is passed as a *string*
    # ('_attach_<type>_device'), not a bound method -- assumes AsyncTask
    # resolves it; confirm against AsyncTask's implementation.
    taskid = AsyncTask(
        u'/plugins/kimchi/vms/%s/hostdevs/' %
        VMModel.get_vm(vmid, self.conn).name(),
        '_attach_%s_device' % dev_info['device_type'],
        {'vmid': vmid, 'dev_info': dev_info,
         'lock': threading.RLock()},
    ).id
    return self.task.lookup(taskid)
def create(self, vmid, params):
    """Attach host device params['name'] to VM *vmid* asynchronously.

    Validates the device is eligible for passthrough, then schedules
    the attach as a Task via add_task. Non-PCI devices are detached
    from the host first, with rollback on failure.
    """
    dev_name = params['name']
    self._passthrough_device_validate(dev_name)
    dev_info = DeviceModel(conn=self.conn).lookup(dev_name)

    if dev_info['device_type'] == 'pci':
        taskid = add_task(
            u'/plugins/kimchi/vms/%s/hostdevs/' %
            VMModel.get_vm(vmid, self.conn).name(),
            self._attach_pci_device, self.objstore, {
                'vmid': vmid, 'dev_info': dev_info
            })
        return self.task.lookup(taskid)

    with RollbackContext() as rollback:
        try:
            # "dettach" is the actual (misspelled) libvirt API name
            dev = self.conn.get().nodeDeviceLookupByName(dev_name)
            dev.dettach()
        except Exception:
            raise OperationFailed('KCHVMHDEV0005E', {'name': dev_name})
        else:
            rollback.prependDefer(dev.reAttach)
        rollback.commitAll()

    # NOTE(review): callback passed as a string name, not a bound
    # method -- assumes add_task resolves it; confirm.
    taskid = add_task(
        u'/plugins/kimchi/vms/%s/hostdevs/' %
        VMModel.get_vm(vmid, self.conn).name(),
        '_attach_%s_device' % dev_info['device_type'],
        self.objstore, {
            'vmid': vmid, 'dev_info': dev_info
        })
    return self.task.lookup(taskid)
def create(self, vmid, params):
    """Attach host device params['name'] to VM *vmid* asynchronously.

    Validates the device is eligible for passthrough, then schedules
    the attach as a Task via add_task. Non-PCI devices are detached
    from the host first, with rollback on failure.
    """
    dev_name = params['name']
    self._passthrough_device_validate(dev_name)
    dev_info = DeviceModel(conn=self.conn).lookup(dev_name)

    if dev_info['device_type'] == 'pci':
        taskid = add_task(u'/plugins/kimchi/vms/%s/hostdevs/' %
                          VMModel.get_vm(vmid, self.conn).name(),
                          self._attach_pci_device, self.objstore,
                          {'vmid': vmid, 'dev_info': dev_info})
        return self.task.lookup(taskid)

    with RollbackContext() as rollback:
        try:
            # "dettach" is the actual (misspelled) libvirt API name
            dev = self.conn.get().nodeDeviceLookupByName(dev_name)
            dev.dettach()
        except Exception:
            raise OperationFailed('KCHVMHDEV0005E', {'name': dev_name})
        else:
            rollback.prependDefer(dev.reAttach)
        rollback.commitAll()

    # NOTE(review): callback passed as a string name, not a bound
    # method -- assumes add_task resolves it; confirm.
    taskid = add_task(u'/plugins/kimchi/vms/%s/hostdevs/' %
                      VMModel.get_vm(vmid, self.conn).name(),
                      '_attach_%s_device' % dev_info['device_type'],
                      self.objstore,
                      {'vmid': vmid, 'dev_info': dev_info})
    return self.task.lookup(taskid)
def _get_ips(self, vm, mac, network):
    """Return the list of IP addresses bound to *mac* on VM *vm*.

    Combines the host ARP cache with the libvirt network's DHCP
    leases. Returns an empty list when the guest is shut off.
    """
    ips = []
    # Return empty list if shutoff, even if leases still valid or ARP
    # cache has entries for this MAC.
    conn = self.conn.get()
    dom = VMModel.get_vm(vm, self.conn)
    if DOM_STATE_MAP[dom.info()[0]] == "shutoff":
        return ips

    # An iface may have multiple IPs
    # An IP could have been assigned without libvirt.
    # First check the ARP cache.
    # (bug fix: file.xreadlines() is Python-2-only and long deprecated;
    # iterating the file object is equivalent and portable.)
    with open('/proc/net/arp') as f:
        ips = [line.split()[0] for line in f if mac in line]

    # Some ifaces may be inactive, so if the ARP cache didn't have them,
    # and they happen to be assigned via DHCP, we can check there too.
    try:
        # Some type of interfaces may not have a network associated with
        net = conn.networkLookupByName(network)
        leases = net.DHCPLeases(mac)
        for lease in leases:
            ip = lease.get('ipaddr')
            if ip not in ips:
                ips.append(ip)
    except libvirt.libvirtError:
        pass

    return ips
def lookup(self, vmid, dev_name):
    """Return details of host device *dev_name* attached to VM *vmid*.

    Raises NotFoundError when the device is not attached to the guest.
    """
    dom = VMModel.get_vm(vmid, self.conn)
    xmlstr = dom.XMLDesc(0)
    root = objectify.fromstring(xmlstr)
    try:
        hostdev = root.devices.hostdev
    except AttributeError:
        raise NotFoundError('KCHVMHDEV0001E', {
            'vmid': vmid, 'dev_name': dev_name})

    for e in hostdev:
        deduced_name = DeviceModel.deduce_dev_name(e, self.conn)
        if deduced_name == dev_name:
            dev_info = self.dev_model.lookup(dev_name)
            return {
                'name': dev_name,
                'type': e.attrib['type'],
                'product': dev_info.get('product', None),
                'vendor': dev_info.get('vendor', None),
                'multifunction': dev_info.get('multifunction', None),
                'vga3d': dev_info.get('vga3d', None),
            }

    # device name was not found among the guest's hostdev entries
    raise NotFoundError('KCHVMHDEV0001E', {
        'vmid': vmid, 'dev_name': dev_name})
def update_mmio_guest(self, vmid, is_attaching): dom = VMModel.get_vm(vmid, self.conn) # get the number of 3D graphic cards already attached to the guest # based on this number we will decide if the memory size will be # increased or not counter = self._count_3D_devices_attached(dom) if counter == 0 and is_attaching: return size = 0 if is_attaching: # suppose this is the 3rd graphic card to be attached to the same # guest, counter will be 2+1 (2 existing + this attachment) times # 32G (0x80000000) size = hex((counter + 1) * WINDOW_SIZE_BAR) else: size = hex(counter * WINDOW_SIZE_BAR) # if the guest already has the xml file we will simply update the # value, otherwise we will add the new field new_xml = self._update_win_memory_size(dom, counter, size) if new_xml is None and is_attaching: new_xml = self._add_win_memory_size(dom, size) # update the XML if new_xml is not None: self.conn.get().defineXML(new_xml)
def get_disk_used_by(objstore, conn, path):
    """Return the list of VM names using the volume at *path*,
    refreshing the object-store 'used_by' record on the way.

    Raises OperationFailed when the record cannot be stored.
    """
    try:
        with objstore as session:
            try:
                used_by = session.get('storagevolume', path)['used_by']
            except (KeyError, NotFoundError):
                wok_log.info('Volume %s not found in obj store.' % path)
                used_by = []
                # try to find this volume in existing vm
                vms_list = VMsModel.get_vms(conn)
                for vm in vms_list:
                    dom = VMModel.get_vm(vm, conn)
                    storages = get_vm_disks(dom)
                    for disk in storages.keys():
                        d_info = get_vm_disk_info(dom, disk)
                        if path == d_info['path']:
                            used_by.append(vm)
                try:
                    session.store('storagevolume', path,
                                  {'used_by': used_by},
                                  get_kimchi_version())
                except Exception as e:
                    # Let the exception be raised. If we allow disks'
                    # used_by to be out of sync, data corruption could
                    # occour if a disk is added to two guests
                    # unknowingly.
                    # NOTE(review): e.message is Python-2-only; this
                    # branch would AttributeError on Python 3.
                    wok_log.error(
                        'Unable to store storage volume id in'
                        ' objectstore due error: %s', e.message)
                    raise OperationFailed('KCHVOL0017E',
                                          {'err': e.message})
    except Exception as e:
        # This exception is going to catch errors returned by 'with',
        # specially ones generated by 'session.store'. It is outside
        # to avoid conflict with the __exit__ function of 'with'
        raise OperationFailed('KCHVOL0017E', {'err': e.message})
    return used_by
def update(self, vm, mac, params):
    """Change the MAC address of interface *mac* on VM *vm*.

    Only allowed while the guest is shut off; implemented as a
    detach of the old NIC followed by an attach with the new MAC.
    Returns [vm, new_mac].
    """
    dom = VMModel.get_vm(vm, self.conn)
    iface = self._get_vmiface(vm, mac)

    if iface is None:
        raise NotFoundError("KCHVMIF0001E", {'name': vm, 'iface': mac})

    # cannot change mac address in a running system
    if DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        raise InvalidOperation('KCHVMIF0011E')

    # mac address is a required parameter
    if 'mac' not in params:
        raise MissingParameter('KCHVMIF0008E')

    # new mac address must be unique
    if self._get_vmiface(vm, params['mac']) is not None:
        raise InvalidParameter('KCHVMIF0009E',
                               {'name': vm, 'mac': params['mac']})

    flags = 0
    if dom.isPersistent():
        flags |= libvirt.VIR_DOMAIN_AFFECT_CONFIG

    # remove the current nic
    xml = etree.tostring(iface)
    dom.detachDeviceFlags(xml, flags=flags)

    # add the nic with the desired mac address
    iface.mac.attrib['address'] = params['mac']
    xml = etree.tostring(iface)
    dom.attachDeviceFlags(xml, flags=flags)

    return [vm, params['mac']]
def _create_task(self, cb, params):
    """Asynchronous function which actually creates the snapshot.

    Parameters:
    cb -- a callback function to signal the Task's progress.
    params -- a dict with the following values:
        "vm_name": the name of the VM where the snapshot will be
            created.
        "name": the snapshot name.
    """
    vm_name = params['vm_name']
    name = params['name']

    cb('building snapshot XML')
    root_elem = E.domainsnapshot()
    root_elem.append(E.name(name))
    # encoding='unicode' makes ET.tostring return str, not bytes
    xml = ET.tostring(root_elem, encoding='unicode')

    try:
        cb('fetching snapshot domain')
        vir_dom = VMModel.get_vm(vm_name, self.conn)
        cb('creating snapshot')
        vir_dom.snapshotCreateXML(xml, 0)
    except (NotFoundError, OperationFailed, libvirt.libvirtError) as e:
        raise OperationFailed('KCHSNAP0002E', {
            'name': name, 'vm': vm_name, 'err': str(e)
        })

    cb('OK', True)
def delete(self, vm_name, dev_name):
    """Detach disk *dev_name* from VM *vm_name* and decrement the
    volume's used_by reference in the object store.

    Raises InvalidOperation for a non-hotpluggable disk on a running
    guest, OperationFailed when the detach itself fails.
    """
    # bug fix: both names were previously assigned only inside the try
    # block, so the code after it could raise NameError when no path
    # was found; initialize them up front.
    used_by = None
    path = None

    try:
        bus_type = self.lookup(vm_name, dev_name)['bus']
        dom = VMModel.get_vm(vm_name, self.conn)
    except NotFoundError:
        raise

    if (bus_type not in HOTPLUG_TYPE and
            DOM_STATE_MAP[dom.info()[0]] != 'shutoff'):
        raise InvalidOperation('KCHVMSTOR0011E')

    try:
        disk = get_device_node(dom, dev_name)
        path = get_vm_disk_info(dom, dev_name)['path']
        if path is None or len(path) < 1:
            path = self.lookup(vm_name, dev_name)['path']

        # This has to be done before it's detached. If it wasn't
        # in the obj store, its ref count would have been updated
        # by get_disk_used_by()
        if path is not None:
            used_by = get_disk_used_by(self.objstore, self.conn, path)
        else:
            wok_log.error("Unable to decrement volume used_by on"
                          " delete because no path could be found.")

        dom.detachDeviceFlags(etree.tostring(disk),
                              get_vm_config_flag(dom, 'all'))
    except Exception as e:
        raise OperationFailed("KCHVMSTOR0010E", {'error': e.message})

    if used_by is not None and vm_name in used_by:
        used_by.remove(vm_name)
        set_disk_used_by(self.objstore, path, used_by)
    else:
        wok_log.error("Unable to update %s:%s used_by on delete." %
                      (vm_name, dev_name))
def _get_available_bus_address(self, bus_type, vm_name):
    """Return a free IDE controller/bus/unit address for a new disk.

    Non-IDE buses need no explicit address, so an empty dict is
    returned for them. Raises OperationFailed when all four IDE
    slots are taken.
    """
    if bus_type not in ['ide']:
        return dict()

    # libvirt limitation of just 1 ide controller
    # each controller have at most 2 buses and each bus 2 units.
    dom = VMModel.get_vm(vm_name, self.conn)
    disks = self.get_list(vm_name)
    valid_id = [('0', '0'), ('0', '1'), ('1', '0'), ('1', '1')]
    controller_id = '0'
    for dev_name in disks:
        disk = get_device_node(dom, dev_name)
        if disk.target.attrib['bus'] == 'ide':
            # NOTE(review): assumes every IDE disk element carries an
            # <address> child -- confirm for hot-added disks.
            controller_id = disk.address.attrib['controller']
            bus_id = disk.address.attrib['bus']
            unit_id = disk.address.attrib['unit']
            if (bus_id, unit_id) in valid_id:
                valid_id.remove((bus_id, unit_id))
                continue

    if not valid_id:
        raise OperationFailed('KCHVMSTOR0014E',
                              {'type': 'ide', 'limit': 4})
    else:
        # hand out the first still-free (bus, unit) slot
        address = {'controller': controller_id,
                   'bus': valid_id[0][0], 'unit': valid_id[0][1]}
        return dict(address=address)
def create(self, vm_name, params=None):
    """Create a snapshot with the current domain state.

    The VM must be stopped and contain only disks with format 'qcow2';
    otherwise an exception will be raised.

    Parameters:
    vm_name -- the name of the VM where the snapshot will be created.
    params -- a dict with the following values:
        "name": The snapshot name (optional). If omitted, a default
            value based on the current time will be used.

    Return:
    A Task running the operation.
    """
    # bug fix: "params={}" was a shared mutable default argument; use
    # None as the sentinel (matches the sibling implementation).
    if params is None:
        params = {}

    vir_dom = VMModel.get_vm(vm_name, self.conn)
    if DOM_STATE_MAP[vir_dom.info()[0]] != u'shutoff':
        raise InvalidOperation('KCHSNAP0001E', {'vm': vm_name})

    # if the VM has a non-CDROM disk with type 'raw', abort.
    for storage_name in self.vmstorages.get_list(vm_name):
        storage = self.vmstorage.lookup(vm_name, storage_name)
        type = storage['type']
        format = storage['format']

        if type != u'cdrom' and format != u'qcow2':
            raise InvalidOperation('KCHSNAP0010E', {'vm': vm_name,
                                                    'format': format})

    name = params.get('name', unicode(int(time.time())))

    task_params = {'vm_name': vm_name, 'name': name}
    taskid = add_task(u'/plugins/kimchi/vms/%s/snapshots/%s' %
                      (vm_name, name), self._create_task,
                      self.objstore, task_params)
    return self.task.lookup(taskid)
def delete(self, vmid, dev_name):
    """Detach host device *dev_name* from VM *vmid*.

    Raises NotFoundError when the device is not attached to the guest.
    """
    dom = VMModel.get_vm(vmid, self.conn)
    xmlstr = dom.XMLDesc(0)
    root = objectify.fromstring(xmlstr)
    try:
        hostdev = root.devices.hostdev
    except AttributeError:
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})

    # snapshot of all attached PCI devices, handed to
    # _delete_affected_pci_devices after a PCI detach
    pci_devs = [(DeviceModel.deduce_dev_name(e, self.conn), e)
                for e in hostdev if e.attrib['type'] == 'pci']

    for e in hostdev:
        if DeviceModel.deduce_dev_name(e, self.conn) == dev_name:
            xmlstr = etree.tostring(e)
            dom.detachDeviceFlags(
                xmlstr, get_vm_config_flag(dom, mode='all'))
            if e.attrib['type'] == 'pci':
                self._delete_affected_pci_devices(dom, dev_name,
                                                  pci_devs)
            break
    else:
        # loop completed without a match
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})
def delete(self, vmid, dev_name):
    """Detach host device *dev_name* from VM *vmid*.

    3D controller devices may only be detached while the guest is
    shut off; their MMIO window accounting is updated afterwards.
    Raises NotFoundError when the device is not attached.
    """
    dom = VMModel.get_vm(vmid, self.conn)
    xmlstr = dom.XMLDesc(0)
    root = objectify.fromstring(xmlstr)
    try:
        hostdev = root.devices.hostdev
    except AttributeError:
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})

    # snapshot of all attached PCI devices, handed to
    # _delete_affected_pci_devices after a PCI detach
    pci_devs = [(DeviceModel.deduce_dev_name(e, self.conn), e)
                for e in hostdev if e.attrib['type'] == 'pci']

    dev_model = DeviceModel(conn=self.conn)
    dev_info = dev_model.lookup(dev_name)
    is_3D_device = dev_model.is_device_3D_controller(dev_info)
    if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        raise InvalidOperation('KCHVMHDEV0006E',
                               {'name': dev_info['name']})

    for e in hostdev:
        if DeviceModel.deduce_dev_name(e, self.conn) == dev_name:
            xmlstr = etree.tostring(e)
            dom.detachDeviceFlags(
                xmlstr, get_vm_config_flag(dom, mode='all'))
            if e.attrib['type'] == 'pci':
                self._delete_affected_pci_devices(dom, dev_name,
                                                  pci_devs)
            if is_3D_device:
                # shrink the MMIO window now that a 3D card is gone
                devsmodel = VMHostDevsModel(conn=self.conn)
                devsmodel.update_mmio_guest(vmid, False)
            break
    else:
        # loop completed without a match
        raise NotFoundError('KCHVMHDEV0001E',
                            {'vmid': vmid, 'dev_name': dev_name})
def _create_task(self, cb, params):
    """Asynchronous function which actually creates the snapshot.

    Parameters:
    cb -- a callback function to signal the Task's progress.
    params -- a dict with the following values:
        "vm_name": the name of the VM where the snapshot will be
            created.
        "name": the snapshot name.
    """
    vm_name = params['vm_name']
    name = params['name']

    cb('building snapshot XML')
    root_elem = E.domainsnapshot()
    root_elem.append(E.name(name))
    # tostring(..., encoding='utf-8') yields bytes; decode back to str
    xml = ET.tostring(root_elem, encoding='utf-8').decode('utf-8')

    try:
        cb('fetching snapshot domain')
        vir_dom = VMModel.get_vm(vm_name, self.conn)
        cb('creating snapshot')
        vir_dom.snapshotCreateXML(xml, 0)
    except (NotFoundError, OperationFailed, libvirt.libvirtError) as e:
        raise OperationFailed(
            'KCHSNAP0002E', {'name': name, 'vm': vm_name, 'err': str(e)}
        )

    cb('OK', True)
def delete(self, vmid, dev_name):
    """Detach host device *dev_name* from VM *vmid*.

    Raises NotFoundError when the device is not attached to the guest.
    """
    dom = VMModel.get_vm(vmid, self.conn)
    xmlstr = dom.XMLDesc(0)
    root = objectify.fromstring(xmlstr)
    try:
        hostdev = root.devices.hostdev
    except AttributeError:
        raise NotFoundError('KCHVMHDEV0001E', {
            'vmid': vmid, 'dev_name': dev_name
        })

    # snapshot of all attached PCI devices, handed to
    # _delete_affected_pci_devices after a PCI detach
    pci_devs = [(DeviceModel.deduce_dev_name(e, self.conn), e)
                for e in hostdev if e.attrib['type'] == 'pci']

    for e in hostdev:
        if DeviceModel.deduce_dev_name(e, self.conn) == dev_name:
            xmlstr = etree.tostring(e)
            dom.detachDeviceFlags(xmlstr,
                                  get_vm_config_flag(dom, mode='all'))
            if e.attrib['type'] == 'pci':
                self._delete_affected_pci_devices(dom, dev_name,
                                                  pci_devs)
            break
    else:
        # loop completed without a match
        raise NotFoundError('KCHVMHDEV0001E', {
            'vmid': vmid, 'dev_name': dev_name
        })
def get_disk_used_by(objstore, conn, path):
    """Return the list of VM names using the volume at *path*,
    refreshing the object-store 'used_by' record on the way.

    Raises OperationFailed when the record cannot be stored.
    """
    try:
        with objstore as session:
            try:
                used_by = session.get("storagevolume", path)["used_by"]
            except (KeyError, NotFoundError):
                wok_log.info("Volume %s not found in obj store." % path)
                used_by = []
                # try to find this volume in existing vm
                vms_list = VMsModel.get_vms(conn)
                for vm in vms_list:
                    dom = VMModel.get_vm(vm, conn)
                    storages = get_vm_disks(dom)
                    for disk in storages.keys():
                        d_info = get_vm_disk_info(dom, disk)
                        if path == d_info["path"]:
                            used_by.append(vm)
                try:
                    session.store("storagevolume", path,
                                  {"used_by": used_by},
                                  get_kimchi_version())
                except Exception as e:
                    # Let the exception be raised. If we allow disks'
                    # used_by to be out of sync, data corruption could
                    # occour if a disk is added to two guests
                    # unknowingly.
                    # NOTE(review): e.message is Python-2-only; this
                    # branch would AttributeError on Python 3.
                    wok_log.error("Unable to store storage volume id in"
                                  " objectstore due error: %s",
                                  e.message)
                    raise OperationFailed("KCHVOL0017E",
                                          {"err": e.message})
    except Exception as e:
        # This exception is going to catch errors returned by 'with',
        # specially ones generated by 'session.store'. It is outside
        # to avoid conflict with the __exit__ function of 'with'
        raise OperationFailed("KCHVOL0017E", {"err": e.message})
    return used_by
def test_vm_livemigrate_transient(self):
    """Live-migrate a transient (undefined-while-running) VM and check
    it arrives non-persistent on the remote host. (Python 2 syntax.)"""
    # NOTE(review): a local Model instance is created here, yet several
    # calls below go through self.inst instead -- confirm which model
    # instance this test is meant to exercise.
    inst = model.Model(libvirt_uri='qemu:///system',
                       objstore_loc=self.tmp_store)
    self.create_vm_test()
    with RollbackContext() as rollback:
        try:
            # removing cdrom because it is not shared storage and will make
            # the migration fail
            dev_list = self.inst.vmstorages_get_list('test_vm_migrate')
            self.inst.vmstorage_delete('test_vm_migrate', dev_list[0])
            self.inst.vm_start('test_vm_migrate')
            # to make the VM transient, undefine it while it's running
            vm = VMModel.get_vm(
                'test_vm_migrate', LibvirtConnection('qemu:///system')
            )
            vm.undefine()
            task = inst.vm_migrate('test_vm_migrate',
                                   KIMCHI_LIVE_MIGRATION_TEST)
            inst.task_wait(task['id'])
            self.assertIn('test_vm_migrate', self.get_remote_vm_list())

            remote_conn = self.get_remote_conn()
            rollback.prependDefer(remote_conn.close)
            remote_vm = remote_conn.lookupByName('test_vm_migrate')
            self.assertFalse(remote_vm.isPersistent())
            remote_vm.destroy()
        except Exception, e:
            # Clean up here instead of rollback because if the
            # VM was turned transient and shut down it might
            # not exist already - rollback in this case will cause
            # a QEMU error
            vm = VMModel.get_vm(
                'test_vm_migrate', LibvirtConnection('qemu:///system')
            )
            if vm.isPersistent():
                vm.undefine()
            vm.shutdown()
            self.fail('Migration test failed: %s' % e.message)
def get_list(self, vm_name):
    """Return snapshot names of *vm_name*, case-insensitively sorted.

    Python 2 variant: names are decoded from UTF-8 to unicode.
    """
    vir_dom = VMModel.get_vm(vm_name, self.conn)
    try:
        vir_snaps = vir_dom.listAllSnapshots(0)
        return sorted([s.getName().decode("utf-8") for s in vir_snaps],
                      key=unicode.lower)
    except libvirt.libvirtError, e:
        raise OperationFailed("KCHSNAP0005E",
                              {"vm": vm_name, "err": e.message})
def test_vm_livemigrate_RDMA(self, mock_migrate, mock_remote_conn,
                             mock_precheck):
    """Verify that an RDMA-enabled migration calls the (mocked)
    migrate helper with PEER2PEER|LIVE|TUNNELLED flags and an
    rdma:// destination URI. (Python 2 syntax.)"""
    mock_remote_conn.return_value = 'remote_conn'
    self.create_vm_test()
    try:
        # removing cdrom because it is not shared storage and will make
        # the migration fail
        dev_list = self.inst.vmstorages_get_list('test_vm_migrate')
        self.inst.vmstorage_delete('test_vm_migrate', dev_list[0])
        self.inst.vm_start('test_vm_migrate')
        # to make the VM transient, undefine it while it's running
        vm = VMModel.get_vm(
            'test_vm_migrate', LibvirtConnection('qemu:///system')
        )
        vm.undefine()
        self.inst.vm_migrate('test_vm_migrate',
                             KIMCHI_LIVE_MIGRATION_TEST,
                             enable_rdma=True)
        flags = (libvirt.VIR_MIGRATE_PEER2PEER |
                 libvirt.VIR_MIGRATE_LIVE |
                 libvirt.VIR_MIGRATE_TUNNELLED)
        param_uri = 'rdma://' + KIMCHI_LIVE_MIGRATION_TEST
        mock_migrate.assert_called_once_with(vm, flags, param_uri)
    except Exception, e:
        # Clean up here instead of rollback because if the
        # VM was turned transient and shut down it might
        # not exist already - rollback in this case will cause
        # a QEMU error
        vm = VMModel.get_vm(
            'test_vm_migrate', LibvirtConnection('qemu:///system')
        )
        if vm.isPersistent():
            vm.undefine()
        vm.shutdown()
        self.fail('Migration test failed: %s' % e.message)
def get_list(self, vm_name):
    """Return the names of every snapshot of *vm_name*, sorted
    case-insensitively."""
    domain = VMModel.get_vm(vm_name, self.conn)
    try:
        snapshots = domain.listAllSnapshots(0)
        names = [snap.getName() for snap in snapshots]
    except libvirt.libvirtError as err:
        raise OperationFailed(
            'KCHSNAP0005E', {'vm': vm_name, 'err': str(err)})
    names.sort(key=str.lower)
    return names
def get_list(self, vmid):
    """List the host device names currently assigned to VM *vmid*."""
    domain = VMModel.get_vm(vmid, self.conn)
    tree = objectify.fromstring(domain.XMLDesc(0))
    try:
        entries = tree.devices.hostdev
    except AttributeError:
        # the domain XML has no <hostdev> elements at all
        return []
    names = []
    for entry in entries:
        names.append(DeviceModel.deduce_dev_name(entry, self.conn))
    return names
def lookup(self, vm_name): vir_dom = VMModel.get_vm(vm_name, self.conn) try: vir_snap = vir_dom.snapshotCurrent(0) snap_name = vir_snap.getName().decode("utf-8") except libvirt.libvirtError, e: if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN_SNAPSHOT: return {} raise OperationFailed("KCHSNAP0008E", {"vm": vm_name, "err": e.message})
def get_vmsnapshot(self, vm_name, name):
    """Return the libvirt snapshot object *name* of VM *vm_name*.

    Raises NotFoundError when the snapshot does not exist,
    OperationFailed on any other libvirt error. (Python 2 syntax.)
    """
    vir_dom = VMModel.get_vm(vm_name, self.conn)
    try:
        return vir_dom.snapshotLookupByName(name, 0)
    except libvirt.libvirtError, e:
        code = e.get_error_code()
        if code == libvirt.VIR_ERR_NO_DOMAIN_SNAPSHOT:
            raise NotFoundError("KCHSNAP0003E", {"name": name,
                                                 "vm": vm_name})
        else:
            raise OperationFailed("KCHSNAP0004E", {"name": name,
                                                   "vm": vm_name,
                                                   "err": e.message})
def revert(self, vm_name, name):
    """Revert *vm_name* to snapshot *name*.

    Returns [new_vm_name, snapshot_name]; the VM name recorded in the
    snapshot XML becomes the guest's name after the revert.
    (Python 2 syntax.)
    """
    try:
        vir_dom = VMModel.get_vm(vm_name, self.conn)
        vir_snap = self.get_vmsnapshot(vm_name, name)
        vir_dom.revertToSnapshot(vir_snap, 0)

        # get vm name recorded in the snapshot and return new uri params
        vm_new_name = xpath_get_text(vir_snap.getXMLDesc(0),
                                     "domain/name")[0]
        return [vm_new_name, name]
    except libvirt.libvirtError, e:
        raise OperationFailed("KCHSNAP0009E", {"name": name,
                                               "vm": vm_name,
                                               "err": e.message})
def get_list(self, vm_name):
    """Case-insensitively sorted names of every snapshot of
    *vm_name*."""
    dom = VMModel.get_vm(vm_name, self.conn)
    try:
        all_snaps = dom.listAllSnapshots(0)
        result = sorted((snap.getName() for snap in all_snaps),
                        key=str.lower)
    except libvirt.libvirtError as error:
        raise OperationFailed('KCHSNAP0005E', {
            'vm': vm_name, 'err': str(error)
        })
    return result
def test_vm_livemigrate_RDMA(self, mock_migrate, mock_remote_conn,
                             mock_precheck):
    """Verify that an RDMA-enabled migration calls the (mocked)
    migrate helper with PEER2PEER|LIVE|TUNNELLED flags and an
    rdma:// destination URI."""
    mock_remote_conn.return_value = 'remote_conn'
    self.create_vm_test()
    try:
        # removing cdrom because it is not shared storage and will make
        # the migration fail
        dev_list = self.inst.vmstorages_get_list('test_vm_migrate')
        self.inst.vmstorage_delete('test_vm_migrate', dev_list[0])
        self.inst.vm_start('test_vm_migrate')
        # to make the VM transient, undefine it while it's running
        vm = VMModel.get_vm('test_vm_migrate',
                            LibvirtConnection('qemu:///system'))
        vm.undefine()
        self.inst.vm_migrate('test_vm_migrate',
                             KIMCHI_LIVE_MIGRATION_TEST,
                             enable_rdma=True)
        flags = (libvirt.VIR_MIGRATE_PEER2PEER |
                 libvirt.VIR_MIGRATE_LIVE |
                 libvirt.VIR_MIGRATE_TUNNELLED)
        param_uri = 'rdma://' + KIMCHI_LIVE_MIGRATION_TEST
        mock_migrate.assert_called_once_with(vm, flags, param_uri)
    except Exception as e:
        # Clean up here instead of rollback because if the
        # VM was turned transient and shut down it might
        # not exist already - rollback in this case will cause
        # a QEMU error
        vm = VMModel.get_vm('test_vm_migrate',
                            LibvirtConnection('qemu:///system'))
        if vm.isPersistent():
            vm.undefine()
        vm.shutdown()
        # bug fix: exceptions have no .message attribute on Python 3,
        # which made this handler raise AttributeError and hide the
        # real failure; use str(e) instead.
        self.fail('Migration test failed: %s' % str(e))
def get_list(self, vm_name):
    """Return snapshot names of *vm_name*, case-insensitively sorted.

    Python 2 variant: names are decoded from UTF-8 to unicode.
    """
    vir_dom = VMModel.get_vm(vm_name, self.conn)
    try:
        vir_snaps = vir_dom.listAllSnapshots(0)
        return sorted([s.getName().decode('utf-8') for s in vir_snaps],
                      key=unicode.lower)
    except libvirt.libvirtError, e:
        raise OperationFailed('KCHSNAP0005E', {
            'vm': vm_name, 'err': e.message
        })
def get_disk_used_by(conn, path):
    """Return the names of every VM that has a disk backed by
    *path* (one entry per matching disk)."""
    users = []
    for vm_name in VMsModel.get_vms(conn):
        domain = VMModel.get_vm(vm_name, conn)
        for disk_name in get_vm_disks(domain).keys():
            info = get_vm_disk_info(domain, disk_name)
            if info['path'] == path:
                users.append(vm_name)
    return users
def create(self, vm, params):
    """Attach a new network interface described by *params* to VM
    *vm*; returns the MAC address of the new interface."""
    conn = self.conn.get()
    networks = conn.listNetworks() + conn.listDefinedNetworks()
    # materialize: under Python 3 map() returns a one-shot iterator,
    # which would be empty for the membership test below
    networks = [net.decode('utf-8') for net in networks]

    if params['type'] == 'network':
        network = params.get("network")
        if network is None:
            raise MissingParameter('KCHVMIF0007E')
        if network not in networks:
            raise InvalidParameter('KCHVMIF0002E', {
                'name': vm, 'network': network
            })

    # bug fix: this was a generator expression; the first membership
    # test exhausted it, so the uniqueness check in the retry loop
    # below always passed and could hand out a duplicate MAC.
    macs = [iface.mac.get('address')
            for iface in self.get_vmifaces(vm, self.conn)]

    # user defined customized mac address
    if 'mac' in params and params['mac']:
        # make sure it is unique
        if params['mac'] in macs:
            raise InvalidParameter('KCHVMIF0009E', {
                'name': vm, 'mac': params['mac']
            })
    # otherwise choose a random mac address
    else:
        while True:
            params['mac'] = VMIfacesModel.random_mac()
            if params['mac'] not in macs:
                break

    dom = VMModel.get_vm(vm, self.conn)

    os_data = VMModel.vm_get_os_metadata(dom)
    os_version, os_distro = os_data
    xml = get_iface_xml(params, conn.getInfo()[0], os_distro,
                        os_version)

    flags = 0
    if dom.isPersistent():
        flags |= libvirt.VIR_DOMAIN_AFFECT_CONFIG
    if DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE

    dom.attachDeviceFlags(xml, flags)

    return params['mac']
def delete(self, vm, mac):
    """Detach the NIC whose MAC address is *mac* from guest *vm*.

    Raises NotFoundError when no such interface exists.
    """
    dom = VMModel.get_vm(vm, self.conn)
    iface = self._get_vmiface(vm, mac)
    if iface is None:
        raise NotFoundError("KCHVMIF0001E", {'name': vm, 'iface': mac})

    persistent = dom.isPersistent()
    running = DOM_STATE_MAP[dom.info()[0]] != "shutoff"
    affect = (libvirt.VIR_DOMAIN_AFFECT_CONFIG if persistent else 0) \
        | (libvirt.VIR_DOMAIN_AFFECT_LIVE if running else 0)
    dom.detachDeviceFlags(etree.tostring(iface), affect)
def handleVMShutdownPowerOff(self, vm_name):
    """Register a libvirt LIFECYCLE event callback for VM *vm_name*
    so its shutdown/power-off events can be observed.

    Failures are logged, not raised.
    """
    try:
        dom = VMModel.get_vm(vm_name, self.conn)
        cb_id = self.conn.get().domainEventRegisterAny(
            dom,
            libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
            self.event_vmshutdown_cb,
            None)
        # remember the callback id so it can be deregistered later
        self.vm_event_callbacks[vm_name] = cb_id
    except (libvirt.libvirtError, AttributeError) as e:
        # AttributeError here means self.conn.get() had no usable
        # connection object
        if type(e) == AttributeError:
            reason = 'Libvirt service is not running'
        else:
            reason = e.message
        wok_log.error("Register of LIFECYCLE event failed: %s" %
                      reason)
def revert(self, vm_name, name):
    """Revert *vm_name* to snapshot *name*; return the (possibly new)
    VM name recorded in the snapshot together with the snapshot
    name."""
    try:
        domain = VMModel.get_vm(vm_name, self.conn)
        snapshot = self.get_vmsnapshot(vm_name, name)
        domain.revertToSnapshot(snapshot, 0)

        # the snapshot XML records the domain name at snapshot time,
        # which is the guest's name again after the revert
        snap_xml = snapshot.getXMLDesc(0)
        recorded_name = xpath_get_text(snap_xml, 'domain/name')[0]
        return [recorded_name, name]
    except libvirt.libvirtError as err:
        raise OperationFailed(
            'KCHSNAP0009E',
            {'name': name, 'vm': vm_name, 'err': str(err)}
        )
def lookup(self, vm_name):
    """Return info about the current snapshot of *vm_name*.

    Returns an empty dict when the domain has no current snapshot;
    re-raises other libvirt errors as OperationFailed.
    """
    vir_dom = VMModel.get_vm(vm_name, self.conn)
    try:
        vir_snap = vir_dom.snapshotCurrent(0)
        snap_name = vir_snap.getName()
    except libvirt.libvirtError as e:
        if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN_SNAPSHOT:
            return {}

        raise OperationFailed(
            'KCHSNAP0008E', {'vm': vm_name, 'err': str(e)})

    return self.vmsnapshot.lookup(vm_name, snap_name)
def lookup(self, vm_name): vir_dom = VMModel.get_vm(vm_name, self.conn) try: vir_snap = vir_dom.snapshotCurrent(0) snap_name = vir_snap.getName().decode('utf-8') except libvirt.libvirtError, e: if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN_SNAPSHOT: return {} raise OperationFailed('KCHSNAP0008E', { 'vm': vm_name, 'err': e.message })
def get_vmsnapshot(self, vm_name, name):
    """Fetch the libvirt snapshot object *name* of VM *vm_name*.

    Raises NotFoundError when the snapshot does not exist and
    OperationFailed on any other libvirt failure.
    """
    domain = VMModel.get_vm(vm_name, self.conn)
    try:
        return domain.snapshotLookupByName(name, 0)
    except libvirt.libvirtError as err:
        missing = (err.get_error_code() ==
                   libvirt.VIR_ERR_NO_DOMAIN_SNAPSHOT)
        if missing:
            raise NotFoundError('KCHSNAP0003E',
                                {'name': name, 'vm': vm_name})
        raise OperationFailed('KCHSNAP0004E',
                              {'name': name, 'vm': vm_name,
                               'err': str(err)})
def revert(self, vm_name, name):
    """Revert *vm_name* to snapshot *name*.

    Returns [new_vm_name, snapshot_name]; the VM name recorded in the
    snapshot XML becomes the guest's name after the revert.
    """
    try:
        vir_dom = VMModel.get_vm(vm_name, self.conn)
        vir_snap = self.get_vmsnapshot(vm_name, name)
        vir_dom.revertToSnapshot(vir_snap, 0)

        # get vm name recorded in the snapshot and return new uri params
        vm_new_name = xpath_get_text(vir_snap.getXMLDesc(0),
                                     'domain/name')[0]
        return [vm_new_name, name]
    except libvirt.libvirtError as e:
        raise OperationFailed('KCHSNAP0009E', {
            'name': name, 'vm': vm_name, 'err': str(e)
        })
def create(self, vm, params):
    """Attach a new network interface described by *params* to VM
    *vm*; returns the MAC address of the new interface."""
    conn = self.conn.get()
    networks = conn.listNetworks() + conn.listDefinedNetworks()
    # materialize: under Python 3 map() returns a one-shot iterator,
    # which would be empty for the membership test below
    networks = [net.decode('utf-8') for net in networks]

    if params['type'] == 'network':
        network = params.get("network")
        if network is None:
            raise MissingParameter('KCHVMIF0007E')
        if network not in networks:
            raise InvalidParameter('KCHVMIF0002E',
                                   {'name': vm, 'network': network})

    # bug fix: this was a generator expression; the first membership
    # test exhausted it, so the uniqueness check in the retry loop
    # below always passed and could hand out a duplicate MAC.
    macs = [iface.mac.get('address')
            for iface in self.get_vmifaces(vm, self.conn)]

    # user defined customized mac address
    if 'mac' in params and params['mac']:
        # make sure it is unique
        if params['mac'] in macs:
            raise InvalidParameter('KCHVMIF0009E',
                                   {'name': vm, 'mac': params['mac']})
    # otherwise choose a random mac address
    else:
        while True:
            params['mac'] = VMIfacesModel.random_mac()
            if params['mac'] not in macs:
                break

    dom = VMModel.get_vm(vm, self.conn)

    os_data = VMModel.vm_get_os_metadata(dom)
    os_version, os_distro = os_data
    xml = get_iface_xml(params, conn.getInfo()[0], os_distro,
                        os_version)

    flags = 0
    if dom.isPersistent():
        flags |= libvirt.VIR_DOMAIN_AFFECT_CONFIG
    if DOM_STATE_MAP[dom.info()[0]] != "shutoff":
        flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE

    dom.attachDeviceFlags(xml, flags)

    return params['mac']
def handleVMShutdownPowerOff(self, vm_name):
    """Register a libvirt LIFECYCLE event callback for VM *vm_name*
    so its shutdown/power-off events can be observed.

    Failures are logged, not raised.
    """
    try:
        dom = VMModel.get_vm(vm_name, self.conn)
        cb_id = self.conn.get().domainEventRegisterAny(
            dom,
            libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
            self.event_vmshutdown_cb,
            None
        )
        # remember the callback id so it can be deregistered later
        self.vm_event_callbacks[vm_name] = cb_id
    except (libvirt.libvirtError, AttributeError) as e:
        # AttributeError here means self.conn.get() had no usable
        # connection object
        if type(e) == AttributeError:
            reason = 'Libvirt service is not running'
        else:
            reason = e.message
        wok_log.error("Register of LIFECYCLE event failed: %s" %
                      reason)
def get_vmsnapshot(self, vm_name, name):
    """Return the libvirt snapshot object *name* of VM *vm_name*.

    Raises NotFoundError when the snapshot does not exist,
    OperationFailed on any other libvirt error.
    """
    vir_dom = VMModel.get_vm(vm_name, self.conn)
    try:
        return vir_dom.snapshotLookupByName(name, 0)
    except libvirt.libvirtError as e:
        code = e.get_error_code()
        if code == libvirt.VIR_ERR_NO_DOMAIN_SNAPSHOT:
            raise NotFoundError('KCHSNAP0003E', {
                'name': name, 'vm': vm_name
            })
        else:
            raise OperationFailed('KCHSNAP0004E', {
                'name': name, 'vm': vm_name, 'err': str(e)
            })
def have_usb_controller(self, vmid):
    """Return True when VM *vmid* has a USB controller whose model
    supports PCI hotplug."""
    dom = VMModel.get_vm(vmid, self.conn)
    tree = objectify.fromstring(dom.XMLDesc(0))
    try:
        ctrls = tree.devices.controller
    except AttributeError:
        # domain XML carries no <controller> elements at all
        return False
    return any(
        c.attrib['type'] == 'usb' and
        c.attrib['model'] in USB_MODELS_PCI_HOTPLUG
        for c in ctrls
        if 'model' in c.attrib
    )
def update(self, vm_name, dev_name, params):
    """Update storage device *dev_name* of *vm_name*; only CD-ROM
    media changes are supported. Keeps the object-store used_by
    bookkeeping of old and new media paths in sync. Returns the
    device name."""
    old_disk_used_by = None
    new_disk_used_by = None

    dom = VMModel.get_vm(vm_name, self.conn)

    dev_info = self.lookup(vm_name, dev_name)
    if dev_info['type'] != 'cdrom':
        raise InvalidOperation("KCHVMSTOR0006E")

    params['path'] = params.get('path', '')
    old_disk_path = dev_info['path']
    new_disk_path = params['path']
    if new_disk_path != old_disk_path:
        # An empty path means a CD-ROM was empty or ejected.
        # (bug fix: the original used "is not ''", which compares
        # object identity, not equality -- unreliable for strings.)
        if old_disk_path != '':
            old_disk_used_by = get_disk_used_by(
                self.objstore, self.conn, old_disk_path)
        if new_disk_path != '':
            new_disk_used_by = get_disk_used_by(
                self.objstore, self.conn, new_disk_path)

    dev_info.update(params)
    dev, xml = get_disk_xml(dev_info)

    try:
        dom.updateDeviceFlags(xml, get_vm_config_flag(dom, 'all'))
    except Exception as e:
        raise OperationFailed("KCHVMSTOR0009E", {'error': e.message})

    try:
        if old_disk_used_by is not None and \
                vm_name in old_disk_used_by:
            old_disk_used_by.remove(vm_name)
            set_disk_used_by(self.objstore, old_disk_path,
                             old_disk_used_by)
        if new_disk_used_by is not None:
            new_disk_used_by.append(vm_name)
            set_disk_used_by(self.objstore, new_disk_path,
                             new_disk_used_by)
    except Exception as e:
        wok_log.error("Unable to update dev used_by on update due to"
                      " %s:" % e.message)

    return dev
def _attach_scsi_device(self, cb, params):
    """Task worker: attach the SCSI host device params['dev_info'] to
    VM params['vmid'], reporting progress through *cb*.

    The attach is serialized on params['lock'] and rolled back (device
    detached) if a later step of the rollback context fails.
    """
    cb('Attaching SCSI device...')
    self._cb = cb
    vmid = params['vmid']
    dev_info = params['dev_info']
    lock = params['lock']

    try:
        self._passthrough_device_validate(dev_info['name'])
    except InvalidParameter as e:
        cb(str(e), False)
        raise

    with lock:
        dom = VMModel.get_vm(vmid, self.conn)

        with RollbackContext() as rollback:
            xmlstr = self._get_scsi_device_xml(dev_info)
            device_flags = get_vm_config_flag(dom, mode='all')
            try:
                cb('Attaching device to VM')
                dom.attachDeviceFlags(xmlstr, device_flags)
            except libvirt.libvirtError:
                msg = WokMessage(
                    'KCHVMHDEV0007E', {
                        'device': dev_info['name'], 'vm': vmid}
                )
                cb(msg.get_text(), False)
                wok_log.error(
                    'Failed to attach host device %s to VM %s: \n%s',
                    dev_info['name'],
                    vmid,
                    xmlstr,
                )
                raise
            rollback.prependDefer(
                dom.detachDeviceFlags, xmlstr, device_flags)
            rollback.commitAll()

    # for a running guest the task is completed elsewhere (event
    # driven); only a shut-off guest finishes here
    if DOM_STATE_MAP[dom.info()[0]] == 'shutoff':
        cb('OK', True)
def _get_unavailable_devices(self):
    """Collect the host device names already assigned to any VM."""
    taken = []
    for vm_name in VMsModel.get_vms(self.conn):
        domain = VMModel.get_vm(vm_name, self.conn)
        tree = objectify.fromstring(domain.XMLDesc(0))
        try:
            host_devices = tree.devices.hostdev
        except AttributeError:
            # this guest has no passthrough devices
            continue
        taken.extend(DeviceModel.deduce_dev_name(dev, self.conn)
                     for dev in host_devices)
    return taken
def _attach_usb_device(self, cb, params):
    """Task worker: attach the USB host device params['dev_info'] to
    VM params['vmid'], reporting progress through *cb*.

    The attach is rolled back (device detached) if a later step of
    the rollback context fails.
    """
    cb('Attaching USB device...')
    vmid = params['vmid']
    dev_info = params['dev_info']
    dom = VMModel.get_vm(vmid, self.conn)

    with RollbackContext() as rollback:
        cb('Reading source device XML')
        xmlstr = self._get_usb_device_xml(dev_info)
        device_flags = get_vm_config_flag(dom, mode='all')
        try:
            cb('Attaching device to VM')
            dom.attachDeviceFlags(xmlstr, device_flags)
        except libvirt.libvirtError:
            wok_log.error('Failed to attach host device %s to VM %s: \n%s',
                          dev_info['name'], vmid, xmlstr)
            raise
        rollback.prependDefer(dom.detachDeviceFlags, xmlstr,
                              device_flags)
        rollback.commitAll()

    cb('OK', True)
def create(self, vm_name, params=None):
    """Create a snapshot with the current domain state.

    The VM must be stopped and contain only disks with format 'qcow2';
    otherwise an exception will be raised.

    Parameters:
    vm_name -- the name of the VM where the snapshot will be created.
    params -- a dict with the following values:
        "name": The snapshot name (optional). If omitted, a default
            value based on the current time will be used.

    Return:
    A Task running the operation.
    """
    # None is used as the default instead of a mutable {} literal
    if params is None:
        params = {}
    vir_dom = VMModel.get_vm(vm_name, self.conn)
    if DOM_STATE_MAP[vir_dom.info()[0]] != u'shutoff':
        raise InvalidOperation('KCHSNAP0001E', {'vm': vm_name})

    # if the VM has a non-CDROM disk with type 'raw', abort.
    for storage_name in self.vmstorages.get_list(vm_name):
        storage = self.vmstorage.lookup(vm_name, storage_name)
        type = storage['type']
        format = storage['format']

        if type != u'cdrom' and format != u'qcow2':
            raise InvalidOperation('KCHSNAP0010E', {
                'vm': vm_name, 'format': format
            })

    name = params.get('name', unicode(int(time.time())))

    task_params = {'vm_name': vm_name, 'name': name}
    taskid = add_task(
        u'/plugins/kimchi/vms/%s/snapshots/%s' % (vm_name, name),
        self._create_task, self.objstore, task_params)
    return self.task.lookup(taskid)