def get_disk_ref_cnt(objstore, conn, path):
    """Return how many guests reference the storage volume at *path*.

    The object store is consulted first; when the volume is unknown there,
    the count is rebuilt by scanning every existing VM's disks and the
    result is stored back so later lookups are cheap.

    Raises OperationFailed ('KCHVOL0017E') when the object store cannot be
    read or updated.
    """
    try:
        with objstore as session:
            try:
                ref_cnt = session.get('storagevolume', path)['ref_cnt']
            except NotFoundError:
                kimchi_log.info('Volume %s not found in obj store.' % path)
                ref_cnt = 0
                # try to find this volume in existing vm
                for vm in VMsModel.get_vms(conn):
                    dom = VMModel.get_vm(vm, conn)
                    for disk in get_vm_disks(dom).keys():
                        if path == get_vm_disk_info(dom, disk)['path']:
                            ref_cnt += 1
                try:
                    session.store('storagevolume', path,
                                  {'ref_cnt': ref_cnt})
                except Exception as e:
                    # Let the exception be raised.  If we allow disks'
                    # ref_cnts to be out of sync, data corruption could
                    # occur if a disk is added to two guests unknowingly.
                    kimchi_log.error('Unable to store storage volume id in'
                                     ' objectstore due error: %s',
                                     e.message)
                    raise OperationFailed('KCHVOL0017E',
                                          {'err': e.message})
    except Exception as e:
        # This exception is going to catch errors returned by 'with',
        # specially ones generated by 'session.store'.  It is outside to
        # avoid conflict with the __exit__ function of 'with'.
        raise OperationFailed('KCHVOL0017E', {'err': e.message})
    return ref_cnt
def __exit__(self, type, value, tb):
    """Release the session lock; let database errors propagate after
    logging them (returning False re-raises the pending exception)."""
    self._lock.release()
    if type is not None and issubclass(type, sqlite3.DatabaseError):
        # Logs the error and return False, which makes __exit__ raise
        # exception again
        kimchi_log.error(traceback.format_exc())
        return False
def start(self, params=None):
    """Start the SEP agent through systemd; raise on non-zero exit."""
    cmd = ['systemctl', 'start', 'sepctl']
    output, error, rc = run_command(cmd)
    if rc != 0:
        kimchi_log.error('SEP service initialization error: %s - %s - %s'
                         % (cmd, rc, error))
        raise OperationFailed('GINSEP0008E', {'error': error})
def parse_hdds(temperature_unit):
    """Return an OrderedDict mapping each hard disk to its temperature.

    hddtemp will strangely convert a non-number (see error case below) to
    32 deg F, so always ask for Celsius and convert afterwards when
    Fahrenheit was requested.  Returns None if hddtemp itself fails; disks
    with no sensor data are skipped.  The requested unit is recorded under
    the 'unit' key.
    """
    out, error, rc = run_command(['hddtemp'])
    if rc:
        kimchi_log.error("Error retrieving HD temperatures: rc %s."
                         "output: %s" % (rc, error))
        return None

    hdds = OrderedDict()
    for hdd in out.splitlines():
        hdd_name = ''
        hdd_temp = 0.0
        try:
            hdd_items = hdd.split(':')
            hdd_name, hdd_temp = hdd_items[0], hdd_items[2]
            # FIX: '[C|F]' also matched a literal '|' inside the character
            # class; the intended class is just the two letters C and F.
            hdd_temp = re.sub('°[CF]', '', hdd_temp).strip()
        except Exception as e:
            kimchi_log.error('Sensors hdd parse error: %s' % e.message)
            continue
        try:
            # Try to convert the number to a float. If it fails,
            # don't add this disk to the list.
            hdd_temp = float(hdd_temp)
            if temperature_unit == 'F':
                hdd_temp = 9.0 / 5.0 * hdd_temp + 32
            hdds[hdd_name] = hdd_temp
        except ValueError:
            # If no sensor data, parse float will fail. For example:
            # "/dev/sda: IBM IPR-10 5D831200: S.M.A.R.T. not available"
            kimchi_log.warning("Sensors hdd: %s" % hdd)
    hdds['unit'] = temperature_unit
    return hdds
def stop(self, params=None):
    """Stop the SEP agent through systemd; raise on non-zero exit."""
    cmd = ['systemctl', 'stop', 'sepctl']
    output, error, rc = run_command(cmd)
    if rc != 0:
        kimchi_log.error('Error stopping SEP service: %s - %s - %s' %
                         (cmd, rc, error))
        raise OperationFailed('GINSEP0009E', {'error': error})
def get_iommu_group(dev_info):
    """Find out the iommu group of a given device.

    A child device belongs to the same iommu group as its parent, so when
    the device itself carries no group, walk up the parent chain.  Returns
    None when no ancestor declares a group or the chain is broken.
    """
    if 'iommuGroup' in dev_info:
        return dev_info['iommuGroup']

    parent = dev_info['parent']
    while parent is not None:
        if parent not in dev_dict:
            kimchi_log.error("Parent %s of device %s does not exist",
                             parent, dev_info['name'])
            break
        parent_info = dev_dict[parent]
        if 'iommuGroup' in parent_info:
            return parent_info['iommuGroup']
        parent = parent_info['parent']
    return None
def parse_hdds(temperature_unit):
    """Return an OrderedDict mapping each hard disk to its temperature.

    hddtemp will strangely convert a non-number (see error case below) to
    32 deg F, so always ask for Celsius and convert afterwards when
    Fahrenheit was requested.  Returns None if hddtemp itself fails; disks
    with no sensor data are skipped.  The requested unit is recorded under
    the 'unit' key.
    """
    out, error, rc = run_command(['hddtemp'])
    if rc:
        kimchi_log.error("Error retrieving HD temperatures: rc %s."
                         "output: %s" % (rc, error))
        return None

    hdds = OrderedDict()
    for hdd in out.splitlines():
        hdd_name = ''
        hdd_temp = 0.0
        try:
            hdd_items = hdd.split(':')
            hdd_name, hdd_temp = hdd_items[0], hdd_items[2]
            # FIX: '[C|F]' also matched a literal '|' inside the character
            # class; the intended class is just the two letters C and F.
            hdd_temp = re.sub('°[CF]', '', hdd_temp).strip()
        except Exception as e:
            kimchi_log.error('Sensors hdd parse error: %s' % e.message)
            continue
        try:
            # Try to convert the number to a float. If it fails,
            # don't add this disk to the list.
            hdd_temp = float(hdd_temp)
            if (temperature_unit == 'F'):
                hdd_temp = 9.0 / 5.0 * hdd_temp + 32
            hdds[hdd_name] = hdd_temp
        except ValueError:
            # If no sensor data, parse float will fail. For example:
            # "/dev/sda: IBM IPR-10 5D831200: S.M.A.R.T. not available"
            kimchi_log.warning("Sensors hdd: %s" % hdd)
    hdds['unit'] = temperature_unit
    return hdds
def delete(self, name):
    """Remove the guest *name*: screenshot, domain, disks and DB record.

    A running guest is powered off first.  Volumes living in read-only
    pool types are deliberately left untouched.
    """
    conn = self.conn.get()
    dom = self.get_vm(name, self.conn)
    self._vmscreenshot_delete(dom.UUIDString())
    paths = self._vm_get_disk_paths(dom)

    if self.lookup(name)['state'] == 'running':
        self.poweroff(name)

    try:
        dom.undefine()
    except libvirt.libvirtError as e:
        raise OperationFailed("KCHVM0021E",
                              {'name': name,
                               'err': e.get_error_message()})

    for path in paths:
        vol = conn.storageVolLookupByPath(path)
        pool = vol.storagePoolLookupByVolume()
        xml = pool.XMLDesc(0)
        pool_type = xpath_get_text(xml, "/pool/@type")[0]
        if pool_type not in READONLY_POOL_TYPE:
            vol.delete(0)

    try:
        with self.objstore as session:
            session.delete('vm', dom.UUIDString(), ignore_missing=True)
    except Exception as e:
        # It is possible to delete vm without delete its database info
        kimchi_log.error('Error deleting vm information from database: '
                         '%s', e.message)

    vnc.remove_proxy_token(name)
def _default_network_check(self):
    """Make sure libvirt's 'default' network exists and is active.

    The network is created when missing and recreated when its address
    range conflicts with a host interface.  kimchid exits on failure,
    since the default network is mandatory.
    """
    def create_default_network():
        # FIX: renamed from 'create_defautl_network' (typo in the local
        # helper name).
        try:
            subnet = self._get_available_address(knetwork.DefaultNetsPool)
            params = {"name": "default", "connection": "nat",
                      "subnet": subnet}
            self.create(params)
            return conn.networkLookupByName("default")
        except Exception as e:
            kimchi_log.error("Fatal: Cannot create default network "
                             "because of %s, exit kimchid", e.message)
            sys.exit(1)

    conn = self.conn.get()
    try:
        net = conn.networkLookupByName("default")
    except libvirt.libvirtError:
        net = create_default_network()

    if net.isActive() == 0:
        try:
            net.create()
        except libvirt.libvirtError as e:
            # FIXME we can not distinguish this error from other internal
            # error by error code.
            if ("network is already in use by interface"
                    in e.message.lower()):
                # libvirt do not support update IP element, so delete the
                # the network and create new one.
                net.undefine()
                create_default_network()
            else:
                kimchi_log.error("Fatal: Cannot activate default network "
                                 "because of %s, exit kimchid", e.message)
                sys.exit(1)
def _get_subscriber(self):
    """Return the list of hostnames currently subscribed in SEP.

    Raises OperationFailed ('GINSEP0007E') when getSubscriber fails with
    an unexpected return code; rc 1 simply means 'no subscriber'.
    """
    activation_info = []
    cmd = ['/opt/ibm/seprovider/bin/getSubscriber']
    output, error, rc = run_command(cmd)

    # no subscriber: return empty
    if rc == 1:
        return activation_info

    # error: report
    if rc != 0:
        kimchi_log.error('SEP execution error: %s - %s - %s' %
                         (cmd, rc, error))
        raise OperationFailed('GINSEP0007E')

    if len(output) > 1:
        # iterate over lines and parse to dict
        for line in output.splitlines():
            if len(line) > 0:
                match = SUBSCRIBER.search(line)
                # FIX: skip lines the pattern does not recognize instead
                # of crashing with AttributeError on None.groupdict().
                if match is None:
                    continue
                activation_info.append(match.groupdict()["hostname"])

    return activation_info
def get_dev_info(node_dev):
    '''
    Parse the node device XML string into dict according to
    http://libvirt.org/formatnode.html.

    scsi_generic is not documented in libvirt official website. Try to
    parse scsi_generic according to the following libvirt path series.
    https://www.redhat.com/archives/libvir-list/2013-June/msg00014.html

    scsi_target is not documented in libvirt official website. Try to
    parse scsi_target according to the libvirt commit db19834a0a.
    '''
    info = dictize(node_dev.XMLDesc(0))['device']
    dev_type = info['capability'].pop('type')
    info['device_type'] = dev_type
    info.update(info.pop('capability'))
    # parent device not found: set as None
    info["parent"] = info.get("parent")

    if dev_type in ('scsi', 'scsi_generic', 'scsi_target', 'system',
                    'usb'):
        return info

    if dev_type in ('net', 'pci', 'scsi_host', 'storage', 'usb_device'):
        # Dispatch to the type-specific parser defined in this module.
        return globals()['_get_%s_dev_info' % dev_type](info)

    kimchi_log.error("Unknown device type: %s", dev_type)
    return info
def wrapper(*args, **kwargs):
    """Run *f*, wrap returned libvirt objects, and recycle dead links.

    Public callables on a wrappable return value are wrapped too, so the
    whole object graph shares the reconnect behavior.  When the call fails
    with an error that indicates the libvirt connection itself died, the
    cached connection is dropped (a fresh one is opened on next use) and
    the error is re-raised.
    """
    try:
        ret = f(*args, **kwargs)
        if isinstance(ret, self.wrappables):
            for name in dir(ret):
                method = getattr(ret, name)
                if callable(method) and not name.startswith('_'):
                    setattr(ret, name, wrapMethod(method))
        return ret
    except libvirt.libvirtError as e:
        edom = e.get_error_domain()
        ecode = e.get_error_code()
        bad_domains = (libvirt.VIR_FROM_REMOTE, libvirt.VIR_FROM_RPC)
        bad_codes = (libvirt.VIR_ERR_SYSTEM_ERROR,
                     libvirt.VIR_ERR_INTERNAL_ERROR,
                     libvirt.VIR_ERR_NO_CONNECT,
                     libvirt.VIR_ERR_INVALID_CONN)
        if edom in bad_domains and ecode in bad_codes:
            kimchi_log.error('Connection to libvirt broken. '
                             'Recycling. ecode: %d edom: %d' %
                             (ecode, edom))
            with self._connectionLock:
                self._connections[conn_id] = None
        raise
def lookup(self, subscription):
    """
    Returns a dictionary with all SEP information for *subscription*.

    Raises NotFoundError ('GINSEP0006E') when the hostname is not
    subscribed and OperationFailed ('GINSEP0005E') when getSubscriber
    itself fails.
    """
    cmd = ['/opt/ibm/seprovider/bin/getSubscriber']
    output, error, rc = run_command(cmd)

    # error: report
    if rc != 0:
        kimchi_log.error('SEP execution error: %s - %s - %s' %
                         (cmd, rc, error))
        raise OperationFailed('GINSEP0005E', {'error': error})

    if len(output) > 1:
        # iterate over lines and parse to dict
        for line in output.splitlines():
            if len(line) > 0:
                match = SUBSCRIBER.search(line)
                # FIX: ignore unparseable lines instead of raising
                # AttributeError on a failed match.
                if match is None:
                    continue
                entry = match.groupdict()
                # subscriber found
                if entry["hostname"] == subscription:
                    return entry

    raise NotFoundError("GINSEP0006E", {'hostname': subscription})
def _default_network_check(self):
    """Guarantee that libvirt's 'default' network exists and is active.

    Missing networks are created; a network whose subnet collides with a
    host interface is deleted and recreated.  kimchid exits on failure.
    """
    def create_default_network():
        try:
            subnet = self._get_available_address(knetwork.DefaultNetsPool)
            params = {"name": "default", "connection": "nat",
                      "subnet": subnet}
            self.create(params)
            return conn.networkLookupByName("default")
        except Exception as e:
            kimchi_log.error("Fatal: Cannot create default network "
                             "because of %s, exit kimchid", e.message)
            sys.exit(1)

    conn = self.conn.get()
    try:
        net = conn.networkLookupByName("default")
    except libvirt.libvirtError:
        net = create_default_network()

    if net.isActive() == 0:
        try:
            net.create()
        except libvirt.libvirtError as e:
            # FIXME we can not distinguish this error from other internal
            # error by error code.
            if ("network is already in use by interface"
                    in e.message.lower()):
                # libvirt do not support update IP element, so delete the
                # the network and create new one.
                net.undefine()
                create_default_network()
            else:
                kimchi_log.error("Fatal: Cannot activate default network "
                                 "because of %s, exit kimchid", e.message)
                sys.exit(1)
def _update_lvm_disks(self, pool_name, disks):
    """Extend the LVM-backed pool *pool_name* with the given disks."""
    # check if all the disks/partitions exists in the host
    for disk in disks:
        output, error, returncode = run_command(['lsblk', disk])
        if returncode != 0:
            kimchi_log.error('%s is not a valid disk/partition. Could '
                             'not add it to the pool %s.',
                             disk, pool_name)
            raise OperationFailed('KCHPOOL0027E', {'disk': disk,
                                                   'pool': pool_name})

    # add disks to the lvm pool using vgextend + virsh refresh
    output, error, returncode = run_command(["vgextend", pool_name] +
                                            disks)
    if returncode != 0:
        msg = "Could not add disks to pool %s, error: %s"
        kimchi_log.error(msg, pool_name, error)
        raise OperationFailed('KCHPOOL0028E', {'pool': pool_name,
                                               'err': error})

    # refreshing pool state
    pool = self.get_storagepool(pool_name, self.conn)
    if pool.isActive():
        pool.refresh(0)
def update(self, name, params):
    """
    Update/add a subscription machine at IBM SEP tool.

    Only one subscription is handled at a time, so an existing
    subscription for a different hostname is unsubscribed first.
    """
    if ((self._activation_info['hostname'] != '') and
            (params['hostname'] != self._activation_info['hostname'])):
        cmd = ['/opt/ibm/seprovider/bin/unsubscribe', '-h',
               self._activation_info['hostname']]
        output, error, rc = run_command(cmd)
        if rc != 0:
            kimchi_log.error('SEP execution error: %s - %s - %s' %
                             (cmd, rc, error))
            raise OperationFailed('GINSEP0004E', {'cmd': cmd, 'rc': rc,
                                                  'error': error})

    # update the current subscription info, or add a new one.
    cmd = ['/opt/ibm/seprovider/bin/subscribe',
           '-h', params['hostname'],
           '-p', params['port'],
           '-c', params['community']]
    output, error, rc = run_command(cmd)
    if rc != 0:
        kimchi_log.error('SEP execution error: %s - %s - %s' %
                         (cmd, rc, error))
        raise OperationFailed('GINSEP0004E', {'cmd': cmd, 'rc': rc,
                                              'error': error})

    self._sep_status = 'running'
def update(self, name, params):
    """Flash a firmware package on the host.

    Unpacks the rpm given in params['path'] (images land in
    /tmp/fwupdate/) and calls update_flash, which reboots the system to
    apply the image.  Refuses to run while any VM is up.
    """
    if detect_live_vm():
        kimchi_log.error('Cannot update system fw while running VMs.')
        raise OperationFailed('GINFW0001E')

    fw_path = params['path']
    pow_ok = params.get('overwrite-perm-ok', True)

    # First unpack the rpm to get the fw img file
    # FIXME: When there's a .deb package available, add support for that
    command = ['rpm', '-U', '--force', '--ignoreos', fw_path]
    output, error, rc = run_command(command)
    if rc:
        # rpm returns num failed pkgs on failure or neg for unknown
        raise OperationFailed('GINFW0002E', {'rc': rc, 'err': error})

    # The image file should now be in /tmp/fwupdate/
    # and match the rpm name.
    image_file, ext = os.path.splitext(os.path.basename(fw_path))
    # FIX: os.path.splitext never returns None, so 'is None' was dead
    # code; an empty basename is the actual failure condition.
    if not image_file:
        kimchi_log.error('FW update failed: '
                         'No image file found in the package file.')
        raise OperationFailed('GINFW0003E')

    command = ['update_flash', '-f',
               os.path.join('/tmp/fwupdate', '%s.img' % image_file)]
    if not pow_ok:
        command.insert(1, '-n')
    kimchi_log.info('FW update: System will reboot to flash the '
                    'firmware.')
    output, error, rc = run_command(command)
    if rc:
        raise OperationFailed('GINFW0004E', {'rc': rc})
def update(self, name, params):
    """Flash a firmware package on the host.

    Unpacks the rpm given in params['path'] (images land in
    /tmp/fwupdate/) and calls update_flash, which reboots the system to
    apply the image.  Refuses to run while any VM is up.
    """
    if detect_live_vm():
        kimchi_log.error('Cannot update system fw while running VMs.')
        raise OperationFailed('GINFW0001E')

    fw_path = params['path']
    pow_ok = params.get('overwrite-perm-ok', True)

    # First unpack the rpm to get the fw img file
    # FIXME: When there's a .deb package available, add support for that
    command = ['rpm', '-U', '--force', '--ignoreos', fw_path]
    output, error, rc = run_command(command)
    if rc:
        # rpm returns num failed pkgs on failure or neg for unknown
        raise OperationFailed('GINFW0002E', {'rc': rc, 'err': error})

    # The image file should now be in /tmp/fwupdate/
    # and match the rpm name.
    image_file, ext = os.path.splitext(os.path.basename(fw_path))
    # FIX: os.path.splitext never returns None, so 'is None' was dead
    # code; an empty basename is the actual failure condition.
    if not image_file:
        kimchi_log.error('FW update failed: '
                         'No image file found in the package file.')
        raise OperationFailed('GINFW0003E')

    command = [
        'update_flash', '-f',
        os.path.join('/tmp/fwupdate', '%s.img' % image_file)
    ]
    if not pow_ok:
        command.insert(1, '-n')
    kimchi_log.info('FW update: System will reboot to flash the '
                    'firmware.')
    output, error, rc = run_command(command)
    if rc:
        raise OperationFailed('GINFW0004E', {'rc': rc})
def delete(self, name):
    """Remove the guest *name*: screenshot, domain, disks and DB record.

    A running guest is powered off first.  Volumes living in read-only
    pool types are deliberately left untouched.
    """
    conn = self.conn.get()
    dom = self.get_vm(name, self.conn)
    self._vmscreenshot_delete(dom.UUIDString())
    paths = self._vm_get_disk_paths(dom)

    if self.lookup(name)['state'] == 'running':
        self.poweroff(name)

    try:
        dom.undefine()
    except libvirt.libvirtError as e:
        raise OperationFailed("KCHVM0021E", {
            'name': name,
            'err': e.get_error_message()
        })

    for path in paths:
        vol = conn.storageVolLookupByPath(path)
        pool = vol.storagePoolLookupByVolume()
        xml = pool.XMLDesc(0)
        pool_type = xmlutils.xpath_get_text(xml, "/pool/@type")[0]
        if pool_type not in READONLY_POOL_TYPE:
            vol.delete(0)

    try:
        with self.objstore as session:
            session.delete('vm', dom.UUIDString(), ignore_missing=True)
    except Exception as e:
        # It is possible to delete vm without delete its database info
        kimchi_log.error('Error deleting vm information from database: '
                         '%s', e.message)

    vnc.remove_proxy_token(name)
def delete(self, vm_name, dev_name):
    """Detach the storage device *dev_name* from guest *vm_name* and
    update the volume's used_by bookkeeping in the object store.

    Non-hotpluggable buses require the guest to be shut off
    (InvalidOperation 'KCHVMSTOR0011E' otherwise).
    """
    try:
        bus_type = self.lookup(vm_name, dev_name)['bus']
        dom = VMModel.get_vm(vm_name, self.conn)
    except NotFoundError:
        raise

    if (bus_type not in HOTPLUG_TYPE and
            DOM_STATE_MAP[dom.info()[0]] != 'shutoff'):
        raise InvalidOperation('KCHVMSTOR0011E')

    # FIX: pre-initialize so the used_by bookkeeping below cannot hit an
    # UnboundLocalError when no disk path could be determined.
    used_by = None
    path = None
    try:
        disk = get_device_node(dom, dev_name)
        path = get_vm_disk_info(dom, dev_name)['path']
        if path is None or len(path) < 1:
            path = self.lookup(vm_name, dev_name)['path']
        # This has to be done before it's detached. If it wasn't
        # in the obj store, its ref count would have been updated
        # by get_disk_used_by()
        if path is not None:
            used_by = get_disk_used_by(self.objstore, self.conn, path)
        else:
            kimchi_log.error("Unable to decrement volume used_by on"
                             " delete because no path could be found.")
        dom.detachDeviceFlags(etree.tostring(disk),
                              get_vm_config_flag(dom, 'all'))
    except Exception as e:
        raise OperationFailed("KCHVMSTOR0010E", {'error': e.message})

    if used_by is not None and vm_name in used_by:
        used_by.remove(vm_name)
        set_disk_used_by(self.objstore, path, used_by)
    else:
        kimchi_log.error("Unable to update %s:%s used_by on delete." %
                         (vm_name, dev_name))
def _peer_deregister(self):
    """Withdraw this server's kimchid registration from openSLP."""
    cmd = ["slptool", "deregister", "service:kimchid://%s" % self.url]
    out, error, ret = run_command(cmd)
    # slptool writes to stdout when something went wrong.
    if out and len(out) != 0:
        kimchi_log.error("Unable to deregister server on openSLP."
                         " Details: %s" % out)
def _generate_thumbnail(self):
    """Produce a PNG thumbnail of the guest screen.

    Falls back to an all-black image when no screenshot stream is
    available or the capture produced an empty file.  The resulting file
    path is recorded in self.info['thumbnail'].
    """
    thumbnail = os.path.join(config.get_screenshot_path(), '%s-%s.png' %
                             (self.vm_uuid, str(uuid.uuid4())))
    self._get_test_result()
    if stream_test_result is None:
        # Stream support still unknown: wait for the file to show up.
        self._watch_stream_creation(thumbnail)
    elif stream_test_result:
        try:
            self._generate_scratch(thumbnail)
        # FIX: narrowed the bare 'except:' so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        except Exception:
            kimchi_log.error("screenshot_creation: Unable to create "
                             "screenshot image %s." % thumbnail)
    else:
        self._create_black_image(thumbnail)

    if os.path.getsize(thumbnail) == 0:
        self._create_black_image(thumbnail)
    else:
        im = Image.open(thumbnail)
        try:
            # Prevent Image lib from lazy load,
            # work around pic truncate validation in thumbnail generation
            im.thumbnail(self.THUMBNAIL_SIZE)
        except Exception as e:
            kimchi_log.warning("Image load with warning: %s." % e)
        im.save(thumbnail, "PNG")

    self.info['thumbnail'] = thumbnail
def _generate_thumbnail(self):
    """Produce a PNG thumbnail of the guest screen.

    Falls back to an all-black image when no screenshot stream is
    available or the capture produced an empty file.  The resulting file
    path is recorded in self.info['thumbnail'].
    """
    thumbnail = os.path.join(
        config.get_screenshot_path(),
        '%s-%s.png' % (self.vm_uuid, str(uuid.uuid4())))
    self._get_test_result()
    if stream_test_result is None:
        # Stream support still unknown: wait for the file to show up.
        self._watch_stream_creation(thumbnail)
    elif stream_test_result:
        try:
            self._generate_scratch(thumbnail)
        # FIX: narrowed the bare 'except:' so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        except Exception:
            kimchi_log.error("screenshot_creation: Unable to create "
                             "screenshot image %s." % thumbnail)
    else:
        self._create_black_image(thumbnail)

    if os.path.getsize(thumbnail) == 0:
        self._create_black_image(thumbnail)
    else:
        im = Image.open(thumbnail)
        try:
            # Prevent Image lib from lazy load,
            # work around pic truncate validation in thumbnail generation
            im.thumbnail(self.THUMBNAIL_SIZE)
        except Exception as e:
            kimchi_log.warning("Image load with warning: %s." % e)
        im.save(thumbnail, "PNG")

    self.info['thumbnail'] = thumbnail
def sosreport_generate(cb, name):
    """Background task body: run sosreport and archive the result.

    *cb* is the task callback used to report completion; *name* tags the
    report.  The generated tarball is moved into the debug-reports
    directory and its md5 companion file is discarded.
    """
    try:
        command = ['sosreport', '--batch', '--name=%s' % name]
        output, error, retcode = run_command(command)

        # FIX: the negative (killed by signal) and positive return codes
        # raised the exact same error, so a single inequality suffices.
        if retcode != 0:
            raise OperationFailed("KCHDR0003E",
                                  {'name': name, 'err': retcode})

        # SOSREPORT might create file in /tmp or /var/tmp
        # FIXME: The right way should be passing the tar.xz file directory
        # though the parameter '--tmp-dir', but it is failing in Fedora 20
        patterns = ['/tmp/sosreport-%s-*', '/var/tmp/sosreport-%s-*']
        reports = []
        for p in patterns:
            reports += glob.glob(p % name)

        reportFile = None
        for f in reports:
            if not fnmatch.fnmatch(f, '*.md5'):
                reportFile = f
                break

        # Some error in sosreport happened
        if reportFile is None:
            kimchi_log.error('Debug report file not found. See sosreport '
                             'output for detail:\n%s', output)
            fname = (patterns[0] % name).split('/')[-1]
            raise OperationFailed('KCHDR0004E', {'name': fname})

        md5_report_file = reportFile + '.md5'
        report_file_extension = '.' + reportFile.split('.', 1)[1]
        path = config.get_debugreports_path()
        target = os.path.join(path, name + report_file_extension)

        # Moving report
        msg = 'Moving debug report file "%s" to "%s"' % (reportFile,
                                                         target)
        kimchi_log.info(msg)
        shutil.move(reportFile, target)

        # Deleting md5
        msg = 'Deleting report md5 file: "%s"' % (md5_report_file)
        kimchi_log.info(msg)
        md5 = open(md5_report_file).read().strip()
        kimchi_log.info('Md5 file content: "%s"', md5)
        os.remove(md5_report_file)

        cb('OK', True)
        return
    except OSError:
        raise
    # FIX: 'except ... as e' replaces the Python-2-only comma spelling.
    except Exception as e:
        # No need to call cb to update the task status here.
        # The task object will catch the exception raised here
        # and update the task status there
        log = logging.getLogger('Model')
        log.warning('Exception in generating debug file: %s', e)
        raise OperationFailed("KCHDR0005E", {'name': name, 'err': e})
def _attach_pci_device(self, vmid, dev_info):
    """Assign the PCI device *dev_info*, plus every passthrough-capable
    device in its IOMMU group, to the guest *vmid*.

    All group members are detached from the host before being attached to
    the guest; RollbackContext undoes the work if any step fails.
    Returns the attached device's name.
    """
    self._validate_pci_passthrough_env()

    dom = VMModel.get_vm(vmid, self.conn)
    # Due to libvirt limitation, we don't support live assigne device to
    # vfio driver.
    driver = ('vfio' if DOM_STATE_MAP[dom.info()[0]] == "shutoff" and
              self.caps.kernel_vfio else 'kvm')

    # on powerkvm systems it must be vfio driver.
    distro, _, _ = platform.linux_distribution()
    if distro == 'IBM_PowerKVM':
        driver = 'vfio'

    # Attach all PCI devices in the same IOMMU group
    dev_model = DeviceModel(conn=self.conn)
    devs_model = DevicesModel(conn=self.conn)
    affected = devs_model.get_list(
        _passthrough_affected_by=dev_info['name'])
    passthrough = devs_model.get_list(_cap='pci', _passthrough='true')
    group_names = list(set(affected) & set(passthrough))
    pci_infos = [dev_model.lookup(dev_name) for dev_name in group_names]
    pci_infos.append(dev_info)

    # all devices in the group that is going to be attached to the vm
    # must be detached from the host first
    with RollbackContext() as rollback:
        for pci_info in pci_infos:
            try:
                dev = self.conn.get().nodeDeviceLookupByName(
                    pci_info['name'])
                dev.dettach()
            except Exception:
                raise OperationFailed('KCHVMHDEV0005E',
                                      {'name': pci_info['name']})
            else:
                rollback.prependDefer(dev.reAttach)
        rollback.commitAll()

    device_flags = get_vm_config_flag(dom, mode='all')
    with RollbackContext() as rollback:
        for pci_info in pci_infos:
            pci_info['detach_driver'] = driver
            xmlstr = self._get_pci_device_xml(pci_info)
            try:
                dom.attachDeviceFlags(xmlstr, device_flags)
            except libvirt.libvirtError:
                kimchi_log.error(
                    'Failed to attach host device %s to VM %s: \n%s',
                    pci_info['name'], vmid, xmlstr)
                raise
            rollback.prependDefer(dom.detachDeviceFlags, xmlstr,
                                  device_flags)
        rollback.commitAll()

    return dev_info['name']
def _attach_pci_device(self, vmid, dev_info):
    """Assign the PCI device *dev_info*, plus every passthrough-capable
    device in its IOMMU group, to the guest *vmid*.

    All group members are detached from the host before being attached to
    the guest; RollbackContext undoes the work if any step fails.
    Returns the attached device's name.
    """
    self._validate_pci_passthrough_env()

    dom = VMModel.get_vm(vmid, self.conn)
    # Due to libvirt limitation, we don't support live assigne device to
    # vfio driver.
    driver = ('vfio' if DOM_STATE_MAP[dom.info()[0]] == "shutoff" and
              self.caps.kernel_vfio else 'kvm')

    # on powerkvm systems it must be vfio driver.
    distro, _, _ = platform.linux_distribution()
    if distro == 'IBM_PowerKVM':
        driver = 'vfio'

    # Attach all PCI devices in the same IOMMU group
    dev_model = DeviceModel(conn=self.conn)
    devs_model = DevicesModel(conn=self.conn)
    affected = devs_model.get_list(
        _passthrough_affected_by=dev_info['name'])
    passthrough = devs_model.get_list(_cap='pci', _passthrough='true')
    group_names = list(set(affected) & set(passthrough))
    pci_infos = [dev_model.lookup(dev_name) for dev_name in group_names]
    pci_infos.append(dev_info)

    # all devices in the group that is going to be attached to the vm
    # must be detached from the host first
    with RollbackContext() as rollback:
        for pci_info in pci_infos:
            try:
                dev = self.conn.get().nodeDeviceLookupByName(
                    pci_info['name'])
                dev.dettach()
            except Exception:
                raise OperationFailed('KCHVMHDEV0005E',
                                      {'name': pci_info['name']})
            else:
                rollback.prependDefer(dev.reAttach)
        rollback.commitAll()

    device_flags = get_vm_config_flag(dom, mode='all')
    with RollbackContext() as rollback:
        for pci_info in pci_infos:
            pci_info['detach_driver'] = driver
            xmlstr = self._get_pci_device_xml(pci_info)
            try:
                dom.attachDeviceFlags(xmlstr, device_flags)
            except libvirt.libvirtError:
                kimchi_log.error(
                    'Failed to attach host device %s to VM %s: \n%s',
                    pci_info['name'], vmid, xmlstr)
                raise
            rollback.prependDefer(dom.detachDeviceFlags, xmlstr,
                                  device_flags)
        rollback.commitAll()

    return dev_info['name']
def lookup(self, params=None):
    """Return the host firmware level as reported by lsmcode."""
    # FIX: run_command receives an argv list everywhere else in this
    # code base; pass ['lsmcode'] instead of a bare string.
    output, error, rc = run_command(['lsmcode'])
    if rc:
        # FIX: 'retreive' typo in the log message.
        kimchi_log.error('Unable to retrieve firmware level.')
        return {'level': 'Unknown'}
    # Cut out the chatter from the command output
    levels = output.split()[5:]
    return {'level': " ".join(levels)}
def stop(self, params=None):
    """Stop the SEP agent via sepcli and record the new status."""
    cmd = ['/opt/ibm/seprovider/bin/sepcli', 'stop']
    output, error, rc = run_command(cmd)
    # sepcli reports a successful stop with return code 10.
    if rc != 10:
        kimchi_log.error('SEP execution error: %s - %s - %s' %
                         (cmd, rc, error))
        raise OperationFailed('GINSEP0004E', {'cmd': cmd, 'rc': rc,
                                              'error': error})
    self._sep_status = 'not running'
def _create_archive_dir(cls):
    """Ensure the backup archive directory exists."""
    try:
        os.makedirs(cls._archive_dir)
    except OSError as e:
        # It's OK if archive_dir already exists
        if e.errno != errno.EEXIST:
            kimchi_log.error('Error creating archive dir %s: %s',
                             cls._archive_dir, e)
            raise OperationFailed('GINHBK0003E',
                                  {'dir': cls._archive_dir})
def create_defautl_network():
    """Create libvirt's 'default' NAT network; exit kimchid on failure.

    NOTE(review): the helper name carries a typo ('defautl') — kept here
    because callers in the enclosing scope use this exact spelling.
    """
    try:
        subnet = self._get_available_address(knetwork.DefaultNetsPool)
        params = {"name": "default", "connection": "nat",
                  "subnet": subnet}
        self.create(params)
        return conn.networkLookupByName("default")
    except Exception as e:
        kimchi_log.error("Fatal: Cannot create default network "
                         "because of %s, exit kimchid", e.message)
        sys.exit(1)
def _add_user_to_sudoers(self, user):
    """Grant *user* sudo rights via a dedicated /etc/sudoers.d file.

    The newly created user is rolled back when the file cannot be
    written.
    """
    try:
        # Creates the file in /etc/sudoers.d with proper user permission
        with open(SUDOERS_FILE % user, 'w') as f:
            f.write(SUDOERS_LINE % user)
        # FIX: 0o440 is valid on Python 2.6+ and Python 3; the old
        # '0440' literal is Python-2-only syntax.
        os.chmod(SUDOERS_FILE % user, 0o440)
    except Exception as e:
        UserModel().delete(user)
        kimchi_log.error('Could not add user %s to sudoers: %s',
                         user, e.message)
        raise OperationFailed('GINUSER0007E', {'user': user})
def wait_task(task_lookup, taskid, timeout=10):
    """Poll task_lookup(taskid) once per second until it leaves the
    'running' state or *timeout* seconds elapse; log on timeout."""
    for _ in range(timeout):
        task_info = task_lookup(taskid)
        if task_info['status'] != "running":
            return
        kimchi_log.info("Waiting task %s, message: %s",
                        taskid, task_info['message'])
        time.sleep(1)
    kimchi_log.error("Timeout while process long-run task, "
                     "try to increase timeout value.")
def _add_user_to_kvm_group(self, adm, user):
    """Put *user* in the 'kvm' group, rolling the user back on failure."""
    kvmgrp = adm.lookupGroupByName('kvm')
    kvmgrp.add('gr_mem', user)
    if adm.modifyGroup(kvmgrp) != 1:
        UserModel().delete(user)
        msg = ('Could not add user %s to kvm group. Operation failed.'
               % user)
        kimchi_log.error(msg)
        raise OperationFailed('GINUSER0006E', {'user': user})
def _create_task(self, cb, params):
    """
    Define a new guest from a template, reporting progress through *cb*.

    params: A dict with the following values:
        - vm_uuid: The UUID of the VM being created
        - template: The template being used to create the VM
        - name: The name for the new VM
    """
    vm_uuid = str(uuid.uuid4())
    t = params['template']
    name = params['name']
    conn = self.conn.get()

    cb('Storing VM icon')
    # Store the icon for displaying later
    icon = t.info.get('icon')
    if icon:
        try:
            with self.objstore as session:
                session.store('vm', vm_uuid, {'icon': icon})
        except Exception as e:
            # It is possible to continue Kimchi executions without store
            # vm icon info
            kimchi_log.error('Error trying to update database with '
                             'guest icon information due error: %s',
                             e.message)

    # If storagepool is SCSI, volumes will be LUNs and must be passed by
    # the user from UI or manually.
    cb('Provisioning storage for new VM')
    vol_list = []
    if t._get_storage_type() not in ["iscsi", "scsi"]:
        vol_list = t.fork_vm_storage(vm_uuid)

    graphics = params.get('graphics', {})
    stream_protocols = self.caps.libvirt_stream_protocols
    xml = t.to_vm_xml(name, vm_uuid,
                      libvirt_stream_protocols=stream_protocols,
                      graphics=graphics,
                      volumes=vol_list)

    cb('Defining new VM')
    try:
        conn.defineXML(xml.encode('utf-8'))
    except libvirt.libvirtError as e:
        # Undo the storage provisioning before reporting the failure.
        if t._get_storage_type() not in READONLY_POOL_TYPE:
            for v in vol_list:
                conn.storageVolLookupByPath(v['path']).delete(0)
        raise OperationFailed("KCHVM0007E",
                              {'name': name,
                               'err': e.get_error_message()})

    cb('Updating VM metadata')
    VMModel.vm_update_os_metadata(VMModel.get_vm(name, self.conn),
                                  t.info, self.caps.metadata_support)
    cb('OK', True)
def update(self, profile, params):
    """Activate the power profile *profile* via tuned-adm when the
    request marks it active; returns the profile name."""
    if params['active'] and self.active_powerprofile != profile:
        self.active_powerprofile = profile
        tuned_cmd = ["tuned-adm", "profile", profile]
        output, error, returncode = run_command(tuned_cmd)
        if returncode != 0:
            # FIX: both messages referenced an undefined name
            # 'powerprofile' (NameError at runtime); the parameter in
            # this variant is called 'profile'.
            kimchi_log.error('Could not activate power profile %s, '
                             'error: %s', profile, error)
            raise OperationFailed('Error while activating power '
                                  'saving profile %s.', profile)
    return profile
def libvirt_supports_iso_stream(conn, protocol):
    """Probe whether libvirt can stream an ISO over *protocol*.

    Defines (and immediately undefines) a throwaway domain whose cdrom is
    served through the given protocol; a libvirt error means the feature
    is unavailable.
    """
    domain_type = 'test' if conn.getType().lower() == 'test' else 'kvm'
    xml = ISO_STREAM_XML % {'domain': domain_type, 'protocol': protocol}
    try:
        FeatureTests.disable_libvirt_error_logging()
        dom = conn.defineXML(xml)
        dom.undefine()
        return True
    # FIX: 'except ... as e' replaces the Python-2-only comma form
    # (valid on 2.6+ and required by Python 3).
    except libvirt.libvirtError as e:
        kimchi_log.error(e.message)
        return False
def create_default_network():
    """Create libvirt's 'default' NAT network; exit kimchid on failure."""
    try:
        subnet = self._get_available_address(knetwork.DefaultNetsPool)
        params = {"name": "default", "connection": "nat",
                  "subnet": subnet}
        self.create(params)
        return conn.networkLookupByName("default")
    except Exception as e:
        kimchi_log.error("Fatal: Cannot create default network "
                         "because of %s, exit kimchid", e.message)
        sys.exit(1)
def _qemu_support_spice(self):
    """Return True when the qemu binary links against libspice-server."""
    qemu_path = find_qemu_binary(find_emulator=True)
    out, err, rc = run_command(['ldd', qemu_path])
    if rc != 0:
        kimchi_log.error('Failed to find qemu binary dependencies: %s',
                         err)
        return False
    return any(line.lstrip().startswith('libspice-server.so')
               for line in out.split('\n'))
def update(self, powerprofile, params):
    """Activate *powerprofile* through tuned-adm when requested and
    return its name."""
    if params['active'] and self.active_powerprofile != powerprofile:
        self.active_powerprofile = powerprofile
        output, error, returncode = run_command(
            ["tuned-adm", "profile", powerprofile])
        if returncode != 0:
            kimchi_log.error('Could not activate power profile %s, '
                             'error: %s', powerprofile, error)
            raise OperationFailed("GINPOWER004E",
                                  {'profile': powerprofile})
    return powerprofile
def delete_user(username):
    """Remove *username* from the host, including home dir and mail
    spool (the two True flags passed to libuser's deleteUser)."""
    adm = libuser.admin()
    user_obj = adm.lookupUserByName(username)

    if user_obj is None:
        kimchi_log.error('User "%s" does not exist', username)
        raise OperationFailed('GINUSER0011E', {'user': username})

    try:
        adm.deleteUser(user_obj, True, True)
    except Exception as e:
        kimchi_log.error('Could not delete user %s: %s', username, e)
        raise OperationFailed('GINUSER0010E', {'user': username})
def update(self, powerprofile, params):
    """Activate *powerprofile* through tuned-adm when requested and
    return its name."""
    if params['active'] and self.active_powerprofile != powerprofile:
        self.active_powerprofile = powerprofile
        tuned_cmd = ["tuned-adm", "profile", powerprofile]
        output, error, returncode = run_command(tuned_cmd)
        if returncode != 0:
            kimchi_log.error('Could not activate power profile %s, '
                             'error: %s', powerprofile, error)
            raise OperationFailed("GINPOWER004E",
                                  {'profile': powerprofile})
    return powerprofile
def lookup(self, user):
    """Return name/uid/gid/primary-group/profile info for *user*.

    Raises OperationFailed ('GINUSER0011E') when the account is unknown.
    """
    try:
        user_info = pwd.getpwnam(user)
    except Exception:
        kimchi_log.error('User "%s" does not exist', user)
        raise OperationFailed('GINUSER0011E', {'user': user})

    return {"name": user,
            "uid": user_info.pw_uid,
            "gid": user_info.pw_gid,
            "group": grp.getgrgid(user_info.pw_gid).gr_name,
            "profile": self._get_user_profile(user)}
def libvirt_supports_iso_stream(protocol):
    """Probe whether the local qemu driver can stream an ISO over
    *protocol* by defining and undefining a throwaway domain."""
    xml = ISO_STREAM_XML % {'protocol': protocol}
    conn = None
    try:
        FeatureTests.disable_screen_error_logging()
        conn = libvirt.open('qemu:///system')
        dom = conn.defineXML(xml)
        dom.undefine()
        return True
    # FIX: 'except ... as e' replaces the Python-2-only comma form.
    except libvirt.libvirtError as e:
        kimchi_log.error(e.message)
        return False
    finally:
        # FIX: the connection was opened but never closed, leaking one
        # libvirt client connection per probe.
        if conn is not None:
            conn.close()
def _vmscreenshot_delete(self, vm_uuid):
    """Delete a VM's screenshot file and its object-store record."""
    screenshot = VMScreenshotModel.get_screenshot(vm_uuid, self.objstore,
                                                  self.conn)
    screenshot.delete()
    try:
        with self.objstore as session:
            session.delete('screenshot', vm_uuid)
    except Exception as e:
        # It is possible to continue Kimchi executions without delete
        # screenshots
        kimchi_log.error('Error trying to delete vm screenshot from '
                         'database due error: %s', e.message)
def _is_dev_leaf(devNodePath):
    """Return True when lsblk reports no children for the device node."""
    try:
        # By default, lsblk prints a device information followed by
        # children device information
        children = len(_get_lsblk_devs(["NAME"], [devNodePath])) - 1
    except OperationFailed as e:
        # lsblk is known to fail on multipath devices
        # Assume these devices contain children
        kimchi_log.error("Error getting device info for %s: %s",
                         devNodePath, e)
        return False
    return children == 0
def update(self, powerprofile, params):
    """Activate *powerprofile* through tuned-adm when requested and
    return its name."""
    if params['active'] and self.active_powerprofile != powerprofile:
        self.active_powerprofile = powerprofile
        tuned_cmd = ["tuned-adm", "profile", powerprofile]
        output, error, returncode = run_command(tuned_cmd)
        if returncode != 0:
            kimchi_log.error('Could not activate power profile %s, '
                             'error: %s', powerprofile, error)
            raise OperationFailed('Error while activating power '
                                  'saving profile %s.', powerprofile)
    return powerprofile
def get_list(self):
    """Return the power profiles known to tuned-adm."""
    if self.error is not None:
        kimchi_log.error(self.error)
        raise OperationFailed(self.error)

    output, error, returncode = run_command(["tuned-adm", "list"])
    # Profile lines are the ones prefixed with '-' in the listing.
    return [line.strip("- ")
            for line in output.rstrip("\n").split("\n")
            if line.startswith('-')]