def _generate_thumbnail(self):
    """Create a PNG thumbnail of the VM screenshot.

    The generated file path is recorded in self.info['thumbnail'].
    Falls back to a black image when no screenshot data is available.
    """
    thumbnail = os.path.join(config.get_screenshot_path(), '%s-%s.png' %
                             (self.vm_uuid, str(uuid.uuid4())))
    # NOTE(review): 'stream_test_result' is read below but never assigned in
    # this method; it is presumably a module-level flag that
    # _get_test_result() updates -- confirm against the rest of the file.
    self._get_test_result()
    if stream_test_result is None:
        # Stream capability still unknown: watch the creation asynchronously.
        self._watch_stream_creation(thumbnail)
    elif stream_test_result:
        try:
            self._generate_scratch(thumbnail)
        # FIX: narrowed the bare 'except:' so SystemExit/KeyboardInterrupt
        # are no longer swallowed along with real errors.
        except Exception:
            kimchi_log.error("screenshot_creation: Unable to create "
                             "screenshot image %s." % thumbnail)
    else:
        self._create_black_image(thumbnail)

    # An empty file means the screenshot failed; substitute a black image.
    if os.path.getsize(thumbnail) == 0:
        self._create_black_image(thumbnail)
    else:
        im = Image.open(thumbnail)
        try:
            # Prevent Image lib from lazy load,
            # work around pic truncate validation in thumbnail generation
            im.thumbnail(self.THUMBNAIL_SIZE)
        except Exception as e:
            kimchi_log.warning("Image load with warning: %s." % e)
        im.save(thumbnail, "PNG")

    self.info['thumbnail'] = thumbnail
def parse_hdds(temperature_unit):
    """Return an OrderedDict mapping HDD device names to temperatures.

    Queries hddtemp, which reports Celsius; converts to Fahrenheit when
    temperature_unit == 'F'. Adds a 'unit' entry with the chosen unit.
    Returns None when the hddtemp command fails.
    """
    # hddtemp will strangely convert a non-number (see error case
    # below) to 32 deg F. So just always ask for C and convert later.
    out, error, rc = run_command(['hddtemp'])
    if rc:
        kimchi_log.error("Error retrieving HD temperatures: rc %s."
                         "output: %s" % (rc, error))
        return None

    hdds = OrderedDict()
    for hdd in out.splitlines():
        hdd_name = ''
        hdd_temp = 0.0
        try:
            # hddtemp output: "<device>: <model>: <temp>°C"
            hdd_items = hdd.split(':')
            hdd_name, hdd_temp = hdd_items[0], hdd_items[2]
            # FIX: '[C|F]' also matched a literal '|'; the character class
            # should contain only the unit letters.
            hdd_temp = re.sub('°[CF]', '', hdd_temp).strip()
        except Exception as e:
            kimchi_log.error('Sensors hdd parse error: %s' % e.message)
            continue
        try:
            # Try to convert the number to a float. If it fails,
            # don't add this disk to the list.
            hdd_temp = float(hdd_temp)
            if temperature_unit == 'F':
                hdd_temp = 9.0 / 5.0 * hdd_temp + 32
            hdds[hdd_name] = hdd_temp
        except ValueError:
            # If no sensor data, parse float will fail. For example:
            # "/dev/sda: IBM IPR-10 5D831200: S.M.A.R.T. not available"
            kimchi_log.warning("Sensors hdd: %s" % hdd)
    hdds['unit'] = temperature_unit
    return hdds
def _generate_thumbnail(self):
    """Generate a thumbnail image for the VM screenshot.

    The resulting file path is stored in self.info['thumbnail'].
    """
    thumbnail = os.path.join(
        config.get_screenshot_path(),
        '%s-%s.png' % (self.vm_uuid, str(uuid.uuid4())))
    # NOTE(review): 'stream_test_result' is never assigned in this method;
    # it looks like a module-level flag that _get_test_result() sets --
    # confirm elsewhere in the file.
    self._get_test_result()
    if stream_test_result is None:
        # Stream capability unknown yet: watch the creation asynchronously.
        self._watch_stream_creation(thumbnail)
    elif stream_test_result:
        try:
            self._generate_scratch(thumbnail)
        except:
            kimchi_log.error("screenshot_creation: Unable to create "
                             "screenshot image %s." % thumbnail)
    else:
        self._create_black_image(thumbnail)

    # An empty file means the screenshot failed; substitute a black image.
    if os.path.getsize(thumbnail) == 0:
        self._create_black_image(thumbnail)
    else:
        im = Image.open(thumbnail)
        try:
            # Prevent Image lib from lazy load,
            # work around pic truncate validation in thumbnail generation
            im.thumbnail(self.THUMBNAIL_SIZE)
        except Exception as e:
            kimchi_log.warning("Image load with warning: %s." % e)
        im.save(thumbnail, "PNG")
    self.info['thumbnail'] = thumbnail
def parse_hdds(temperature_unit):
    """Collect per-disk temperatures from hddtemp.

    Returns an OrderedDict of device name -> temperature (plus a 'unit'
    key), or None if hddtemp could not be run successfully.
    """
    # hddtemp will strangely convert a non-number (see error case
    # below) to 32 deg F. So just always ask for C and convert later.
    out, error, rc = run_command(['hddtemp'])
    if rc:
        kimchi_log.error("Error retrieving HD temperatures: rc %s."
                         "output: %s" % (rc, error))
        return None

    hdds = OrderedDict()
    for hdd in out.splitlines():
        hdd_name = ''
        hdd_temp = 0.0
        try:
            # Expected line shape: "<device>: <model>: <temp>°C"
            hdd_items = hdd.split(':')
            hdd_name, hdd_temp = hdd_items[0], hdd_items[2]
            # FIX: the original class '[C|F]' also matched a literal '|';
            # only the unit letters belong in the character class.
            hdd_temp = re.sub('°[CF]', '', hdd_temp).strip()
        except Exception as e:
            kimchi_log.error('Sensors hdd parse error: %s' % e.message)
            continue
        try:
            # Try to convert the number to a float. If it fails,
            # don't add this disk to the list.
            hdd_temp = float(hdd_temp)
            if (temperature_unit == 'F'):
                hdd_temp = 9.0 / 5.0 * hdd_temp + 32
            hdds[hdd_name] = hdd_temp
        except ValueError:
            # If no sensor data, parse float will fail. For example:
            # "/dev/sda: IBM IPR-10 5D831200: S.M.A.R.T. not available"
            kimchi_log.warning("Sensors hdd: %s" % hdd)
    hdds['unit'] = temperature_unit
    return hdds
def get_list(self, storage_server, _target_type=None):
    """List storage targets exported by *storage_server*.

    When _target_type is omitted, every known source type is probed;
    otherwise only the requested type is queried.
    """
    probe_types = [_target_type] if _target_type else STORAGE_SOURCES.keys()
    found = []
    for probe_type in probe_types:
        if not self.caps.nfs_target_probe and probe_type == 'netfs':
            # libvirt NFS probing unavailable: use the local fallback.
            discovered = patch_find_nfs_target(storage_server)
        else:
            spec = self._get_storage_server_spec(server=storage_server,
                                                 target_type=probe_type)
            virt_conn = self.conn.get()
            try:
                result = virt_conn.findStoragePoolSources(probe_type,
                                                          spec, 0)
            except libvirt.libvirtError as e:
                err = "Query storage pool source fails because of %s"
                kimchi_log.warning(err, e.get_error_message())
                continue
            discovered = self._parse_target_source_result(probe_type,
                                                          result)
        found.extend(discovered)
    return found
def get_list(self, storage_server, _target_type=None, _server_port=None):
    """Probe *storage_server* for exportable storage targets.

    Targets whose paths are already consumed by an existing netfs or
    iscsi pool are filtered out of the result.
    """
    probe_types = [_target_type] if _target_type else STORAGE_SERVERS
    discovered = []
    for probe_type in probe_types:
        if not self.caps.nfs_target_probe and probe_type == 'netfs':
            found = patch_find_nfs_target(storage_server)
        else:
            spec = self._get_storage_server_spec(server=storage_server,
                                                 target_type=probe_type,
                                                 server_port=_server_port)
            virt_conn = self.conn.get()
            try:
                result = virt_conn.findStoragePoolSources(probe_type,
                                                          spec, 0)
            except libvirt.libvirtError as e:
                err = "Query storage pool source fails because of %s"
                kimchi_log.warning(err, e.get_error_message())
                continue
            found = self._parse_target_source_result(probe_type, result)
        discovered.extend(found)

    # Collect the netfs/iscsi source paths already claimed by existing
    # pools so they can be hidden from the result.
    used_paths = []
    try:
        virt_conn = self.conn.get()
        pools = virt_conn.listAllStoragePools(
            libvirt.VIR_CONNECT_LIST_STORAGE_POOLS_ISCSI |
            libvirt.VIR_CONNECT_LIST_STORAGE_POOLS_NETFS)
        for pool in pools:
            root = objectify.fromstring(pool.XMLDesc(0))
            if root.get('type') == 'netfs' and \
                    root.source.dir is not None:
                used_paths.append(root.source.dir.get('path'))
            elif root.get('type') == 'iscsi' and \
                    root.source.device is not None:
                used_paths.append(root.source.device.get('path'))
    except libvirt.libvirtError as e:
        err = "Query storage pool source fails because of %s"
        kimchi_log.warning(err, e.get_error_message())

    # Drop any target whose path is already in use.
    return [t for t in discovered if t.get('target') not in used_paths]
def get_list(self, storage_server, _target_type=None, _server_port=None):
    """Probe *storage_server* for storage targets.

    Targets already used by an existing netfs/iscsi pool are filtered
    out, and duplicate entries are removed while preserving discovery
    order. Returns a list of target dicts.
    """
    target_list = list()
    if not _target_type:
        target_types = STORAGE_SERVERS
    else:
        target_types = [_target_type]
    for target_type in target_types:
        if not self.caps.nfs_target_probe and target_type == 'netfs':
            # libvirt NFS probing unavailable: use the local fallback.
            targets = patch_find_nfs_target(storage_server)
        else:
            xml = self._get_storage_server_spec(server=storage_server,
                                                target_type=target_type,
                                                server_port=_server_port)
            conn = self.conn.get()
            try:
                ret = conn.findStoragePoolSources(target_type, xml, 0)
            except libvirt.libvirtError as e:
                err = "Query storage pool source fails because of %s"
                kimchi_log.warning(err, e.get_error_message())
                continue
            targets = self._parse_target_source_result(target_type, ret)
        target_list.extend(targets)

    # Get all netfs and iscsi paths in use
    used_paths = []
    try:
        conn = self.conn.get()
        # Get all existing ISCSI and NFS pools
        pools = conn.listAllStoragePools(
            libvirt.VIR_CONNECT_LIST_STORAGE_POOLS_ISCSI |
            libvirt.VIR_CONNECT_LIST_STORAGE_POOLS_NETFS)
        for pool in pools:
            pool_xml = pool.XMLDesc(0)
            root = objectify.fromstring(pool_xml)
            if root.get('type') == 'netfs' and \
                    root.source.dir is not None:
                used_paths.append(root.source.dir.get('path'))
            elif root.get('type') == 'iscsi' and \
                    root.source.device is not None:
                used_paths.append(root.source.device.get('path'))
    except libvirt.libvirtError as e:
        err = "Query storage pool source fails because of %s"
        kimchi_log.warning(err, e.get_error_message())

    # Filter target_list to not show the used paths
    target_list = [elem for elem in target_list
                   if elem.get('target') not in used_paths]

    # FIX: deduplicate with an order-preserving scan keyed on sorted
    # items. The previous set(tuple(t.items())) approach scrambled the
    # result order and could miss duplicates whose dicts iterated their
    # items in different orders.
    seen = set()
    unique_targets = []
    for target in target_list:
        key = tuple(sorted(target.items()))
        if key not in seen:
            seen.add(key)
            unique_targets.append(target)
    return unique_targets
def probe_img_info(path):
    """Return 'qemu-img info' metadata for *path*, sizes converted to GiB.

    Returns None when the qemu-img command does not finish within 10
    seconds.
    """
    cmd = ["qemu-img", "info", "--output=json", path]
    try:
        out = run_command(cmd, 10)[0]
    except TimeoutExpired:
        kimchi_log.warning("Cannot decide format of base img %s", path)
        return None

    # FIX: removed the dead 'info = dict()' initialisation -- the value
    # was unconditionally overwritten by json.loads() below.
    info = json.loads(out)
    # qemu-img reports sizes in bytes; shift down to GiB.
    info['virtual-size'] = info['virtual-size'] >> 30
    info['actual-size'] = info['actual-size'] >> 30
    return info
def _validate_pci_passthrough_env():
    """Verify the host is ready for PCI passthrough.

    Raises InvalidOperation when IOMMU groups are exposed but empty;
    best-effort enables the virt_use_sysfs SELinux boolean where it
    exists.
    """
    # Linux kernel < 3.5 doesn't provide /sys/kernel/iommu_groups
    iommu_dir = '/sys/kernel/iommu_groups'
    if os.path.isdir(iommu_dir):
        if not glob.glob(iommu_dir + '/*'):
            raise InvalidOperation("KCHVMHDEV0003E")

    # Enable virt_use_sysfs on RHEL6 and older distributions.
    # In recent Fedora, there is no virt_use_sysfs (getsebool fails).
    out, err, rc = run_command(['getsebool', 'virt_use_sysfs'])
    if rc == 0 and out.rstrip('\n') != "virt_use_sysfs --> on":
        out, err, rc = run_command(['setsebool', '-P',
                                    'virt_use_sysfs=on'])
        if rc != 0:
            kimchi_log.warning("Unable to turn on sebool virt_use_sysfs")
def _validate_pci_passthrough_env():
    """Check PCI passthrough prerequisites on the host.

    Raises InvalidOperation if the kernel exposes IOMMU groups but none
    exist; attempts to switch on virt_use_sysfs where SELinux has it.
    """
    # Kernels older than 3.5 do not expose /sys/kernel/iommu_groups at
    # all, so only inspect group contents when the directory exists.
    if os.path.isdir('/sys/kernel/iommu_groups'):
        if not glob.glob('/sys/kernel/iommu_groups/*'):
            raise InvalidOperation("KCHVMHDEV0003E")

    # RHEL6-era distributions need the virt_use_sysfs boolean enabled;
    # recent Fedora dropped it entirely (getsebool then returns non-zero).
    out, err, rc = run_command(['getsebool', 'virt_use_sysfs'])
    already_on = out.rstrip('\n') == "virt_use_sysfs --> on"
    if rc == 0 and not already_on:
        out, err, rc = run_command(['setsebool', '-P',
                                    'virt_use_sysfs=on'])
        if rc != 0:
            kimchi_log.warning("Unable to turn on sebool virt_use_sysfs")
def _is_dev_extended_partition(devType, devNodePath):
    """Tell whether *devNodePath* is an extended DOS partition.

    Only 'part' devices can qualify; anything else returns False.
    """
    if devType != 'part':
        return False

    # Strip the trailing partition number to get the parent disk path.
    parent_disk_path = devNodePath.rstrip('0123456789')
    device = PDevice(parent_disk_path)
    try:
        extended = PDisk(device).getExtendedPartition()
    except NotImplementedError as e:
        kimchi_log.warning(
            "Error getting extended partition info for dev %s type %s: %s",
            devNodePath, devType, e.message)
        # Treat a disk with an unsupported partition table as if it had
        # no extended partitions.
        return False

    return bool(extended and extended.path == devNodePath)
class StorageVolumesModel(object):
    """Collection model that creates libvirt storage volumes."""

    def __init__(self, **kargs):
        self.conn = kargs['conn']
        self.objstore = kargs['objstore']

    def create(self, pool_name, params):
        """Create a volume in *pool_name* and return its name.

        params must provide 'name' and 'capacity' (MiB); 'allocation'
        defaults to 0 and 'format' to qcow2. Raises MissingParameter,
        InvalidParameter or OperationFailed on error.
        """
        vol_xml = """
        <volume>
          <name>%(name)s</name>
          <allocation unit="MiB">%(allocation)s</allocation>
          <capacity unit="MiB">%(capacity)s</capacity>
          <source>
          </source>
          <target>
            <format type='%(format)s'/>
          </target>
        </volume>
        """
        params.setdefault('allocation', 0)
        params.setdefault('format', 'qcow2')

        name = params['name']
        vol_id = '%s:%s' % (pool_name, name)
        try:
            pool = StoragePoolModel.get_storagepool(pool_name, self.conn)
            xml = vol_xml % params
        # FIX: 'except KeyError as item' replaces the legacy comma form;
        # same semantics, valid on Python 2.6+ and Python 3.
        except KeyError as item:
            raise MissingParameter("KCHVOL0004E", {'item': str(item),
                                                   'volume': name})

        pool_info = StoragePoolModel(conn=self.conn,
                                     objstore=self.objstore).lookup(pool_name)
        # Read-only pool types cannot receive new volumes.
        if pool_info['type'] in READONLY_POOL_TYPE:
            raise InvalidParameter("KCHVOL0012E",
                                   {'type': pool_info['type']})
        try:
            pool.createXML(xml, 0)
        except libvirt.libvirtError as e:
            # FIX: report the pool by name; the previous code interpolated
            # the virStoragePool object itself into the error message.
            raise OperationFailed("KCHVOL0007E",
                                  {'name': name, 'pool': pool_name,
                                   'err': e.get_error_message()})
        try:
            with self.objstore as session:
                session.store('storagevolume', vol_id, {'ref_cnt': 0})
        except Exception as e:
            # If the storage volume was created flawlessly, then lets hide
            # this error to avoid more error in the VM creation process
            kimchi_log.warning('Unable to store storage volume id in '
                               'objectstore due error: %s', e.message)
        return name
def kernel_support_vfio():
    """Return True when the vfio-pci kernel module can be loaded."""
    out, err, rc = run_command(['modprobe', 'vfio-pci'])
    if rc != 0:
        # FIX: corrected the "Kernal" typo in the log message.
        kimchi_log.warning("Unable to load kernel module vfio-pci.")
        return False
    return True
class StorageVolumesModel(object):
    """Collection model for storage volumes.

    Volumes may be created from an uploaded file, a URL, or a plain
    capacity; creation runs asynchronously as a task.
    """

    def __init__(self, **kargs):
        self.conn = kargs['conn']
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)

    def create(self, pool_name, params):
        """Start an asynchronous task creating a volume in *pool_name*.

        Exactly one of 'file', 'url' or 'capacity' must appear in
        params. Returns the task lookup dict for the started task.
        """
        vol_source = ['file', 'url', 'capacity']
        name = params.get('name')

        # Exactly one creation source must be supplied.
        index_list = list(i for i in range(len(vol_source))
                          if vol_source[i] in params)
        if len(index_list) != 1:
            raise InvalidParameter("KCHVOL0018E",
                                   {'param': ",".join(vol_source)})

        create_param = vol_source[index_list[0]]

        # Verify if the URL is valid
        if create_param == 'url':
            url = params['url']
            try:
                urllib2.urlopen(url).close()
            except:
                raise InvalidParameter('KCHVOL0022E', {'url': url})

        all_vol_names = self.get_list(pool_name)

        if name is None:
            # the methods listed in 'REQUIRE_NAME_PARAMS' cannot have
            # 'name' == None
            if create_param in REQUIRE_NAME_PARAMS:
                raise InvalidParameter('KCHVOL0016E')

            # if 'name' is omitted - except for the methods listed in
            # 'REQUIRE_NAME_PARAMS' - the default volume name will be the
            # file/URL basename.
            if create_param == 'file':
                name = os.path.basename(params['file'].filename)
            elif create_param == 'url':
                name = os.path.basename(params['url'])
            else:
                name = 'upload-%s' % int(time.time())

            name = get_unique_file_name(all_vol_names, name)
            params['name'] = name

        # Dispatch to the matching _create_volume_with_<source> method.
        try:
            create_func = getattr(self, '_create_volume_with_%s' %
                                        create_param)
        except AttributeError:
            raise InvalidParameter("KCHVOL0019E", {'param': create_param})

        pool_info = StoragePoolModel(conn=self.conn,
                                     objstore=self.objstore).lookup(pool_name)
        # Read-only pools and inactive pools cannot receive new volumes.
        if pool_info['type'] in READONLY_POOL_TYPE:
            raise InvalidParameter("KCHVOL0012E",
                                   {'type': pool_info['type']})
        if pool_info['state'] == 'inactive':
            raise InvalidParameter('KCHVOL0003E', {'pool': pool_name,
                                                   'volume': name})
        if name in all_vol_names:
            raise InvalidParameter('KCHVOL0001E', {'name': name})

        params['pool'] = pool_name
        targeturi = '/storagepools/%s/storagevolumes/%s' % (pool_name, name)
        taskid = add_task(targeturi, create_func, self.objstore, params)
        return self.task.lookup(taskid)
def _create_volume_with_file(self, cb, params):
    """Task body: write an uploaded file into the pool's directory.

    cb is the task progress callback; params carries 'pool', 'name'
    and the 'file' upload object (cherrypy-style, with .fp and .file).
    """
    pool_name = params.pop('pool')
    dir_path = StoragePoolModel(
        conn=self.conn, objstore=self.objstore).lookup(pool_name)['path']
    file_path = os.path.join(dir_path, params['name'])
    if os.path.exists(file_path):
        raise InvalidParameter('KCHVOL0001E', {'name': params['name']})

    upload_file = params['file']
    f_len = upload_file.fp.length
    try:
        size = 0
        with open(file_path, 'wb') as f:
            while True:
                data = upload_file.file.read(READ_CHUNK_SIZE)
                if not data:
                    break
                size += len(data)
                f.write(data)
                # Report progress as "<bytes written>/<total bytes>".
                cb('%s/%s' % (size, f_len))
    except Exception as e:
        raise OperationFailed('KCHVOL0007E', {'name': params['name'],
                                              'pool': pool_name,
                                              'err': e.message})

    # Refresh to make sure volume can be found in following lookup
    StoragePoolModel.get_storagepool(pool_name, self.conn).refresh(0)
    cb('OK', True)

def _create_volume_with_capacity(self, cb, params):
    """Task body: create an empty volume of the requested capacity.

    params must provide 'name' and 'capacity' (bytes); 'allocation'
    defaults to 0 and 'format' to qcow2.
    """
    pool_name = params.pop('pool')
    vol_xml = """
    <volume>
      <name>%(name)s</name>
      <allocation unit='bytes'>%(allocation)s</allocation>
      <capacity unit='bytes'>%(capacity)s</capacity>
      <source>
      </source>
      <target>
        <format type='%(format)s'/>
      </target>
    </volume>
    """
    params.setdefault('allocation', 0)
    params.setdefault('format', 'qcow2')

    name = params['name']
    try:
        pool = StoragePoolModel.get_storagepool(pool_name, self.conn)
        xml = vol_xml % params
    except KeyError, item:
        raise MissingParameter("KCHVOL0004E", {'item': str(item),
                                               'volume': name})

    try:
        pool.createXML(xml, 0)
    except libvirt.libvirtError as e:
        raise OperationFailed("KCHVOL0007E", {'name': name, 'pool': pool,
                                              'err': e.get_error_message()})

    path = StoragePoolModel(
        conn=self.conn, objstore=self.objstore).lookup(pool_name)['path']
    try:
        with self.objstore as session:
            # NOTE(review): the key stored here is the pool *path*, not a
            # "pool:name" volume id as used elsewhere -- confirm this is
            # intentional.
            session.store('storagevolume', path, {'ref_cnt': 0})
    except Exception as e:
        # If the storage volume was created flawlessly, then lets hide this
        # error to avoid more error in the VM creation process
        kimchi_log.warning('Unable to store storage volume id in '
                           'objectstore due error: %s',
                           e.message)
    cb('', True)
def _clone_update_disks(self, xml, rollback): """Clone disks from a virtual machine. The disks are copied as new volumes and the new VM's XML is updated accordingly. Arguments: xml -- The XML descriptor of the original VM + new value for "/domain/uuid". rollback -- A rollback context so the new volumes can be removed if an error occurs during the cloning operation. Return: The XML descriptor <xml> with the new disk paths instead of the old ones. """ # the UUID will be used to create the disk paths uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0] all_paths = xpath_get_text(xml, XPATH_DOMAIN_DISK) vir_conn = self.conn.get() for i, path in enumerate(all_paths): try: vir_orig_vol = vir_conn.storageVolLookupByPath(path) vir_pool = vir_orig_vol.storagePoolLookupByVolume() orig_pool_name = vir_pool.name().decode('utf-8') orig_vol_name = vir_orig_vol.name().decode('utf-8') except libvirt.libvirtError, e: domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] raise OperationFailed('KCHVM0035E', {'name': domain_name, 'err': e.message}) orig_pool = self.storagepool.lookup(orig_pool_name) orig_vol = self.storagevolume.lookup(orig_pool_name, orig_vol_name) new_pool_name = orig_pool_name new_pool = orig_pool if orig_pool['type'] in ['dir', 'netfs', 'logical']: # if a volume in a pool 'dir', 'netfs' or 'logical' cannot hold # a new volume with the same size, the pool 'default' should # be used if orig_vol['capacity'] > orig_pool['available']: kimchi_log.warning('storage pool \'%s\' doesn\'t have ' 'enough free space to store image ' '\'%s\'; falling back to \'default\'', orig_pool_name, path) new_pool_name = u'default' new_pool = self.storagepool.lookup(u'default') # ...and if even the pool 'default' cannot hold a new # volume, raise an exception if orig_vol['capacity'] > new_pool['available']: domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] raise InvalidOperation('KCHVM0034E', {'name': domain_name}) elif orig_pool['type'] in ['scsi', 'iscsi']: # SCSI and iSCSI always 
fall back to the storage pool 'default' kimchi_log.warning('cannot create new volume for clone in ' 'storage pool \'%s\'; falling back to ' '\'default\'', orig_pool_name) new_pool_name = u'default' new_pool = self.storagepool.lookup(u'default') # if the pool 'default' cannot hold a new volume, raise # an exception if orig_vol['capacity'] > new_pool['available']: domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] raise InvalidOperation('KCHVM0034E', {'name': domain_name}) else: # unexpected storage pool type raise InvalidOperation('KCHPOOL0014E', {'type': orig_pool['type']}) # new volume name: <UUID>-<loop-index>.<original extension> # e.g. 1234-5678-9012-3456-0.img ext = os.path.splitext(path)[1] new_vol_name = u'%s-%d%s' % (uuid, i, ext) task = self.storagevolume.clone(orig_pool_name, orig_vol_name, new_name=new_vol_name) self.task.wait(task['id'], 3600) # 1 h # get the new volume path and update the XML descriptor new_vol = self.storagevolume.lookup(new_pool_name, new_vol_name) xml = xml_item_update(xml, XPATH_DOMAIN_DISK_BY_FILE % path, new_vol['path'], 'file') # remove the new volume should an error occur later rollback.prependDefer(self.storagevolume.delete, new_pool_name, new_vol_name)