def __init__(self, **kargs):
    """Initialize the model with its task model and object store.

    Keyword Args:
        objstore: object store used to persist task information.
    """
    self.task = TaskModel(**kargs)
    self.objstore = kargs['objstore']
    # Packages pending update; populated later by upgrade operations.
    self.pkgs2update = []
    try:
        self.host_swupdate = SoftwareUpdate()
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are not swallowed. None means no package-manager backend is
        # available; callers must check before using update features.
        self.host_swupdate = None
def __init__(self, **kargs):
    """Wire up connection, sub-models and the device-detach event hook.

    Keyword Args:
        conn: libvirt connection wrapper.
        objstore: object store for persisted state.
        eventsloop: event loop used to register libvirt events.
    """
    self.conn = kargs['conn']
    self.objstore = kargs['objstore']
    self.events = kargs['eventsloop']
    self.task = TaskModel(**kargs)
    self.devs_model = DevicesModel(**kargs)
    self.dev_model = DeviceModel(**kargs)
    # Callback slot used by the event handler; set lazily.
    self._cb = None
    # Ask the event loop to notify us when devices are detached.
    self.events.registerDetachDevicesEvent(self.conn,
                                           self._event_devices,
                                           self)
def __init__(self, **kargs):
    """Set up storage pool/volume models and detect the libvirt user."""
    self.conn = kargs['conn']
    self.objstore = kargs['objstore']
    self.task = TaskModel(**kargs)
    self.storagevolumes = StorageVolumesModel(**kargs)
    self.storagepool = StoragePoolModel(**kargs)
    # Probing the libvirt user only makes sense with a live connection.
    connected = self.conn.get() is not None
    self.libvirt_user = UserTests().probe_user() if connected else None
def setUp(self):
    """Create a throwaway object store and sandbox ArchivesModel."""
    self.temp_file = tempfile.NamedTemporaryFile(delete=False)
    self._objstore = ObjectStore(self.temp_file.name)
    self.task = TaskModel(objstore=self._objstore)
    # Keep archive operations under /tmp with no implicit path lists.
    ArchivesModel._archive_dir = '/tmp'
    ArchivesModel._default_include = []
    ArchivesModel._default_exclude = []
class VolumeGroupsTests(unittest.TestCase):
    """Unit tests for the volume-group models."""

    def setUp(self):
        # Dedicated object store so tests never touch production data.
        store_path = config.get_object_store() + "_ginger"
        self._objstore = ObjectStore(store_path)
        self.task_model = TaskModel(objstore=self._objstore)

    def _vgs_model(self):
        # Helper: fresh collection model bound to the test object store.
        return vol_group.VolumeGroupsModel(objstore=self._objstore)

    def _vg_model(self):
        # Helper: fresh single-VG model bound to the test object store.
        return vol_group.VolumeGroupModel(objstore=self._objstore)

    def test_get_vg_list(self):
        listing = self._vgs_model().get_list()
        self.assertGreaterEqual(len(listing), 0)

    def test_create_vg_missing_name(self):
        params = {"pv_paths": ["/dev/sdb1"]}
        self.assertRaises(MissingParameter, self._vgs_model().create,
                          params)

    def test_create_vg_missing_pvpaths(self):
        params = {"vg_name": "testvg"}
        self.assertRaises(MissingParameter, self._vgs_model().create,
                          params)

    @mock.patch("wok.plugins.ginger.models.utils._create_vg", autospec=True)
    def test_create_vg(self, mock_create_vg):
        params = {"vg_name": "testvg", "pv_paths": ["/dev/sdb1"]}
        task_obj = self._vgs_model().create(params)
        self.task_model.wait(task_obj.get("id"))
        mock_create_vg.assert_called_with("testvg", ["/dev/sdb1"])

    @mock.patch("wok.plugins.ginger.models.utils._extend_vg", autospec=True)
    def test_extend_vg(self, mock_extend_vg):
        self._vg_model().extend("testvg", ["/dev/sdb2"])
        mock_extend_vg.assert_called_with("testvg", ["/dev/sdb2"])

    @mock.patch("wok.plugins.ginger.models.utils._reduce_vg", autospec=True)
    def test_reduce_vg(self, mock_reduce_vg):
        self._vg_model().reduce("testvg", ["/dev/sdb2"])
        mock_reduce_vg.assert_called_with("testvg", ["/dev/sdb2"])

    @mock.patch("wok.plugins.ginger.models.utils._remove_vg", autospec=True)
    def test_delete_vg(self, mock_delete_vg):
        self._vg_model().delete("testvg")
        mock_delete_vg.assert_called_with("testvg")
class FirmwareProgressTests(unittest.TestCase):
    """Tests for FirmwareProgressModel task reporting."""

    def setUp(self):
        # Dedicated object store so the test never touches real task data.
        objstore_loc = config.get_object_store() + '_ginger'
        self._objstore = ObjectStore(objstore_loc)
        self.task = TaskModel(objstore=self._objstore)

    def test_fwprogress_without_update_flash(self):
        fwprogress = FirmwareProgressModel(objstore=self._objstore)
        task_info = fwprogress.lookup()
        self.task.wait(task_info['id'])
        task_info = self.task.lookup(task_info['id'])
        # assertEqual replaces assertEquals: the latter is a deprecated
        # alias removed in Python 3.12.
        self.assertEqual('finished', task_info['status'])
        # Without the 'update_flash' tool the task must report an error.
        self.assertIn('Error', task_info['message'])
        self.assertEqual('/plugins/ginger/fwprogress',
                         task_info['target_uri'])
class ArchiveModel(object):
    """Lookup, delete and restore a single configuration backup archive."""

    def __init__(self, **kargs):
        self._objstore = kargs['objstore']
        self.task = TaskModel(**kargs)

    def lookup(self, archive_id):
        """Return the stored metadata for *archive_id*."""
        with self._objstore as session:
            metadata = session.get(ArchivesModel._objstore_type, archive_id)
        return metadata

    def _session_delete_archive(self, session, archive_id):
        # Assume session is already locked.
        try:
            ar_params = session.get(ArchivesModel._objstore_type, archive_id)
        except NotFoundError:
            return
        if ar_params['file'] != '':
            try:
                os.unlink(ar_params['file'])
            except OSError as e:
                # It's OK if the user already removed the file manually
                if e.errno not in (errno.EACCES, errno.ENOENT):
                    raise OperationFailed('GINHBK0002E',
                                          {'name': ar_params['file']})
        session.delete(ArchivesModel._objstore_type, archive_id)

    def delete(self, archive_id):
        """Remove the archive file and its object-store record."""
        with self._objstore as session:
            self._session_delete_archive(session, archive_id)

    def _restore_tar(self, archive_id):
        # Backups are tarballs rooted at '/', so extracting with -C /
        # restores files to their original locations.
        state_dir = PluginPaths('ginger').state_dir
        backup_file = os.path.join(state_dir, 'ginger_backups',
                                   archive_id + '.tar.gz')
        cmd = ['tar', '-xzf', backup_file, '-C', '/']
        out, err, rc = run_command(cmd)
        if rc != 0:
            raise OperationFailed('GINHBK0001E',
                                  {'name': backup_file,
                                   'cmd': ' '.join(cmd)})

    def _restore_task(self, rb, backup_id):
        """Task body: extract the backup and report progress via *rb*."""
        rb('entering task to restore config backup')
        try:
            self._restore_tar(backup_id)
            rb('OK', True)
        except InvalidOperation as e:
            rb(e.message, False)
        except OperationFailed as e:
            rb(e.message, False)
            raise OperationFailed('GINHBK0013E', {'err': e.message})

    def restore(self, archive_id):
        """Kick off an asynchronous restore and return its task info."""
        taskid = AsyncTask(u'/backup/restore/%s' % (archive_id),
                           self._restore_task, archive_id).id
        return self.task.lookup(taskid)
class PartitionModel(object):
    """Lookup, format, retype and delete a single disk partition."""

    def __init__(self, **kargs):
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)

    def lookup(self, name, dev=None):
        """Return partition details, including the owning VG if any."""
        try:
            details = get_partition_details(name)
            vg_name = _get_vgname(details['path'])
            # Report "N/A" when the partition belongs to no volume group.
            details['vgname'] = vg_name if vg_name else "N/A"
            return details
        except NotFoundError:
            raise NotFoundError("GINPART00014E", {'name': name})
        except OperationFailed as e:
            raise OperationFailed("GINPART00003E",
                                  {'name': name, 'err': e.message})

    def format(self, name, fstype):
        """Queue an async task creating *fstype* on partition *name*."""
        # Refuse to format a mounted partition.
        if utils._is_mntd(name):
            raise OperationFailed('GINPART00004E')
        task_params = {'name': name, 'fstype': fstype}
        taskid = AsyncTask(u'/partitions/%s/fstype%s' % (name, fstype),
                           self._format_task, task_params).id
        return self.task.lookup(taskid)

    def _format_task(self, cb, params):
        """Task body: run mkfs with the requested filesystem type."""
        try:
            utils._makefs(params['fstype'], params['name'])
        except OperationFailed:
            raise OperationFailed('GINPART00005E')
        cb('OK', True)

    def change_type(self, name, type):
        """Change the partition type id; returns the partition name."""
        try:
            utils.change_part_type(name, type)
        except OperationFailed as e:
            raise OperationFailed("GINPART00006E", {'err': e.message})
        return name

    def delete(self, name):
        """Delete the partition from its disk."""
        try:
            utils.delete_part(name)
        except OperationFailed as e:
            raise OperationFailed("GINPART00007E", {'err': e.message})
class ArchiveModel(object):
    """Lookup, delete and restore one configuration backup archive."""

    def __init__(self, **kargs):
        self._objstore = kargs['objstore']
        self.task = TaskModel(**kargs)

    def lookup(self, archive_id):
        """Fetch the stored metadata for *archive_id*."""
        with self._objstore as session:
            record = session.get(ArchivesModel._objstore_type, archive_id)
        return record

    def _session_delete_archive(self, session, archive_id):
        # Assume session is already locked.
        try:
            ar_params = session.get(ArchivesModel._objstore_type, archive_id)
        except NotFoundError:
            # Nothing to delete; treat a missing record as success.
            return
        if ar_params['file'] != '':
            try:
                os.unlink(ar_params['file'])
            except OSError as e:
                # It's OK if the user already removed the file manually
                if e.errno not in (errno.EACCES, errno.ENOENT):
                    raise OperationFailed('GINHBK0002E',
                                          {'name': ar_params['file']})
        session.delete(ArchivesModel._objstore_type, archive_id)

    def delete(self, archive_id):
        """Delete the archive file and its object-store entry."""
        with self._objstore as session:
            self._session_delete_archive(session, archive_id)

    def _restore_tar(self, archive_id):
        # The backup is a tarball rooted at '/': extracting with -C /
        # puts every file back in place.
        archive_path = os.path.join(PluginPaths('ginger').state_dir,
                                    'ginger_backups',
                                    archive_id + '.tar.gz')
        cmd = ['tar', '-xzf', archive_path, '-C', '/']
        out, err, rc = run_command(cmd)
        if rc != 0:
            raise OperationFailed('GINHBK0001E',
                                  {'name': archive_path,
                                   'cmd': ' '.join(cmd)})

    def _restore_task(self, rb, backup_id):
        """Task body: extract the backup, reporting progress via *rb*."""
        rb('entering task to restore config backup')
        try:
            self._restore_tar(backup_id)
            rb('OK', True)
        except InvalidOperation as e:
            rb(e.message, False)
        except OperationFailed as e:
            rb(e.message, False)
            raise OperationFailed('GINHBK0013E', {'err': e.message})

    def restore(self, archive_id):
        """Start an asynchronous restore and return its task info."""
        taskid = AsyncTask(u'/backup/restore/%s' % (archive_id),
                           self._restore_task, archive_id).id
        return self.task.lookup(taskid)
class DASDdevModel(object):
    """
    Model for viewing and formatting a DASD device
    """
    def __init__(self, **kargs):
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)
        # Cache of the most recently looked-up device details; format()
        # relies on lookup() having populated this first.
        self.dev_details = {}

    def lookup(self, bus_id):
        """Return details of the DASD device with the given bus id.

        Raises:
            NotFoundError: if no DASD device matches *bus_id*.
        """
        dasd_utils.validate_bus_id(bus_id)
        try:
            dasddevices = dasd_utils._get_dasd_dev_details(bus_id)
            self.dev_details = dasddevices[0]
        except IndexError as e:
            wok_log.error("DASD device %s not found." % bus_id)
            raise NotFoundError("GINDASD0006E", {'err': e})
        return self.dev_details

    def format(self, bus_id, blk_size):
        """Queue an async task formatting the device with *blk_size*."""
        dasd_utils.validate_bus_id(bus_id)
        woklock = threading.Lock()
        name = self.dev_details['name']
        if name not in dasd_utils._get_dasd_names():
            raise NotFoundError('GINDASD0007E')
        task_params = {'blk_size': blk_size, 'name': name}
        # BUGFIX: the original released the lock both in the except
        # handler AND in 'finally', so on failure the second release()
        # raised RuntimeError ('release unlocked lock') and masked the
        # intended OperationFailed. 'with' releases exactly once.
        with woklock:
            try:
                taskid = add_task(u'/dasddevs/%s/blksize/%s' %
                                  (name, blk_size),
                                  self._format_task, self.objstore,
                                  task_params)
            except OperationFailed:
                wok_log.error("Formatting of DASD device %s failed" % bus_id)
                raise OperationFailed("GINDASD0008E", {'name': name})
        return self.task.lookup(taskid)

    def _format_task(self, cb, params):
        """Task body: low-level format of the DASD device."""
        if 'name' not in params:
            raise MissingParameter("GINDASD0009E")
        name = params['name']
        if 'blk_size' not in params:
            raise MissingParameter("GINDASD0010E")
        blk_size = params['blk_size']
        try:
            dasd_utils._format_dasd(blk_size, name)
        except OperationFailed:
            wok_log.error("Formatting of DASD device %s failed" % name)
            raise OperationFailed('GINDASD0008E', {'name': name})
        cb('OK', True)
class DASDdevModel(object):
    """
    Model for viewing and formatting a DASD device
    """
    def __init__(self, **kargs):
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)
        # BUGFIX: the original used 'global dev_details', writing a
        # module-level global that shadowed the class attribute. format()
        # then read the bare global and raised NameError whenever
        # lookup() had not run first, and instances shared state.
        # A per-instance attribute fixes both problems.
        self.dev_details = {}

    def lookup(self, bus_id):
        """Return details of the DASD device with the given bus id.

        Raises:
            NotFoundError: if no DASD device matches *bus_id*.
        """
        try:
            dasddevices = dasd_utils._get_dasd_dev_details(bus_id)
            self.dev_details = dasddevices[0]
        except ValueError as e:
            wok_log.error("DASD device %s not found." % bus_id)
            raise NotFoundError("GINDASD0006E", {'err': e})
        return self.dev_details

    def format(self, bus_id, blk_size):
        """Queue an async task formatting the device with *blk_size*."""
        woklock = threading.Lock()
        name = self.dev_details['name']
        if name not in dasd_utils._get_dasd_names():
            raise NotFoundError('GINDASD0007E')
        task_params = {'blk_size': blk_size, 'name': name}
        # BUGFIX: the original released the lock in the except handler
        # AND in 'finally'; the second release() raised RuntimeError and
        # masked the intended OperationFailed. 'with' releases once.
        with woklock:
            try:
                taskid = add_task(u'/dasddevs/%s/blksize/%s' %
                                  (name, blk_size),
                                  self._format_task, self.objstore,
                                  task_params)
            except OperationFailed as e:
                wok_log.error("Formatting of DASD device %s failed" % bus_id)
                raise OperationFailed("GINDASD0008E", {'err': e})
        return self.task.lookup(taskid)

    def _format_task(self, cb, params):
        """Task body: low-level format of the DASD device."""
        if 'name' not in params:
            raise MissingParameter("GINDASD0009E")
        name = params['name']
        if 'blk_size' not in params:
            raise MissingParameter("GINDASD0010E")
        blk_size = params['blk_size']
        try:
            dasd_utils._format_dasd(blk_size, name)
        except OperationFailed as e:
            wok_log.error("Formatting of DASD device %s failed" % name)
            raise OperationFailed('GINDASD0008E', {'err': e})
        cb('OK', True)
class PhysicalVolumesModel(object):
    """
    Model class for listing and creating a PV
    """
    def __init__(self, **kargs):
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)

    def create(self, params):
        """Queue an async task that creates a physical volume."""
        if 'pv_name' not in params:
            raise MissingParameter("GINPV00001E")
        pvname = params['pv_name']
        taskid = AsyncTask(u'/pvs/pv_name/%s' % (pvname),
                           self._create_task, params).id
        return self.task.lookup(taskid)

    def _create_task(self, cb, params):
        """Task body: retype the partition for LVM, then create the PV."""
        pvname = params['pv_name']
        cb('entering task to create pv')
        try:
            cb('create pv')
            partition = PartitionModel(objstore=self.objstore)
            part_name = pvname.split('/')[-1]
            dev_type = partition.lookup(part_name)
            if dev_type['type'] == 'part':
                # DASD partitions take type id '4'; everything else gets
                # '8e', the hex id for a Linux LVM partition.
                if 'dasd' in dev_type['name']:
                    change_dasdpart_type(part_name, '4')
                else:
                    partition.change_type(part_name, '8e')
            utils._create_pv(pvname)
        except OperationFailed:
            raise OperationFailed("GINPV00002E", {'name': pvname})
        cb('OK', True)

    def get_list(self):
        """Return the list of PV device names on the host."""
        try:
            return utils._get_pv_devices()
        except OperationFailed as e:
            raise NotFoundError("GINPV00003E", {'err': e.message})
class LogicalVolumesTests(unittest.TestCase):
    """Unit tests for the logical-volume models."""

    def setUp(self):
        # Separate object store so tests never touch production data.
        store_path = config.get_object_store() + '_ginger'
        self._objstore = ObjectStore(store_path)
        self.task_model = TaskModel(objstore=self._objstore)

    def _lvs_model(self):
        # Helper: fresh collection model bound to the test object store.
        return log_volume.LogicalVolumesModel(objstore=self._objstore)

    def test_get_lv_list(self):
        listing = self._lvs_model().get_list()
        self.assertGreaterEqual(len(listing), 0)

    def test_create_lv_missing_vgname(self):
        params = {'size': ['10M']}
        self.assertRaises(MissingParameter, self._lvs_model().create,
                          params)

    def test_create_lv_missing_size(self):
        params = {'vg_name': 'testvg'}
        self.assertRaises(MissingParameter, self._lvs_model().create,
                          params)

    @mock.patch('wok.plugins.ginger.model.utils._create_lv', autospec=True)
    def test_create_lv(self, mock_create_lv):
        params = {'vg_name': 'testvg', 'size': '10M'}
        task_obj = self._lvs_model().create(params)
        self.task_model.wait(task_obj.get('id'))
        mock_create_lv.assert_called_with('testvg', '10M')

    @mock.patch('wok.plugins.ginger.model.utils._remove_lv', autospec=True)
    def test_delete_lv(self, mock_delete_lv):
        lv = log_volume.LogicalVolumeModel(objstore=self._objstore)
        lv.delete('/dev/testvg/lvol0')
        mock_delete_lv.assert_called_with('/dev/testvg/lvol0')
class PackageUpdateModel(object):
    """Lookup and upgrade a single package on the host."""

    def __init__(self, **kargs):
        self.task = TaskModel(**kargs)
        self.objstore = kargs['objstore']
        # All packages eligible for update; refreshed by upgrade().
        self.pkgs2update = []
        try:
            self.host_swupdate = SoftwareUpdate()
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt are not swallowed. None means no
            # software-update backend is available on this host.
            self.host_swupdate = None

    def lookup(self, name):
        """Return update information for package *name*.

        Raises:
            OperationFailed: if no software-update backend is available.
        """
        if self.host_swupdate is None:
            raise OperationFailed('GGBPKGUPD0004E')
        return self.host_swupdate.getUpdate(name)

    def _resolve_dependencies(self, package=None, dep_list=None):
        """
        Resolve the dependencies for a given package from the
        dictionary of eligible packages to be upgraded.
        """
        if dep_list is None:
            dep_list = []
        if package is None:
            return []
        dep_list.append(package)
        deps = self.host_swupdate.getUpdate(package)['depends']
        # Follow only dependencies that are themselves pending updates;
        # the membership test breaks dependency cycles.
        for pkg in set(deps).intersection(self.pkgs2update):
            if pkg in dep_list:
                break
            self._resolve_dependencies(pkg, dep_list)
        return dep_list

    def upgrade(self, name):
        """
        Execute the update of a specific package (and its dependencies,
        if necessary) in the system.

        @param: Name
        @return: task
        """
        if self.host_swupdate is None:
            raise OperationFailed('GGBPKGUPD0004E')
        self.pkgs2update = self.host_swupdate.getUpdates()
        pkgs_list = self._resolve_dependencies(name)
        msg = 'The following packages will be updated: ' + \
            ', '.join(pkgs_list)
        wok_log.debug(msg)
        taskid = add_task(
            '/plugins/gingerbase/host/packagesupdate/%s/upgrade' % name,
            self.host_swupdate.doUpdate, self.objstore, pkgs_list)
        return self.task.lookup(taskid)
class PackageUpdateModel(object):
    """Lookup and upgrade a single package on the host."""

    def __init__(self, **kargs):
        self.task = TaskModel(**kargs)
        self.objstore = kargs['objstore']
        # All packages eligible for update; refreshed by upgrade().
        self.pkgs2update = []
        try:
            self.host_swupdate = SoftwareUpdate()
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt are not swallowed. None means no
            # software-update backend is available on this host.
            self.host_swupdate = None

    def lookup(self, name):
        """Return update information for package *name*.

        Raises:
            OperationFailed: if no software-update backend is available.
        """
        if self.host_swupdate is None:
            raise OperationFailed('GGBPKGUPD0004E')
        return self.host_swupdate.getUpdate(name)

    def _resolve_dependencies(self, package=None, dep_list=None):
        """
        Resolve the dependencies for a given package from the
        dictionary of eligible packages to be upgraded.
        """
        if dep_list is None:
            dep_list = []
        if package is None:
            return []
        dep_list.append(package)
        deps = self.host_swupdate.getPackageDeps(package)
        # The membership test breaks dependency cycles.
        for pkg in deps:
            if pkg in dep_list:
                break
            self._resolve_dependencies(pkg, dep_list)
        return dep_list

    def upgrade(self, name):
        """
        Execute the update of a specific package (and its dependencies,
        if necessary) in the system.

        @param: Name
        @return: task
        """
        if self.host_swupdate is None:
            raise OperationFailed('GGBPKGUPD0004E')
        self.pkgs2update = self.host_swupdate.getUpdates()
        pkgs_list = self._resolve_dependencies(name)
        msg = 'The following packages will be updated: ' + \
            ', '.join(pkgs_list)
        wok_log.debug(msg)
        taskid = AsyncTask(
            '/plugins/gingerbase/host/packagesupdate/%s/upgrade' % name,
            self.host_swupdate.doUpdate, pkgs_list).id
        return self.task.lookup(taskid)
class PhysicalVolumesModel(object):
    """
    Model class for listing and creating a PV
    """
    def __init__(self, **kargs):
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)

    def create(self, params):
        """Queue an async task that creates a physical volume."""
        if 'pv_name' not in params:
            raise MissingParameter("GINPV00001E")
        pvname = params['pv_name']
        taskid = add_task(u'/pvs/pv_name/%s' % (pvname),
                          self._create_task, self.objstore, params)
        return self.task.lookup(taskid)

    def _create_task(self, cb, params):
        """Task body: mark the partition as Linux LVM, then create the PV."""
        pvname = params['pv_name']
        cb('entering task to create pv')
        try:
            cb('create pv')
            partition = PartitionModel(objstore=self.objstore)
            # e.g. '/dev/sda1' -> 'sda1'
            part_name = pvname.split('/')[2]
            # '8e' is the hex id for a Linux LVM partition type.
            partition.change_type(part_name, '8e')
            utils._create_pv(pvname)
        except OperationFailed:
            wok_log.error("PV create failed")
            raise OperationFailed("GINPV00002E", {'pvname': pvname})
        cb('OK', True)

    def get_list(self):
        """Return the list of PV device names on the host."""
        try:
            return utils._get_pv_devices()
        except OperationFailed as e:
            wok_log.error("Unable to fetch list of PVs")
            raise NotFoundError("GINPV00003E", {'err': e.message})
class VolumeGroupsModel(object):
    """
    Model class for listing and creating a VG
    """
    def __init__(self, **kargs):
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)

    def create(self, params):
        """Queue an async task that creates a volume group."""
        if 'vg_name' not in params:
            raise MissingParameter("GINVG00013E")
        if "pv_paths" not in params:
            raise MissingParameter("GINVG00014E")
        vgname = params['vg_name']
        taskid = AsyncTask(u'/vgs/vg_name/%s' % (vgname),
                           self._create_task, params).id
        return self.task.lookup(taskid)

    def _create_task(self, cb, params):
        """Task body: create the VG from the given PV paths."""
        vgname = params['vg_name']
        pv_paths = params['pv_paths']
        cb('entering task to create vg')
        try:
            cb('create vg')
            utils._create_vg(vgname, pv_paths)
        except OperationFailed as e:
            raise OperationFailed('GINVG00001E',
                                  {'name': vgname, 'err': e.message})
        cb('OK', True)

    def get_list(self):
        """Return the names of all volume groups on the host."""
        try:
            return utils._get_vg_list()
        except OperationFailed as e:
            raise NotFoundError("GINVG00002E", {'err': e.message})
class SwUpdateProgressModel(object):
    """Expose the running software-update log tail as an async task."""

    def __init__(self, **kargs):
        self.task = TaskModel(**kargs)
        self.objstore = kargs['objstore']

    def lookup(self, *name):
        """Start tailing the update logs and return the task info.

        Raises:
            OperationFailed: when no software-update backend exists.
        """
        try:
            swupdate = SoftwareUpdate()
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt propagate; any backend failure means
            # software update is unsupported on this host.
            raise OperationFailed('GGBPKGUPD0004E')
        taskid = AsyncTask('/plugins/gingerbase/host/swupdateprogress',
                           swupdate.tailUpdateLogs).id
        return self.task.lookup(taskid)
class PartitionModel(object):
    """Lookup, format, retype and delete a disk partition."""

    def __init__(self, **kargs):
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)

    def lookup(self, name, dev=None):
        """Return the details dict for partition *name*."""
        try:
            return get_partition_details(name)
        except OperationFailed as e:
            wok_log.error("lookup method of partition failed")
            raise OperationFailed("GINPART00003E", {'err': e})

    def format(self, name, fstype):
        """Queue an async task creating *fstype* on partition *name*."""
        # Never format a mounted partition.
        if utils._is_mntd(name):
            raise OperationFailed('GINPART00004E')
        task_params = {'name': name, 'fstype': fstype}
        taskid = add_task(u'/partitions/%s/fstype%s' % (name, fstype),
                          self._format_task, self.objstore, task_params)
        return self.task.lookup(taskid)

    def _format_task(self, cb, params):
        """Task body: run mkfs on the device node."""
        device = '/dev/' + params['name']
        try:
            utils._makefs(params['fstype'], device)
        except OperationFailed:
            raise OperationFailed('GINPART00005E')
        cb('OK', True)

    def change_type(self, name, type):
        """Set the partition type id; returns the partition name."""
        try:
            utils.change_part_type(name, type)
        except OperationFailed as e:
            wok_log.error("change type for partition failed")
            raise OperationFailed("GINPART00006E", {'err': e})
        return name

    def delete(self, name):
        """Remove the partition from its disk."""
        try:
            utils.delete_part(name)
        except OperationFailed as e:
            wok_log.error("delete partition failed")
            raise OperationFailed("GINPART00007E", {'err': e})
class SoftwareUpdateProgressModel(object):
    """Expose the running software-update log tail as an async task."""

    def __init__(self, **kargs):
        self.task = TaskModel(**kargs)
        self.objstore = kargs['objstore']

    def lookup(self, *name):
        """Start tailing the update logs and return the task info.

        Raises:
            OperationFailed: when no software-update backend exists.
        """
        try:
            swupdate = SoftwareUpdate()
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt propagate; any backend failure means
            # software update is unsupported on this host.
            raise OperationFailed('KCHPKGUPD0004E')
        taskid = add_task('/plugins/kimchi/host/swupdateprogress',
                          swupdate.tailUpdateLogs, self.objstore, None)
        return self.task.lookup(taskid)
class SoftwareUpdateProgressModel(object):
    """Expose the running software-update log tail as an async task."""

    def __init__(self, **kargs):
        self.task = TaskModel(**kargs)
        self.objstore = kargs['objstore']

    def lookup(self, *name):
        """Start tailing the update logs and return the task info.

        Raises:
            OperationFailed: when no software-update backend exists.
        """
        try:
            swupdate = SoftwareUpdate()
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt propagate; any backend failure means
            # software update is unsupported on this host.
            raise OperationFailed('GGBPKGUPD0004E')
        taskid = add_task('/plugins/gingerbase/host/swupdateprogress',
                          swupdate.tailUpdateLogs, self.objstore, None)
        return self.task.lookup(taskid)
class CIOIgnoreModel(object):
    """
    model class for ignore list
    """
    def __init__(self, **kargs):
        self.objstore = kargs.get('objstore')
        self.task = TaskModel(**kargs)

    def lookup(self, name):
        """
        method to retrieve device IDs in ignore list
        :return: returns dictionary with key as 'ignored_devices and
                 value as list of device ids (single device id or range
                 of device ids)
        """
        out, err, rc = run_command([CIO_IGNORE, '-l'])
        if rc:
            wok_log.error('failed to retrieve ignore list '
                          'using \'cio_ignore -l\'. Error: %s'
                          % err.strip())
            raise OperationFailed('GS390XIOIG001E', {'error': err.strip()})
        devices = {IGNORED_DEVICES: _parse_ignore_output(out)}
        wok_log.info('Successfully retrieved devices from ignore list')
        return devices

    def remove(self, name, devices):
        """
        Remove one or more device IDs from blacklist.
        :param devices: List of devices
        :return: task json
        """
        # Only a list of device ids is an acceptable payload.
        if not isinstance(devices, list):
            wok_log.error('Input is not of type list. Input: %s' % devices)
            raise InvalidParameter('GS390XINVINPUT',
                                   {'reason': 'input must '
                                              'be of type'
                                              ' list'})
        wok_log.info('Removing devices %s from ignore list' % devices)
        taskid = add_task('/plugins/gingers390x/cioignore/remove',
                          _remove_devices, self.objstore, devices)
        return self.task.lookup(taskid)
class LogicalVolumesModel(object):
    """
    Model class for listing and creating a LV
    """
    def __init__(self, **kargs):
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)

    def create(self, params):
        """Queue an async task that creates a linear logical volume."""
        if 'vg_name' not in params:
            raise MissingParameter('GINLV00001E')
        if 'size' not in params:
            raise MissingParameter('GINLV00002E')
        vgname = params['vg_name']
        taskid = AsyncTask(u'/lvs/vg_name/%s' % (vgname),
                           self._create_linear_task, params).id
        return self.task.lookup(taskid)

    def _create_linear_task(self, cb, params):
        """Task body: create the LV inside its volume group."""
        vgname = params['vg_name']
        size = params['size']
        cb('entering task to create lv')
        try:
            cb('create lv')
            utils._create_lv(vgname, size)
        except OperationFailed as e:
            raise OperationFailed('GINLV00003E', {'err': e.message})
        cb('OK', True)

    def get_list(self):
        """Return the names of all logical volumes on the host."""
        try:
            return utils._get_lv_list()
        except OperationFailed as e:
            raise OperationFailed("GINLV00004E", {'err': e.message})
class CIOIgnoreModel(object):
    """
    model class for ignore list
    """
    def __init__(self, **kargs):
        self.objstore = kargs.get('objstore')
        self.task = TaskModel(**kargs)

    def lookup(self, name):
        """
        method to retrieve device IDs in ignore list
        :return: returns dictionary with key as 'ignored_devices and
                 value as list of device ids (single device id or range
                 of device ids)
        """
        out, err, rc = run_command([CIO_IGNORE, '-l'])
        if rc:
            wok_log.error('failed to retrieve ignore list '
                          'using \'cio_ignore -l\'. Error: %s'
                          % err.strip())
            raise OperationFailed('GS390XIOIG001E', {'error': err.strip()})
        listing = {IGNORED_DEVICES: _parse_ignore_output(out)}
        wok_log.info('Successfully retrieved devices from ignore list')
        return listing

    def remove(self, name, devices):
        """
        Remove one or more device IDs from blacklist.
        :param devices: List of devices
        :return: task json
        """
        # Only a list of device ids is an acceptable payload.
        if not isinstance(devices, list):
            wok_log.error('Input is not of type list. Input: %s' % devices)
            raise InvalidParameter('GS390XINVINPUT',
                                   {'reason': 'input must '
                                              'be of type'
                                              ' list'})
        wok_log.info('Create task for removing devices \"% s\" from ignore'
                     'list' % devices)
        taskid = AsyncTask('/plugins/gingers390x/cioignore/remove',
                           _remove_devices, devices).id
        return self.task.lookup(taskid)
class VolumeGroupsModel(object):
    """
    Model class for listing and creating a VG
    """
    def __init__(self, **kargs):
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)

    def create(self, params):
        """Queue an async task that creates a volume group.

        Raises:
            MissingParameter: if 'vg_name' or 'pv_paths' is absent.
        """
        if 'vg_name' not in params:
            raise MissingParameter("GINVG00013E")
        vgname = params['vg_name']
        if "pv_paths" not in params:
            raise MissingParameter("GINVG00014E")
        taskid = add_task(u'/vgs/vg_name/%s' % (vgname), self._create_task,
                          self.objstore, params)
        return self.task.lookup(taskid)

    def _create_task(self, cb, params):
        """Task body: create the VG from the given PV paths."""
        vgname = params['vg_name']
        pv_paths = params['pv_paths']
        cb('entering task to create vg')
        try:
            cb('create vg')
            utils._create_vg(vgname, pv_paths)
        except OperationFailed as e:
            # 'except X as e' replaces the Python-2-only 'except (X), e'
            # comma syntax, which is a SyntaxError on Python 3.
            wok_log.error('failed to create vg')
            raise OperationFailed('GINVG00001E',
                                  {'vgname': vgname, 'err': e.message})
        cb('OK', True)
class LUNScanModel(object):
    """
    model class for ignore list
    """
    def __init__(self, **kargs):
        self.objstore = kargs.get('objstore')
        self.task = TaskModel(**kargs)

    def _set_and_report(self, flag):
        # Helper: flip the scan flag, then report the resulting state.
        utils.enable_lun_scan(flag)
        return utils.is_lun_scan_enabled()

    def lookup(self, name):
        """
        Get the status of LUN scanning
        :return: returns dictionary with key as 'lunscan'
                 and value as boolean
        """
        return utils.is_lun_scan_enabled()

    def enable(self, name):
        """Turn LUN scanning on and return the new status."""
        return self._set_and_report("1")

    def disable(self, name):
        """Turn LUN scanning off and return the new status."""
        return self._set_and_report("0")

    def trigger(self, name):
        """Queue an async task that triggers a LUN scan."""
        taskid = AsyncTask('/plugins/gingers390/lunscan/trigger',
                           utils.trigger_lun_scan, {}).id
        return self.task.lookup(taskid)
class LUNScanModel(object):
    """
    model class for ignore list
    """
    def __init__(self, **kargs):
        self.objstore = kargs.get('objstore')
        self.task = TaskModel(**kargs)

    def lookup(self, name):
        """
        Get the status of LUN scanning
        :return: returns dictionary with key as 'lunscan'
                 and value as boolean
        """
        return utils.is_lun_scan_enabled()

    def enable(self, name):
        """Turn LUN scanning on, then return the current status."""
        utils.enable_lun_scan("1")
        return utils.is_lun_scan_enabled()

    def disable(self, name):
        """Turn LUN scanning off, then return the current status."""
        utils.enable_lun_scan("0")
        return utils.is_lun_scan_enabled()

    def trigger(self, name):
        """Queue an async task that triggers a LUN scan."""
        taskid = add_task('/plugins/gingers390/lunscan/trigger',
                          utils.trigger_lun_scan, self.objstore, {})
        return self.task.lookup(taskid)
class LogicalVolumesModel(object):
    """
    Model class for listing and creating a LV
    """
    def __init__(self, **kargs):
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)

    def create(self, params):
        """Queue an async task that creates a linear logical volume.

        Raises:
            MissingParameter: if 'vg_name' or 'size' is absent.
        """
        if 'vg_name' not in params:
            raise MissingParameter('GINLV00001E')
        vgname = params['vg_name']
        if 'size' not in params:
            raise MissingParameter('GINLV00002E')
        taskid = AsyncTask(u'/lvs/vg_name/%s' % (vgname),
                           self._create_linear_task, params).id
        return self.task.lookup(taskid)

    def _create_linear_task(self, cb, params):
        """Task body: create the LV inside its volume group."""
        vgname = params['vg_name']
        size = params['size']
        cb('entering task to create lv')
        try:
            cb('create lv')
            utils._create_lv(vgname, size)
        except OperationFailed as e:
            # 'except X as e' replaces the Python-2-only 'except (X), e'
            # comma syntax, which fails to parse on Python 3.
            raise OperationFailed('GINLV00003E', {'err': e.message})
        cb('OK', True)
class HostModel(object):
    """Model aggregating host hardware/OS information and host actions."""

    def __init__(self, **kargs):
        # self.conn = kargs['conn']
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)
        self.lscpu = LsCpu()

    def _get_ppc_cpu_model(self):
        """
        method to get cpu_model for ppc architecture
        """
        res = {}
        with open(PROC_CPUINFO) as f:
            for line in f:
                # Parse CPU, CPU's revision and CPU's clock information
                for key in ['cpu', 'revision', 'clock']:
                    if key in line:
                        info = line.split(':')[1].strip()
                        if key == 'clock':
                            value = float(info.split('MHz')[0].strip()) / 1000
                        else:
                            value = info.split('(')[0].strip()
                        res[key] = value
                        # Power machines show, for each cpu/core, a block
                        # with all cpu information. Here we control the scan
                        # of the necessary information (1st block provides
                        # everything), skipping the function when find all
                        # information.
                        if len(res.keys()) == 3:
                            return '%(cpu)s (%(revision)s) @ %(clock)s GHz' \
                                % res
        return ''

    def _get_x86_cpu_model(self):
        """
        method to get cpu_model for x86 architecture
        """
        try:
            with open(PROC_CPUINFO) as f:
                for line in f:
                    if 'model name' in line:
                        # 'return' exits the loop; the original had an
                        # unreachable 'break' after it (dead code removed).
                        return line.split(':')[1].strip()
        except Exception as e:
            wok_log.error('Failed to retrive cpu_model for '
                          '%s. Error: %s', ARCH, e.__str__())
        return ''

    def _get_s390x_host_info(self):
        """
        method to get additional host details
        specific to s390x architecture
        :return: dictionary
        """
        host_info = {}
        host_info['cpus'] = self._get_cpus()
        host_info['cpus']['dedicated'] = 0
        host_info['cpus']['shared'] = 0
        host_info['cpu_model'] = ''
        host_info['virtualization'] = {}
        s390x_sysinfo = self._get_s390x_sysinfo()
        # cpu_model is built as manufacturer/type/model from whatever
        # pieces /proc/sysinfo provided ('in dict' instead of 'in keys()').
        if 'manufacturer' in s390x_sysinfo:
            host_info['cpu_model'] = s390x_sysinfo['manufacturer']
        if 'type' in s390x_sysinfo:
            host_info['cpu_model'] = \
                host_info['cpu_model'] + '/' + s390x_sysinfo['type']
        if 'model' in s390x_sysinfo:
            host_info['cpu_model'] = \
                host_info['cpu_model'] + '/' + s390x_sysinfo['model']
        if CPUS_DEDICATED in s390x_sysinfo:
            host_info['cpus']['dedicated'] = s390x_sysinfo[CPUS_DEDICATED]
        if CPUS_SHARED in s390x_sysinfo:
            host_info['cpus']['shared'] = s390x_sysinfo[CPUS_SHARED]
        host_info['virtualization']['hypervisor'] = \
            self.lscpu.get_hypervisor()
        host_info['virtualization']['hypervisor_vendor'] = \
            self.lscpu.get_hypervisor_vendor()
        host_info['virtualization'][LPAR_NAME] = ''
        host_info['virtualization'][LPAR_NUMBER] = ''
        if LPAR_NAME in s390x_sysinfo:
            host_info['virtualization'][LPAR_NAME] = s390x_sysinfo[LPAR_NAME]
        if LPAR_NUMBER in s390x_sysinfo:
            host_info['virtualization'][LPAR_NUMBER] = \
                s390x_sysinfo[LPAR_NUMBER]
        return host_info

    def _get_s390x_sysinfo(self):
        """
        This method retrieves following system information
        for s390 architecture
        * manufacturer: Manufacturer of host machine
        * type: Type of the host machine
        * model: Model of host machine
        * LPAR_NUMBER: LPAR Number of host
        * LPAR_NAME: Name of host LPAR
        * CPUS_DEDICATED: LPAR CPUs Dedicated
        * CPUS_SHARED: LPAR CPUs Shared

        :return: dictionary with the keys 'manufacturer', 'type', 'model',
                 CPUS_SHARED, CPUS_DEDICATED, LPAR_NUMBER, LPAR_NAME
        """
        s390x_sysinfo = {}
        try:
            with open(PROC_SYSINFO) as f:
                for line in f:
                    # Only consider simple 'Key: value' lines.
                    if ':' in line and (len(line.split(':')) == 2):
                        info = line.split(':')
                        if info[0] == 'Model' and (len(info[1].split()) == 2):
                            s390x_sysinfo['model'] = \
                                info[1].split()[0].strip() + \
                                ' ' + info[1].split()[1].strip()
                        elif info[0] == 'Manufacturer':
                            s390x_sysinfo['manufacturer'] = info[1].strip()
                        elif info[0] == 'Type':
                            s390x_sysinfo['type'] = info[1].strip()
                        elif info[0] == 'LPAR Number':
                            s390x_sysinfo[LPAR_NUMBER] = int(info[1].strip())
                        elif info[0] == 'LPAR Name':
                            s390x_sysinfo[LPAR_NAME] = info[1].strip()
                        elif info[0] == 'LPAR CPUs Dedicated':
                            s390x_sysinfo[CPUS_DEDICATED] = \
                                int(info[1].strip())
                        elif info[0] == 'LPAR CPUs Shared':
                            s390x_sysinfo[CPUS_SHARED] = int(info[1].strip())
        except Exception as e:
            wok_log.error('Failed to retrieve information from %s file. '
                          'Error: %s', PROC_SYSINFO, e.__str__())
        return s390x_sysinfo

    def _get_memory(self):
        """
        method to retrieve memory information for all architecture
        :return: dictionary with keys "online" and "offline" (bytes)
        """
        memory = {}
        online_memory = 0
        offline_memory = 0
        if ARCH.startswith('s390x'):
            online_mem_pat = r'^Total online memory :\s+(\d+)\s+MB$'
            offline_mem_pat = r'^Total offline memory:\s+(\d+)\s+MB$'
            out, err, rc = run_command(LSMEM)
            # lsmem on s390x ends with summary lines like:
            #   Total online memory : 2048 MB
            #   Total offline memory: 2048 MB
            if not rc:
                online_mem = \
                    re.search(online_mem_pat, out.strip(), re.M | re.I)
                offline_mem = \
                    re.search(offline_mem_pat, out.strip(), re.M | re.I)
                # converting MB to bytes; lsmem always returns memory in MB
                if online_mem and len(online_mem.groups()) == 1:
                    online_memory = int(online_mem.group(1)) * 1024 * 1024
                if offline_mem and len(offline_mem.groups()) == 1:
                    offline_memory = int(offline_mem.group(1)) * 1024 * 1024
            else:
                wok_log.error('Failed to retrieve memory information with'
                              ' command %s. Error: %s' % (LSMEM, err))
        else:
            # psutil renamed this API across versions; probe both names.
            if hasattr(psutil, 'phymem_usage'):
                online_memory = psutil.phymem_usage().total
            elif hasattr(psutil, 'virtual_memory'):
                online_memory = psutil.virtual_memory().total
        memory['online'] = online_memory
        memory['offline'] = offline_memory
        return memory

    def _get_cpus(self):
        """
        method to retrieve online cpus count and offline cpus count
        for all architecture
        :return: dictionary with keys "online" and "offline"
        """
        cpus = {}
        total_cpus = int(self.lscpu.get_total_cpus())
        # psutil is unstable on how to get the number of
        # cpus, different versions call it differently
        online_cpus = 0
        if hasattr(psutil, 'cpu_count'):
            online_cpus = psutil.cpu_count()
        elif hasattr(psutil, 'NUM_CPUS'):
            online_cpus = psutil.NUM_CPUS
        elif hasattr(psutil, '_psplatform'):
            for method_name in ['_get_num_cpus', 'get_num_cpus']:
                method = getattr(psutil._psplatform, method_name, None)
                if method is not None:
                    online_cpus = method()
                    break
        if online_cpus > 0:
            offline_cpus = 0
            if total_cpus > online_cpus:
                offline_cpus = total_cpus - online_cpus
        else:
            online_cpus = 'unknown'
            offline_cpus = 'unknown'
        cpus['online'] = online_cpus
        cpus['offline'] = offline_cpus
        return cpus

    def _get_base_info(self):
        """
        method to retrieve common host information for all architectures
        :return: dictionary with keys 'os_distro', 'os_version',
                 'os_codename', 'architecture', 'host', 'memory',
                 'cpu_threads'
        """
        common_info = {}
        # Include IBM PowerKVM name to supported distro names
        _sup_distros = platform._supported_dists + ('ibm_powerkvm',)
        # 'fedora' '17' 'Beefy Miracle'
        distro, version, codename = platform.linux_distribution(
            supported_dists=_sup_distros)
        common_info['os_distro'] = distro
        common_info['os_version'] = version
        common_info['os_codename'] = codename
        common_info['architecture'] = ARCH
        common_info['host'] = platform.node()
        common_info['memory'] = self._get_memory()
        common_info['cpu_threads'] = {}
        common_info['cpu_threads']['sockets'] = self.lscpu.get_sockets()
        common_info['cpu_threads']['cores_per_socket'] = \
            self.lscpu.get_cores_per_socket()
        common_info['cpu_threads']['threads_per_core'] = \
            self.lscpu.get_threads_per_core()
        if ARCH.startswith('s390x'):
            common_info['cpu_threads']['books'] = self.lscpu.get_books()
        return common_info

    def lookup(self, *name):
        """
        method to get basic information for host
        """
        host_info = self._get_base_info()
        if ARCH.startswith('s390x'):
            host_info.update(self._get_s390x_host_info())
        elif ARCH.startswith('ppc'):
            host_info['cpus'] = self._get_cpus()
            host_info['cpu_model'] = self._get_ppc_cpu_model()
        else:
            host_info['cpus'] = self._get_cpus()
            host_info['cpu_model'] = self._get_x86_cpu_model()
        return host_info

    def swupdate(self, *name):
        """Start an asynchronous software update; returns task info."""
        try:
            swupdate = SoftwareUpdate()
        except Exception:
            raise OperationFailed('GGBPKGUPD0004E')

        pkgs = swupdate.getNumOfUpdates()
        if pkgs == 0:
            wok_log.debug(messages['GGBPKGUPD0001E'])
            return {'message': messages['GGBPKGUPD0001E']}

        wok_log.debug('Host is going to be updated.')
        taskid = AsyncTask('/plugins/gingerbase/host/swupdate',
                           swupdate.doUpdate).id
        return self.task.lookup(taskid)

    def shutdown(self, args=None):
        # Check for running vms before shutdown
        running_vms = self.get_vmlist_bystate('running')
        if len(running_vms) > 0:
            raise OperationFailed('GGBHOST0001E')
        wok_log.info('Host is going to shutdown.')
        os.system('shutdown -h now')

    def reboot(self, args=None):
        # Check for running vms before reboot
        running_vms = self.get_vmlist_bystate('running')
        if len(running_vms) > 0:
            raise OperationFailed('GGBHOST0002E')
        wok_log.info('Host is going to reboot.')
        os.system('reboot')

    def get_vmlist_bystate(self, state='running'):
        """Return guest names in the given state; [] when libvirt is
        unavailable or libvirtd is not running."""
        try:
            libvirt_mod = __import__('libvirt')
        except Exception as e:
            # Fixed broken logging call: the original passed e.message as a
            # lazy argument without a matching %s placeholder.
            wok_log.info('Unable to import libvirt module. Details: %s',
                         e.message)
            # Ignore any error and assume there is no vm running in the host
            return []

        libvirtd_running = ['systemctl', 'is-active', 'libvirtd', '--quiet']
        _, _, rcode = run_command(libvirtd_running, silent=True)
        if rcode != 0:
            return []

        try:
            conn = libvirt_mod.open(None)
            return [dom.name().decode('utf-8')
                    for dom in conn.listAllDomains(0)
                    if (DOM_STATE_MAP[dom.info()[0]] == state)]
        except Exception as e:
            # Same placeholder fix as above.
            wok_log.info('Unable to get virtual machines information. '
                         'Details: %s', e.message)
            raise OperationFailed('GGBHOST0003E')
class VolumeGroupsTests(unittest.TestCase):
    """Unit tests for the VolumeGroup models; LVM helpers are mocked."""

    def setUp(self):
        # Use a dedicated object store file so tests never touch the
        # production store.
        objstore_loc = config.get_object_store() + '_ginger'
        self._objstore = ObjectStore(objstore_loc)
        self.task_model = TaskModel(objstore=self._objstore)

    def test_get_vg_list(self):
        vgs = vol_group.VolumeGroupsModel(objstore=self._objstore)
        vgs_list = vgs.get_list()
        self.assertGreaterEqual(len(vgs_list), 0)

    def test_create_vg_missing_name(self):
        vgs = vol_group.VolumeGroupsModel(objstore=self._objstore)
        pvpaths = ['/dev/sdb1']
        params = {'pv_paths': pvpaths}
        self.assertRaises(MissingParameter, vgs.create, params)

    def test_create_vg_missing_pvpaths(self):
        vgs = vol_group.VolumeGroupsModel(objstore=self._objstore)
        vgname = 'testvg'
        params = {'vg_name': vgname}
        self.assertRaises(MissingParameter, vgs.create, params)

    @mock.patch('wok.plugins.ginger.model.utils._create_vg', autospec=True)
    def test_create_vg(self, mock_create_vg):
        vgs = vol_group.VolumeGroupsModel(objstore=self._objstore)
        vgname = 'testvg'
        pvpaths = ['/dev/sdb1']
        params = {'vg_name': vgname, 'pv_paths': pvpaths}
        task_obj = vgs.create(params)
        # Wait for the AsyncTask to finish so the mock has been called
        # before asserting.
        self.task_model.wait(task_obj.get('id'))
        mock_create_vg.assert_called_with(vgname, pvpaths)

    @mock.patch('wok.plugins.ginger.model.utils._extend_vg', autospec=True)
    def test_extend_vg(self, mock_extend_vg):
        vg = vol_group.VolumeGroupModel(objstore=self._objstore)
        vgname = 'testvg'
        pvpaths = ['/dev/sdb2']
        vg.extend(vgname, pvpaths)
        mock_extend_vg.assert_called_with(vgname, pvpaths)

    @mock.patch('wok.plugins.ginger.model.utils._reduce_vg', autospec=True)
    def test_reduce_vg(self, mock_reduce_vg):
        vg = vol_group.VolumeGroupModel(objstore=self._objstore)
        vgname = 'testvg'
        pvpaths = ['/dev/sdb2']
        vg.reduce(vgname, pvpaths)
        mock_reduce_vg.assert_called_with(vgname, pvpaths)

    @mock.patch('wok.plugins.ginger.model.utils._remove_vg', autospec=True)
    @mock.patch('wok.plugins.ginger.model.utils.get_lvm_version')
    @mock.patch('wok.plugins.ginger.model.vol_group.VolumeGroupModel.lookup')
    def test_delete_vg(self, mock_vg_lookup, mock_lvm_version,
                       mock_delete_vg):
        # Decorators apply bottom-up: the innermost patch (lookup) becomes
        # the first mock argument.
        mock_lvm_version.return_value = "2.02.98"
        mock_vg_lookup.return_value = {'vgName': 'testvg', 'Cur LV': 0}
        vg = vol_group.VolumeGroupModel(objstore=self._objstore)
        vgname = 'testvg'
        vg.delete(vgname)
        mock_delete_vg.assert_called_with(vgname)

    def test_parse_lvm_version(self):
        lvm_version_input = " 2.02.98(2) (2012-10-15)"
        lvm_version = utils._parse_lvm_version(lvm_version_input)
        # assertEqual reports both values on failure, unlike the original
        # 'if != ...: self.fail()' pattern which gave no diagnostic.
        self.assertEqual(lvm_version, "2.02.98")

    def test_parse_lvm_version_exception(self):
        lvm_version_input = " wrong version string"
        self.assertRaises(
            OperationFailed, utils._parse_lvm_version, lvm_version_input)
class StorageVolumesModel(object):
    """Collection model for libvirt storage volumes inside a pool."""

    def __init__(self, **kargs):
        self.conn = kargs['conn']
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)

    def create(self, pool_name, params):
        """Create a volume in pool_name from exactly one source.

        params must carry exactly one of 'url' or 'capacity'; 'name' is
        optional unless the chosen method requires it. Returns the task
        lookup dict for the asynchronous creation.
        """
        vol_source = ['url', 'capacity']
        name = params.get('name')

        # Exactly one creation source must be given.
        index_list = [i for i in range(len(vol_source))
                      if vol_source[i] in params]
        if len(index_list) != 1:
            raise InvalidParameter("KCHVOL0018E",
                                   {'param': ",".join(vol_source)})

        create_param = vol_source[index_list[0]]

        # Verify if the URL is valid
        if create_param == 'url':
            url = params['url']
            try:
                urllib2.urlopen(url).close()
            # 'except Exception' instead of a bare 'except:', which would
            # also swallow SystemExit/KeyboardInterrupt.
            except Exception:
                raise InvalidParameter('KCHVOL0022E', {'url': url})

        all_vol_names = self.get_list(pool_name)

        if name is None:
            # the methods listed in 'REQUIRE_NAME_PARAMS' cannot have
            # 'name' == None
            if create_param in REQUIRE_NAME_PARAMS:
                raise InvalidParameter('KCHVOL0016E')

            # if 'name' is omitted - except for the methods listed in
            # 'REQUIRE_NAME_PARAMS' - the default volume name will be the
            # file/URL basename.
            if create_param == 'url':
                name = os.path.basename(params['url'])
            else:
                name = 'upload-%s' % int(time.time())

            name = get_unique_file_name(all_vol_names, name)
            params['name'] = name

        try:
            create_func = getattr(self, '_create_volume_with_%s' %
                                  create_param)
        except AttributeError:
            raise InvalidParameter("KCHVOL0019E", {'param': create_param})

        pool_info = StoragePoolModel(conn=self.conn,
                                     objstore=self.objstore).lookup(pool_name)
        if pool_info['type'] in READONLY_POOL_TYPE:
            raise InvalidParameter("KCHVOL0012E",
                                   {'type': pool_info['type']})
        if pool_info['state'] == 'inactive':
            raise InvalidParameter('KCHVOL0003E',
                                   {'pool': pool_name, 'volume': name})
        if name in all_vol_names:
            raise InvalidParameter('KCHVOL0001E', {'name': name})

        params['pool'] = pool_name
        targeturi = '/plugins/kimchi/storagepools/%s/storagevolumes/%s' \
                    % (pool_name, name)
        taskid = add_task(targeturi, create_func, self.objstore, params)
        return self.task.lookup(taskid)

    def _create_volume_with_capacity(self, cb, params):
        """AsyncTask body: create an empty volume of the given capacity."""
        pool_name = params.pop('pool')
        vol_xml = """
        <volume>
          <name>%(name)s</name>
          <allocation unit='bytes'>%(allocation)s</allocation>
          <capacity unit='bytes'>%(capacity)s</capacity>
          <source>
          </source>
          <target>
            <format type='%(format)s'/>
          </target>
        </volume>
        """
        params.setdefault('allocation', 0)
        params.setdefault('format', 'qcow2')

        name = params['name']
        try:
            pool = StoragePoolModel.get_storagepool(pool_name, self.conn)
            xml = vol_xml % params
        # 'as' binding: 'except KeyError, item' is invalid on Python 3.
        except KeyError as item:
            raise MissingParameter("KCHVOL0004E",
                                   {'item': str(item), 'volume': name})

        try:
            pool.createXML(xml, 0)
        except libvirt.libvirtError as e:
            raise OperationFailed("KCHVOL0007E",
                                  {'name': name, 'pool': pool,
                                   'err': e.get_error_message()})

        vol_info = StorageVolumeModel(
            conn=self.conn, objstore=self.objstore).lookup(pool_name, name)
        vol_path = vol_info['path']
        set_disk_used_by(self.objstore, vol_info['path'], [])

        if params.get('upload', False):
            upload_volumes[vol_path] = {'lock': threading.Lock(),
                                        'offset': 0, 'cb': cb}
            cb('ready for upload')
        else:
            cb('OK', True)
def __init__(self, **kargs):
    # Keep the libvirt connection and object-store handles and build the
    # helper models; all of them share the same construction arguments.
    self.conn = kargs['conn']
    self.objstore = kargs['objstore']
    self.task = TaskModel(**kargs)
    self.storagevolumes = StorageVolumesModel(**kargs)
    self.storagepool = StoragePoolModel(**kargs)
class StorageVolumeModel(object):
    """Resource model for a single libvirt storage volume."""

    def __init__(self, **kargs):
        self.conn = kargs['conn']
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)
        self.storagevolumes = StorageVolumesModel(**kargs)
        self.storagepool = StoragePoolModel(**kargs)

    @staticmethod
    def get_storagevolume(poolname, name, conn):
        """Return the libvirt volume object, validating the pool is active.

        Raises InvalidOperation for an inactive pool and NotFoundError when
        the volume does not exist.
        """
        pool = StoragePoolModel.get_storagepool(poolname, conn)
        if not pool.isActive():
            raise InvalidOperation("KCHVOL0006E", {'name': pool})
        try:
            return pool.storageVolLookupByName(name.encode("utf-8"))
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_STORAGE_VOL:
                raise NotFoundError("KCHVOL0002E", {'name': name,
                                                    'pool': poolname})
            else:
                raise

    def lookup(self, pool, name):
        """Return a dict describing the volume (type, capacity, format...)."""
        vol = StorageVolumeModel.get_storagevolume(pool, name, self.conn)
        path = vol.path()
        info = vol.info()
        xml = vol.XMLDesc(0)
        try:
            fmt = xpath_get_text(xml, "/volume/target/format/@type")[0]
        except IndexError:
            # Not all types of libvirt storage can provide volume format
            # infomation. When there is no format information, we assume
            # it's 'raw'.
            fmt = 'raw'

        iso_img = None

        # 'raw' volumes from 'logical' pools may actually be 'iso';
        # libvirt always reports them as 'raw'
        pool_info = self.storagepool.lookup(pool)
        if pool_info['type'] == 'logical' and fmt == 'raw':
            try:
                iso_img = IsoImage(path)
            except IsoFormatError:
                # not 'iso' afterall
                pass
            else:
                fmt = 'iso'

        # 'raw' volumes can not be valid image disks (e.g. XML, PDF, TXT
        # are raw files), so it's necessary check the 'content' of them
        isvalid = True
        if fmt == 'raw':
            ms = magic.open(magic.NONE)
            try:
                ms.load()
                if ms.file(path).lower() not in VALID_RAW_CONTENT:
                    isvalid = False
            except UnicodeDecodeError:
                isvalid = False
            finally:
                # Always release the libmagic cookie: the original leaked
                # it when ms.file() raised UnicodeDecodeError.
                ms.close()

        used_by = get_disk_used_by(self.objstore, self.conn, path)
        res = dict(type=VOLUME_TYPE_MAP[info[0]],
                   capacity=info[1],
                   allocation=info[2],
                   path=path,
                   used_by=used_by,
                   format=fmt,
                   isvalid=isvalid)
        if fmt == 'iso':
            if os.path.islink(path):
                path = os.path.join(os.path.dirname(path),
                                    os.readlink(path))
            os_distro = os_version = 'unknown'
            try:
                if iso_img is None:
                    iso_img = IsoImage(path)
                os_distro, os_version = iso_img.probe()
                bootable = True
            except IsoFormatError:
                bootable = False
            res.update(
                dict(os_distro=os_distro, os_version=os_version, path=path,
                     bootable=bootable))
        return res

    def wipe(self, pool, name):
        """Zero out the volume contents."""
        volume = StorageVolumeModel.get_storagevolume(pool, name, self.conn)
        try:
            volume.wipePattern(libvirt.VIR_STORAGE_VOL_WIPE_ALG_ZERO, 0)
        except libvirt.libvirtError as e:
            raise OperationFailed("KCHVOL0009E",
                                  {'name': name,
                                   'err': e.get_error_message()})

    def delete(self, pool, name):
        """Remove the volume; read-only pool types are rejected."""
        pool_info = StoragePoolModel(conn=self.conn,
                                     objstore=self.objstore).lookup(pool)
        if pool_info['type'] in READONLY_POOL_TYPE:
            raise InvalidParameter("KCHVOL0012E",
                                   {'type': pool_info['type']})

        volume = StorageVolumeModel.get_storagevolume(pool, name, self.conn)
        try:
            volume.delete(0)
        except libvirt.libvirtError as e:
            raise OperationFailed("KCHVOL0010E",
                                  {'name': name,
                                   'err': e.get_error_message()})

    def resize(self, pool, name, size):
        """Resize the volume to 'size' bytes (shrinking uses SHRINK flag)."""
        volume = StorageVolumeModel.get_storagevolume(pool, name, self.conn)

        # When decreasing the storage volume capacity, the flag
        # VIR_STORAGE_VOL_RESIZE_SHRINK must be used
        flags = 0
        if volume.info()[1] > size:
            # FIXME: Even using VIR_STORAGE_VOL_RESIZE_SHRINK flag it is
            # not possible to decrease the volume capacity due a libvirt
            # bug. For reference:
            # - https://bugzilla.redhat.com/show_bug.cgi?id=1021802
            flags = libvirt.VIR_STORAGE_VOL_RESIZE_SHRINK

        try:
            volume.resize(size, flags)
        except libvirt.libvirtError as e:
            raise OperationFailed("KCHVOL0011E",
                                  {'name': name,
                                   'err': e.get_error_message()})

    def clone(self, pool, name, new_pool=None, new_name=None):
        """Clone a storage volume.

        Arguments:
        pool -- The name of the original pool.
        name -- The name of the original volume.
        new_pool -- The name of the destination pool (optional). If omitted,
            the new volume will be created on the same pool as the
            original one.
        new_name -- The name of the new volume (optional). If omitted, a new
            value based on the original volume's name will be used.

        Return:
        A Task running the clone operation.
        """
        # the same pool will be used if no pool is specified
        if new_pool is None:
            new_pool = pool

        # a default name based on the original name will be used if no name
        # is specified
        if new_name is None:
            base, ext = os.path.splitext(name)
            new_name = get_next_clone_name(self.storagevolumes.get_list(pool),
                                           base, ext)

        params = {'pool': pool,
                  'name': name,
                  'new_pool': new_pool,
                  'new_name': new_name}
        taskid = add_task(u'/plugins/kimchi/storagepools/%s/storagevolumes/%s'
                          % (pool, new_name), self._clone_task,
                          self.objstore, params)
        return self.task.lookup(taskid)

    def _clone_task(self, cb, params):
        """Asynchronous function which performs the clone operation.

        This function copies all the data inside the original volume into
        the new one.

        Arguments:
        cb -- A callback function to signal the Task's progress.
        params -- A dict with the following values:
            "pool": The name of the original pool.
            "name": The name of the original volume.
            "new_pool": The name of the destination pool.
            "new_name": The name of the new volume.
        """
        orig_pool_name = params['pool']
        orig_vol_name = params['name']
        new_pool_name = params['new_pool']
        new_vol_name = params['new_name']

        try:
            cb('setting up volume cloning')
            orig_vir_vol = StorageVolumeModel.get_storagevolume(
                orig_pool_name, orig_vol_name, self.conn)
            orig_vol = self.lookup(orig_pool_name, orig_vol_name)
            new_vir_pool = StoragePoolModel.get_storagepool(new_pool_name,
                                                            self.conn)

            cb('building volume XML')
            root_elem = E.volume()
            root_elem.append(E.name(new_vol_name))
            root_elem.append(E.capacity(unicode(orig_vol['capacity']),
                                        unit='bytes'))
            target_elem = E.target()
            target_elem.append(E.format(type=orig_vol['format']))
            root_elem.append(target_elem)
            new_vol_xml = ET.tostring(root_elem, encoding='utf-8',
                                      pretty_print=True)

            cb('cloning volume')
            new_vir_pool.createXMLFrom(new_vol_xml, orig_vir_vol, 0)
        # 'as' binding: the old 'except (...), e' form breaks on Python 3.
        except (InvalidOperation, NotFoundError,
                libvirt.libvirtError) as e:
            raise OperationFailed('KCHVOL0023E',
                                  {'name': orig_vol_name,
                                   'pool': orig_pool_name,
                                   'err': e.get_error_message()})

        new_vol = self.lookup(new_pool_name, new_vol_name)
        cb('adding volume to the object store')
        set_disk_used_by(self.objstore, new_vol['path'], [])

        cb('OK', True)
def __init__(self, **kargs):
    # Keep the object-store handle, build the task model and make sure
    # the on-disk archive directory exists before any archive is created.
    self._objstore = kargs['objstore']
    self.task = TaskModel(**kargs)
    self._create_archive_dir()
def __init__(self, **kargs):
    # Object store used to persist model metadata; the task model is
    # built from the same keyword arguments.
    self.objstore = kargs['objstore']
    self.task = TaskModel(**kargs)
def setUp(self):
    # Use a dedicated '_ginger' object-store file so tests never touch
    # the production store.
    objstore_loc = config.get_object_store() + '_ginger'
    self._objstore = ObjectStore(objstore_loc)
    self.task_model = TaskModel(objstore=self._objstore)
def setUp(self):
    # Use a dedicated '_ginger' object-store file so tests never touch
    # the production store.
    objstore_loc = config.get_object_store() + '_ginger'
    self._objstore = ObjectStore(objstore_loc)
    self.task = TaskModel(objstore=self._objstore)
def __init__(self, **kargs):
    # Keep connection/objstore handles and build the helper models; all
    # of them share the same construction arguments.
    self.conn = kargs['conn']
    self.objstore = kargs['objstore']
    self.task = TaskModel(**kargs)
    self.vmstorages = VMStoragesModel(**kargs)
    self.vmstorage = VMStorageModel(**kargs)
def __init__(self, **kargs):
    # Keep connection/objstore handles plus the capabilities and task
    # helper models, all built from the same keyword arguments.
    self.conn = kargs['conn']
    self.objstore = kargs['objstore']
    self.caps = CapabilitiesModel(**kargs)
    self.task = TaskModel(**kargs)
class VMHostDevModel(object):
    """Model for a single host device assigned (passed through) to a VM."""

    def __init__(self, **kargs):
        self.conn = kargs['conn']
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)

    def lookup(self, vmid, dev_name):
        # Search the guest XML's <hostdev> entries for dev_name and return
        # a summary dict; raises NotFoundError when it is not attached.
        dom = VMModel.get_vm(vmid, self.conn)
        xmlstr = dom.XMLDesc(0)
        root = objectify.fromstring(xmlstr)
        try:
            hostdev = root.devices.hostdev
        except AttributeError:
            # Guest has no <hostdev> section at all.
            raise NotFoundError('KCHVMHDEV0001E', {
                'vmid': vmid,
                'dev_name': dev_name
            })
        dev_model = DeviceModel(conn=self.conn)
        for e in hostdev:
            deduced_name = DeviceModel.deduce_dev_name(e, self.conn)
            if deduced_name == dev_name:
                dev_info = dev_model.lookup(dev_name)
                return {
                    'name': dev_name,
                    'type': e.attrib['type'],
                    'product': dev_info.get('product', None),
                    'vendor': dev_info.get('vendor', None),
                    'multifunction': dev_info.get('multifunction', None),
                    'vga3d': dev_info.get('vga3d', None)
                }
        raise NotFoundError('KCHVMHDEV0001E', {
            'vmid': vmid,
            'dev_name': dev_name
        })

    def delete(self, vmid, dev_name):
        # Detaching runs as an AsyncTask; the <hostdev> section is
        # validated up front so an obvious error fails synchronously.
        dom = VMModel.get_vm(vmid, self.conn)
        xmlstr = dom.XMLDesc(0)
        root = objectify.fromstring(xmlstr)
        try:
            hostdev = root.devices.hostdev
        except AttributeError:
            raise NotFoundError('KCHVMHDEV0001E', {
                'vmid': vmid,
                'dev_name': dev_name
            })
        task_params = {
            'vmid': vmid,
            'dev_name': dev_name,
            'dom': dom,
            'hostdev': hostdev
        }
        task_uri = u'/plugins/kimchi/vms/%s/hostdevs/%s' % \
            (VMModel.get_vm(vmid, self.conn).name(), dev_name)
        taskid = add_task(task_uri, self._detach_device, self.objstore,
                          task_params)
        return self.task.lookup(taskid)

    def _detach_device(self, cb, params):
        # AsyncTask body: hot-unplug the device, handling multifunction
        # PCI groups and 3D controllers specially.
        cb('Detaching device.')
        vmid = params['vmid']
        dev_name = params['dev_name']
        dom = params['dom']
        hostdev = params['hostdev']
        # Collect every attached PCI hostdev with its deduced name; used
        # later to remove devices sharing the same passthrough group.
        pci_devs = [(DeviceModel.deduce_dev_name(e, self.conn), e)
                    for e in hostdev if e.attrib['type'] == 'pci']
        dev_model = DeviceModel(conn=self.conn)
        dev_info = dev_model.lookup(dev_name)
        is_3D_device = dev_model.is_device_3D_controller(dev_info)
        # 3D controllers can only be detached while the guest is off.
        if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != "shutoff":
            raise InvalidOperation('KCHVMHDEV0006E',
                                   {'name': dev_info['name']})

        # Multifunction PCI devices must be detached together; when that
        # path handled the unplug, the per-device loop below is skipped.
        if self._hotunplug_multifunction_pci(dom, hostdev, dev_name):
            if is_3D_device:
                cb('Updating MMIO from VM...')
                devsmodel = VMHostDevsModel(conn=self.conn)
                devsmodel.update_mmio_guest(vmid, False)
            cb('OK', True)
            return

        for e in hostdev:
            if DeviceModel.deduce_dev_name(e, self.conn) == dev_name:
                xmlstr = etree.tostring(e)
                cb('Detaching device from VM...')
                dom.detachDeviceFlags(xmlstr,
                                      get_vm_config_flag(dom, mode='all'))
                if e.attrib['type'] == 'pci':
                    cb('Deleting affected PCI devices...')
                    self._delete_affected_pci_devices(dom, dev_name,
                                                      pci_devs)
                if is_3D_device:
                    cb('Updating MMIO from VM...')
                    devsmodel = VMHostDevsModel(conn=self.conn)
                    devsmodel.update_mmio_guest(vmid, False)
                break
        else:
            # for/else: no matching hostdev entry was found.
            raise NotFoundError('KCHVMHDEV0001E', {
                'vmid': vmid,
                'dev_name': dev_name
            })

        cb('OK', True)

    def _get_devices_same_addr(self, hostdev, domain, bus, slot):
        # Return XML strings of every attached PCI hostdev sharing the
        # given domain/bus/slot address (i.e. the multifunction group).
        devices = []
        for device in hostdev:
            if device.attrib['type'] != 'pci':
                continue

            address = device.source.address
            if int(address.attrib['domain'], 16) != domain or \
                    int(address.attrib['bus'], 16) != bus or \
                    int(address.attrib['slot'], 16) != slot:
                continue

            devices.append(etree.tostring(device))

        return devices

    def _hotunplug_multifunction_pci(self, dom, hostdev, dev_name):
        # dev_name format: pci_<domain>_<bus>_<slot>_<function>.
        domain, bus, slot, _ = dev_name.split('_')[1:]
        # get all devices attached to the guest in the same domain+bus+slot
        # that the one we are going to detach because they must be detached
        # together
        devices = self._get_devices_same_addr(hostdev,
                                              int(domain, 16),
                                              int(bus, 16),
                                              int(slot, 16))
        if len(devices) <= 1:
            return False

        devices_xml = '<devices>%s</devices>' % ''.join(devices)
        dom.detachDeviceFlags(devices_xml,
                              get_vm_config_flag(dom, mode='all'))

        return True

    def _delete_affected_pci_devices(self, dom, dev_name, pci_devs):
        # Detach any other attached PCI device that passthrough rules say
        # is affected by removing dev_name.
        dev_model = DeviceModel(conn=self.conn)
        try:
            dev_model.lookup(dev_name)
        except NotFoundError:
            return

        affected_names = set(
            DevicesModel(
                conn=self.conn).get_list(_passthrough_affected_by=dev_name))

        for pci_name, e in pci_devs:
            if pci_name in affected_names:
                xmlstr = etree.tostring(e)
                dom.detachDeviceFlags(xmlstr,
                                      get_vm_config_flag(dom, mode='all'))
def __init__(self, **kargs):
    # self.conn = kargs['conn']
    # Keep the object-store handle and build the task model; LsCpu wraps
    # the lscpu command for the CPU-topology queries used later.
    self.objstore = kargs['objstore']
    self.task = TaskModel(**kargs)
    self.lscpu = LsCpu()
class ArchivesModel(object):
    """Collection model for configuration-backup archives."""

    _objstore_type = 'ginger_backup_archive'
    _archive_dir = os.path.join(PluginPaths('ginger').state_dir,
                                'ginger_backups')

    def __init__(self, **kargs):
        self._objstore = kargs['objstore']
        self.task = TaskModel(**kargs)
        self._create_archive_dir()

    @classmethod
    def _create_archive_dir(cls):
        """Create the archive directory, tolerating its prior existence."""
        try:
            os.makedirs(cls._archive_dir)
        except OSError as e:
            # It's OK if archive_dir already exists
            if e.errno != errno.EEXIST:
                wok_log.error('Error creating archive dir %s: %s',
                              cls._archive_dir, e)
                raise OperationFailed('GINHBK0003E',
                                      {'dir': cls._archive_dir})

    @property
    def _default_include(self):
        # This function builds a new copy of the list for each invocation,
        # so that the caller can modify the returned list as wish without
        # worrying about changing the original reference.
        return list(cherrypy.request.app.config['backup']['default_include'])

    @property
    def _default_exclude(self):
        # See _default_include() comments for explanation.
        return list(cherrypy.request.app.config['backup']['default_exclude'])

    def _create_archive(self, params):
        """Create the tar archive and store its metadata; on any failure
        roll back both the metadata entry and the partial file."""
        error = None
        try:
            params['file'] = _tar_create_archive(self._archive_dir,
                                                 params['identity'],
                                                 params['include'],
                                                 params['exclude'])
            params['checksum'] = {'algorithm': 'sha256',
                                  'value': _sha256sum(params['file'])}

            with self._objstore as session:
                session.store(self._objstore_type, params['identity'],
                              params)
        except InvalidOperation:
            raise
        except OperationFailed:
            raise
        except Exception as e:
            # Unexpected errors are cleaned up below and re-raised with a
            # uniform reason code.
            error = e
            reason = 'GINHBK0009E'

        if error is not None:
            msg = 'Error creating archive %s: %s' % (params['identity'],
                                                     error.message)
            wok_log.error(msg)

            try:
                with self._objstore as session:
                    session.delete(self._objstore_type, params['identity'],
                                   ignore_missing=True)
            except Exception as e_session:
                wok_log.error('Error cleaning archive meta data %s. '
                              'Error: %s', params['identity'], e_session)

            if params['file'] != '':
                try:
                    os.unlink(params['file'])
                except Exception as e_file:
                    wok_log.error('Error cleaning archive file %s. '
                                  'Error: %s', params['file'], e_file)

            raise OperationFailed(reason, {'identity': params['identity']})

    def create(self, params):
        """Queue an asynchronous backup-archive creation task."""
        # uuid.uuid4() always returns a uuid.UUID instance (never unicode),
        # so the old isinstance/encode dance was dead code; str() suffices.
        archive_id = str(uuid.uuid4())
        stamp = int(time.mktime(time.localtime()))

        # Though formally we ask front-end to not send "include" at all when
        # it's empty, but in implementation we try to be tolerant.
        # Front-end can also send [] to indicate the "include" is empty.
        include = params.get('include')
        exclude = params.get('exclude', [])
        if not include:
            include = self._default_include
            if not exclude:
                exclude = self._default_exclude

        ar_params = {'identity': archive_id,
                     'include': include,
                     'exclude': exclude,
                     'description': params.get('description', ''),
                     'checksum': {},
                     'timestamp': stamp,
                     'file': ''}
        taskid = AsyncTask(u'/backup/create/%s' % (archive_id),
                           self._create_task, ar_params).id
        return self.task.lookup(taskid)

    def _create_task(self, cb, params):
        """AsyncTask body wrapping _create_archive with progress callbacks."""
        cb('entering task to create config backup')
        try:
            self._create_archive(params)
            cb('OK', True)
        except (InvalidOperation) as e:
            cb(e.message, False)
        except (OperationFailed) as e:
            cb(e.message, False)
            raise OperationFailed('GINHBK0011E',
                                  {'params': 'params', 'err': e.message})

    def _session_get_list(self, session):
        # Assume session is already locked.
        return session.get_list(self._objstore_type, sort_key='timestamp')

    def get_list(self):
        """List archives, dropping metadata whose file no longer exists."""
        with self._objstore as session:
            files = [x.split('.')[0] for x in os.listdir(self._archive_dir)]
            for db_file in self._session_get_list(session):
                if db_file not in files:
                    session.delete(ArchivesModel._objstore_type, db_file)
            return self._session_get_list(session)
class HostModel(object): def __init__(self, **kargs): # self.conn = kargs['conn'] self.objstore = kargs['objstore'] self.task = TaskModel(**kargs) self.host_info = self._get_host_info() def _get_ppc_cpu_info(self): res = {} with open('/proc/cpuinfo') as f: for line in f.xreadlines(): # Parse CPU, CPU's revision and CPU's clock information for key in ['cpu', 'revision', 'clock']: if key in line: info = line.split(':')[1].strip() if key == 'clock': value = float(info.split('MHz')[0].strip()) / 1000 else: value = info.split('(')[0].strip() res[key] = value # Power machines show, for each cpu/core, a block with # all cpu information. Here we control the scan of the # necessary information (1st block provides # everything), skipping the function when find all # information. if len(res.keys()) == 3: return "%(cpu)s (%(revision)s) @ %(clock)s GHz\ " % res return "" def _get_host_info(self): res = {} if platform.machine().startswith('ppc'): res['cpu_model'] = self._get_ppc_cpu_info() else: with open('/proc/cpuinfo') as f: for line in f.xreadlines(): if "model name" in line: res['cpu_model'] = line.split(':')[1].strip() break res['cpus'] = 0 res['memory'] = 0L # Include IBM PowerKVM name to supported distro names _sup_distros = platform._supported_dists + ('ibm_powerkvm',) # 'fedora' '17' 'Beefy Miracle' distro, version, codename = platform.linux_distribution( supported_dists=_sup_distros) res['os_distro'] = distro res['os_version'] = version res['os_codename'] = unicode(codename, "utf-8") return res def lookup(self, *name): cpus = psutil.NUM_CPUS # psutil is unstable on how to get the number of # cpus, different versions call it differently if hasattr(psutil, 'cpu_count'): cpus = psutil.cpu_count() elif hasattr(psutil, 'NUM_CPUS'): cpus = psutil.NUM_CPUS elif hasattr(psutil, '_psplatform'): for method_name in ['_get_num_cpus', 'get_num_cpus']: method = getattr(psutil._psplatform, method_name, None) if method is not None: cpus = method() break self.host_info['cpus'] = cpus 
if hasattr(psutil, 'phymem_usage'): self.host_info['memory'] = psutil.phymem_usage().total elif hasattr(psutil, 'virtual_memory'): self.host_info['memory'] = psutil.virtual_memory().total return self.host_info def swupdate(self, *name): try: swupdate = SoftwareUpdate() except: raise OperationFailed('GGBPKGUPD0004E') pkgs = swupdate.getNumOfUpdates() if pkgs == 0: raise OperationFailed('GGBPKGUPD0001E') wok_log.debug('Host is going to be updated.') taskid = add_task('/plugins/gingerbase/host/swupdate', swupdate.doUpdate, self.objstore, None) return self.task.lookup(taskid) def shutdown(self, args=None): # Check for running vms before shutdown running_vms = self.get_vmlist_bystate('running') if len(running_vms) > 0: raise OperationFailed("GGBHOST0001E") wok_log.info('Host is going to shutdown.') os.system('shutdown -h now') def reboot(self, args=None): # Check for running vms before reboot running_vms = self.get_vmlist_bystate('running') if len(running_vms) > 0: raise OperationFailed("GGBHOST0002E") wok_log.info('Host is going to reboot.') os.system('reboot') def get_vmlist_bystate(self, state='running'): try: libvirt_mod = __import__('libvirt') except Exception, e: wok_log.info("Unable to import libvirt module. Details:", e.message) # Ignore any error and assume there is no vm running in the host return [] try: conn = libvirt_mod.open(None) return [dom.name().decode('utf-8') for dom in conn.listAllDomains(0) if (DOM_STATE_MAP[dom.info()[0]] == state)] except Exception, e: wok_log.info("Unable to get virtual machines information. " "Details:", e.message) raise OperationFailed("GGBHOST0003E")
class VMHostDevModel(object):
    """Model for a single host device assigned to a guest VM
    (lookup and asynchronous detach)."""

    def __init__(self, **kargs):
        self.conn = kargs['conn']
        self.objstore = kargs['objstore']
        self.events = kargs['eventsloop']
        self.task = TaskModel(**kargs)
        self.devs_model = DevicesModel(**kargs)
        self.dev_model = DeviceModel(**kargs)
        # Progress callback of the in-flight detach task; set by
        # _detach_device and fired from the libvirt event handler.
        self._cb = None
        self.events.registerDetachDevicesEvent(
            self.conn, self._event_devices, self)

    def lookup(self, vmid, dev_name):
        """Return info about dev_name if attached to VM vmid.

        Raises NotFoundError when the VM has no such hostdev.
        """
        dom = VMModel.get_vm(vmid, self.conn)
        xmlstr = dom.XMLDesc(0)
        root = objectify.fromstring(xmlstr)
        try:
            hostdev = root.devices.hostdev
        except AttributeError:
            # Guest XML has no <hostdev> elements at all.
            raise NotFoundError('KCHVMHDEV0001E', {
                'vmid': vmid, 'dev_name': dev_name})
        for e in hostdev:
            deduced_name = DeviceModel.deduce_dev_name(e, self.conn)
            if deduced_name == dev_name:
                dev_info = self.dev_model.lookup(dev_name)
                return {
                    'name': dev_name,
                    'type': e.attrib['type'],
                    'product': dev_info.get('product', None),
                    'vendor': dev_info.get('vendor', None),
                    'multifunction': dev_info.get('multifunction', None),
                    'vga3d': dev_info.get('vga3d', None),
                }
        raise NotFoundError('KCHVMHDEV0001E', {
            'vmid': vmid, 'dev_name': dev_name})

    def delete(self, vmid, dev_name):
        """Launch an async task detaching dev_name from VM vmid."""
        dom = VMModel.get_vm(vmid, self.conn)
        xmlstr = dom.XMLDesc(0)
        root = objectify.fromstring(xmlstr)
        try:
            hostdev = root.devices.hostdev
        except AttributeError:
            raise NotFoundError('KCHVMHDEV0001E', {
                'vmid': vmid, 'dev_name': dev_name})
        task_params = {
            'vmid': vmid,
            'dev_name': dev_name,
            'dom': dom,
            'hostdev': hostdev,
            # Serializes concurrent detach tasks over the same domain.
            'lock': threading.RLock(),
        }
        task_uri = u'/plugins/kimchi/vms/%s/hostdevs/%s' % (
            VMModel.get_vm(vmid, self.conn).name(),
            dev_name,
        )
        taskid = AsyncTask(task_uri, self._detach_device, task_params).id
        return self.task.lookup(taskid)

    def _event_devices(self, conn, dom, alias, opaque):
        """ Callback to handle add/remove devices event """
        # 'opaque' is this model instance, as registered in __init__.
        if opaque._cb is None:
            wok_log.error('opaque must be valid')
            return
        wok_log.info('Device %s removed successfully' % alias)
        # Re-attach device to host if it's not managed mode
        if not opaque._managed:
            try:
                dev = conn.get().nodeDeviceLookupByName(alias)
                dev.reAttach()
            except libvirt.libvirtError as e:
                wok_log.error(
                    'Unable to attach device %s back to host. Error: %s',
                    alias,
                    str(e)
                )
        else:
            wok_log.info(
                "Device %s was attached in 'managed' mode. "
                'Skipping re-attach().' % alias
            )
        # Completes the async detach task started by _detach_device.
        opaque._cb('OK', True)

    def _detach_device(self, cb, params):
        """AsyncTask body: detach a PCI hostdev (and its multifunction
        siblings, if any) from the domain."""
        cb('Detaching device')
        self._cb = cb
        vmid = params['vmid']
        dev_name = params['dev_name']
        dom = params['dom']
        hostdev = params['hostdev']
        lock = params['lock']
        with lock:
            pci_devs = {
                DeviceModel.deduce_dev_name(e, self.conn): e
                for e in hostdev
                if e.attrib['type'] == 'pci'
            }
            dev_info = self.dev_model.lookup(dev_name)
            is_3D_device = self.dev_model.is_device_3D_controller(dev_info)
            # 3D graphic cards cannot be hot-unplugged from a live guest.
            if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != 'shutoff':
                raise InvalidOperation(
                    'KCHVMHDEV0006E', {'name': dev_info['name']})
            if not pci_devs.get(dev_name):
                raise NotFoundError(
                    'KCHVMHDEV0001E', {'vmid': vmid, 'dev_name': dev_name}
                )
            dev_name_elem = pci_devs[dev_name]
            # Remembered for _event_devices: 'managed' devices are
            # re-attached to the host by libvirt itself.
            self._managed = dev_name_elem.get('managed', 'no') == 'yes'
            # check for multifunction and detach all functions together
            try:
                multi = self.unplug_multifunction_pci(
                    dom, hostdev, dev_name_elem)
            except libvirt.libvirtError:
                multi = False
            # successfully detached all functions: finish operation
            if multi:
                if is_3D_device:
                    devsmodel = VMHostDevsModel(conn=self.conn)
                    devsmodel.update_mmio_guest(vmid, False)
                # For a shutoff guest there is no device-removed event,
                # so complete the task immediately.
                if DOM_STATE_MAP[dom.info()[0]] == 'shutoff':
                    cb('OK', True)
                return
            # detach individually
            xmlstr = etree.tostring(dev_name_elem)
            dom.detachDeviceFlags(xmlstr,
                                  get_vm_config_flag(dom, mode='all'))
            if dev_name_elem.attrib['type'] == 'pci':
                self._delete_affected_pci_devices(dom, dev_name, pci_devs)
            if is_3D_device:
                devsmodel = VMHostDevsModel(conn=self.conn)
                devsmodel.update_mmio_guest(vmid, False)
        if DOM_STATE_MAP[dom.info()[0]] == 'shutoff':
            cb('OK', True)

    def get_devices_same_addr(self, hostdevs, device_elem):
        """Return XML strings of all hostdevs sharing device_elem's PCI
        domain/bus/slot (i.e. the other functions of the same card)."""
        def elem_has_valid_address(elem):
            if (
                elem.get('type') != 'pci' or
                elem.address is None or
                elem.address.get('domain') is None or
                elem.address.get('bus') is None or
                elem.address.get('slot') is None
            ):
                return False
            return True

        if not elem_has_valid_address(device_elem):
            return []
        devices = []
        device_domain = device_elem.address.get('domain')
        device_bus = device_elem.address.get('bus')
        device_slot = device_elem.address.get('slot')
        for dev in hostdevs:
            if not elem_has_valid_address(dev):
                continue
            dev_domain = dev.address.get('domain')
            dev_bus = dev.address.get('bus')
            dev_slot = dev.address.get('slot')
            if (
                dev_domain == device_domain and
                dev_bus == device_bus and
                dev_slot == device_slot
            ):
                devices.append(etree.tostring(dev).decode('utf-8'))
        return devices

    def is_hostdev_multifunction(self, dev_elem):
        # A multifunction "anchor" is function 0x0 with multifunction='on'.
        if (
            dev_elem.address is None or
            dev_elem.address.get('multifunction') is None or
            dev_elem.address.get('function') is None
        ):
            return False
        is_multi = (
            dev_elem.address.get('multifunction') == 'on' and
            dev_elem.address.get('function') == '0x0'
        )
        return is_multi

    def unplug_multifunction_pci(self, dom, hostdevs, dev_elem):
        """Detach all functions of a multifunction PCI card in one call.

        Returns True when a combined detach was issued, False when the
        device is not multifunction (caller then detaches individually).
        """
        if not self.is_hostdev_multifunction(dev_elem):
            return False
        devices = self.get_devices_same_addr(hostdevs, dev_elem)
        if len(devices) <= 1:
            return False
        devices_xml = '<devices>%s</devices>' % ''.join(devices)
        dom.detachDeviceFlags(devices_xml,
                              get_vm_config_flag(dom, mode='all'))
        return True

    def _delete_affected_pci_devices(self, dom, dev_name, pci_devs):
        # Also detach every attached PCI device in the same IOMMU group
        # as dev_name, since they cannot be split across host/guest.
        try:
            self.dev_model.lookup(dev_name)
        except NotFoundError:
            return
        affected_names = set(
            DevicesModel(conn=self.conn).get_list(
                _passthrough_affected_by=dev_name)
        )
        for pci_name, e in pci_devs.items():
            if pci_name in affected_names:
                xmlstr = etree.tostring(e)
                dom.detachDeviceFlags(
                    xmlstr, get_vm_config_flag(dom, mode='all'))
class SwapsModel(object):
    """
    Model representing the collection of swap devices
    """

    def __init__(self, **kargs):
        self.objstore = kargs['objstore']
        self.task = TaskModel(**kargs)

    def create(self, params):
        """Validate params and launch an async task creating a swap device.

        :param params: dict with 'type' ('file' or 'device'), 'file_loc',
                       and for file-backed swap also 'size'.
        :return: task lookup dict.
        :raises InvalidParameter: on missing/invalid parameters.
        """
        file_loc = ''
        if 'file_loc' not in params or not params['file_loc']:
            raise InvalidParameter('GINSP00001E')
        if 'type' not in params:
            raise InvalidParameter('GINSP00002E')
        else:
            if params['type'] == 'file' and 'size' not in params:
                raise InvalidParameter('GINSP00003E')
        if params['type'] == 'device' or params['type'] == 'file':
            taskid = AsyncTask(u'/swaps/file_loc/%s' % (file_loc),
                               self._create_task, params).id
            return self.task.lookup(taskid)
        else:
            raise InvalidParameter('GINSP00004E')

    def _create_task(self, cb, params):
        """AsyncTask body: create the backing file (file type), fix the
        partition type (device type), then mkswap/swapon/persist it."""
        # Renamed from 'type' to avoid shadowing the builtin.
        swap_type = params['type']
        file_loc = params['file_loc']
        cb('entering task to create swap file')
        with RollbackContext() as rollback:
            try:
                if swap_type == 'file':
                    cb('create a file')
                    size = params['size']
                    utils._create_file(size, file_loc)
            except (InvalidParameter) as e:
                cb('OK', False)
                raise InvalidParameter("GINSP00020E")
            except (OperationFailed) as e:
                cb('OK', False)
                raise OperationFailed('GINSP00005E',
                                      {'file_loc': file_loc,
                                       'err': e.message})
            try:
                if swap_type == 'device':
                    dev = file_loc.split("/")[-1]
                    # Device-mapper nodes need resolving to their dm name.
                    if dev.startswith('dm-'):
                        dmname = utils.get_dm_name(file_loc.split("/")[-1])
                    else:
                        dmname = dev
                    part = PartitionModel(objstore=self.objstore)
                    dev_type = part.lookup(dmname)
                    if dev_type['type'] == 'part':
                        # '82' is the hex partition type for Linux Swap.
                        part.change_type(dmname, '82')
                cb('create swap from file')
                utils._make_swap(file_loc)
                cb('activate swap device')
                utils._activate_swap(file_loc)
                cb('persist swap device')
                import fs_utils
                fs_utils.persist_swap_dev(file_loc)
                cb('OK', True)
            except (OperationFailed) as e:
                # Undo the partially-created swap on failure.
                rollback.prependDefer(SwapsModel.delete_swap_file, file_loc)
                cb('OK', False)
                raise OperationFailed('GINSP00005E',
                                      {'file_loc': file_loc,
                                       'err': e.message})

    @staticmethod
    def delete_swap_file(file_loc):
        """
        Method to delete a swap device
        :param file_loc: location of the file or device path
        :return:
        """
        try:
            utils._swapoff_device(file_loc)
            # Remove only file type swap devices from filesystem
            if not file_loc.startswith('/dev'):
                os.remove(file_loc)
        except Exception as e:
            raise OperationFailed('GINSP00006E', {'err': e.message})

    def get_list(self):
        """Return the parsed list of active swap devices (/proc/swaps)."""
        out, err, rc = run_command(["cat", "/proc/swaps"])
        if rc != 0:
            raise OperationFailed("GINSP00007E", {'err': err})
        return utils._get_swapdev_list_parser(out)
class VMHostDevsModel(object):
    """Collection model for host devices (PCI/SCSI/USB passthrough)
    attached to a guest VM."""

    def __init__(self, **kargs):
        self.conn = kargs['conn']
        self.objstore = kargs['objstore']
        self.events = kargs['eventsloop']
        self.caps = CapabilitiesModel(**kargs)
        self.devs_model = DevicesModel(**kargs)
        self.dev_model = DeviceModel(**kargs)
        self.task = TaskModel(**kargs)
        # Progress callback of the in-flight attach task; set by the
        # _attach_*_device task bodies, fired from the event handler.
        self._cb = None
        self.events.registerAttachDevicesEvent(
            self.conn, self._event_devices, self)

    def get_list(self, vmid):
        """Return the names of all hostdevs attached to VM vmid."""
        dom = VMModel.get_vm(vmid, self.conn)
        xmlstr = dom.XMLDesc(0)
        root = objectify.fromstring(xmlstr)
        try:
            hostdev = root.devices.hostdev
        except AttributeError:
            return []
        return [DeviceModel.deduce_dev_name(e, self.conn) for e in hostdev]

    def _passthrough_device_validate(self, dev_name):
        # Only devices the host reports as passthrough-capable may be used.
        eligible_dev_names = self.devs_model.get_list(_passthrough='true')
        if dev_name not in eligible_dev_names:
            raise InvalidParameter('KCHVMHDEV0002E', {'dev_name': dev_name})

    def _event_devices(self, conn, dom, alias, opaque):
        """ Callback to handle add/remove devices event """
        if opaque._cb is None:
            wok_log.error('opaque must be valid')
            return
        # BUGFIX: 'successfuly' -> 'successfully' in the log message.
        wok_log.info('Device %s added successfully' % alias)
        opaque._cb('OK', True)

    def create(self, vmid, params):
        """Start an async task attaching device params['name'] to vmid."""
        dev_name = params['name']
        dev_info = self.dev_model.lookup(dev_name)
        if dev_info['device_type'] == 'pci':
            taskid = AsyncTask(
                u'/plugins/kimchi/vms/%s/hostdevs/'
                % VMModel.get_vm(vmid, self.conn).name(),
                self._attach_pci_device,
                {'vmid': vmid, 'dev_info': dev_info,
                 'lock': threading.RLock()},
            ).id
            return self.task.lookup(taskid)
        # Non-PCI devices must be detached from the host driver first.
        with RollbackContext() as rollback:
            try:
                dev = self.conn.get().nodeDeviceLookupByName(dev_name)
                dev.dettach()
            except Exception:
                raise OperationFailed('KCHVMHDEV0005E', {'name': dev_name})
            else:
                rollback.prependDefer(dev.reAttach)
            rollback.commitAll()
        # NOTE(review): the task target here is a string
        # ('_attach_<type>_device'), not a bound method — confirm that
        # AsyncTask resolves string targets against the model.
        taskid = AsyncTask(
            u'/plugins/kimchi/vms/%s/hostdevs/'
            % VMModel.get_vm(vmid, self.conn).name(),
            '_attach_%s_device' % dev_info['device_type'],
            {'vmid': vmid, 'dev_info': dev_info,
             'lock': threading.RLock()},
        ).id
        return self.task.lookup(taskid)

    def _get_pci_devices_xml(self, pci_infos, slot, driver):
        """Build a combined <devices> XML for all functions of a
        multifunction PCI card."""
        hostdevs = ''
        # all devices included in the xml will be sorted in reverse (the
        # function 0 will be the last one) and will include the guest
        # address details
        for dev_info in sorted(pci_infos, key=itemgetter('function'),
                               reverse=True):
            dev_info['detach_driver'] = driver
            hostdevs += self._get_pci_device_xml(dev_info, slot, True)
        return '<devices>%s</devices>' % hostdevs

    def have_usb_controller(self, vmid):
        """True if the guest has a USB controller model supporting PCI
        hotplug (required for vfio hotplug on Power)."""
        dom = VMModel.get_vm(vmid, self.conn)
        root = objectify.fromstring(dom.XMLDesc(0))
        try:
            controllers = root.devices.controller
        except AttributeError:
            return False
        for controller in controllers:
            if 'model' not in controller.attrib:
                continue
            if (
                controller.attrib['type'] == 'usb' and
                controller.attrib['model'] in USB_MODELS_PCI_HOTPLUG
            ):
                return True
        return False

    def _get_pci_device_xml(self, dev_info, slot, is_multifunction):
        """Build the <hostdev> XML for one PCI device/function."""
        if 'detach_driver' not in dev_info:
            dev_info['detach_driver'] = 'kvm'
        source = E.source(
            E.address(
                domain=str(dev_info['domain']),
                bus=str(dev_info['bus']),
                slot=str(dev_info['slot']),
                function=str(dev_info['function']),
            )
        )
        driver = E.driver(name=dev_info['detach_driver'])
        if is_multifunction:
            # Function 0 anchors the multifunction slot in the guest.
            if dev_info['function'] == 0:
                multi = E.address(
                    type='pci',
                    domain='0',
                    bus='0',
                    slot=str(slot),
                    function=str(dev_info['function']),
                    multifunction='on',
                )
            else:
                multi = E.address(
                    type='pci',
                    domain='0',
                    bus='0',
                    slot=str(slot),
                    function=str(dev_info['function']),
                )
            host_dev = E.hostdev(
                source, driver, multi,
                mode='subsystem', type='pci', managed='yes'
            )
        else:
            host_dev = E.hostdev(
                source, driver, mode='subsystem', type='pci', managed='yes'
            )
        return etree.tostring(host_dev)

    @staticmethod
    def _validate_pci_passthrough_env():
        """Check the host supports PCI passthrough (IOMMU groups,
        SELinux boolean on older distros)."""
        # Linux kernel < 3.5 doesn't provide /sys/kernel/iommu_groups
        if os.path.isdir('/sys/kernel/iommu_groups'):
            if not glob.glob('/sys/kernel/iommu_groups/*'):
                raise InvalidOperation('KCHVMHDEV0003E')
        # Enable virt_use_sysfs on RHEL6 and older distributions
        # In recent Fedora, there is no virt_use_sysfs.
        out, err, rc = run_command(
            ['getsebool', 'virt_use_sysfs'], silent=True)
        if rc == 0 and out.rstrip('\n') != 'virt_use_sysfs --> on':
            out, err, rc = run_command(
                ['setsebool', '-P', 'virt_use_sysfs=on'])
            if rc != 0:
                wok_log.warning('Unable to turn on sebool virt_use_sysfs')

    def _available_slot(self, dom):
        """Return the first guest PCI slot number not already in use."""
        xmlstr = dom.XMLDesc(0)
        root = objectify.fromstring(xmlstr)
        try:
            devices = root.devices
            slots = [
                self.dev_model._toint(dev.attrib['slot'])
                for dev in devices.findall('.//address')
                if 'slot' in dev.attrib
            ]
        except AttributeError:
            return 1
        slots = sorted(slots)
        free = 0
        # Find the first gap in the sorted slot sequence.
        for free, slot in enumerate(slots, start=1):
            if free < slot:
                return free
        return free + 1

    def _attach_pci_device(self, cb, params):
        """AsyncTask body: attach a PCI device (and its whole IOMMU
        group) to the guest."""
        cb('Attaching PCI device')
        self._cb = cb
        vmid = params['vmid']
        dev_info = params['dev_info']
        lock = params['lock']
        try:
            self._passthrough_device_validate(dev_info['name'])
        except InvalidParameter as e:
            cb(str(e), False)
            raise
        with lock:
            try:
                self._validate_pci_passthrough_env()
            except InvalidOperation as e:
                cb(str(e), False)
                raise
            dom = VMModel.get_vm(vmid, self.conn)
            driver = 'vfio' if self.caps.kernel_vfio else 'kvm'
            # 'vfio' systems requires a usb controller in order to support
            # pci hotplug on Power.
            if (
                driver == 'vfio' and
                platform.machine().startswith('ppc') and
                DOM_STATE_MAP[dom.info()[0]] != 'shutoff' and
                not self.have_usb_controller(vmid)
            ):
                msg = WokMessage('KCHVMHDEV0008E', {'vmid': vmid})
                cb(msg.get_text(), False)
                raise InvalidOperation('KCHVMHDEV0008E', {'vmid': vmid})
            # Attach all PCI devices in the same IOMMU group
            affected_names = self.devs_model.get_list(
                _passthrough_affected_by=dev_info['name']
            )
            passthrough_names = self.devs_model.get_list(
                _cap='pci', _passthrough='true'
            )
            group_names = list(set(affected_names) & set(passthrough_names))
            pci_infos = [self.dev_model.lookup(dev_name)
                         for dev_name in group_names]
            pci_infos.append(dev_info)
            pci_infos = sorted(pci_infos, key=itemgetter('name'))
            # does not allow hot-plug of 3D graphic cards
            is_3D_device = self.dev_model.is_device_3D_controller(dev_info)
            if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != 'shutoff':
                msg = WokMessage('KCHVMHDEV0006E',
                                 {'name': dev_info['name']})
                cb(msg.get_text(), False)
                raise InvalidOperation(
                    'KCHVMHDEV0006E', {'name': dev_info['name']})
            # all devices in the group that is going to be attached to the
            # vm must be detached from the host first
            self._attach_all_devices(pci_infos)
            # when attaching a 3D graphic device it might be necessary to
            # increase the window size memory in order to be able to attach
            # more than one device to the same guest
            if is_3D_device:
                self.update_mmio_guest(vmid, True)
            self._attach_multifunction_devices(dom, pci_infos, driver, vmid)
        if DOM_STATE_MAP[dom.info()[0]] == 'shutoff':
            cb('OK', True)

    def _attach_multifunction_devices(self, dom, pci_infos, driver, vmid):
        """Attach functions together when possible, else one by one."""
        slot = 0
        is_multifunction = len(pci_infos) > 1
        device_flags = get_vm_config_flag(dom, mode='all')
        with RollbackContext() as rollback:
            # multifuction: try to attach all functions together within one
            # xml file. It requires libvirt support.
            if is_multifunction:
                # search for the first available slot in guest xml
                slot = self._available_slot(dom)
                xmlstr = self._get_pci_devices_xml(pci_infos, slot, driver)
                try:
                    dom.attachDeviceFlags(xmlstr, device_flags)
                except libvirt.libvirtError:
                    # If operation fails, we try the other way, where each
                    # function is attached individually
                    pass
                else:
                    rollback.prependDefer(
                        dom.detachDeviceFlags, xmlstr, device_flags
                    )
                    rollback.commitAll()
                    if DOM_STATE_MAP[dom.info()[0]] == 'shutoff':
                        self._cb('OK', True)
                    return
            # attach each function individually (multi or single function)
            for pci_info in pci_infos:
                pci_info['detach_driver'] = driver
                xmlstr = self._get_pci_device_xml(
                    pci_info, slot, is_multifunction)
                try:
                    dom.attachDeviceFlags(xmlstr, device_flags)
                except libvirt.libvirtError:
                    msg = WokMessage(
                        'KCHVMHDEV0007E',
                        {'device': pci_info['name'], 'vm': vmid}
                    )
                    self._cb(msg.get_text(), False)
                    wok_log.error(
                        'Failed to attach host device %s to VM %s: \n%s',
                        pci_info['name'],
                        vmid,
                        xmlstr,
                    )
                    raise
                rollback.prependDefer(
                    dom.detachDeviceFlags, xmlstr, device_flags)
            rollback.commitAll()

    def _attach_all_devices(self, pci_infos):
        """Detach every device in pci_infos from its host driver."""
        with RollbackContext() as rollback:
            for pci_info in pci_infos:
                try:
                    dev = self.conn.get().nodeDeviceLookupByName(
                        pci_info['name'])
                    dev.dettach()
                except Exception:
                    msg = WokMessage('KCHVMHDEV0005E',
                                     {'name': pci_info['name']})
                    self._cb(msg.get_text(), False)
                    raise OperationFailed(
                        'KCHVMHDEV0005E', {'name': pci_info['name']}
                    )
                else:
                    rollback.prependDefer(dev.reAttach)
            rollback.commitAll()

    def _count_3D_devices_attached(self, dom):
        """Return how many attached PCI hostdevs are 3D graphic cards."""
        counter = 0
        root = objectify.fromstring(dom.XMLDesc(0))
        try:
            hostdev = root.devices.hostdev
        except AttributeError:
            return counter
        for device in hostdev:
            if device.attrib['type'] != 'pci':
                continue
            name = DeviceModel.deduce_dev_name(device, self.conn)
            info = self.dev_model.lookup(name)
            if 'vga3d' in info and info['vga3d']:
                counter += 1
        return counter

    def update_mmio_guest(self, vmid, is_attaching):
        """Adjust the guest's PCI window size when 3D cards come and go."""
        dom = VMModel.get_vm(vmid, self.conn)
        # get the number of 3D graphic cards already attached to the guest
        # based on this number we will decide if the memory size will be
        # increased or not
        counter = self._count_3D_devices_attached(dom)
        if counter == 0 and is_attaching:
            return
        size = 0
        if is_attaching:
            # suppose this is the 3rd graphic card to be attached to the
            # same guest, counter will be 2+1 (2 existing + this attachment)
            # times 32G (0x80000000)
            size = hex((counter + 1) * WINDOW_SIZE_BAR)
        else:
            size = hex(counter * WINDOW_SIZE_BAR)
        # if the guest already has the xml file we will simply update the
        # value, otherwise we will add the new field
        new_xml = self._update_win_memory_size(dom, counter, size)
        if new_xml is None and is_attaching:
            new_xml = self._add_win_memory_size(dom, size)
        # update the XML
        if new_xml is not None:
            self.conn.get().defineXML(new_xml)

    def _update_win_memory_size(self, dom, counter, wnd_size):
        """Update (or drop) the window-size argument in
        <qemu:commandline>; returns new XML or None if absent."""
        root = objectify.fromstring(dom.XMLDesc(0))
        # look for the existing argument in <qemu:commandline> and try
        # to update the value (or remove if there is only one (or none)
        # graphic card attached.
        cmdline = root.findall('{%s}commandline' % QEMU_NAMESPACE)
        for line in cmdline:
            for arg in line.iterchildren():
                if not arg.values()[0].startswith(CMDLINE_FIELD_NAME):
                    continue
                if counter > 1:
                    arg.set('value', CMDLINE_FIELD_NAME + '=' + wnd_size)
                else:
                    # drop both the '-global' flag and its value argument
                    line.remove(arg.getprevious())
                    line.remove(arg)
                return etree.tostring(root, encoding='unicode',
                                      pretty_print=True)
        return None

    def _add_win_memory_size(self, dom, wnd_size):
        """Add the window-size argument to the guest's
        <qemu:commandline>, creating the element if missing."""
        root = objectify.fromstring(dom.XMLDesc(0))
        val = CMDLINE_FIELD_NAME + '=' + wnd_size
        cmdline = root.find('{%s}commandline' % QEMU_NAMESPACE)
        # <qemu:commandline> doesn't exist, create the full commandline xml
        # with the required values and return
        if cmdline is None:
            args = {}
            args['-global'] = val
            root.append(etree.fromstring(get_qemucmdline_xml(args)))
            return etree.tostring(root, encoding='unicode',
                                  pretty_print=True)
        # <qemu:commandline> exists but there is no <qemu:arg value global>
        # so, we add those missing arguments inside the exising cmdline
        EM = ElementMaker(namespace=QEMU_NAMESPACE,
                          nsmap={'qemu': QEMU_NAMESPACE})
        cmdline.append(EM.arg(value='-global'))
        cmdline.append(EM.arg(value=val))
        return etree.tostring(root, encoding='unicode', pretty_print=True)

    def _get_scsi_device_xml(self, dev_info):
        """Build the <hostdev> XML for a SCSI device."""
        adapter = E.adapter(name=('scsi_host%s' % dev_info['host']))
        address = E.address(
            type='scsi',
            bus=str(dev_info['bus']),
            target=str(dev_info['target']),
            unit=str(dev_info['lun']),
        )
        host_dev = E.hostdev(
            E.source(adapter, address),
            mode='subsystem', type='scsi', sgio='unfiltered'
        )
        return etree.tostring(host_dev)

    def _attach_scsi_device(self, cb, params):
        """AsyncTask body: attach a SCSI device to the guest."""
        cb('Attaching SCSI device...')
        self._cb = cb
        vmid = params['vmid']
        dev_info = params['dev_info']
        lock = params['lock']
        try:
            self._passthrough_device_validate(dev_info['name'])
        except InvalidParameter as e:
            cb(str(e), False)
            raise
        with lock:
            dom = VMModel.get_vm(vmid, self.conn)
            with RollbackContext() as rollback:
                xmlstr = self._get_scsi_device_xml(dev_info)
                device_flags = get_vm_config_flag(dom, mode='all')
                try:
                    cb('Attaching device to VM')
                    dom.attachDeviceFlags(xmlstr, device_flags)
                except libvirt.libvirtError:
                    msg = WokMessage(
                        'KCHVMHDEV0007E',
                        {'device': dev_info['name'], 'vm': vmid}
                    )
                    cb(msg.get_text(), False)
                    wok_log.error(
                        'Failed to attach host device %s to VM %s: \n%s',
                        dev_info['name'],
                        vmid,
                        xmlstr,
                    )
                    raise
                rollback.prependDefer(
                    dom.detachDeviceFlags, xmlstr, device_flags)
                rollback.commitAll()
        if DOM_STATE_MAP[dom.info()[0]] == 'shutoff':
            cb('OK', True)

    def _get_usb_device_xml(self, dev_info):
        """Build the <hostdev> XML for a USB device."""
        source = E.source(
            E.vendor(id=dev_info['vendor']['id']),
            E.product(id=dev_info['product']['id']),
            E.address(bus=str(dev_info['bus']),
                      device=str(dev_info['device'])),
            startupPolicy='optional',
        )
        # BUGFIX: the hostdev attribute was misspelled 'ype' instead of
        # 'type', which emitted an invalid <hostdev ype="usb"> element.
        host_dev = E.hostdev(source, mode='subsystem',
                             type='usb', managed='yes')
        return etree.tostring(host_dev)

    def _attach_usb_device(self, cb, params):
        """AsyncTask body: attach a USB device to the guest."""
        cb('Attaching USB device...')
        self._cb = cb
        vmid = params['vmid']
        dev_info = params['dev_info']
        dom = VMModel.get_vm(vmid, self.conn)
        lock = params['lock']
        try:
            self._passthrough_device_validate(dev_info['name'])
        except InvalidParameter as e:
            cb(str(e), False)
            raise
        with lock:
            with RollbackContext() as rollback:
                xmlstr = self._get_usb_device_xml(dev_info)
                device_flags = get_vm_config_flag(dom, mode='all')
                try:
                    cb('Attaching device to VM')
                    dom.attachDeviceFlags(xmlstr, device_flags)
                except libvirt.libvirtError:
                    msg = WokMessage(
                        'KCHVMHDEV0007E',
                        {'device': dev_info['name'], 'vm': vmid}
                    )
                    cb(msg.get_text(), False)
                    wok_log.error(
                        'Failed to attach host device %s to VM %s: \n%s',
                        dev_info['name'],
                        vmid,
                        xmlstr,
                    )
                    raise
                rollback.prependDefer(
                    dom.detachDeviceFlags, xmlstr, device_flags)
                rollback.commitAll()
        if DOM_STATE_MAP[dom.info()[0]] == 'shutoff':
            cb('OK', True)
def __init__(self, **kargs):
    """Keep a handle to the object store and build the shared task model."""
    self.objstore = kargs['objstore']
    self.task = TaskModel(**kargs)
class ArchivesModel(object):
    """Collection model for ginger configuration-backup archives."""

    # Object-store bucket under which archive metadata records are kept.
    _objstore_type = 'ginger_backup_archive'
    # On-disk directory holding the tar archive files themselves.
    _archive_dir = os.path.join(
        PluginPaths('ginger').state_dir, 'ginger_backups')

    def __init__(self, **kargs):
        self._objstore = kargs['objstore']
        self.task = TaskModel(**kargs)
        self._create_archive_dir()

    @classmethod
    def _create_archive_dir(cls):
        # Create the archive directory, tolerating a pre-existing one.
        try:
            os.makedirs(cls._archive_dir)
        except OSError as e:
            # It's OK if archive_dir already exists
            if e.errno != errno.EEXIST:
                wok_log.error('Error creating archive dir %s: %s',
                              cls._archive_dir, e)
                raise OperationFailed('GINHBK0003E',
                                      {'dir': cls._archive_dir})

    @property
    def _default_include(self):
        # This function builds a new copy of the list for each invocation,
        # so that the caller can modify the returned list as wish without
        # worrying about changing the original reference.
        return list(cherrypy.request.app.config['backup']['default_include'])

    @property
    def _default_exclude(self):
        # See _default_include() comments for explanation.
        return list(cherrypy.request.app.config['backup']['default_exclude'])

    def _create_archive(self, params):
        # Build the tar archive, checksum it and persist its metadata.
        # On failure, both the metadata record and the partial archive
        # file are cleaned up before re-raising as OperationFailed.
        error = None
        try:
            params['file'] = _tar_create_archive(self._archive_dir,
                                                 params['identity'],
                                                 params['include'],
                                                 params['exclude'])
            params['checksum'] = {
                'algorithm': 'sha256',
                'value': _sha256sum(params['file'])
            }
            with self._objstore as session:
                session.store(self._objstore_type, params['identity'],
                              params)
        except InvalidOperation:
            raise
        except OperationFailed:
            raise
        except Exception as e:
            error = e
            reason = 'GINHBK0009E'
        if error is not None:
            msg = 'Error creating archive %s: %s' % (params['identity'],
                                                     error.message)
            wok_log.error(msg)
            # Best-effort cleanup: failures here are logged, not raised,
            # so the original error is the one surfaced to the caller.
            try:
                with self._objstore as session:
                    session.delete(self._objstore_type, params['identity'],
                                   ignore_missing=True)
            except Exception as e_session:
                wok_log.error(
                    'Error cleaning archive meta data %s. '
                    'Error: %s', params['identity'], e_session)
            if params['file'] != '':
                try:
                    os.unlink(params['file'])
                except Exception as e_file:
                    wok_log.error(
                        'Error cleaning archive file %s. '
                        'Error: %s', params['file'], e_file)
            raise OperationFailed(reason, {'identity': params['identity']})

    def create(self, params):
        # Launch an async task that creates a new archive; returns the
        # task lookup dict so callers can poll for completion.
        uuid_uuid4 = uuid.uuid4()
        if isinstance(uuid_uuid4, unicode):
            uuid_uuid4 = uuid_uuid4.encode('utf-8')
        archive_id = str(uuid_uuid4)
        stamp = int(time.mktime(time.localtime()))
        # Though formally we ask front-end to not send "include" at all when
        # it's empty, but in implementation we try to be tolerant.
        # Front-end can also send [] to indicate the "include" is empty.
        include = params.get('include')
        exclude = params.get('exclude', [])
        if not include:
            include = self._default_include
        if not exclude:
            exclude = self._default_exclude
        ar_params = {
            'identity': archive_id,
            'include': include,
            'exclude': exclude,
            'description': params.get('description', ''),
            'checksum': {},
            'timestamp': stamp,
            'file': ''
        }
        taskid = AsyncTask(u'/backup/create/%s' % (archive_id),
                           self._create_task, ar_params).id
        return self.task.lookup(taskid)

    def _create_task(self, cb, params):
        # AsyncTask body: report progress through cb() and surface failures.
        cb('entering task to create config backup')
        try:
            self._create_archive(params)
            cb('OK', True)
        except (InvalidOperation) as e:
            cb(e.message, False)
        except (OperationFailed) as e:
            cb(e.message, False)
            raise OperationFailed('GINHBK0011E', {
                'params': 'params',
                'err': e.message
            })

    def _session_get_list(self, session):
        # Assume session is already locked.
        return session.get_list(self._objstore_type, sort_key='timestamp')

    def get_list(self):
        # Drop metadata entries whose backing file disappeared from disk,
        # then return the remaining archives sorted by timestamp.
        with self._objstore as session:
            files = [x.split('.')[0] for x in os.listdir(self._archive_dir)]
            for db_file in self._session_get_list(session):
                if db_file not in files:
                    session.delete(ArchivesModel._objstore_type, db_file)
            return self._session_get_list(session)
def __init__(self, **kargs):
    """Set up the task model and cache host facts once at construction."""
    # Connection handle intentionally left unused for now.
    # self.conn = kargs['conn']
    self.task = TaskModel(**kargs)
    self.objstore = kargs['objstore']
    self.host_info = self._get_host_info()