def _add_zfs_pool(self, line, block_devices):
    """
    Parse one line of `zpool list` style output ("name size guid"),
    collect the pool's datasets/zvols, persist them, and update state.

    :param line: whitespace separated "name size_str uuid" record
    :param block_devices: block-device helper used to resolve major:minors
    :return: None (returns early if the pool's drives cannot be resolved)
    """
    name, size_str, uuid = line.split()

    size = util.human_to_bytes(size_str)

    drive_mms = block_devices.paths_to_major_minors(
        _get_all_zpool_devices(name))

    # Without resolvable drives we cannot describe the pool; bail out.
    if drive_mms is None:
        daemon_log.warning("Could not find major minors for zpool '%s'" % name)
        return

    datasets = _get_zpool_datasets(name, drive_mms)
    zvols = _get_zpool_zvols(name, drive_mms, block_devices)

    pool_md = {
        "name": name,
        "path": name,
        # fabricate a major:minor. Do we ever use them as numbers?
        "block_device": "zfspool:%s" % name,
        "uuid": uuid,
        "size": size,
        "drives": drive_mms
    }

    # write new data to store (_pool/datasets/Zvols)
    write_to_store(uuid,
                   {'pool': pool_md,
                    'datasets': datasets,
                    'zvols': zvols})

    self._update_pool_or_datasets(block_devices, pool_md, datasets, zvols)
def _get_zpool_datasets(pool_name, drives):
    """
    Retrieve datasets belonging to a zpool.

    Runs `zfs list` and keeps only datasets whose name is prefixed by
    ``pool_name + '/'``, keyed by their guid.

    :param pool_name: name of the owning zpool
    :param drives: major:minor identifiers recorded for each dataset
    :return: dict mapping dataset guid -> metadata dict
    """
    out = AgentShell.try_run(['zfs', 'list', '-H', '-o', 'name,avail,guid'])

    zpool_datasets = {}

    if out.strip() == "no datasets available":
        return zpool_datasets

    for line in out.split('\n'):
        if not line:
            continue

        name, size_str, uuid = line.split()
        size = util.human_to_bytes(size_str)

        if not name.startswith("%s/" % pool_name):
            continue

        # This will need discussion, but for now fabricate a
        # major:minor. Do we ever use them as numbers?
        major_minor = "zfsset:%s" % uuid

        zpool_datasets[uuid] = {
            "name": name,
            "path": name,
            "block_device": major_minor,
            "uuid": uuid,
            "size": size,
            "drives": drives
        }

        daemon_log.debug("zfs mount '%s'" % name)

    return zpool_datasets
def _add_zfs_pool(self, line, block_devices):
    """
    Parse one line of pool output ("name size guid health") and register
    the pool (plus any datasets/zvols) in this instance's caches.

    Pools whose health is not in ``self.acceptable_health`` are ignored.
    A pool with no datasets and no zvols is recorded as a usable block
    device; otherwise only its datasets/zvols are registered.

    :param line: whitespace separated "pool size_str uuid health" record
    :param block_devices: block-device helper; its ``block_device_nodes``
        may be mutated for an empty pool
    :return: None (returns early if the pool's drives cannot be resolved)
    """
    pool, size_str, uuid, health = line.split()

    if health in self.acceptable_health:
        size = util.human_to_bytes(size_str)

        drive_mms = block_devices.paths_to_major_minors(
            self._get_all_zpool_devices(pool))

        if drive_mms is None:
            # Fix: Logger.warn is a deprecated alias of Logger.warning
            # (and the sibling module-level _add_zfs_pool already uses
            # .warning) — use the canonical method.
            daemon_log.warning("Could not find major minors for zpool '%s'"
                               % pool)
            return

        # This will need discussion, but for now fabricate a major:minor.
        # Do we ever use them as numbers?
        block_device = "zfspool:%s" % pool

        datasets = self._get_zpool_datasets(pool, uuid, drive_mms,
                                            block_devices)
        zvols = self._get_zpool_zvols(pool, drive_mms, uuid, block_devices)

        if (datasets == {}) and (zvols == {}):
            block_devices.block_device_nodes[block_device] = {
                'major_minor': block_device,
                'path': pool,
                'serial_80': None,
                'serial_83': None,
                'size': size,
                'filesystem_type': None,
                'parent': None
            }

            # Do this to cache the device, type see blockdevice and
            # filesystem for info.
            BlockDevice('zfs', pool)
            FileSystem('zfs', pool)

            self._zpools[uuid] = {
                "name": pool,
                "path": pool,
                "block_device": block_device,
                "uuid": uuid,
                "size": size,
                "drives": drive_mms,
            }

        if datasets != {}:
            self._datasets.update(datasets)

        if zvols != {}:
            self._zvols.update(zvols)
def _get_lvs(self, vg_name):
    """
    Yield the logical volumes of one volume group.

    Runs ``lvs`` for *vg_name* and yields one tuple per LV.

    :param vg_name: volume group to list
    :return: generator of (name, uuid, size_bytes, path) tuples; yields
        nothing if the ``lvs`` binary is not installed
    """
    try:
        out = AgentShell.try_run(["lvs", "--units", "b", "--noheadings",
                                  "-o", "lv_name,lv_uuid,lv_size,lv_path",
                                  vg_name])
    except OSError as os_error:
        # If lvs is not installed then there are no logical volumes.
        if os_error.errno == errno.ENOENT:
            return
        raise

    for record in out.split("\n"):
        if not record:
            continue
        name, uuid, size_str, path = record.split()
        yield (name, uuid, util.human_to_bytes(size_str), path)
def _get_vgs(self):
    """
    Yield the LVM volume groups present on this host.

    Runs ``vgs`` and yields one tuple per volume group.

    :return: generator of (name, uuid, size_bytes) tuples; yields nothing
        if the ``vgs`` binary is not installed
    """
    try:
        out = AgentShell.try_run(["vgs", "--units", "b", "--noheadings",
                                  "-o", "vg_name,vg_uuid,vg_size"])
    except OSError as os_error:
        # If vgs is not installed then there are no volume groups.
        if os_error.errno == errno.ENOENT:
            return
        raise

    for record in out.split("\n"):
        if not record:
            continue
        name, uuid, size_str = record.split()
        yield (name, uuid, util.human_to_bytes(size_str))
def _get_zpool_datasets(self, pool_name, zpool_uuid, drives, block_devices):
    """
    Retrieve datasets belonging to a zpool and register them as block
    device nodes.

    Runs `zfs list`, keeps datasets prefixed by ``pool_name + '/'``,
    records each in ``block_devices.block_device_nodes`` under a
    fabricated major:minor, and returns them keyed by guid.

    :param pool_name: name of the owning zpool
    :param zpool_uuid: guid of the owning zpool (currently unused here,
        kept for interface compatibility with callers)
    :param drives: major:minor identifiers recorded for each dataset
    :param block_devices: helper whose ``block_device_nodes`` is mutated
    :return: dict mapping dataset guid -> metadata dict
    """
    out = AgentShell.try_run(
        ['zfs', 'list', '-H', '-o', 'name,avail,guid'])

    zpool_datasets = {}

    if out.strip() != "no datasets available":
        for line in filter(None, out.split('\n')):
            name, size_str, uuid = line.split()
            size = util.human_to_bytes(size_str)

            if name.startswith("%s/" % pool_name):
                # This will need discussion, but for now fabricate a
                # major:minor. Do we ever use them as numbers?
                #
                # Bug fix: the previous expression
                # ``len(self.datasets) + 1`` is constant for the whole
                # loop, so every dataset found in a single call shared
                # one major:minor and overwrote each other in
                # block_device_nodes. Include the number of datasets
                # found so far in this call to keep each one unique.
                # NOTE(review): assumes ``self.datasets`` is the
                # previously-accumulated dataset map — confirm (the
                # sibling _add_zfs_pool uses ``self._datasets``).
                major_minor = "zfsset:%s" % (len(self.datasets) +
                                             len(zpool_datasets) + 1)

                block_devices.block_device_nodes[major_minor] = {
                    'major_minor': major_minor,
                    'path': name,
                    'serial_80': None,
                    'serial_83': None,
                    'size': size,
                    'filesystem_type': 'zfs',
                    'parent': None
                }

                # Do this to cache the device, type see blockdevice and
                # filesystem for info.
                BlockDevice('zfs', name)
                FileSystem('zfs', name)

                zpool_datasets[uuid] = {
                    "name": name,
                    "path": name,
                    "block_device": major_minor,
                    "uuid": uuid,
                    "size": size,
                    "drives": drives
                }

                daemon_log.debug("zfs mount '%s'" % name)

    return zpool_datasets