def clean_all(self):
    """Cleans everything related to iocage."""
    IOCDestroy().__stop_jails__()
    zfs = libzfs.ZFS(history=True, history_prefix="<iocage>")
    root = zfs.get_dataset(f"{self.pool}/iocage")

    for child in root.dependents:
        try:
            if child.type == libzfs.DatasetType.FILESYSTEM:
                child.umount(force=True)
        except libzfs.ZFSException as err:
            # This is either not mounted or doesn't exist anymore,
            # we don't care either way -- but anything else is fatal.
            if err.code != libzfs.Error.NOENT:
                raise
            continue
        child.delete()

    root.umount(force=True)
    root.delete()
def import_pool(self, name_or_guid, options, any_host, cachefile, new_name):
    """Import the pool matching `name_or_guid`, optionally renaming it.

    Raises CallError(ENOENT) when no importable pool matches; dataset
    mount failures after a successful import are logged, not raised.
    """
    search_paths = ['/dev/disk/by-partuuid'] if osc.IS_LINUX else None
    with libzfs.ZFS() as zfs:
        found = next(
            (
                pool
                for pool in zfs.find_import(
                    cachefile=cachefile, search_paths=search_paths)
                if name_or_guid in (pool.name, str(pool.guid))
            ),
            None,
        )

        if not found:
            raise CallError(f'Pool {name_or_guid} not found.', errno.ENOENT)

        try:
            zfs.import_pool(found, new_name or found.name, options, any_host=any_host)
        except libzfs.ZFSException as e:
            # We only log if some datasets failed to mount after pool import
            if e.code != libzfs.Error.MOUNTFAILED:
                raise
            self.logger.error(
                'Failed to mount datasets after importing "%s" pool: %s',
                name_or_guid, str(e), exc_info=True
            )
async def do_create(self, data):
    """
    Take a snapshot from a given dataset.

    Returns:
        bool: True if succeed otherwise False.
    """
    dataset = data.get('dataset', '')
    name = data.get('name', '')
    recursive = data.get('recursive', False)
    vmsnaps_count = data.get('vmsnaps_count', 0)
    properties = data.get('properties', None)

    if not dataset or not name:
        return False

    # FIX: use the ZFS handle as a context manager (as the other snapshot
    # methods do) so the underlying libzfs handle is released on all paths
    # instead of leaking until garbage collection.
    with libzfs.ZFS() as zfs:
        try:
            ds = zfs.get_dataset(dataset)
        except libzfs.ZFSException as err:
            self.logger.error("{0}".format(err))
            return False

        try:
            ds.snapshot(f'{dataset}@{name}', recursive=recursive, fsopts=properties)

            if vmsnaps_count > 0:
                ds.properties['freenas:vmsynced'] = libzfs.ZFSUserProperty('Y')

            self.logger.info(f"Snapshot taken: {dataset}@{name}")
            return True
        except libzfs.ZFSException as err:
            self.logger.error(f"{err}")
            return False
def query(self, filters=None, options=None):
    """Query snapshots, with a fast path for name-only listings.

    When only the `name` field is selected, shells out to `zfs list`,
    which is dozens of times faster than py-libzfs (#53149).
    """
    # Special case for faster listing of snapshot names (#53149)
    if options and options.get('select') == ['name']:
        cmd = ['zfs', 'list', '-H', '-o', 'name', '-t', 'snapshot']
        order_by = options.get('order_by')
        # -s name makes it even faster
        if not order_by or order_by == ['name']:
            cmd += ['-s', 'name']
        cp = subprocess.run(
            cmd,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            universal_newlines=True,
        )
        if cp.returncode != 0:
            raise CallError(f'Failed to retrieve snapshots: {cp.stderr}')
        # BUG FIX: splitlines() instead of strip().split('\n') -- the old
        # form produced a bogus [{'name': ''}] entry when the system had
        # no snapshots at all (empty stdout).
        snaps = [{'name': i} for i in cp.stdout.splitlines()]
        if filters:
            return filter_list(snaps, filters, options)
        return snaps

    with libzfs.ZFS() as zfs:
        # Handle `id` filter to avoid getting all snapshots first
        if filters and len(filters) == 1 and list(
                filters[0][:2]) == ['id', '=']:
            try:
                snapshots = [
                    zfs.get_snapshot(filters[0][2]).__getstate__()
                ]
            except libzfs.ZFSException as e:
                if e.code != libzfs.Error.NOENT:
                    raise
                snapshots = []
        else:
            snapshots = [i.__getstate__() for i in list(zfs.snapshots)]
        # FIXME: awful performance with hundreds/thousands of snapshots
        return filter_list(snapshots, filters, options)
async def do_create(self, data):
    """
    Take a snapshot from a given dataset.

    Returns:
        bool: True if succeed otherwise False.
    """
    zfs = libzfs.ZFS()
    dataset = data.get('dataset', '')
    name = data.get('name', '')
    recursive = data.get('recursive', False)
    vmsnaps_count = data.get('vmsnaps_count', 0)

    if not dataset or not name:
        return False

    try:
        ds = zfs.get_dataset(dataset)
    except libzfs.ZFSException as err:
        self.logger.error("{0}".format(err))
        return False

    try:
        # BUG FIX: the recursive branch called the non-existent
        # ds.snapshots(...) and passed recursive=True to str.format()
        # (where it was silently ignored) instead of to the snapshot
        # call. A single snapshot() call with recursive= handles both.
        ds.snapshot('{0}@{1}'.format(dataset, name), recursive=recursive)

        if vmsnaps_count > 0:
            ds.properties['freenas:vmsynced'] = libzfs.ZFSUserProperty('Y')

        self.logger.info("Snapshot taken: {0}@{1}".format(dataset, name))
        return True
    except libzfs.ZFSException as err:
        self.logger.error("{0}".format(err))
        return False
async def __get_quota_excess(self):
    """Return info dicts for datasets at >=80% (level 1) or >=95%
    (level 2) of their quota."""
    zfs = libzfs.ZFS()
    excess = []

    all_props = await self.middleware.threaded(
        lambda: [i.properties for i in zfs.datasets])

    for properties in all_props:
        quota = properties.get("quota")
        # zvols do not have a quota property in libzfs
        if quota is None or quota.value == "none":
            continue

        used = int(properties["used"].rawvalue)
        available = used + int(properties["available"].rawvalue)
        if available:
            percent_used = 100 * used / available
        else:
            # Guard against a zero total -- treat as completely full.
            percent_used = 100

        if percent_used >= 95:
            level = 2
        elif percent_used >= 80:
            level = 1
        else:
            continue

        stat_info = await self.middleware.threaded(
            os.stat, properties["mountpoint"].value)

        excess.append({
            "dataset_name": properties["name"].value,
            "level": level,
            "used": used,
            "available": available,
            "percent_used": percent_used,
            "uid": stat_info.st_uid,
        })

    return excess
def do_create(self, data):
    """
    Creates a ZFS dataset.

    Raises:
        ValidationErrors: if `name` is not a full pool/dataset path.
        CallError: if libzfs fails to create the dataset.
    """
    verrors = ValidationErrors()

    if '/' not in data['name']:
        verrors.add('name', 'You need a full name, e.g. pool/newdataset')

    if verrors:
        raise verrors

    properties = data.get('properties') or {}
    sparse = properties.pop('sparse', False)
    # BUG FIX: iterate the normalized `properties` dict (with `sparse`
    # already popped) instead of data['properties'], which raised
    # KeyError whenever the caller supplied no properties at all.
    params = dict(properties)

    # it's important that we set xattr=sa for various
    # performance reasons related to ea handling
    # pool.dataset.create already sets this by default
    # so mirror the behavior here
    if data['type'] == 'FILESYSTEM' and 'xattr' not in params:
        params['xattr'] = 'sa'

    try:
        with libzfs.ZFS() as zfs:
            pool = zfs.get(data['name'].split('/')[0])
            pool.create(
                data['name'], params,
                fstype=getattr(libzfs.DatasetType, data['type']),
                sparse_vol=sparse,
            )
    except libzfs.ZFSException as e:
        self.logger.error('Failed to create dataset', exc_info=True)
        raise CallError(f'Failed to create dataset: {e}')
def __init__(self, release, props, num, pkglist=None, plugin=False,
             migrate=False, config=None, silent=False, template=False,
             short=False, basejail=False, empty=False, uuid=None,
             clone=False, exit_on_error=False, callback=None):
    """Store jail-creation parameters and open a libzfs handle.

    Resolves the active pool and iocroot via IOCJson, keeps every
    argument as an instance attribute for later use by the creation
    routines, and opens a ZFS handle whose history entries are
    prefixed with "<iocage>".
    """
    self.pool = iocage.lib.ioc_json.IOCJson().json_get_value("pool")
    self.iocroot = iocage.lib.ioc_json.IOCJson(
        self.pool).json_get_value("iocroot")
    self.release = release
    self.props = props
    self.num = num
    self.pkglist = pkglist
    self.plugin = plugin
    self.migrate = migrate
    self.config = config
    self.template = template
    self.short = short
    self.basejail = basejail
    self.empty = empty
    self.uuid = uuid
    self.clone = clone
    self.silent = silent
    self.exit_on_error = exit_on_error
    self.callback = callback
    self.zfs = libzfs.ZFS(history=True, history_prefix="<iocage>")
async def clone(self, data):
    """
    Clone a given snapshot to a new dataset.

    Returns:
        bool: True if succeed otherwise False.
    """
    snapshot = data.get('snapshot', '')
    dataset_dst = data.get('dataset_dst', '')

    if not snapshot or not dataset_dst:
        return False

    try:
        with libzfs.ZFS() as zfs:
            zfs.get_snapshot(snapshot).clone(dataset_dst)
            self.logger.info("Cloned snapshot {0} to dataset {1}".format(
                snapshot, dataset_dst))
            return True
    except libzfs.ZFSException as err:
        self.logger.error("{0}".format(err))
        return False
def scrub_action(self, name, action):
    """
    Start/Stop/Pause a scrub on pool `name`.

    Raises:
        CallError: on libzfs failure, or if `zpool scrub -p` exits non-zero.
    """
    if action != 'PAUSE':
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)

                if action == 'START':
                    pool.start_scrub()
                else:
                    pool.stop_scrub()
        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)
    else:
        # BUG FIX: build argv as an explicit list instead of splitting an
        # f-string on spaces, which mangled pool names containing
        # whitespace into multiple arguments.
        proc = subprocess.Popen(
            ['zpool', 'scrub', '-p', name],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        proc.communicate()
        if proc.returncode != 0:
            raise CallError('Unable to pause scrubbing')
def clone(self, data):
    """
    Clone a given snapshot to a new dataset.

    Returns:
        bool: True if succeed otherwise False.
    """
    snapshot = data.get('snapshot', '')
    dataset_dst = data.get('dataset_dst', '')
    props = data['dataset_properties']

    try:
        with libzfs.ZFS() as zfs:
            zfs.get_snapshot(snapshot).clone(dataset_dst, props)
            clone_ds = zfs.get_dataset(dataset_dst)
            if clone_ds.type.name == 'FILESYSTEM':
                clone_ds.mount_recursive()
            self.logger.info("Cloned snapshot {0} to dataset {1}".format(snapshot, dataset_dst))
            return True
    except libzfs.ZFSException as err:
        self.logger.error("{0}".format(err))
        raise CallError(f'Failed to clone snapshot: {err}')
def cli(ctx, zpool, mountpoint):
    """
    Calls ZFS set to change the property org.freebsd.ioc:active to yes.
    """
    logger = ctx.parent.logger
    zfs = libzfs.ZFS(history=True, history_prefix="<iocage>")

    iocage_pool = None
    for pool in zfs.pools:
        if pool.name == zpool:
            iocage_pool = pool
            break  # no need to scan the remaining pools

    if iocage_pool is None:
        logger.error(f"ZFS pool '{zpool}' not found")
        exit(1)

    try:
        datasets = libiocage.lib.Datasets.Datasets(pool=iocage_pool,
                                                   zfs=zfs,
                                                   logger=logger)
        datasets.activate(mountpoint=mountpoint)
        logger.log(f"ZFS pool '{zpool}' activated")
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt and exited without any hint of
        # what went wrong; catch Exception and report before exiting.
        logger.error(f"Could not activate ZFS pool '{zpool}'")
        exit(1)
async def pool_extend(self, pool):
    """
    If pool is encrypted we need to check if the pool is imported or if
    all geli providers exist.
    """
    # FIX: the docstring above was previously placed after the first
    # statement, making it a discarded string expression instead of the
    # function's docstring.
    pool.pop('fstype', None)

    try:
        zpool = libzfs.ZFS().get(pool['name'])
    except libzfs.ZFSException:
        zpool = None

    if zpool:
        pool['status'] = zpool.status
        pool['scan'] = zpool.scrub.__getstate__()
    else:
        pool.update({
            'status': 'OFFLINE',
            'scan': None,
        })

    if pool['encrypt'] > 0:
        if zpool:
            pool['is_decrypted'] = True
        else:
            # Pool is not imported; it is decrypted only if every geli
            # provider backing it currently exists.
            decrypted = True
            for ed in await self.middleware.call(
                    'datastore.query', 'storage.encrypteddisk',
                    [('encrypted_volume', '=', pool['id'])]):
                if not os.path.exists(
                        f'/dev/{ed["encrypted_provider"]}.eli'):
                    decrypted = False
                    break
            pool['is_decrypted'] = decrypted
    else:
        pool['is_decrypted'] = True
    return pool
def do_create(self, data):
    """
    Take a snapshot from a given dataset.

    Optionally coordinates with VMware (snapshot_begin/snapshot_end)
    when `vmware_sync` is requested.

    Returns:
        bool: True if succeed otherwise False.
    """
    dataset = data.get('dataset', '')
    name = data.get('name', '')
    recursive = data.get('recursive', False)
    properties = data.get('properties', None)

    if not dataset or not name:
        return False

    vmware_context = None
    # ROBUSTNESS FIX: use .get() like every other key read from `data`;
    # direct indexing raised KeyError when callers omitted 'vmware_sync'.
    if data.get('vmware_sync'):
        vmware_context = self.middleware.call_sync('vmware.snapshot_begin', dataset, recursive)

    try:
        with libzfs.ZFS() as zfs:
            ds = zfs.get_dataset(dataset)
            ds.snapshot(f'{dataset}@{name}', recursive=recursive, fsopts=properties)

            if vmware_context and vmware_context['vmsynced']:
                ds.properties['freenas:vmsynced'] = libzfs.ZFSUserProperty('Y')

        self.logger.info(f"Snapshot taken: {dataset}@{name}")
        return True
    except libzfs.ZFSException as err:
        self.logger.error(f"{err}")
        return False
    finally:
        # Always close out the VMware coordination, success or failure.
        if vmware_context:
            self.middleware.call_sync('vmware.snapshot_end', vmware_context)
async def get_disks(self, name):
    """Yield the disk names backing pool `name`, resolved via geom."""
    zfs = libzfs.ZFS()
    try:
        zpool = zfs.get(name)
    except libzfs.ZFSException as e:
        raise CallError(str(e), errno.ENOENT)

    await self.middleware.run_in_thread(geom.scan)
    labelclass = geom.class_by_name('LABEL')
    for absdev in zpool.disks:
        dev = absdev.replace('/dev/', '').replace('.eli', '')
        find = labelclass.xml.findall(
            f".//provider[name='{dev}']/../consumer/provider")

        disk_name = None
        if find:
            disk_name = geom.provider_by_id(find[0].get('ref')).geom.name
        else:
            g = geom.geom_by_name('DEV', dev)
            if g:
                disk_name = g.consumer.provider.geom.name

        if disk_name and geom.geom_by_name('DISK', disk_name):
            yield disk_name
        else:
            self.logger.debug(f'Could not find disk for {dev}')
def __init__(self, jail=None, rc=False, callback=None, silent=False,
             activate=False, skip_jails=False):
    """Collect pool/jail state needed by the destroy operations."""
    self.zfs = libzfs.ZFS(history=True, history_prefix="<iocage>")

    if not activate:
        self.pool = PoolAndDataset().get_pool()
        self.iocroot = PoolAndDataset().get_iocroot()

        if not skip_jails:
            # When they need to destroy a jail with a missing or bad
            # configuration, this gets in our way otherwise.
            self.jails = self.list("uuid")

    self.skip_jails = skip_jails
    self.jail = jail
    self.rc = rc
    self._all = bool(self.jail and 'ALL' in self.jail)
    self.callback = callback if callback else ioc_common.callback
    self.silent = silent
def __init__(self):
    """Resolve the active pool and iocroot and open a libzfs handle."""
    self.pool = iocage_lib.ioc_json.IOCJson().json_get_value("pool")
    self.iocroot = iocage_lib.ioc_json.IOCJson(
        self.pool).json_get_value("iocroot")
    self.zfs = libzfs.ZFS(history=True, history_prefix="<iocage>")
    # Shorthand for fetching a dataset by name.
    self.ds = self.zfs.get_dataset
def __init__(self, path, silent=False, callback=None):
    """Record the target path and open a libzfs handle.

    The active pool is resolved via IOCJson; `callback` and `silent`
    control how messages are reported back to the caller.
    """
    self.pool = ioc_json.IOCJson(' ').json_get_value('pool')
    self.path = path
    self.zfs = libzfs.ZFS(history=True, history_prefix='<iocage>')
    self.callback = callback
    self.silent = silent
def get_snapshot_safely(snap):
    """Return the serialized snapshot state, or None if it does not exist."""
    try:
        with libzfs.ZFS() as zfs:
            snapshot = zfs.get_snapshot(snap)
            return snapshot.__getstate__()
    except libzfs.ZFSException:
        return None
def get_snapshots(dataset_name):
    """Return the serialized snapshots of `dataset_name`."""
    with libzfs.ZFS() as zfs:
        dataset = zfs.get_dataset(dataset_name)
        state = dataset.__getstate__(snapshots=True)
        return state['snapshots']
def get_snapshots_recursively(dataset_name):
    """Return serialized snapshots of `dataset_name` and all its children."""
    with libzfs.ZFS() as zfs:
        dataset = zfs.get_dataset(dataset_name)
        state = dataset.__getstate__(snapshots_recursive=True)
        return state['snapshots_recursive']
def get_dataset(self, dataset_name):
    """Return serialized dataset state, or None when it does not exist."""
    try:
        with libzfs.ZFS() as zfs:
            dataset = zfs.get_dataset(dataset_name)
            return dataset.__getstate__()
    except libzfs.ZFSException:
        return None
def get(identifier):
    """Return the serialized state of the pool named `identifier`."""
    with libzfs.ZFS() as zfs:
        pool = zfs.get(identifier)
        return pool.__getstate__()
import bsd.geom as geom
from bsd.sysctl import sysctlbyname
import libzfs
import enum
import freenasOS.Manifest as Manifest
import freenasOS.Configuration as Configuration
from . import Install
from .Install import InstallationError
from . import Utils
from .Utils import InitLog, LogIt, Title, Project, SetProject
from .Utils import BootMethod, DiskRealName, SmartSize, RunCommand

zfs = libzfs.ZFS()


# This is used to get progress information. This assumes
# interactive; other callers of Install() will need to provide
# their own. Note the ProgressHandler class in freenasOS.Installer
class InstallationHandler(object):
    def __init__(self):
        self.package = None
        self.gauge = None

    def __enter__(self):
        return self

    # BUG FIX: the first parameter was misspelled `sef`; it only worked
    # because Python binds the instance positionally regardless of name.
    def __exit__(self, type, value, traceback):
        pass
#TODO generally make formatting better to fit the quality of the source material #TODO see the entire file for todos on various assumptions that need to be fixed, this code is not reliable in edge cases (like an empty table) import subprocess import sys import libzfs zfsr = libzfs.ZFS() #A halfassed table implementation transpose = lambda m: zip(*m) def dict_table(obj, default_cols=None): #https://github.com/freenas/py-libzfs/issues/63 obj = list(obj) if len(obj) == 0: return [] header = obj[0].properties.keys() rows = [ [ v.value for v in o.properties.values()] for o in obj ] #TODO ugh _table = [header] + rows _table = list(transpose(sorted([c for c in transpose(_table) if c[0] in default_cols], key=lambda i: i[0] != "name" ))) #sort columns; name needs to be first so interactions work right header, rows = _table[0], _table[1:] return table(header, rows) ## def prop_table(obj): return table(["NAME", "PROPERTY", "VALUE", "SOURCE"], obj ) #why is .properties a dict but .features a list generator def prop(obj): return [ [obj.name,k,v.value,v.source.name] for k,v in obj.properties.items() ] def feat(obj): #TODO source column
def query(self, filters, options): """ In `query-options` we can provide `extra` arguments which control which data should be retrieved for a dataset. `query-options.extra.snapshots` is a boolean which when set will retrieve snapshots for the dataset in question by adding a snapshots key to the dataset data. `query-options.extra.retrieve_children` is a boolean set to true by default. When set to true, will retrieve all children datasets which can cause a performance penalty. When set to false, will not retrieve children datasets which does not incur the performance penalty. `query-options.extra.properties` is a list of properties which should be retrieved. If null ( by default ), it would retrieve all properties, if empty, it will retrieve no property. We provide 2 ways how zfs.dataset.query returns dataset's data. First is a flat structure ( default ), which means that all the datasets in the system are returned as separate objects which also contain all the data their is for their children. This retrieval type is slightly slower because of duplicates which exist in each object. Second type is hierarchical where only top level datasets are returned in the list and they contain all the children there are for them in `children` key. This retrieval type is slightly faster. These options are controlled by `query-options.extra.flat` attribute which defaults to true. `query-options.extra.user_properties` controls if user defined properties of datasets should be retrieved or not. While we provide a way to exclude all properties from data retrieval, we introduce a single attribute `query-options.extra.retrieve_properties` which if set to false will make sure that no property is retrieved whatsoever and overrides any other property retrieval attribute. 
""" options = options or {} extra = options.get('extra', {}).copy() props = extra.get('properties', None) flat = extra.get('flat', True) user_properties = extra.get('user_properties', True) retrieve_properties = extra.get('retrieve_properties', True) retrieve_children = extra.get('retrieve_children', True) snapshots = extra.get('snapshots') snapshots_recursive = extra.get('snapshots_recursive') snapshots_properties = extra.get('snapshots_properties', []) if not retrieve_properties: # This is a short hand version where consumer can specify that they don't want any property to # be retrieved user_properties = False props = [] with libzfs.ZFS() as zfs: # Handle `id` filter specially to avoiding getting all datasets kwargs = dict(props=props, user_props=user_properties, snapshots=snapshots, retrieve_children=retrieve_children, snapshots_recursive=snapshots_recursive, snapshot_props=snapshots_properties) if filters and filters[0][0] == 'id': if filters[0][1] == '=': kwargs['datasets'] = [filters[0][2]] if filters[0][1] == 'in': kwargs['datasets'] = filters[0][2] datasets = zfs.datasets_serialized(**kwargs) if flat: datasets = self.flatten_datasets(datasets) else: datasets = list(datasets) return filter_list(datasets, filters, options)
def find_import(self):
    """Return serialized state for every pool available for import."""
    with libzfs.ZFS() as zfs:
        pools = zfs.find_import()
        return [pool.__getstate__() for pool in pools]
def query(self, filters, options):
    """Return all snapshots, serialized, filtered through `filter_list`."""
    # FIX: use the ZFS handle as a context manager, consistent with the
    # other methods in this file, so the underlying libzfs handle is
    # released instead of leaking until garbage collection.
    with libzfs.ZFS() as zfs:
        # FIXME: awful performance with hundreds/thousands of snapshots
        snapshots = [i.__getstate__() for i in list(zfs.snapshots)]
    return filter_list(snapshots, filters, options)
def upgrade(self, pool):
    """Upgrade `pool` to the latest supported ZFS version.

    Raises:
        CallError: on any libzfs failure.
    """
    try:
        with libzfs.ZFS() as zfs:
            zpool = zfs.get(pool)
            zpool.upgrade()
    except libzfs.ZFSException as e:
        raise CallError(str(e))
def get_devices(self, name):
    """Return the device names backing pool `name`, stripped of '/dev/'.

    Raises:
        CallError(ENOENT): if the pool cannot be found.
    """
    try:
        with libzfs.ZFS() as zfs:
            return [
                disk.replace('/dev/', '')
                for disk in zfs.get(name).disks
            ]
    except libzfs.ZFSException as e:
        raise CallError(str(e), errno.ENOENT)