def verify(self, guid, name=None, properties=None):
    """Check that an importable pool with the given GUID exists.

    Returns the names of the disks backing the pool (used as task
    resources); raises VerifyException(ENOENT) when no candidate pool
    with that GUID shows up in the import scan.
    """
    handle = libzfs.ZFS()
    candidate = first_or_default(
        lambda p: str(p.guid) == guid,
        handle.find_import()
    )

    if not candidate:
        raise VerifyException(errno.ENOENT, 'Pool with GUID {0} not found'.format(guid))

    return get_disk_names(self.dispatcher, candidate)
def run(self, guid, name=None, properties=None):
    """Import the pool identified by *guid*, optionally under a new *name*.

    :param guid: pool GUID as a string
    :param name: optional new pool name passed to the importer
    :param properties: optional dict of pool properties to set on import
    :raises TaskException: ENOENT when the pool is no longer importable,
        EFAULT when libzfs reports a failure during import.
    """
    zfs = libzfs.ZFS()
    opts = properties or {}
    try:
        pool = first_or_default(lambda p: str(p.guid) == guid, zfs.find_import())
        if not pool:
            # The pool can vanish between verify() and run(); without this
            # check import_pool() would be handed None and fail obscurely.
            raise TaskException(errno.ENOENT, 'Pool with GUID {0} not found'.format(guid))
        zfs.import_pool(pool, name, opts)
    except libzfs.ZFSException as err:
        # 'as' form replaces the Py2-only 'except X, err' syntax (PEP 3110);
        # works on Python 2.6+ and 3.x alike.
        raise TaskException(errno.EFAULT, str(err))
def release(self, identifier, sender):
    """Drop the calling session's hold on the named transaction.

    Raises RpcException(ENOENT) for an unknown transaction and
    RpcException(EINVAL) when the caller's session holds no lock on it.
    """
    try:
        txn = transactions[identifier]
    except KeyError:
        raise RpcException(errno.ENOENT, 'Transaction not found')

    held = first_or_default(lambda entry: entry.sid == sender.session_id, txn)
    if not held:
        raise RpcException(errno.EINVAL, 'Transaction is not held by current session')

    txn.purge(held)
def get_groups():
    """Build a mapping of group name -> list of member usernames.

    Queries users and groups through the dispatcher and resolves each
    group member id to its username.  NOTE(review): assumes every member
    id resolves to a user record; a dangling id would raise TypeError —
    same as the previous implementation.
    """
    users = dispatcher.call_sync('users.query')
    groups = dispatcher.call_sync('groups.query')

    def username_of(member_id):
        # Resolve a member id against the already-fetched user list.
        return first_or_default(lambda u: u['id'] == member_id, users)['username']

    return {
        grp['name']: [username_of(m) for m in grp['members']]
        for grp in groups
    }
def acquire(self, identifier, timeout, sender):
    """Take (creating if needed) a hold on a UI transaction for the caller.

    A session may hold a given transaction only once; a second acquire
    raises RpcException(EBUSY).  The hold auto-expires after *timeout*.
    """
    txn = transactions.setdefault(identifier, Transaction(self.dispatcher, identifier))

    # Reject a repeated acquire from the same session.
    existing = first_or_default(lambda item: item.sid == sender.session_id, txn)
    if existing:
        raise RpcException(errno.EBUSY, 'Transaction is already held by current session')

    session = Session(timeout, sender.user.name, sender.session_id)
    txn.sessions.append(session)
    # Schedule automatic release once the session's timeout elapses.
    gevent.spawn(txn.purge, session)

    payload = {
        'identifier': identifier,
        'sid': sender.session_id,
        'user': sender.user.name
    }
    self.dispatcher.dispatch_event('ui.transaction.acquired', payload)
def run(self, context, args, kwargs, opargs):
    """Submit a volume.import task for the volume given by id or name.

    args[0] is either a numeric volume id or a volume name; an optional
    'newname' kwarg overrides the name used for the imported volume.
    """
    if not args:
        raise CommandException("Not enough arguments passed")

    target = args[0]
    vol_id = target
    oldname = target

    # A non-numeric argument is a volume name; resolve it to an id.
    if not target.isdigit():
        vols = context.connection.call_sync("volumes.find")
        vol = first_or_default(lambda v: v["name"] == target, vols)
        if not vol:
            raise CommandException("Importable volume {0} not found".format(target))
        vol_id = vol["id"]
        oldname = vol["name"]

    context.submit_task("volume.import", vol_id, kwargs.get("newname", oldname))
def run(self, context, args, kwargs, opargs):
    """Kick off volume.import for the volume named or numbered in args[0]."""
    if len(args) < 1:
        raise CommandException('Not enough arguments passed')

    key = args[0]
    if key.isdigit():
        # Numeric argument is already the volume id.
        volume_id, oldname = key, key
    else:
        # Otherwise look the volume up by name among importable volumes.
        candidates = context.connection.call_sync('volumes.find')
        match = first_or_default(lambda v: v['name'] == key, candidates)
        if not match:
            raise CommandException('Importable volume {0} not found'.format(key))
        volume_id = match['id']
        oldname = match['name']

    context.submit_task('volume.import', volume_id, kwargs.get('newname', oldname))
def acquire(self, identifier, timeout, sender):
    """Acquire a hold on transaction *identifier* for the requesting session."""
    sid = sender.session_id
    txn = transactions.setdefault(identifier, Transaction(self.dispatcher, identifier))

    if first_or_default(lambda held: held.sid == sid, txn):
        raise RpcException(
            errno.EBUSY,
            'Transaction is already held by current session')

    hold = Session(timeout, sender.user.name, sid)
    txn.sessions.append(hold)
    gevent.spawn(txn.purge, hold)  # expire the hold after its timeout

    self.dispatcher.dispatch_event(
        'ui.transaction.acquired', {
            'identifier': identifier,
            'sid': sid,
            'user': sender.user.name
        })
def get_one(self, name):
    """Fetch the single property entry matching *name* from the parent entity."""
    entries = self.parent.entity["properties"]
    return first_or_default(lambda entry: entry["name"] == name, entries)
def get_one(self, name):
    """Look up one property row with the given name on the parent entity."""
    def matches(row):
        return row['name'] == name

    return first_or_default(matches, self.parent.entity['properties'])
def get_one(self, name):
    """Refresh the parent entity and return its dataset named *name*."""
    self.parent.load()  # make sure the dataset list is current before searching
    datasets = self.parent.entity['datasets']
    return first_or_default(lambda ds: ds['name'] == name, datasets)
def update_disk_cache(dispatcher, path):
    """Refresh the cached and persisted record for the disk device at *path*.

    Re-scans the GEOM tree, re-reads hardware and partition information and
    updates the existing diskinfo_cache entry in place.  If the derived
    identifier changed (e.g. the serial number became readable), the stale
    cache and datastore entries keyed by the old id are purged.  Silently
    returns when *path* is not present in the cache or no DISK geom can be
    resolved for it.
    """
    # Re-read the GEOM configuration tree before querying it below.
    geom.scan()
    name = os.path.basename(path)
    gdisk = geom.geom_by_name('DISK', name)
    gpart = geom.geom_by_name('PART', name)
    gmultipath = geom.geom_by_name('MULTIPATH', path.split('/')[-1])
    disk = get_disk_by_path(path)
    if not disk:
        # Unknown device -- nothing cached to update.
        return

    # Remember the current id so a changed identifier can be detected below.
    old_id = disk['id']

    if gmultipath:
        # Path represents multipath device (not disk device)
        # MEDIACHANGE event -> use first member for hardware queries
        cons = gmultipath.consumers.next()
        gdisk = cons.provider.geom

    if not gdisk:
        return

    disk_info = info_from_device(gdisk.name)
    serial = disk_info['serial']
    provider = gdisk.provider
    partitions = list(generate_partitions_list(gpart))
    identifier = device_to_identifier(name, serial)
    # First freebsd-zfs / freebsd-swap partition (if any) supplies the
    # uuid used to build the corresponding /dev/gptid path.
    data_part = first_or_default(lambda x: x['type'] == 'freebsd-zfs', partitions)
    data_uuid = data_part["uuid"] if data_part else None
    swap_part = first_or_default(lambda x: x['type'] == 'freebsd-swap', partitions)
    swap_uuid = swap_part["uuid"] if swap_part else None

    # Mutate the cached entry in place; the cache therefore stays current
    # even when the identifier (cache key) has not changed.
    disk.update({
        'mediasize': provider.mediasize,
        'sectorsize': provider.sectorsize,
        'max_rotation': disk_info['max_rotation'],
        'smart_capable': disk_info['smart_capable'],
        'smart_enabled': disk_info['smart_enabled'],
        'smart_status': disk_info['smart_status'],
        'id': identifier,
        'schema': gpart.config.get('scheme') if gpart else None,
        'partitions': partitions,
        'data_partition_uuid': data_uuid,
        'data_partition_path': os.path.join("/dev/gptid", data_uuid) if data_uuid else None,
        'swap_partition_uuid': swap_uuid,
        'swap_partition_path': os.path.join("/dev/gptid", swap_uuid) if swap_uuid else None,
    })

    if gmultipath:
        disk['multipath.status'] = gmultipath.config['State']
        disk['multipath.mode'] = gmultipath.config['Mode']
        disk['multipath.uuid'] = gmultipath.config['UUID']

    # Purge old cache entry if identifier has changed
    if old_id != identifier:
        logger.debug(
            'Removing disk cache entry for <%s> because identifier changed',
            old_id)
        diskinfo_cache.remove(old_id)
        diskinfo_cache.put(identifier, disk)
        dispatcher.datastore.delete('disks', old_id)

    persist_disk(dispatcher, disk)
def get_disk_by_lunid(lunid):
    """Return the cached disk entry whose lunid matches, wrapped for queries."""
    cached = diskinfo_cache.validvalues()
    match = first_or_default(lambda entry: entry['lunid'] == lunid, cached)
    return wrap(match)
def get_one(self, name):
    """Reload the parent and fetch a single dataset entry by name."""
    self.parent.load()

    def named(entry):
        return entry["name"] == name

    return first_or_default(named, self.parent.entity["datasets"])
def update_disk_cache(dispatcher, path):
    """Bring the cached record for the disk at *path* up to date.

    Rescans GEOM, refreshes hardware/partition details on the cached
    entry in place, migrates the cache and datastore keys when the disk
    identifier changed, and finally persists the entry.
    """
    geom.scan()
    name = os.path.basename(path)
    disk = get_disk_by_path(path)
    if not disk:
        return

    old_id = disk['id']
    gdisk = geom.geom_by_name('DISK', name)
    gpart = geom.geom_by_name('PART', name)
    gmultipath = geom.geom_by_name('MULTIPATH', path.split('/')[-1])

    if gmultipath:
        # Path represents multipath device (not disk device)
        # MEDIACHANGE event -> use first member for hardware queries
        member = gmultipath.consumers.next()
        gdisk = member.provider.geom

    if not gdisk:
        return

    hwinfo = info_from_device(gdisk.name)
    identifier = device_to_identifier(name, hwinfo['serial'])
    partitions = list(generate_partitions_list(gpart))

    def uuid_of(part_type):
        # uuid of the first partition of the given type, or None.
        entry = first_or_default(lambda x: x['type'] == part_type, partitions)
        return entry["uuid"] if entry else None

    data_uuid = uuid_of('freebsd-zfs')
    swap_uuid = uuid_of('freebsd-swap')

    provider = gdisk.provider
    disk.update({
        'mediasize': provider.mediasize,
        'sectorsize': provider.sectorsize,
        'max_rotation': hwinfo['max_rotation'],
        'smart_capable': hwinfo['smart_capable'],
        'smart_enabled': hwinfo['smart_enabled'],
        'smart_status': hwinfo['smart_status'],
        'id': identifier,
        'schema': gpart.config.get('scheme') if gpart else None,
        'partitions': partitions,
        'data_partition_uuid': data_uuid,
        'data_partition_path': os.path.join("/dev/gptid", data_uuid) if data_uuid else None,
        'swap_partition_uuid': swap_uuid,
        'swap_partition_path': os.path.join("/dev/gptid", swap_uuid) if swap_uuid else None,
    })

    if gmultipath:
        disk['multipath.status'] = gmultipath.config['State']
        disk['multipath.mode'] = gmultipath.config['Mode']
        disk['multipath.uuid'] = gmultipath.config['UUID']

    # Purge old cache entry if identifier has changed
    if old_id != identifier:
        logger.debug('Removing disk cache entry for <%s> because identifier changed', old_id)
        diskinfo_cache.remove(old_id)
        diskinfo_cache.put(identifier, disk)
        dispatcher.datastore.delete('disks', old_id)

    persist_disk(dispatcher, disk)
def get_disk_by_lunid(lunid):
    """Find a disk entry in the disk-info cache by its LUN id."""
    def has_lunid(entry):
        return entry['lunid'] == lunid

    return wrap(first_or_default(has_lunid, diskinfo_cache.validvalues()))