def extend(vol):
    """Augment a volume dict with live pool configuration.

    Looks up the pool config by the volume's name. When no config is
    found the volume status is marked ``'UNKNOWN'``; otherwise vdev
    partition paths are translated to whole-disk paths and the config
    fields are merged into ``vol``.
    """
    config = wrap(self.get_config(vol['name']))
    if not config:
        vol['status'] = 'UNKNOWN'
    else:
        topology = config['groups']
        for vdev, _ in iterate_vdevs(topology):
            try:
                vdev['path'] = self.dispatcher.call_sync(
                    'disks.partition_to_disk',
                    vdev['path'])
            # Fix: `except RpcException, err` is the deprecated Py2-only
            # comma syntax; `as` works on Python 2.6+ and 3.
            except RpcException as err:
                # Best effort: a vanished partition (ENOENT) keeps the raw
                # partition path. NOTE(review): non-ENOENT RPC errors were
                # silently swallowed in the original too — preserved here,
                # but worth confirming that is intentional.
                if err.code == errno.ENOENT:
                    pass

        vol.update({
            'description': config.get('properties.org\\.freenas:description.value'),
            'topology': topology,
            'root_vdev': config['root_vdev'],
            'status': config['status'],
            'upgraded': is_upgraded(config),
            'scan': config['scan'],
            'properties': config['properties'],
            # List comprehension instead of map() so the stored value is a
            # real list under both Python 2 and Python 3.
            'datasets': [extend_dataset(ds) for ds in flatten_datasets(config['root_dataset'])]
        })
def extend(vol):
    """Fill in pool-derived fields on a volume dict.

    Fetches the pool config for ``vol['name']``. If the pool cannot be
    queried, sets status to ``'UNKNOWN'``. Otherwise rewrites each vdev
    path from partition device to parent disk and copies the pool's
    description, topology, status, scan and dataset info into ``vol``.
    """
    config = wrap(self.get_config(vol['name']))
    if not config:
        vol['status'] = 'UNKNOWN'
    else:
        topology = config['groups']
        for vdev, _ in iterate_vdevs(topology):
            try:
                vdev['path'] = self.dispatcher.call_sync(
                    'disks.partition_to_disk',
                    vdev['path']
                )
            # Fix: replaced removed-in-Py3 `except X, err` comma syntax
            # with `except X as err` (valid since Python 2.6).
            except RpcException as err:
                # ENOENT means the partition disappeared; keep the raw
                # path. NOTE(review): other RPC errors are also dropped,
                # as in the original — confirm this is deliberate.
                if err.code == errno.ENOENT:
                    pass

        vol.update({
            'description': config.get('properties.org\\.freenas:description.value'),
            'topology': topology,
            'root_vdev': config['root_vdev'],
            'status': config['status'],
            'upgraded': is_upgraded(config),
            'scan': config['scan'],
            'properties': config['properties'],
            # Comprehension so 'datasets' is a concrete list on Py2 and Py3
            # alike (map() is lazy on Python 3).
            'datasets': [extend_dataset(ds) for ds in flatten_datasets(config['root_dataset'])]
        })
def find_media(self):
    """Scan the currently-available disks for recognizable filesystems.

    Probes each whole disk first; if a filesystem is found there, the
    disk's partitions are skipped. Otherwise the first device path of
    every partition is probed. Returns a list of dicts with ``path``,
    ``size``, ``fstype`` and ``label`` keys.
    """
    media = []
    disks = self.dispatcher.call_sync(
        'disks.query',
        [('path', 'in', self.get_available_disks())])

    for disk in wrap(disks):
        # Whole-disk filesystem takes precedence over partition scans.
        fs, fs_label = fstyp(disk['path'])
        if fs:
            media.append({
                'path': disk['path'],
                'size': disk['mediasize'],
                'fstype': fs,
                'label': fs_label or disk['description']
            })
            continue

        # No whole-disk filesystem: probe each partition's first path.
        for partition in disk['status.partitions']:
            dev = partition['paths'][0]
            fs, fs_label = fstyp(dev)
            if fs:
                media.append({
                    'path': dev,
                    'size': partition['mediasize'],
                    'fstype': fs,
                    'label': fs_label or disk['description']
                })

    return media
def find_media(self):
    """Return filesystem media found on available disks.

    For each disk reported available, first tries to identify a
    filesystem spanning the whole device; failing that, inspects each
    partition. Each hit is reported as a dict of path, size, fstype
    and a label (falling back to the disk description).
    """
    available = self.get_available_disks()
    query_result = self.dispatcher.call_sync(
        'disks.query',
        [('path', 'in', available)])

    found = []
    for disk in wrap(query_result):
        # Prefer a filesystem covering the entire disk.
        whole_type, whole_label = fstyp(disk['path'])
        if whole_type:
            found.append({
                'path': disk['path'],
                'size': disk['mediasize'],
                'fstype': whole_type,
                'label': whole_label or disk['description']
            })
            continue

        # Fall back to per-partition probing.
        for part in disk['status.partitions']:
            part_path = part['paths'][0]
            part_type, part_label = fstyp(part_path)
            if part_type:
                found.append({
                    'path': part_path,
                    'size': part['mediasize'],
                    'fstype': part_type,
                    'label': part_label or disk['description']
                })

    return found
def extend_dataset(ds):
    """Project a raw dataset record into the externally-visible shape.

    Keeps only a whitelisted subset of ZFS properties and surfaces the
    FreeNAS share-type property under its own key.
    """
    ds = wrap(ds)
    # Only these properties are exposed to callers.
    exposed_properties = (
        'used',
        'available',
        'compression',
        'atime',
        'dedup',
        'quota',
        'refquota',
        'reservation',
        'refreservation',
        'casesensitivity',
        'volsize',
        'volblocksize',
    )
    return {
        'name': ds['name'],
        'type': ds['type'],
        'properties': include(ds['properties'], *exposed_properties),
        'share_type': ds.get('properties.org\\.freenas:share_type.value')
    }
def query(self, filter=None, params=None):
    """Query ZFS datasets through libzfs.

    Snapshots every dataset's state (non-recursively) and runs the
    given filter/params over the result.

    Raises:
        RpcException: with EFAULT when the libzfs call fails.
    """
    try:
        zfs = libzfs.ZFS()
        # Comprehension instead of map(lambda ...): concrete list on
        # both Python 2 and 3, and no lambda indirection.
        result = [ds.__getstate__(recursive=False) for ds in zfs.datasets]
        return wrap(result).query(*(filter or []), **(params or {}))
    # Fix: replaced the Py2-only `except X, err` comma syntax with
    # `except X as err` (valid on Python 2.6+ and required on 3).
    except libzfs.ZFSException as err:
        raise RpcException(errno.EFAULT, str(err))
def query(self, filter=None, params=None):
    """Query disk records, annotating each with live state.

    Every record from the datastore is decorated with its current
    online status and the cached hardware status before the
    filter/params are applied.
    """
    def decorate(disk):
        # Attach volatile state that isn't persisted in the datastore.
        disk['online'] = self.is_online(disk['path'])
        disk['status'] = diskinfo_cache.get(disk['id'])
        return disk

    records = self.datastore.query('disks', callback=decorate)
    return wrap(records).query(*(filter or []), **(params or {}))
def query(self, filter=None, params=None):
    """Return disk records matching filter/params.

    Each datastore record is enriched with its live online flag and
    the cached disk-info entry before filtering.
    """
    def add_runtime_state(record):
        # Online flag and cached status are runtime-only fields.
        record['online'] = self.is_online(record['path'])
        record['status'] = diskinfo_cache.get(record['id'])
        return record

    raw = self.datastore.query('disks', callback=add_runtime_state)
    wrapped = wrap(raw)
    return wrapped.query(*(filter or []), **(params or {}))
def select_fn(fn, obj):
    """Apply the optional transform *fn* and project fields per ``select``.

    With a list/tuple ``select``, returns a list of the named fields;
    with a string ``select``, returns that single field. For any other
    ``select`` type the function falls through and returns None
    (preserved from the original).
    """
    item = wrap(fn(obj) if fn else obj)
    if isinstance(select, (list, tuple)):
        return [item.get(field) for field in select]
    if isinstance(select, basestring):
        return item.get(select)
def query(self, filter=None, params=None):
    """Query boot-environment clones.

    Normalizes each clone record: the 'active' flag string is split
    into boolean ``active`` ('N' = now) and ``on_reboot`` ('R') fields,
    and 'name' is renamed to 'id'.
    """
    def normalize(entry):
        flags = entry['active']
        # 'N' marks the currently-active BE, 'R' the one used next boot.
        entry['active'] = 'N' in flags
        entry['on_reboot'] = 'R' in flags
        entry['id'] = entry.pop('name')
        return entry

    normalized = [normalize(clone) for clone in ListClones()]
    return wrap(normalized).query(*(filter or []), **(params or {}))
def run(self, context, args, kwargs, opargs):
    """Create a new entity in the parent namespace.

    With no arguments, enters the new item's namespace interactively.
    A positional argument sets the primary key; keyword arguments set
    the matching properties (all names are validated before any value
    is applied). Finally the entity is saved as new.
    """
    ns = EntityNamespace.SingleItemNamespace(None, self.parent)
    skeleton = self.parent.skeleton_entity
    ns.orig_entity = wrap(copy.deepcopy(skeleton))
    ns.entity = wrap(copy.deepcopy(skeleton))

    # Interactive mode: no args at all → just cd into the new item.
    if not args and not kwargs:
        context.ml.cd(ns)
        return

    if args:
        self.parent.primary_key.do_set(ns.entity, args.pop(0))

    # Validate every property name first so a bad name leaves the
    # entity untouched.
    for key in kwargs:
        if not self.parent.has_property(key):
            output_msg('Property {0} not found'.format(key))
            return

    for key, value in kwargs.items():
        self.parent.get_mapping(key).do_set(ns.entity, value)

    self.parent.save(ns, new=True)
def generate_disk_cache(dispatcher, path):
    """Build (or rebuild) the diskinfo cache entry for the disk at *path*.

    Scans GEOM, derives the disk's identifier from its serial number,
    detects possible multipath membership via the NAA lunid, stores the
    assembled record in ``diskinfo_cache`` and triggers disk
    configuration.

    Fix: the cache lock was released only on the success path — any
    exception (missing GEOM, RPC failure, CAM error) left
    ``diskinfo_cache_lock`` held forever, deadlocking later callers.
    The release now sits in a ``finally`` block.
    """
    diskinfo_cache_lock.acquire()
    try:
        geom.scan()
        name = os.path.basename(path)
        gdisk = geom.geom_by_name('DISK', name)
        multipath_info = None

        disk_info = info_from_device(gdisk.name)
        serial = disk_info['serial']
        identifier = device_to_identifier(name, serial)
        ds_disk = dispatcher.datastore.get_by_id('disks', identifier)

        # Path represents a disk device (not multipath device) and has
        # an NAA ID attached
        lunid = gdisk.provider.config.get('lunid')
        if lunid:
            # Check if device could be part of multipath configuration
            d = get_disk_by_lunid(lunid)
            if (d and d['path'] != path) or (ds_disk and ds_disk['is_multipath']):
                multipath_info = attach_to_multipath(dispatcher, d, ds_disk, path)

        provider = gdisk.provider
        camdev = CamDevice(gdisk.name)

        disk = wrap({
            'path': path,
            'is_multipath': False,
            'description': provider.config['descr'],
            'serial': serial,
            'lunid': provider.config.get('lunid'),
            'model': disk_info['model'],
            'interface': disk_info['interface'],
            'is_ssd': disk_info['is_ssd'],
            'id': identifier,
            'controller': camdev.__getstate__(),
        })

        if multipath_info:
            disk.update(multipath_info)

        diskinfo_cache.put(identifier, disk)
        update_disk_cache(dispatcher, path)
        dispatcher.call_sync('disks.configure_disk', identifier)
        logger.info('Added <%s> (%s) to disk cache', identifier, disk['description'])
    finally:
        diskinfo_cache_lock.release()
def query(self, params, options):
    """Run the namespace's query call over the connection and wrap it."""
    raw = self.context.connection.call_sync(
        self.query_call, params, options)
    return wrap(raw)
def call_task_sync(self, name, *args, **kwargs):
    """Submit task *name* synchronously and return its wrapped result.

    Fix: the signature accepted ``**kwargs`` but silently dropped them
    — only ``*args`` reached the underlying connection call. Keyword
    arguments are now forwarded as the signature advertises.
    """
    return wrap(self.connection.call_task_sync(name, *args, **kwargs))
def query(self, filter=None, params=None):
    """Run filter/params directly against a fresh libzfs handle."""
    handle = libzfs.ZFS()
    wrapped = wrap(handle)
    return wrapped.query(*(filter or []), **(params or {}))
def get_one(self, name):
    """Fetch the single entity whose primary key equals *name*."""
    primary_key_filter = [(self.primary_key_name, '=', name)]
    result = self.context.connection.call_sync(
        self.query_call,
        primary_key_filter,
        {'single': True})
    return wrap(result)
def get_disk_by_lunid(lunid):
    """Return the cached disk entry whose lunid matches, wrapped.

    Falls back to whatever first_or_default yields when no cached
    entry matches (presumably None — confirm against its definition).
    """
    def matches(entry):
        return entry['lunid'] == lunid

    return wrap(first_or_default(matches, diskinfo_cache.validvalues()))
def query(self, params, options):
    """Proxy the query over the connection; wrap the raw response."""
    response = self.context.connection.call_sync(
        self.query_call,
        params,
        options)
    return wrap(response)
def get_disk_by_lunid(lunid):
    """Look up a cached disk record by its NAA lunid and wrap it.

    Result when nothing matches is first_or_default's default
    (presumably None — verify against its definition).
    """
    candidates = diskinfo_cache.validvalues()
    hit = first_or_default(lambda rec: rec['lunid'] == lunid, candidates)
    return wrap(hit)
def get_one(self, name):
    """Return the one entity matching *name* on the primary key."""
    response = self.context.connection.call_sync(
        self.query_call,
        [(self.primary_key_name, '=', name)],
        {'single': True},
    )
    return wrap(response)