class SystemInfoProvider(Provider):
    """Read-only RPC provider exposing basic system information."""

    def __init__(self):
        # Lazily-resolved version string; populated on first version() call.
        self.__version = None

    @accepts()
    @returns(h.array(str))
    def uname_full(self):
        """Return the full uname tuple for this host."""
        return os.uname()

    @accepts()
    @returns(str)
    @description("Return the full version string, e.g. FreeNAS-8.1-r7794-amd64.")
    def version(self):
        if self.__version is None:
            # See #9113
            conf = Configuration.Configuration()
            manifest = conf.SystemManifest()
            if manifest:
                self.__version = manifest.Version()
            else:
                # No manifest available: fall back to the plain version file.
                with open(VERSION_FILE) as fd:
                    self.__version = fd.read().strip()

        return self.__version

    @accepts()
    @returns(float, float, float)
    def load_avg(self):
        """Return the 1, 5 and 15 minute load averages."""
        return os.getloadavg()

    @accepts()
    @returns(h.object(properties={
        'cpu_model': str,
        'cpu_cores': int,
        'memory_size': long,
    }))
    def hardware(self):
        """Return basic hardware facts read via sysctl."""
        return {
            'cpu_model': get_sysctl("hw.model"),
            'cpu_cores': get_sysctl("hw.ncpu"),
            'memory_size': get_sysctl("hw.physmem")
        }

    @accepts()
    @returns(h.object(properties={
        'system_time': str,
        'boot_time': str,
        'uptime': str,
        'timezone': str,
    }))
    def time(self):
        """Return current time, boot time, uptime and timezone information."""
        # Capture "now" once so system_time and uptime agree on one instant
        # (previously datetime.now() was evaluated twice).
        now = datetime.now(tz=tz.tzlocal())
        boot_time = datetime.fromtimestamp(psutil.BOOT_TIME, tz=tz.tzlocal())
        return {
            'system_time': now.isoformat(),
            'boot_time': boot_time.isoformat(),
            # NOTE(review): the schema above declares 'uptime' as str but this
            # value is a float (seconds) — confirm which consumers expect.
            'uptime': (now - boot_time).total_seconds(),
            'timezone': time.tzname[time.daylight],
        }
class SystemDatasetProvider(Provider):
    """RPC provider managing the .system dataset and directories within it."""

    @private
    @description("Initializes the .system dataset")
    @accepts()
    @returns()
    def init(self):
        # Create the dataset on the configured pool, then mount it.
        configured_pool = self.configstore.get('system.dataset.pool')
        create_system_dataset(self.dispatcher, configured_pool)
        mount_system_dataset(self.dispatcher, configured_pool, SYSTEM_DIR)

    @private
    @description(
        "Creates directory in .system dataset and returns reference to it")
    @accepts(str)
    @returns(str)
    def request_directory(self, name):
        # Create the directory when absent; reuse it when it already exists
        # as a directory; refuse anything else.
        target = os.path.join(SYSTEM_DIR, name)
        if not os.path.exists(target):
            os.mkdir(target)
            return target

        if os.path.isdir(target):
            return target

        raise RpcException(errno.EPERM,
                           'Cannot grant directory {0}'.format(name))

    @description("Returns current .system dataset parameters")
    @returns(h.object())
    def status(self):
        # Expose the identifiers recorded in the config store.
        return {
            'id': self.configstore.get('system.dataset.id'),
            'pool': self.configstore.get('system.dataset.pool')
        }
def _init(dispatcher, plugin):
    """Plugin entry point: register update schemas, provider, tasks and events."""

    # -- Schema definitions -------------------------------------------------
    plugin.register_schema_definition('update', {
        'type': 'object',
        'properties': {
            'train': {'type': 'string'},
            'check_auto': {'type': 'boolean'},
            'update_server': {'type': 'string', 'readOnly': True},
        },
    })

    plugin.register_schema_definition('update-progress', h.object(properties={
        'operation': h.enum(str, ['DOWNLOADING', 'INSTALLING']),
        'details': str,
        'indeterminate': bool,
        'percent': int,
        'reboot': bool,
        'pkg_name': str,
        'pkg_version': str,
        'filename': str,
        'filesize': int,
        'num_files_done': int,
        'num_files_total': int,
        'error': bool,
        'finished': bool,
    }))

    plugin.register_schema_definition('update-ops', {
        'type': 'object',
        'properties': {
            'new_name': {'type': 'string'},
            'previous_version': {'type': 'string'},
            'operation': {
                'type': 'string',
                'enum': ['delete', 'install', 'upgrade']
            },
            'new_version': {'type': 'string'},
            'previous_name': {'type': 'string'},
        }
    })

    plugin.register_schema_definition('update-info', {
        'type': 'object',
        'properties': {
            'notes': {'type': 'object'},
            'notice': {'type': 'string'},
            'changelog': {'type': 'string'},
            'operations': {'$ref': 'update-ops'},
        }
    })

    plugin.register_schema_definition('update-train', {
        'type': 'object',
        'properties': {
            'name': {'type': 'string'},
            'description': {'type': 'string'},
            'sequence': {'type': 'string'},
            'current': {'type': 'boolean'},
        }
    })

    # -- Provider -----------------------------------------------------------
    plugin.register_provider("update", UpdateProvider)

    # -- Task handlers ------------------------------------------------------
    for task_name, task_class in (
        ("update.configure", UpdateConfigureTask),
        ("update.check", CheckUpdateTask),
        ("update.download", DownloadUpdateTask),
        ("update.manual", UpdateManualTask),
        ("update.update", UpdateApplyTask),
        ("update.verify", UpdateVerifyTask),
    ):
        plugin.register_task_handler(task_name, task_class)

    # -- Event types --------------------------------------------------------
    plugin.register_event_type('update.in_progress',
                               schema=h.ref('update-progress'))
    plugin.register_event_type('update.changed')

    # -- Resources ----------------------------------------------------------
    plugin.register_resource(Resource(update_resource_string), ['system'])

    # Prime the update cache at system boot (this runs during init).
    generate_update_cache(dispatcher)
            # Continuation of a free-ID search loop (the loop header is
            # outside this chunk): take the first id no group is using yet.
            if not self.datastore.exists('groups', ('id', '=', i)):
                gid = i
                break
        if not gid:
            # Candidate range exhausted without finding a free id.
            raise RpcException(errno.ENOSPC, 'No free GIDs available')

        return gid


@description("Create an user in the system")
@accepts(h.all_of(
    h.ref('user'),
    h.required('username', 'group'),
    h.forbidden('builtin', 'logged-in', 'sessions'),
    h.object({'password': {'type': 'string'}}),
    h.any_of(
        h.required('password'),
        h.required('unixhash', 'smbhash'),
        h.required('password_disabled')),
))
class UserCreateTask(Task):
    # Task creating a user account.  NOTE: verify() is truncated at the end
    # of this chunk and continues past it.

    def describe(self, user):
        # Human-readable description of this task instance.
        return "Adding user {0}".format(user['username'])

    def verify(self, user):
        # Collect (field, code, message) validation errors, starting with
        # username validity checks from check_unixname().
        errors = []
        for code, message in check_unixname(user['username']):
            errors.append(('name', code, message))
    def get_boot_pool(self):
        # Look up the configured boot pool name and return the pool's full
        # state dictionary.
        name = self.configstore.get('system.boot_pool_name')
        zfs = libzfs.ZFS()
        return zfs.get(name).__getstate__()

    @accepts(str)
    @returns(h.array(str))
    def get_disks(self, name):
        # Return the disks backing the named pool, translating libzfs errors
        # into RPC faults.
        try:
            zfs = libzfs.ZFS()
            pool = zfs.get(name)
            return pool.disks
        except libzfs.ZFSException, err:
            raise RpcException(errno.EFAULT, str(err))

    @returns(h.object())
    def get_capabilities(self):
        # Static description of supported vdev types and their device-count
        # constraints.  NOTE: this literal is truncated at the end of this
        # chunk and continues past it.
        return {
            'vdev_types': {
                'disk': {
                    'min_devices': 1,
                    'max_devices': 1
                },
                'mirror': {
                    'min_devices': 2
                },
                'raidz1': {
                    'min_devices': 2
                },
                'raidz2': {
                    'min_devices': 3
def configure_disk(self, id): disk = self.datastore.get_by_id('disks', id) acc_level = getattr(AcousticLevel, disk.get('acoustic_level', 'DISABLED')).value powermgmt = disk.get('apm_mode', 0) system('/usr/local/sbin/ataidle', '-P', str(powermgmt), '-A', str(acc_level), disk['path']) if disk.get('standby_mode'): standby_mode = str(disk['standby_mode']) gevent.spawn_later( 60, lambda: system('/usr/local/sbin/ataidle', '-I', standby_mode, disk['path'])) @accepts(str, str, h.object()) class DiskGPTFormatTask(Task): def describe(self, disk, fstype, params=None): return "Formatting disk {0}".format(os.path.basename(disk)) def verify(self, disk, fstype, params=None): if not get_disk_by_path(disk): raise VerifyException(errno.ENOENT, "Disk {0} not found".format(disk)) if fstype not in ['freebsd-zfs']: raise VerifyException(errno.EINVAL, "Unsupported fstype {0}".format(fstype)) return ['disk:{0}'.format(disk)]
def _init(dispatcher, plugin):
    """Plugin entry point: register update schemas, provider, tasks and events."""

    # Schema: persistent update configuration.
    plugin.register_schema_definition('update', {
        'type': 'object',
        'properties': {
            'train': {'type': 'string'},
            'check_auto': {'type': 'boolean'},
            'update_server': {'type': 'string', 'readOnly': True},
        },
    })

    # Schema: progress events emitted while downloading/installing.
    plugin.register_schema_definition('update-progress', h.object(properties={
        'operation': h.enum(str, ['DOWNLOADING', 'INSTALLING']),
        'details': str,
        'indeterminate': bool,
        'percent': int,
        'reboot': bool,
        'pkg_name': str,
        'pkg_version': str,
        'filename': str,
        'filesize': int,
        'num_files_done': int,
        'num_files_total': int,
        'error': bool,
        'finished': bool,
    }))

    # Schema: a single package operation within an update.
    plugin.register_schema_definition('update-ops', {
        'type': 'object',
        'properties': {
            'new_name': {'type': 'string'},
            'previous_version': {'type': 'string'},
            'operation': {
                'type': 'string',
                'enum': ['delete', 'install', 'upgrade']
            },
            'new_version': {'type': 'string'},
            'previous_name': {'type': 'string'},
        }
    })

    # Schema: summary of a pending update.
    plugin.register_schema_definition('update-info', {
        'type': 'object',
        'properties': {
            'notes': {'type': 'object'},
            'notice': {'type': 'string'},
            'changelog': {'type': 'string'},
            'operations': {'$ref': 'update-ops'},
        }
    })

    # Schema: an available update train.
    plugin.register_schema_definition('update-train', {
        'type': 'object',
        'properties': {
            'name': {'type': 'string'},
            'description': {'type': 'string'},
            'sequence': {'type': 'string'},
            'current': {'type': 'boolean'},
        }
    })

    # Provider registration.
    plugin.register_provider("update", UpdateProvider)

    # Task handler registrations, table-driven.
    handlers = (
        ("update.configure", UpdateConfigureTask),
        ("update.check", CheckUpdateTask),
        ("update.download", DownloadUpdateTask),
        ("update.manual", UpdateManualTask),
        ("update.update", UpdateApplyTask),
        ("update.verify", UpdateVerifyTask),
    )
    for handler_name, handler_class in handlers:
        plugin.register_task_handler(handler_name, handler_class)

    # Event type registrations.
    plugin.register_event_type('update.in_progress',
                               schema=h.ref('update-progress'))
    plugin.register_event_type('update.changed')

    # Resource registration.
    plugin.register_resource(Resource(update_resource_string), ['system'])

    # Prime the update cache at system boot (this runs during init).
    generate_update_cache(dispatcher)
            # Tail of an exception handler from the preceding task (the
            # try/except header is outside this chunk).
            raise TaskException(
                errno.ENXIO,
                'Cannot generate certificate: {0}'.format(str(e)))

        return pkey


@accepts(
    h.object({
        'properties': {
            'name': {
                'type': 'string'
            },
            'certificate': {
                'type': 'string'
            },
            'privatekey': {
                'type': 'string'
            },
            'passphrase': {
                'type': 'string'
            },
        },
        'additionalProperties': False,
        'required': ['name', 'certificate', 'privatekey', 'passphrase'],
    }))
class CertificateImportTask(Task):
    # Task importing an existing certificate/private-key pair.  NOTE:
    # verify() is truncated at the end of this chunk and continues past it.

    def verify(self, certificate):
        # Certificate names must be unique across stored certificates.
        if self.datastore.exists('crypto.certificates',
                                 ('name', '=', certificate['name'])):
            raise VerifyException(
                errno.EEXIST, 'Certificate with given name already exists')
                break
        if not gid:
            # Candidate range exhausted without finding a free id.
            raise RpcException(errno.ENOSPC, 'No free GIDs available')

        return gid


@description("Create an user in the system")
@accepts(
    h.all_of(
        h.ref('user'),
        h.required('username', 'group'),
        h.forbidden('builtin', 'logged-in', 'sessions'),
        h.object({'password': {
            'type': 'string'
        }}),
        h.any_of(h.required('password'),
                 h.required('unixhash', 'smbhash'),
                 h.required('password_disabled')),
    ))
class UserCreateTask(Task):
    # Task creating a user account.  NOTE: verify() is truncated at the end
    # of this chunk and continues past it.

    def describe(self, user):
        # Human-readable description of this task instance.
        return "Adding user {0}".format(user['username'])

    def verify(self, user):
        # Collect (field, code, message) validation errors, starting with
        # username validity checks from check_unixname().
        errors = []
        for code, message in check_unixname(user['username']):
            errors.append(('name', code, message))
        # Body of configure_disk (its "def" line is outside this chunk):
        # fetch the disk record and push APM/acoustic settings via ataidle.
        disk = self.datastore.get_by_id('disks', id)
        acc_level = getattr(AcousticLevel,
                            disk.get('acoustic_level', 'DISABLED')).value
        powermgmt = disk.get('apm_mode', 0)
        system('/usr/local/sbin/ataidle', '-P', str(powermgmt), '-A',
               str(acc_level), disk['path'])
        if disk.get('standby_mode'):
            # Apply the standby timeout a minute later.
            standby_mode = str(disk['standby_mode'])
            gevent.spawn_later(60, lambda: system(
                '/usr/local/sbin/ataidle', '-I', standby_mode, disk['path']
            ))


@accepts(str, str, h.object())
class DiskGPTFormatTask(Task):
    # Task formatting a disk with a GPT partition scheme.  NOTE: run() is
    # truncated at the end of this chunk and continues past it.

    def describe(self, disk, fstype, params=None):
        # Human-readable description of this task instance.
        return "Formatting disk {0}".format(os.path.basename(disk))

    def verify(self, disk, fstype, params=None):
        # The disk must exist and the filesystem type must be supported.
        if not get_disk_by_path(disk):
            raise VerifyException(errno.ENOENT,
                                  "Disk {0} not found".format(disk))

        if fstype not in ['freebsd-zfs']:
            raise VerifyException(errno.EINVAL,
                                  "Unsupported fstype {0}".format(fstype))

        return ['disk:{0}'.format(disk)]

    def run(self, disk, fstype, params=None):
        if params is None:
            # Tail of a try-block from the preceding task (the "try:" header
            # is outside this chunk): persist the signing cert and regenerate
            # the crypto config group.
            self.datastore.update('crypto.certificates', signing_cert['id'],
                                  signing_cert)
            self.dispatcher.call_sync('etcd.generation.generate_group',
                                      'crypto')
        except DatastoreException, e:
            raise TaskException(errno.EBADMSG,
                                'Cannot create internal cert: {0}'.format(str(e)))
        except RpcException, e:
            raise TaskException(errno.ENXIO,
                                'Cannot generate certificate: {0}'.format(str(e)))

        return pkey


@accepts(h.object({
    'properties': {
        'name': {'type': 'string'},
        'certificate': {'type': 'string'},
        'privatekey': {'type': 'string'},
        'passphrase': {'type': 'string'},
    },
    'additionalProperties': False,
    'required': ['name', 'certificate', 'privatekey', 'passphrase'],
}))
class CertificateImportTask(Task):
    # Task importing an existing certificate/private-key pair; this chunk
    # may be truncated after verify().

    def verify(self, certificate):
        # Certificate names must be unique across stored certificates.
        if self.datastore.exists('crypto.certificates',
                                 ('name', '=', certificate['name'])):
            raise VerifyException(errno.EEXIST,
                                  'Certificate with given name already exists')

        # The private key must load (and decrypt) with the given passphrase.
        try:
            load_privatekey(certificate['privatekey'],
                            certificate.get('passphrase'))
        except Exception:
            raise VerifyException(errno.EINVAL, 'Invalid passphrase')
    def get_boot_pool(self):
        # Look up the configured boot pool name and return the pool's full
        # state dictionary.
        name = self.configstore.get('system.boot_pool_name')
        zfs = libzfs.ZFS()
        return zfs.get(name).__getstate__()

    @accepts(str)
    @returns(h.array(str))
    def get_disks(self, name):
        # Return the disks backing the named pool, translating libzfs errors
        # into RPC faults.
        try:
            zfs = libzfs.ZFS()
            pool = zfs.get(name)
            return pool.disks
        except libzfs.ZFSException, err:
            raise RpcException(errno.EFAULT, str(err))

    @returns(h.object())
    def get_capabilities(self):
        # Static description of supported vdev types and their device-count
        # constraints.  NOTE: this literal is truncated at the end of this
        # chunk and continues past it.
        return {
            'vdev_types': {
                'disk': {
                    'min_devices': 1,
                    'max_devices': 1
                },
                'mirror': {
                    'min_devices': 2
                },
                'raidz1': {
                    'min_devices': 2
                },
                'raidz2': {
                    'min_devices': 3
                # Tail of an extend() helper (its start is outside this
                # chunk): augment the volume entry with live pool state.
                'upgraded': is_upgraded(config),
                'scan': config['scan'],
                'properties': config['properties'],
                'datasets': map(extend_dataset,
                                flatten_datasets(config['root_dataset']))
            })

            return vol

        return self.datastore.query('volumes', *(filter or []),
                                    callback=extend, **(params or {}))

    @description("Finds volumes available for import")
    @accepts()
    @returns(h.array(
        h.object(properties={
            'id': str,
            'name': str,
            'topology': h.ref('zfs-topology'),
            'status': str
        })
    ))
    def find(self):
        # Scan ZFS pools, mapping partition paths back to whole disks where
        # possible.  NOTE: truncated — the method continues past this chunk.
        result = []
        for pool in self.dispatcher.call_sync('zfs.pool.find'):
            topology = pool['groups']
            for vdev, _ in iterate_vdevs(topology):
                try:
                    vdev['path'] = self.dispatcher.call_sync(
                        'disks.partition_to_disk', vdev['path']
                    )
                except RpcException:
                    # Best effort: keep the partition path when the disk
                    # cannot be resolved.
                    pass
            return vol

        # Tail of a query method (its start is outside this chunk).
        return self.datastore.query('volumes', *(filter or []),
                                    callback=extend, **(params or {}))

    @description("Finds volumes available for import")
    @accepts()
    @returns(
        h.array(
            h.object(
                properties={
                    'id': str,
                    'name': str,
                    'topology': h.ref('zfs-topology'),
                    'status': str
                })))
    def find(self):
        # Scan ZFS pools, mapping partition paths back to whole disks where
        # possible.  NOTE: truncated — the method continues past this chunk.
        result = []
        for pool in self.dispatcher.call_sync('zfs.pool.find'):
            topology = pool['groups']
            for vdev, _ in iterate_vdevs(topology):
                try:
                    vdev['path'] = self.dispatcher.call_sync(
                        'disks.partition_to_disk', vdev['path'])
                except RpcException:
                    # Best effort: keep the partition path when the disk
                    # cannot be resolved.
                    pass

            if self.datastore.exists('volumes', ('id', '=', pool['guid'])):
class ServiceInfoProvider(Provider):
    """RPC provider reporting service state and configuration."""

    @description("Lists available services")
    @query("service")
    def query(self, filter=None, params=None):
        def extend(i):
            # Derive the service's run state.  Strategies, in order: read a
            # pidfile and probe the process, or ask the rc.d status scripts.
            if 'pidfile' in i:
                # Check if process is alive by reading pidfile.  The file is
                # opened with a context manager so the descriptor is always
                # closed (the original leaked it).
                try:
                    with open(i['pidfile'], 'r') as fd:
                        pid = int(fd.read().strip())
                except (IOError, ValueError):
                    # Missing/unreadable pidfile, or garbage content.
                    pid = None
                    state = 'stopped'
                else:
                    try:
                        # Signal 0 performs an existence check only.
                        os.kill(pid, 0)
                    except OSError:
                        state = 'unknown'
                    else:
                        state = 'running'
            elif 'rcng' in i and 'rc-scripts' in i['rcng']:
                rc_scripts = i['rcng']['rc-scripts']
                pid = None
                state = 'running'
                try:
                    # rc-scripts may be a single name or a list of names; a
                    # failing onestatus marks the service stopped.
                    if type(rc_scripts) is unicode:
                        system("/usr/sbin/service", rc_scripts, 'onestatus')

                    if type(rc_scripts) is list:
                        for x in rc_scripts:
                            system("/usr/sbin/service", x, 'onestatus')
                except SubprocessException:
                    state = 'stopped'
            else:
                pid = None
                state = 'unknown'

            entry = {
                'name': i['name'],
                'state': state,
            }

            if pid is not None:
                entry['pid'] = pid

            return entry

        return self.datastore.query('service_definitions', *(filter or []),
                                    callback=extend, **(params or {}))

    @accepts(str)
    @returns(h.object())
    def get_service_config(self, service):
        """Return the ConfigNode holding the named service's settings."""
        svc = self.datastore.get_one('service_definitions',
                                     ('name', '=', service))
        if not svc:
            raise RpcException(errno.EINVAL, 'Invalid service name')

        node = ConfigNode('service.{0}'.format(service), self.configstore)
        return node

    @private
    @accepts(str)
    @returns()
    def ensure_started(self, service):
        # XXX launchd!
        svc = self.datastore.get_one('service_definitions',
                                     ('name', '=', service))
        if not svc:
            raise RpcException(errno.ENOENT,
                               'Service {0} not found'.format(service))

        rc_scripts = svc['rcng']['rc-scripts']
        try:
            if type(rc_scripts) is unicode:
                system("/usr/sbin/service", rc_scripts, 'onestart')

            if type(rc_scripts) is list:
                for i in rc_scripts:
                    system("/usr/sbin/service", i, 'onestart')
        except SubprocessException:
            # Best effort, as in the original: a failed start is ignored
            # here; callers observe state through query().
            pass