def update(self, job_id, updated_params):
    job = self.context.scheduler.get_job(job_id)
    self.context.logger.info('Updating job with ID {0}'.format(job_id))

    if 'name' in updated_params or 'args' in updated_params:
        name = updated_params.get('name', job.args[0])
        args = updated_params.get('args', job.args[1:])
        # Coerce to list: the fallback job.args[1:] may be a tuple and cannot
        # be concatenated to [name] directly.
        self.context.scheduler.modify_job(job_id, args=[name] + list(args))

    if 'enabled' in updated_params:
        if updated_params['enabled']:
            self.context.scheduler.resume_job(job_id)
        else:
            self.context.scheduler.pause_job(job_id)

    if 'schedule' in updated_params:
        if 'coalesce' in updated_params['schedule']:
            self.context.scheduler.modify_job(
                job_id, coalesce=updated_params['schedule']['coalesce'])

        # Everything in the schedule except 'coalesce' is passed straight
        # through as cron trigger fields.
        self.context.scheduler.reschedule_job(
            job_id,
            trigger='cron',
            **exclude(updated_params['schedule'], 'coalesce')
        )
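All of these snippets lean on the same small exclude() helper. As a point of reference, here is a minimal sketch of what it presumably does, assuming it returns a shallow copy of a mapping with the named keys removed; the project's actual implementation may differ.

def exclude(obj, *keys):
    """Return a shallow copy of a mapping with the given keys dropped.

    Minimal sketch for illustration only; the real helper used by these
    snippets may handle additional cases.
    """
    return {k: v for k, v in obj.items() if k not in keys}


# Example:
#   exclude({'id': 1, 'name': 'tank', 'used': 100}, 'id', 'used')
#   -> {'name': 'tank'}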
def _get_schema(cls):
    if hasattr(cls, 'params_schema'):
        return cls.params_schema

    return [
        convert_schema(i)
        for i in exclude(cls.run.__annotations__, 'return').values()
    ]
def __process_system(self, args):
    if args["subsystem"] == "HOSTNAME":
        if args["type"] == "CHANGE":
            params = exclude(args, "system", "subsystem", "type")
            params["description"] = "System hostname changed"
            params["jid"] = int(args["jid"])
            self.emit_event("system.hostname.change", **params)

    if args["subsystem"] == "VFS":
        if args["type"] == "MOUNT":
            params = exclude(args, "system", "subsystem", "type")
            params["description"] = "Filesystem {0} mounted".format(args["path"])
            self.emit_event("system.fs.mounted", **params)

        if args["type"] == "UNMOUNT":
            params = exclude(args, "system", "subsystem", "type")
            params["description"] = "Filesystem {0} unmounted".format(args["path"])
            self.emit_event("system.fs.unmounted", **params)
def dump_collection(ds, name):
    metadata = {
        'name': name,
        'pkey-type': ds.collection_get_pkey_type(name),
        'attributes': ds.collection_get_attrs(name),
        'migration': ds.collection_get_migration_policy(name),
        'migrations': ds.collection_get_migrations(name)
    }

    return {
        'metadata': metadata,
        'data': {x['id']: exclude(x, 'id') for x in ds.query(name)}
    }
def run(self, id):
    # Look up the local peer entry and this host's UUID.
    peer = self.datastore.get_by_id('peers', id)
    hostid = self.dispatcher.call_sync('system.info.host_uuid')
    remote_client = None

    if not peer:
        raise TaskException(
            errno.ENOENT,
            'FreeNAS peer entry {0} does not exist'.format(id)
        )

    try:
        remote_client = get_freenas_peer_client(self, peer['credentials']['address'])

        # Find the matching peer entry on the remote side.
        remote_peer = remote_client.call_sync(
            'peer.query', [('id', '=', hostid)], {'single': True})
        if not remote_peer:
            raise TaskException(
                errno.ENOENT,
                'Remote side of peer {0} does not exist'.format(peer['name'])
            )

        ip_at_remote_side = remote_client.local_address[0]
        hostname = self.dispatcher.call_sync('system.general.get_config')['hostname']
        port = self.dispatcher.call_sync('service.sshd.get_config')['port']

        remote_peer['name'] = hostname
        remote_peer['credentials']['port'] = port
        remote_peer['credentials']['address'] = hostname

        # Recreate the remote entry: delete the stale one, then create a fresh
        # copy without the local timestamp fields.
        call_task_and_check_state(remote_client, 'peer.freenas.delete_local', hostid)

        remote_peer = exclude(remote_peer, 'created_at', 'updated_at')

        call_task_and_check_state(
            remote_client,
            'peer.freenas.create_local',
            remote_peer,
            ip_at_remote_side
        )
    finally:
        if remote_client:
            remote_client.disconnect()
def run(self, pool_name, path, updated_params):
    ds = wrap(self.dispatcher.call_sync(
        'zfs.dataset.query', [('name', '=', path)], {'single': True}))

    if 'name' in updated_params:
        self.join_subtasks(self.run_subtask(
            'zfs.rename', ds['name'], updated_params['name']))
        ds['name'] = updated_params['name']

    if 'properties' in updated_params:
        # Drop properties that this task does not allow to be changed.
        props = exclude(
            updated_params['properties'],
            'used', 'available', 'dedup', 'casesensitivity')
        self.join_subtasks(self.run_subtask(
            'zfs.configure', pool_name, ds['name'], props))

    if 'permissions_type' in updated_params:
        oldtyp = ds['properties.org\\.freenas:permissions_type.value']
        typ = updated_params['permissions_type']

        if oldtyp != 'ACL' and typ == 'ACL':
            self.switch_to_acl(pool_name, ds['name'])

        if oldtyp != 'PERMS' and typ == 'PERMS':
            self.switch_to_chmod(pool_name, ds['name'])
def enumerate_methods(self):
    methods = []
    for name, method in inspect.getmembers(self, predicate=inspect.ismethod):
        # Skip private helpers and the service plumbing methods themselves.
        if name.startswith('_'):
            continue

        if name in ('initialize', 'get_metadata', 'enumerate_methods'):
            continue

        result = {'name': name, 'private': False}
        return_annotation = method.__annotations__.get('return')
        args_annotations = exclude(method.__annotations__, 'return')

        if method.__doc__:
            result['docstring'] = inspect.getdoc(method)

        if return_annotation:
            result['result-schema'] = convert_schema(return_annotation)

        if args_annotations:
            result['params-schema'] = self._build_annotations_schema(args_annotations)

        # Explicit attributes on the method override the annotation-derived schemas.
        if hasattr(method, 'description'):
            result['description'] = method.description

        if hasattr(method, 'params_schema'):
            result['params-schema'] = self._build_params_schema(method)

        if hasattr(method, 'result_schema'):
            result['result-schema'] = self._build_result_schema(method)

        if hasattr(method, 'private'):
            result['private'] = method.private

        methods.append(result)

    return methods
def __process_iscsi(self, args):
    if args['subsystem'] == 'SESSION' and args['type'] == 'UPDATE':
        self.emit_event(
            'iscsi.session.update',
            **exclude(args, "system", "subsystem", "type"))
def __process_system(self, args):
    if args["subsystem"] == "HOSTNAME":
        if args["type"] == "CHANGE":
            params = exclude(args, "system", "subsystem", "type")
            params["description"] = "System hostname changed"
            self.emit_event("system.hostname.change", **params)
def collect():
    return {x['id']: exclude(x, 'id') for x in ds.query(name)}
def get_config(self):
    return exclude(
        ConfigNode('service.sshd', self.configstore).__getstate__(), 'keys')