class NetworkProvider(Provider):
    @returns(h.ref('network-config'))
    def get_config(self):
        node = ConfigNode('network', self.configstore).__getstate__()
        node.update({
            'gateway': self.dispatcher.call_sync('networkd.configuration.get_default_routes'),
            'dns': self.dispatcher.call_sync('networkd.configuration.get_dns_config')
        })
        return node

    @returns(h.array(str))
    def get_my_ips(self):
        ips = []
        ifaces = self.dispatcher.call_sync('networkd.configuration.query_interfaces')
        ifaces.pop('mgmt0', None)
        for i, v in ifaces.items():
            if 'LOOPBACK' in v['flags'] or v['link_state'] != 'LINK_STATE_UP' or 'UP' not in v['flags']:
                continue

            for alias in v['aliases']:
                if alias['address'] and alias['type'] != 'LINK':
                    ips.append(alias['address'])

        return list(set(ips))
class OpenVpnProvider(Provider):
    """
    Some of the information returned here may need to be stripped with
    exclude(); this still needs review.
    """
    @returns(h.ref('service-openvpn'))
    def get_config(self):
        return ConfigNode('service.openvpn', self.configstore).__getstate__()

    @returns(h.ref('service-openvpn'))
    def get_readable_config(self):
        vpn_config = ConfigNode('service.openvpn', self.configstore).__getstate__()
        for field in ('ca', 'cert', 'key'):
            if vpn_config[field]:
                vpn_config[field] = self.datastore.query(
                    'crypto.certificates',
                    ('id', '=', vpn_config[field]),
                    select='name'
                )[0]

        return vpn_config
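# The docstring above asks whether exclude() should strip sensitive fields
# from the returned state. A minimal sketch of what that could look like,
# mirroring how SSHProvider uses exclude(state, 'keys') elsewhere in these
# excerpts; the field names 'key' and 'tls_auth' are illustrative
# assumptions, not confirmed schema fields:
from freenas.utils import exclude

def get_sanitized_openvpn_config(configstore):
    state = ConfigNode('service.openvpn', configstore).__getstate__()
    # Drop private key material before exposing the state to callers
    return exclude(state, 'key', 'tls_auth')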
class SwapProvider(Provider):
    @accepts()
    @returns(h.array(h.ref('SwapMirror')))
    @description("Returns information about swap mirrors present in the system")
    def info(self):
        return list(get_swap_info(self.dispatcher).values())
class StatProvider(Provider):
    @query('Statistic')
    @generator
    def query(self, filter=None, params=None):
        stats = self.dispatcher.call_sync('statd.output.get_current_state')
        return q.query(stats, *(filter or []), stream=True, **(params or {}))

    @returns(h.array(str))
    @generator
    def get_data_sources(self):
        return self.dispatcher.call_sync('statd.output.get_data_sources')

    def get_data_sources_tree(self):
        return self.dispatcher.call_sync('statd.output.get_data_sources_tree')

    @accepts(h.one_of(str, h.array(str)), h.ref('GetStatsParams'))
    @returns(h.ref('GetStatsResult'))
    def get_stats(self, data_source, params):
        return {
            'data': list(self.dispatcher.call_sync('statd.output.get_stats', data_source, params))
        }

    def normalize(self, name, value):
        return normalize(name, value)
class UPSProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('ServiceUps'))
    def get_config(self):
        return ConfigNode('service.ups', self.configstore).__getstate__()

    @accepts()
    @returns(h.array(h.array(str)))
    def drivers(self):
        driver_list = '/etc/local/nut/driver.list'
        if not os.path.exists(driver_list):
            return []

        drivers = []
        with open(driver_list, 'rb') as f:
            d = f.read()

        r = io.StringIO()
        for line in re.sub(r'[ \t]+', ' ', d.decode('utf-8'), flags=re.M).split('\n'):
            r.write(line.strip() + '\n')

        r.seek(0)
        reader = csv.reader(r, delimiter=' ', quotechar='"')
        for row in reader:
            if len(row) == 0 or row[0].startswith('#'):
                continue

            if row[-2] == '#':
                last = -3
            else:
                last = -1

            if row[last].find(' (experimental)') != -1:
                row[last] = row[last].replace(' (experimental)', '').strip()

            drivers.append({
                'driver_name': row[last],
                'description': '{0} ({1})'.format(' '.join(row[0:last]), row[last])
            })

        return drivers

    @accepts()
    @returns(h.array(h.array(str)))
    def get_usb_devices(self):
        usb_devices_list = []
        try:
            usbconfig_output = system('usbconfig')[0]
            if not usbconfig_output.startswith('No device match'):
                for device in usbconfig_output.rstrip().split('\n'):
                    device_path = os.path.join('/dev', device.split()[0][:-1])
                    device_description = re.findall(r'<.*?>', device)[0].strip('><')
                    usb_devices_list.append({
                        'device': device_path,
                        'description': device_description
                    })
        except SubprocessException as e:
            # Providers report errors as RpcException (TaskException is for tasks)
            raise RpcException(errno.EBUSY, e.err)

        return usb_devices_list
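# Worked example of the driver.list row handling above (the sample row is an
# assumption about typical NUT driver.list contents; runnable standalone):
row = ['APC', 'ups', '2', 'Back-UPS', 'USB', 'usbhid-ups']
last = -3 if row[-2] == '#' else -1
entry = {
    'driver_name': row[last],
    'description': '{0} ({1})'.format(' '.join(row[0:last]), row[last])
}
# entry == {'driver_name': 'usbhid-ups',
#           'description': 'APC ups 2 Back-UPS USB (usbhid-ups)'}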
class SupportProvider(Provider):
    @accepts(str, Password)
    @returns(h.array(str))
    def categories(self, user, password):
        version = self.dispatcher.call_sync('system.info.version')
        sw_name = version.split('-')[0].lower()
        try:
            r = requests.post(
                'https://%s/%s/api/v1.0/categories' % (PROXY_ADDRESS, sw_name),
                data=json.dumps({
                    'user': user,
                    'password': unpassword(password),
                    'project': REDMINE_PROJECT_NAME,
                }),
                headers={'Content-Type': 'application/json'},
                timeout=10,
            )
            data = r.json()
        except simplejson.JSONDecodeError as e:
            logger.debug('Failed to decode categories response: %s', r.text)
            raise RpcException(errno.EINVAL, 'Failed to decode categories response')
        except requests.ConnectionError as e:
            raise RpcException(errno.ENOTCONN, 'Connection failed: {0}'.format(str(e)))
        except requests.Timeout as e:
            raise RpcException(errno.ETIMEDOUT, 'Connection timed out: {0}'.format(str(e)))

        if 'error' in data:
            raise RpcException(errno.EINVAL, data['message'])

        return data

    @returns(h.array(str))
    def categories_no_auth(self):
        version = self.dispatcher.call_sync('system.info.version')
        sw_name = version.split('-')[0].lower()
        try:
            r = requests.post(
                'https://%s/%s/api/v1.0/categoriesnoauth' % (PROXY_ADDRESS, sw_name),
                data=json.dumps({'project': REDMINE_PROJECT_NAME}),
                headers={'Content-Type': 'application/json'},
                timeout=10,
            )
            data = r.json()
        except simplejson.JSONDecodeError as e:
            logger.debug('Failed to decode categories response: %s', r.text)
            raise RpcException(errno.EINVAL, 'Failed to decode categories response')
        except requests.ConnectionError as e:
            raise RpcException(errno.ENOTCONN, 'Connection failed: {0}'.format(str(e)))
        except requests.Timeout as e:
            raise RpcException(errno.ETIMEDOUT, 'Connection timed out: {0}'.format(str(e)))

        if 'error' in data:
            raise RpcException(errno.EINVAL, data['message'])

        return data
class DynDNSProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('ServiceDyndns'))
    def get_config(self):
        return ConfigNode('service.dyndns', self.configstore).__getstate__()

    @accepts()
    @returns(h.object())
    def providers(self):
        return PROVIDERS
class SystemInfoProvider(Provider):
    def __init__(self):
        self.__version = None

    @accepts()
    @returns(h.array(str))
    def uname_full(self):
        return os.uname()

    @accepts()
    @returns(str)
    @description("Return the full version string, e.g. FreeNAS-8.1-r7794-amd64.")
    def version(self):
        if self.__version is None:
            # See #9113
            conf = Configuration.Configuration()
            manifest = conf.SystemManifest()
            if manifest:
                self.__version = manifest.Version()
            else:
                with open(VERSION_FILE) as fd:
                    self.__version = fd.read().strip()

        return self.__version

    @accepts()
    @returns({'type': 'array', 'items': {'type': 'number'}, 'maxItems': 3, 'minItems': 3})
    def load_avg(self):
        return list(os.getloadavg())

    @accepts()
    @returns(h.object(properties={
        'cpu_model': str,
        'cpu_cores': int,
        'cpu_clockrate': int,
        'memory_size': int,
        'vm_guest': h.one_of(str, None)
    }))
    def hardware(self):
        vm_guest = get_sysctl("kern.vm_guest")
        return {
            'cpu_model': get_sysctl("hw.model"),
            'cpu_cores': get_sysctl("hw.ncpu"),
            'cpu_clockrate': get_sysctl("hw.clockrate"),
            'memory_size': get_sysctl("hw.physmem"),
            'vm_guest': None if vm_guest == 'none' else vm_guest
        }

    @accepts()
    @returns(str)
    def host_uuid(self):
        return get_sysctl("kern.hostuuid")[:-1]
class SystemGeneralProvider(Provider):
    @accepts()
    @returns(h.ref('SystemGeneral'))
    def get_config(self):
        return {
            'hostname': self.configstore.get('system.hostname'),
            'description': self.configstore.get('system.description'),
            'tags': self.configstore.get('system.tags'),
            'language': self.configstore.get('system.language'),
            'timezone': self.configstore.get('system.timezone'),
            'syslog_server': self.configstore.get('system.syslog_server'),
            'console_keymap': self.configstore.get('system.console.keymap')
        }

    @accepts()
    @returns(h.array(h.array(str)))
    def keymaps(self):
        if not os.path.exists(KEYMAPS_INDEX):
            return []

        rv = []
        with open(KEYMAPS_INDEX, 'r', encoding='utf-8', errors='ignore') as f:
            d = f.read()

        fnd = re.findall(r'^(?P<name>[^#\s]+?)\.kbd:en:(?P<desc>.+)$', d, re.M)
        for name, desc in fnd:
            rv.append((name, desc))

        return rv

    @accepts()
    @returns(h.array(str))
    def timezones(self):
        result = []
        for root, _, files in os.walk(ZONEINFO_DIR):
            for f in files:
                if f in ('zone.tab', 'posixrules'):
                    continue

                result.append(os.path.join(root, f).replace(ZONEINFO_DIR + '/', ''))

        return sorted(result)

    @private
    @accepts(str, str)
    @returns(str)
    def cowsay(self, line, cow_file='default'):
        if cow_file != 'default' and os.path.exists(cow_file):
            return system('/usr/local/bin/cowsay', '-f', cow_file, '-s', line)
        else:
            return system('/usr/local/bin/cowsay', '-s', line)
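# Worked example of the keymaps() regex above. The sample line follows the
# FreeBSD INDEX.keymaps "file:lang:description" convention (exact
# descriptions vary by release, so treat the line as illustrative):
import re

sample = 'be.iso.kbd:en:Belgian ISO-8859-1'
m = re.match(r'^(?P<name>[^#\s]+?)\.kbd:en:(?P<desc>.+)$', sample)
assert m.group('name') == 'be.iso'
assert m.group('desc') == 'Belgian ISO-8859-1'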
class ISCSIProvider(Provider):
    @accepts()
    @returns(h.ref('service-iscsi'))
    def get_config(self):
        node = ConfigNode('service.iscsi', self.configstore).__getstate__()
        node['portals'] = self.datastore.query('iscsi.portals')
        return node
class CertificateProvider(Provider):
    @query('CryptoCertificate')
    @generator
    def query(self, filter=None, params=None):
        def extend(certificate):
            if certificate['type'].startswith('CA_'):
                cert_path = '/etc/certificates/CA'
            else:
                cert_path = '/etc/certificates'

            if certificate.get('certificate'):
                certificate['certificate_path'] = os.path.join(
                    cert_path, '{0}.crt'.format(certificate['name']))

            if certificate.get('privatekey'):
                certificate['privatekey'] = Password(certificate['privatekey'])
                certificate['privatekey_path'] = os.path.join(
                    cert_path, '{0}.key'.format(certificate['name']))

            if certificate.get('csr'):
                certificate['csr_path'] = os.path.join(
                    cert_path, '{0}.csr'.format(certificate['name']))

            return certificate

        return q.query(
            self.datastore.query_stream('crypto.certificates', callback=extend),
            *(filter or []),
            stream=True,
            **(params or {})
        )

    @accepts()
    @returns(h.object())
    def get_country_codes(self):
        return COUNTRY_CODES
class PeerProvider(Provider):
    @query('Peer')
    @generator
    def query(self, filter=None, params=None):
        def extend_query():
            # peers_status (an id -> status mapping) is populated in the
            # enclosing scope; its construction is not shown in this excerpt.
            for t in self.peer_types():
                for peer in self.dispatcher.call_sync(f'peer.{t}.query', [], {'exclude': 'status'}):
                    peer['status'] = peers_status.get(peer['id'], {
                        'state': 'UNKNOWN',
                        'rtt': None
                    })
                    yield peer

        return q.query(extend_query(), *(filter or []), stream=True, **(params or {}))

    @returns(h.array(str))
    def peer_types(self):
        result = []
        for p in self.dispatcher.plugins.values():
            if p.metadata and p.metadata.get('type') == 'peering':
                result.append(p.metadata.get('subtype'))

        return result
class SystemDatasetProvider(Provider):
    @private
    @description("Initializes the .system dataset")
    @accepts()
    def init(self):
        pool = self.configstore.get('system.dataset.pool')
        dsid = self.configstore.get('system.dataset.id')
        create_system_dataset(self.dispatcher, dsid, pool)
        mount_system_dataset(self.dispatcher, dsid, pool, SYSTEM_DIR)
        link_directories(self.dispatcher)

    @private
    @description("Creates directory in .system dataset and returns reference to it")
    @accepts(str)
    @returns(str)
    def request_directory(self, name):
        path = os.path.join(SYSTEM_DIR, name)
        if os.path.exists(path):
            if os.path.isdir(path):
                return path

            raise RpcException(errno.EPERM, 'Cannot grant directory {0}'.format(name))

        os.mkdir(path)
        return path

    @description("Returns current .system dataset parameters")
    @returns(h.object())
    def status(self):
        return {
            'id': self.configstore.get('system.dataset.id'),
            'pool': self.configstore.get('system.dataset.pool')
        }
class PeerSSHProvider(Provider):
    @query('peer')
    @generator
    def query(self, filter=None, params=None):
        return q.query(
            self.dispatcher.call_sync('peer.query', [('type', '=', 'ssh')]),
            *(filter or []),
            stream=True,
            **(params or {})
        )

    @private
    @accepts(str)
    @returns(h.ref('peer-status'))
    def get_status(self, id):
        peer = self.dispatcher.call_sync(
            'peer.query',
            [('id', '=', id), ('type', '=', 'ssh')],
            {'single': True}
        )
        if not peer:
            # Return a plain status object, matching the declared return
            # schema and the other branches below
            return {'state': 'UNKNOWN', 'rtt': None}

        credentials = peer['credentials']
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            start_time = datetime.now()
            s.connect((credentials['address'], credentials.get('port', 22)))
            delta = datetime.now() - start_time
            return {'state': 'ONLINE', 'rtt': delta.total_seconds()}
        except socket.error:
            return {'state': 'OFFLINE', 'rtt': None}
        finally:
            s.close()
class SNMPProvider(Provider):
    @accepts()
    @returns(h.ref('ServiceSnmp'))
    def get_config(self):
        state = ConfigNode('service.snmp', self.configstore).__getstate__()
        state['v3_password'] = Password(state['v3_password'])
        return state
class SystemUIProvider(Provider):
    @accepts()
    @returns(h.ref('SystemUi'))
    def get_config(self):
        protocol = []
        if self.configstore.get('service.nginx.http.enable'):
            protocol.append('HTTP')

        if self.configstore.get('service.nginx.https.enable'):
            protocol.append('HTTPS')

        return {
            'webui_protocol': protocol,
            'webui_listen': self.configstore.get('service.nginx.listen'),
            'webui_http_port': self.configstore.get('service.nginx.http.port'),
            'webui_http_redirect_https': self.configstore.get('service.nginx.http.redirect_https'),
            'webui_https_certificate': self.configstore.get('service.nginx.https.certificate'),
            'webui_https_port': self.configstore.get('service.nginx.https.port')
        }
class IPMIProvider(Provider):
    @accepts()
    @returns(bool)
    def is_ipmi_loaded(self):
        return os.path.exists('/dev/ipmi0')

    @accepts()
    @returns(h.array(int))
    def channels(self):
        return channels

    @query('ipmi')
    @generator
    def query(self, filter=None, params=None):
        if not self.is_ipmi_loaded():
            raise RpcException(errno.ENXIO, 'The IPMI device could not be found')

        result = []
        for channel in self.channels():
            try:
                out, err = system('/usr/local/bin/ipmitool', 'lan', 'print', str(channel))
            except SubprocessException as e:
                raise RpcException(errno.EFAULT, 'Cannot receive IPMI configuration: {0}'.format(e.err.strip()))

            raw = {k.strip(): v.strip() for k, v in RE_ATTRS.findall(out)}
            ret = {IPMI_ATTR_MAP[k]: v for k, v in list(raw.items()) if k in IPMI_ATTR_MAP}
            ret['id'] = channel
            ret['vlan_id'] = None if ret.get('vlan_id') == 'Disabled' else ret.get('vlan_id')
            ret['dhcp'] = ret.get('dhcp') == 'DHCP Address'
            result.append(ret)

        return q.query(result, *(filter or []), stream=True, **(params or {}))
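# RE_ATTRS and IPMI_ATTR_MAP are referenced above but defined elsewhere in
# the plugin. A minimal sketch of a pattern consistent with the "Key : Value"
# lines that 'ipmitool lan print' emits; this is an assumption, the real
# definition may differ:
import re

RE_ATTRS = re.compile(r'^(?P<key>[^:\n]+?)\s*:\s*(?P<value>.*)$', re.M)
# RE_ATTRS.findall(out) then yields (key, value) tuples such as
# [('IP Address', '10.0.0.5'), ('MAC Address', '00:25:90:ab:cd:ef'), ...]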
class TFTPProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('service-tftpd'))
    def get_config(self):
        config = ConfigNode('service.tftpd', self.configstore).__getstate__()
        config['umask'] = get_unix_permissions(config['umask'])
        return config
class WebDAVProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('ServiceWebdav'))
    def get_config(self):
        state = ConfigNode('service.webdav', self.configstore).__getstate__()
        state['password'] = Password(state['password'])
        return state
class SSHProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('service-sshd'))
    def get_config(self):
        return exclude(
            ConfigNode('service.sshd', self.configstore).__getstate__(),
            'keys'
        )
class FTPProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('ServiceFtp'))
    def get_config(self):
        config = ConfigNode('service.ftp', self.configstore).__getstate__()
        config['filemask'] = get_unix_permissions(config['filemask'])
        config['dirmask'] = get_unix_permissions(config['dirmask'])
        return config
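# get_unix_permissions() is imported from elsewhere and shared by the FTP,
# TFTP, and SMB providers. A minimal sketch of the assumed conversion from an
# octal mode integer to a per-class permission object (the real helper and
# its exact output shape may differ):
def get_unix_permissions_sketch(mode):
    return {
        'user': {'read': bool(mode & 0o400), 'write': bool(mode & 0o200), 'execute': bool(mode & 0o100)},
        'group': {'read': bool(mode & 0o040), 'write': bool(mode & 0o020), 'execute': bool(mode & 0o010)},
        'others': {'read': bool(mode & 0o004), 'write': bool(mode & 0o002), 'execute': bool(mode & 0o001)},
    }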
class SystemTimeProvider(Provider):
    @accepts()
    @returns(h.ref('SystemTime'))
    def get_config(self):
        boot_time = datetime.utcfromtimestamp(psutil.boot_time())
        return {
            'system_time': datetime.now(tz=tz.tzlocal()),
            'boot_time': boot_time,
            'uptime': (datetime.utcnow() - boot_time).total_seconds(),
            'timezone': time.tzname[time.daylight],
        }
class IPFSServiceProvider(Provider):
    def __init__(self):
        self.ipfs_api = None

    def initialize(self, context):
        super(IPFSServiceProvider, self).initialize(context)
        self.ipfs_api = ipfsapi.Client('127.0.0.1', 5001)

    @private
    @accepts()
    @returns(h.ref('ServiceIpfs'))
    def get_config(self):
        return ConfigNode('service.ipfs', self.configstore).__getstate__()
class PeerAmazonS3Provider(Provider):
    @query('peer')
    @generator
    def query(self, filter=None, params=None):
        return q.query(
            self.dispatcher.call_sync('peer.query', [('type', '=', 'amazon-s3')]),
            *(filter or []),
            stream=True,
            **(params or {})
        )

    @private
    @accepts(str)
    @returns(h.ref('peer-status'))
    def get_status(self, id):
        return {'state': 'NOT_SUPPORTED', 'rtt': None}
class PeerVMwareProvider(Provider):
    @query('Peer')
    @generator
    def query(self, filter=None, params=None):
        def extend_query():
            for i in self.datastore.query_stream('peers', ('type', '=', 'vmware')):
                password = q.get(i, 'credentials.password')
                if password:
                    q.set(i, 'credentials.password', Password(password))

                i['status'] = lazy(self.get_status, i['id'])
                yield i

        return q.query(extend_query(), *(filter or []), stream=True, **(params or {}))

    @private
    @accepts(str)
    @returns(h.ref('PeerStatus'))
    def get_status(self, id):
        si = None
        peer = self.datastore.get_by_id('peers', id)
        if peer['type'] != 'vmware':
            raise RpcException(errno.EINVAL, 'Invalid peer type: {0}'.format(peer['type']))

        try:
            start_time = datetime.now()
            ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ssl_context.verify_mode = ssl.CERT_NONE
            si = connect.SmartConnect(
                host=q.get(peer, 'credentials.address'),
                user=q.get(peer, 'credentials.username'),
                pwd=unpassword(q.get(peer, 'credentials.password')),
                sslContext=ssl_context
            )
            delta = datetime.now() - start_time
        except:
            return {'state': 'OFFLINE', 'rtt': None}
        finally:
            if si:
                connect.Disconnect(si)

        return {'state': 'ONLINE', 'rtt': delta.total_seconds()}
class SMBProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('service-smb'))
    def get_config(self):
        config = ConfigNode('service.smb', self.configstore).__getstate__()
        if config.get('filemask') is not None:
            config['filemask'] = get_unix_permissions(config['filemask'])

        if config.get('dirmask') is not None:
            config['dirmask'] = get_unix_permissions(config['dirmask'])

        return config

    @returns(bool)
    def ad_enabled(self):
        return self.datastore.exists('directories', ('plugin', '=', 'winbind'), ('enabled', '=', True))
class BootPoolProvider(Provider):
    @returns(h.ref('ZfsPool'))
    def get_config(self):
        pool = self.dispatcher.call_sync('zfs.pool.get_boot_pool')

        @lazy
        def collect_disks():
            disks = []
            for vdev, _ in iterate_vdevs(pool['groups']):
                disk_id = None
                disk = None
                try:
                    disk_id = self.dispatcher.call_sync('disk.partition_to_disk', vdev['path'])
                    disk = self.dispatcher.call_sync(
                        'disk.query',
                        [('id', '=', disk_id), ('online', '=', True)],
                        {'single': True}
                    )
                except RpcException:
                    pass

                disks.append({
                    'disk_id': disk_id,
                    'path': q.get(disk, 'path', vdev['path']),
                    'guid': vdev['guid'],
                    'status': vdev['status']
                })

            return disks

        return {
            'name': pool['id'],
            'guid': pool['guid'],
            'status': pool['status'],
            'scan': pool['scan'],
            'properties': include(
                pool['properties'],
                'size', 'capacity', 'health', 'version', 'delegation', 'failmode',
                'autoreplace', 'dedupratio', 'free', 'allocated', 'readonly',
                'comment', 'expandsize', 'fragmentation', 'leaked'
            ),
            'disks': collect_disks
        }
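# 'lazy' (from freenas.utils) defers evaluation of an expensive field until a
# consumer actually reads it, so callers that never touch 'disks' skip the
# per-vdev RPC calls above. A minimal sketch of the idea, not the actual
# freenas.utils implementation:
def lazy_sketch(fn, *args):
    cache = {}

    def wrapper():
        # Compute once on first access, then reuse the cached value
        if 'value' not in cache:
            cache['value'] = fn(*args)
        return cache['value']

    return wrapper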
class VMwareProvider(Provider):
    @generator
    @accepts(str, str, str, bool)
    @returns(h.ref('VmwareDatastore'))
    def get_datastores(self, address, username, password, full=False):
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        ssl_context.verify_mode = ssl.CERT_NONE

        try:
            si = connect.SmartConnect(host=address, user=username, pwd=password, sslContext=ssl_context)
            content = si.RetrieveContent()
            vm_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.VirtualMachine], True)
        except vmodl.MethodFault as err:
            raise RpcException(errno.EFAULT, err.msg)

        try:
            for datastore in content.viewManager.CreateContainerView(content.rootFolder, [vim.Datastore], True).view:
                vms = []
                if full:
                    for vm in vm_view.view:
                        if datastore not in vm.datastore:
                            continue

                        vms.append({
                            'id': vm.config.uuid,
                            'name': vm.summary.config.name,
                            'on': vm.summary.runtime.powerState == 'poweredOn',
                            'snapshottable': can_be_snapshotted(vm)
                        })

                yield {
                    'id': datastore.info.url,
                    'name': datastore.info.name,
                    'free_space': datastore.info.freeSpace,
                    'virtual_machines': vms
                }
        finally:
            connect.Disconnect(si)
class PeerAmazonS3Provider(Provider):
    @query('Peer')
    @generator
    def query(self, filter=None, params=None):
        def extend_query():
            for i in self.datastore.query_stream('peers', ('type', '=', 'amazon-s3')):
                i['status'] = lazy(self.get_status, i['id'])
                yield i

        return q.query(extend_query(), *(filter or []), stream=True, **(params or {}))

    @private
    @accepts(str)
    @returns(h.ref('peer-status'))
    def get_status(self, id):
        return {'state': 'NOT_SUPPORTED', 'rtt': None}
class BackupProvider(Provider):
    @generator
    def query(self, filter=None, params=None):
        def extend(backup):
            return backup

        return q.query(
            self.datastore.query_stream('backup', callback=extend),
            *(filter or []),
            stream=True,
            **(params or {})
        )

    @description("Returns list of supported backup providers")
    @accepts()
    @returns(h.ref('backup-providers'))
    def supported_providers(self):
        result = {}
        for p in list(self.dispatcher.plugins.values()):
            if p.metadata and p.metadata.get('type') == 'backup':
                result[p.metadata['method']] = {}

        return result
@description("Provides info about DynamicDNS service configuration") class DynDNSProvider(Provider): @accepts() @returns(h.ref("service-dyndns")) def get_config(self): return ConfigNode("service.dyndns", self.configstore) @accepts() @returns(h.object()) def providers(self): return PROVIDERS @description("Configure DynamicDNS service") @accepts(h.ref("service-dyndns")) class DynDNSConfigureTask(Task): def describe(self, share): return "Configuring DynamicDNS service" def verify(self, dyndns): errors = [] node = ConfigNode("service.dyndns", self.configstore) if errors: raise ValidationException(errors) return ["system"] def run(self, dyndns):
    Provider, Task
)

logger = logging.getLogger('LDAPPlugin')


@description("Provides access to LDAP configuration")
class LDAPProvider(Provider):
    @returns(h.ref('ldap-config'))
    def get_config(self):
        pass


@description("Updates LDAP settings")
@accepts(h.ref('ldap-config'))
class LDAPConfigureTask(Task):
    def verify(self, config):
        return ['system']

    def run(self, config):
        pass


def _init(dispatcher, plugin):
    plugin.register_schema_definition('ldap-config', {
        'type': 'object',
        'properties': {
            'hostname': {'type': 'string'},
            'binddn': {'type': 'string'},
def validate_netbios_name(netbiosname):
    regex = re.compile(r"^[a-zA-Z0-9\.\-_!@#\$%^&\(\)'\{\}~]{1,15}$")
    return regex.match(netbiosname)


@description('Provides info about CIFS service configuration')
class CIFSProvider(Provider):
    @accepts()
    @returns(h.ref('service-cifs'))
    def get_config(self):
        return ConfigNode('service.cifs', self.configstore)


@description('Configure CIFS service')
@accepts(h.ref('service-cifs'))
class CIFSConfigureTask(Task):
    def describe(self, cifs):
        return 'Configuring CIFS service'

    def verify(self, cifs):
        errors = []
        node = ConfigNode('service.cifs', self.configstore).__getstate__()

        netbiosname = cifs.get('netbiosname')
        if netbiosname is not None:
            for n in netbiosname:
                if not validate_netbios_name(n):
                    errors.append(('netbiosname', errno.EINVAL, 'Invalid name {0}'.format(n)))
        else:
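# Usage sketch for validate_netbios_name() defined at the top of the excerpt
# above; match() returns a match object (truthy) or None (falsy):
assert validate_netbios_name('FREENAS')        # allowed characters, <= 15 long
assert not validate_netbios_name('')           # empty names are rejected
assert not validate_netbios_name('X' * 16)     # longer than 15 characters
assert not validate_netbios_name('BAD NAME')   # spaces are not in the class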
        return self.datastore.query('alert.classes')


@description('Provides access to the alerts filters')
class AlertsFiltersProvider(Provider):
    @query('alert-filter')
    def query(self, filter=None, params=None):
        return self.datastore.query(
            'alert.filters',
            *(filter or []),
            **(params or {})
        )


@description("Creates an Alert Filter")
@accepts(h.all_of(
    h.ref('alert-filter'),
    h.required('id')
))
class AlertFilterCreateTask(Task):
    @classmethod
    def early_describe(cls):
        return 'Creating alert filter'

    def describe(self, alertfilter):
        return TaskDescription(
            'Creating alert filter {name}',
            name=alertfilter.get('name', '') if alertfilter else ''
        )

    def verify(self, alertfilter):
        return []

    def run(self, alertfilter):
        id = self.datastore.insert('alert.filters', alertfilter)
        for row in table.xpath('./tr[position()>1]'):
            cols = row.getchildren()
            request = cols[12].text
            if request == 'GET /server-status HTTP/1.1':
                continue

            result.append({
                'pid': cols[1].text,
                'client': cols[10].text,
                'request': cols[12].text,
            })

        return result


@private
@description("Adds new WebDAV share")
@accepts(h.ref('Share'))
class CreateWebDAVShareTask(Task):
    @classmethod
    def early_describe(cls):
        return "Creating WebDAV share"

    def describe(self, share):
        return TaskDescription("Creating WebDAV share {name}", name=share.get('name', '') if share else '')

    def verify(self, share):
        return ['service:webdav']

    def run(self, share):
        normalize(share['properties'], {
            'read_only': False,
            'permission': False,
    @private
    @accepts(h.object())
    def update_cache_putter(self, value_dict):
        for key, value in value_dict.items():
            update_cache.put(key, value)

    @private
    @accepts(str)
    @returns(h.any_of(None, str, bool, h.array(str)))
    def update_cache_getter(self, key):
        return update_cache.get(key, timeout=1)


@description("Sets the System Updater Configuration Settings")
@accepts(h.ref('update'))
class UpdateConfigureTask(Task):
    def describe(self):
        return "System Updater Configure Settings"

    def verify(self, props):
        # TODO: Fix this verify's resource allocation as unique task
        block = self.dispatcher.resource_graph.get_resource(update_resource_string)
        if block is not None and block.busy:
            raise VerifyException(
                errno.EBUSY,
                'An Update Operation (Configuration/ Download/ Applying '
                'the Updates) is already in the queue, please retry later'
            )

        return [update_resource_string]
from freenas.dispatcher.rpc import RpcException, SchemaHelper as h, description, accepts, returns
from task import Task, Provider, TaskException, ValidationException

logger = logging.getLogger("SSHPlugin")


@description("Provides info about SSH service configuration")
class SSHProvider(Provider):
    @accepts()
    @returns(h.ref("service-ssh"))
    def get_config(self):
        return ConfigNode("service.sshd", self.configstore)


@description("Configure SSH service")
@accepts(h.ref("service-ssh"))
class SSHConfigureTask(Task):
    def describe(self, share):
        return "Configuring SSH service"

    def verify(self, ssh):
        return ["system"]

    def run(self, ssh):
        try:
            node = ConfigNode("service.sshd", self.configstore)
            node.update(ssh)
            self.dispatcher.call_sync("etcd.generation.generate_group", "sshd")
            self.dispatcher.dispatch_event("service.ssh.changed", {"operation": "updated", "ids": None})
        except RpcException as e:
            raise TaskException(errno.ENXIO, "Cannot reconfigure SSH: {0}".format(str(e)))
            server.starttls()
            if mail["auth"]:
                server.login(mail["user"], mail["pass"])

            server.sendmail(mail["from"], to, msg)
            server.quit()
        except smtplib.SMTPAuthenticationError as e:
            raise RpcException(errno.EACCES, "Authentication error: {0} {1}".format(e.smtp_code, e.smtp_error))
        except Exception as e:
            logger.error("Failed to send email: {0}".format(str(e)), exc_info=True)
            raise RpcException(errno.EFAULT, "Email send error: {0}".format(str(e)))
        except:
            raise RpcException(errno.EFAULT, "Unexpected error")


@accepts(h.ref("mail"))
@description("Updates mail configuration")
class MailConfigureTask(Task):
    @classmethod
    def early_describe(cls):
        return "Updating mail configuration"

    def describe(self, mail):
        return TaskDescription(
            "Updating {name} mail configuration",
            name=mail.get("user", "") + "@" + mail.get("server", "") if mail else "",
        )

    def verify(self, mail):
        errors = ValidationException()
        node = ConfigNode("mail", self.configstore).__getstate__()
import errno
import os

from task import Task, TaskStatus, Provider, TaskException, VerifyException
from freenas.dispatcher.rpc import RpcException, description, accepts, returns, private
from freenas.dispatcher.rpc import SchemaHelper as h
from freenas.utils import normalize


class FakeDisksProvider(Provider):
    def query(self, filter=None, params=None):
        return self.datastore.query('simulator.disks', *(filter or []), **(params or {}))


@description("Creates a Simulated Fake Disk with the parameters provided")
@accepts(h.all_of(
    h.ref('simulator-disk'),
    h.required('id')
))
class CreateFakeDisk(Task):
    def verify(self, disk):
        return ['system']

    def run(self, disk):
        defpath = os.path.join(self.dispatcher.call_sync('system_dataset.request_directory', 'simulator'), disk['id'])
        normalize(disk, {
            'vendor': 'FreeNAS',
            'path': defpath,
            'model': 'Virtual Disk',
            'serial': self.dispatcher.call_sync('share.iscsi.generate_serial'),
            'block_size': 512,
            'rpm': '7200',
logger = logging.getLogger('AFPPlugin')


@description('Provides info about AFP service configuration')
class AFPProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('service-afp'))
    def get_config(self):
        return ConfigNode('service.afp', self.configstore).__getstate__()


@private
@description('Configure AFP service')
@accepts(h.ref('service-afp'))
class AFPConfigureTask(Task):
    @classmethod
    def early_describe(cls):
        return 'Configuring AFP service'

    def describe(self, share):
        return TaskDescription('Configuring AFP service')

    def verify(self, afp):
        return ['system']

    def run(self, afp):
        paths = [PosixPath(afp.get(y)) if afp.get(y) else None for y in ('dbpath', 'homedir_path')]
        for p in paths:
            if p and not p.exists():
@description("Provides access to configuration store") class ConfigProvider(Provider): @private @accepts(str) @returns((str, int, bool, None)) def get(self, key): return self.dispatcher.configstore.get(key) @private def list(self, root): return self.dispatcher.configstore.list_children(root) @description("Updates configuration settings") @accepts(h.object()) class UpdateConfigTask(Task): def verify(self, settings): return ['system'] def run(self, settings): for i in settings: self.configstore.set(i['key'], i['value']) self.dispatcher.dispatch_event('config.changed', { 'operation': 'update', 'ids': [list(settings.keys())] }) def _init(dispatcher, plugin):
    @query('network-route')
    @generator
    def query(self, filter=None, params=None):
        return self.datastore.query_stream('network.routes', *(filter or []), **(params or {}))


@description("Provides access to static host entries database")
class HostsProvider(Provider):
    @query('network-host')
    @generator
    def query(self, filter=None, params=None):
        return self.datastore.query_stream('network.hosts', *(filter or []), **(params or {}))


@description("Updates global network configuration settings")
@accepts(h.ref('network-config'))
class NetworkConfigureTask(Task):
    @classmethod
    def early_describe(cls):
        return "Updating global network settings"

    def describe(self, settings):
        return TaskDescription("Updating global network settings")

    def verify(self, settings):
        return ['system']

    def run(self, settings):
        node = ConfigNode('network', self.configstore)
        node.update(settings)

        dhcp_used = self.datastore.exists('network.interfaces', ('dhcp', '=', True))
            logger.info('Stopping service {0}'.format(service))
            self.dispatcher.call_sync('service.ensure_stopped', service, timeout=120)
        else:
            if restart:
                logger.info('Restarting service {0}'.format(service))
                self.dispatcher.call_sync('service.restart', service, timeout=120)
            elif reload:
                logger.info('Reloading service {0}'.format(service))
                self.dispatcher.call_sync('service.reload', service, timeout=120)


@description("Provides functionality to start, stop, restart or reload service")
@accepts(
    str,
    h.enum(str, ['start', 'stop', 'restart', 'reload'])
)
class ServiceManageTask(Task):
    @classmethod
    def early_describe(cls):
        return 'Changing service state'

    def describe(self, id, action):
        svc = self.datastore.get_by_id('service_definitions', id)
        return TaskDescription("{action}ing service {name}", action=action.title(), name=svc['name'])

    def verify(self, id, action):
        return ['system']

    def run(self, id, action):
        if not self.datastore.exists('service_definitions', ('id', '=', id)):
logger = logging.getLogger('SSHPlugin')


@description('Provides info about SSH service configuration')
class SSHProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('ServiceSshd'))
    def get_config(self):
        return exclude(ConfigNode('service.sshd', self.configstore).__getstate__(), 'keys')


@private
@description('Configure SSH service')
@accepts(h.ref('ServiceSshd'))
class SSHConfigureTask(Task):
    @classmethod
    def early_describe(cls):
        return 'Configuring SSH service'

    def describe(self, ssh):
        return TaskDescription('Configuring SSH service')

    def verify(self, ssh):
        return ['system']

    def run(self, ssh):
        config = self.dispatcher.call_sync(
            'service.query',
            [('name', '=', 'sshd')],
            {'single': True, 'select': 'config'}
        )
        port = ssh.get('port')
def _init(dispatcher, plugin):
    def nightly_update_check(args):
        if args.get('name') != 'scheduler.management':
            return

        logger.debug('Scheduling a nightly update check task')
        caltask = dispatcher.call_sync(
            'calendar_task.query',
            [('name', '=', 'update.checkfetch')],
            {'single': True}
        ) or {'schedule': {}}

        caltask.update({
            'name': 'update.checkfetch',
            'args': [True],
            'hidden': True,
            'protected': True,
            'description': 'Nightly update check',
        })

        caltask['schedule'].update({
            'hour': str(random.randint(1, 6)),
            'minute': str(random.randint(0, 59)),
        })

        if caltask.get('id'):
            dispatcher.call_task_sync('calendar_task.update', caltask['id'], caltask)
        else:
            dispatcher.call_task_sync('calendar_task.create', caltask)

    # Register schemas
    plugin.register_schema_definition('update', {
        'type': 'object',
        'properties': {
            'train': {'type': 'string'},
            'check_auto': {'type': 'boolean'},
            'update_server': {'type': 'string', 'readOnly': True},
        },
    })

    plugin.register_schema_definition('update-progress', h.object(properties={
        'operation': h.enum(str, ['DOWNLOADING', 'INSTALLING']),
        'details': str,
        'indeterminate': bool,
        'percent': int,
        'reboot': bool,
        'pkg_name': str,
        'pkg_version': str,
        'filesize': int,
        'num_files_done': int,
        'num_files_total': int,
        'error': bool,
        'finished': bool,
    }))

    plugin.register_schema_definition('update-ops', {
        'type': 'object',
        'properties': {
            'operation': {
                'type': 'string',
                'enum': ['delete', 'install', 'upgrade']
            },
            'new_name': {'type': ['string', 'null']},
            'new_version': {'type': ['string', 'null']},
            'previous_name': {'type': ['string', 'null']},
            'previous_version': {'type': ['string', 'null']},
        }
    })

    plugin.register_schema_definition('update-info', {
        'type': 'object',
        'properties': {
            'notes': {'type': 'object'},
            'notice': {'type': 'string'},
            'changelog': {
                'type': 'array',
                'items': {'type': 'string'},
            },
            'operations': {'$ref': 'update-ops'},
            'downloaded': {'type': 'boolean'},
        }
    })

    plugin.register_schema_definition('update-train', {
        'type': 'object',
        'properties': {
            'name': {'type': 'string'},
            'description': {'type': 'string'},
            'sequence': {'type': 'string'},
            'current': {'type': 'boolean'},
        }
    })

    # Register providers
    plugin.register_provider("update", UpdateProvider)

    # Register task handlers
    plugin.register_task_handler("update.update", UpdateConfigureTask)
    plugin.register_task_handler("update.check", CheckUpdateTask)
    plugin.register_task_handler("update.download", DownloadUpdateTask)
    plugin.register_task_handler("update.manual", UpdateManualTask)
    plugin.register_task_handler("update.apply", UpdateApplyTask)
    plugin.register_task_handler("update.verify", UpdateVerifyTask)
    plugin.register_task_handler("update.checkfetch", CheckFetchUpdateTask)
    plugin.register_task_handler("update.updatenow", UpdateNowTask)

    # Register event types
    plugin.register_event_type('update.in_progress', schema=h.ref('update-progress'))
    plugin.register_event_type('update.changed')

    # Register resources
    plugin.register_resource(Resource(update_resource_string), ['system'])

    # Get the update cache (if any) at system boot (and hence in init here).
    # Do this in parallel so that a failed cache generation does not take the
    # entire dispatcher start/restart with it (see ticket #12892)
    gevent.spawn(generate_update_cache, dispatcher)

    # Schedule a task to check for/download updates
    plugin.register_event_handler('plugin.service_resume', nightly_update_check)
from freenas.dispatcher.rpc import RpcException, SchemaHelper as h, description, accepts, returns
from task import Task, Provider, TaskException, ValidationException

logger = logging.getLogger('RIAKCSPlugin')


@description('Provides info about RIAK CS service configuration')
class RIAKCSProvider(Provider):
    @accepts()
    @returns(h.ref('service-riak_cs'))
    def get_config(self):
        return ConfigNode('service.riak_cs', self.configstore)


@description('Configure RIAK CS service')
@accepts(h.ref('service-riak_cs'))
class RIAKCSConfigureTask(Task):
    def describe(self, share):
        return 'Configuring RIAK CS service'

    def verify(self, riakcs):
        errors = []
        node = ConfigNode('service.riak_cs', self.configstore).__getstate__()
        node.update(riakcs)

        if errors:
            raise ValidationException(errors)

        return ['system']
        'name': name,
        'verbose_name': verbose_name,
    }


@description('Provides access to the alerts filters')
class AlertsFiltersProvider(Provider):
    @query('alert-filter')
    def query(self, filter=None, params=None):
        return self.datastore.query(
            'alerts-filters',
            *(filter or []),
            **(params or {})
        )


@accepts(h.ref('alert-filter'))
class AlertFilterCreateTask(Task):
    def describe(self, alertfilter):
        return 'Creating alert filter {0}'.format(alertfilter['name'])

    def verify(self, alertfilter):
        return []

    def run(self, alertfilter):
        id = self.datastore.insert('alerts-filters', alertfilter)
        self.dispatcher.dispatch_event('alerts.filters.changed', {
            'operation': 'create',
            'ids': [id]
        })
def _init(dispatcher, plugin):
    def on_hostname_change(args):
        if 'hostname' not in args:
            return

        if args.get('jid') != 0:
            return

        dispatcher.configstore.set('system.hostname', args['hostname'])
        dispatcher.call_sync('service.restart', 'mdns')
        dispatcher.dispatch_event('system.general.changed', {
            'operation': 'update',
        })

    # Register schemas
    plugin.register_schema_definition('system-advanced', {
        'type': 'object',
        'properties': {
            'console_cli': {'type': 'boolean'},
            'console_screensaver': {'type': 'boolean'},
            'serial_console': {'type': 'boolean'},
            'serial_port': {'type': 'string'},
            'serial_speed': {'type': 'integer'},
            'powerd': {'type': 'boolean'},
            'swapondrive': {'type': 'integer'},
            'debugkernel': {'type': 'boolean'},
            'uploadcrash': {'type': 'boolean'},
            'motd': {'type': 'string'},
            'boot_scrub_internal': {'type': 'integer'},
            'periodic_notify_user': {'type': 'integer'},
        },
        'additionalProperties': False,
    })

    plugin.register_schema_definition('system-general', {
        'type': 'object',
        'properties': {
            'hostname': {'type': 'string'},
            'language': {'type': 'string'},
            'timezone': {'type': 'string'},
            'console_keymap': {'type': 'string'},
            'syslog_server': {'type': ['string', 'null']},
        },
        'additionalProperties': False,
    })

    plugin.register_schema_definition('system-ui', {
        'type': 'object',
        'properties': {
            'webui_protocol': {
                'type': ['array'],
                'items': {
                    'type': 'string',
                    'enum': ['HTTP', 'HTTPS'],
                },
            },
            'webui_listen': {
                'type': ['array'],
                'items': {'type': 'string'},
            },
            'webui_http_redirect_https': {'type': 'boolean'},
            'webui_http_port': {'type': 'integer'},
            'webui_https_certificate': {'type': ['string', 'null']},
            'webui_https_port': {'type': 'integer'},
        },
        'additionalProperties': False,
    })

    plugin.register_schema_definition('system-time', {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'system_time': {'type': 'string'},
            'boot_time': {'type': 'string'},
            'uptime': {'type': 'string'},
            'timezone': {'type': 'string'}
        }
    })

    plugin.register_schema_definition('power_changed', {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'operation': {'type': 'string', 'enum': ['SHUTDOWN', 'REBOOT']},
        }
    })

    # Register event handler
    plugin.register_event_handler('system.hostname.change', on_hostname_change)

    # Register event types
    plugin.register_event_type('power.changed', schema=h.ref('power_changed'))

    # Register providers
    plugin.register_provider("system.advanced", SystemAdvancedProvider)
    plugin.register_provider("system.general", SystemGeneralProvider)
    plugin.register_provider("system.info", SystemInfoProvider)
    plugin.register_provider("system.ui", SystemUIProvider)

    # Register task handlers
    plugin.register_task_handler("system.advanced.update", SystemAdvancedConfigureTask)
    plugin.register_task_handler("system.general.update", SystemGeneralConfigureTask)
    plugin.register_task_handler("system.ui.update", SystemUIConfigureTask)
    plugin.register_task_handler("system.time.update", SystemTimeConfigureTask)
    plugin.register_task_handler("system.shutdown", SystemHaltTask)
    plugin.register_task_handler("system.reboot", SystemRebootTask)

    # Register debug hook
    plugin.register_debug_hook(collect_debug)

    # Set initial hostname
    netif.set_hostname(dispatcher.configstore.get('system.hostname'))
logger = logging.getLogger('WebDAVPlugin')


@description('Provides info about WebDAV service configuration')
class WebDAVProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('service-webdav'))
    def get_config(self):
        return ConfigNode('service.webdav', self.configstore).__getstate__()


@private
@description('Configure WebDAV service')
@accepts(h.ref('service-webdav'))
class WebDAVConfigureTask(Task):
    @classmethod
    def early_describe(cls):
        return 'Configuring WebDAV service'

    def describe(self, webdav):
        return TaskDescription('Configuring WebDAV service')

    def verify(self, webdav):
        return ['system']

    def run(self, webdav):
        node = ConfigNode('service.webdav', self.configstore).__getstate__()
        for p in ('http_port', 'https_port'):
        config = ConfigNode('service.smb', self.configstore).__getstate__()
        if config.get('filemask') is not None:
            config['filemask'] = get_unix_permissions(config['filemask'])

        if config.get('dirmask') is not None:
            config['dirmask'] = get_unix_permissions(config['dirmask'])

        return config

    @returns(bool)
    def ad_enabled(self):
        return self.datastore.exists('directories', ('plugin', '=', 'winbind'), ('enabled', '=', True))


@private
@description('Configure SMB service')
@accepts(h.ref('ServiceSmb'))
class SMBConfigureTask(Task):
    @classmethod
    def early_describe(cls):
        return 'Configuring SMB service'

    def describe(self, smb):
        return TaskDescription('Configuring SMB service')

    def verify(self, smb):
        return ['system']

    def run(self, smb):
        node = ConfigNode('service.smb', self.configstore).__getstate__()

        netbiosname = smb.get('netbiosname')
        if netbiosname is not None:
@description('Provides information about iSCSI auth groups')
class ISCSIAuthProvider(Provider):
    def query(self, filter=None, params=None):
        return self.datastore.query('iscsi.auth', *(filter or []), **(params or {}))


@description('Provides information about iSCSI portals')
class ISCSIPortalProvider(Provider):
    def query(self, filter=None, params=None):
        return self.datastore.query('iscsi.portals', *(filter or []), **(params or {}))


@private
@accepts(h.ref('iscsi-share'))
@description("Adds new iSCSI share")
class CreateISCSIShareTask(Task):
    @classmethod
    def early_describe(cls):
        return "Creating iSCSI share"

    def describe(self, share):
        return TaskDescription("Creating iSCSI share {name}", name=share.get('name', '') if share else '')

    def verify(self, share):
        if share['target_type'] == 'FILE':
            # File extent
            if not os.path.exists(share['target_path']):
                raise VerifyException(errno.ENOENT, "Extent file does not exist")
        elif share['target_type'] == 'ZVOL':
    def get_config(self):
        return ConfigNode('service.rsyncd', self.configstore).__getstate__()


@description("Provides access to rsyncd modules database")
class RsyncdModuleProvider(Provider):
    @description("Lists rsyncd modules present in the system")
    @query('rsyncd-module')
    @generator
    def query(self, filter=None, params=None):
        return self.datastore.query_stream('rsyncd-module', *(filter or []), **(params or {}))


@private
@description('Configure Rsyncd service')
@accepts(h.ref('service-rsyncd'))
class RsyncdConfigureTask(Task):
    @classmethod
    def early_describe(cls):
        return 'Configuring Rsyncd service'

    def describe(self, rsyncd):
        return TaskDescription('Configuring Rsyncd service')

    def verify(self, rsyncd):
        errors = []
        if errors:
            raise ValidationException(errors)

        return ['system']
        system('sysctl', '{0}={1}'.format(name, str(value)))
    except SubprocessException as e:
        # sysctl module compatibility
        raise OSError(str(e.err))


@description("Provides access to OS tunables")
class TunablesProvider(Provider):
    @query('tunable')
    def query(self, filter=None, params=None):
        return self.datastore.query('tunables', *(filter or []), **(params or {}))


@description("Adds Tunable")
@accepts(h.all_of(
    h.ref('tunable'),
    h.required('var', 'value', 'type'),
))
class TunableCreateTask(Task):
    def describe(self, tunable):
        return "Creating Tunable {0}".format(tunable['var'])

    def verify(self, tunable):
        errors = ValidationException()

        if self.datastore.exists('tunables', ('var', '=', tunable['var'])):
            errors.add((1, 'var'), 'This variable already exists.', code=errno.EEXIST)

        if '"' in tunable['value'] or "'" in tunable['value']:
            errors.add((1, 'value'), 'Quotes are not allowed')
def query(self, filter=None, params=None): return self.datastore.query("iscsi.targets", *(filter or []), **(params or {})) class ISCSIAuthProvider(Provider): def query(self, filter=None, params=None): return self.datastore.query("iscsi.auth", *(filter or []), **(params or {})) class ISCSIPortalProvider(Provider): def query(self, filter=None, params=None): return self.datastore.query("iscsi.portals", *(filter or []), **(params or {})) @description("Adds new iSCSI share") @accepts(h.ref("iscsi-share")) class CreateISCSIShareTask(Task): def describe(self, share): return "Creating iSCSI share {0}".format(share["name"]) def verify(self, share): if share["target"][0] == "/": # File extent if not os.path.exists(share["target"]): raise VerifyException(errno.ENOENT, "Extent file does not exist") else: if not os.path.exists(convert_share_target(share["target"])): raise VerifyException(errno.ENOENT, "Extent ZVol does not exist") return ["service:ctl"]
            raise RpcException(errno.EINVAL, 'Failed to decode ticket response')
        except requests.ConnectionError as e:
            raise RpcException(errno.ENOTCONN, 'Connection failed: {0}'.format(str(e)))
        except requests.Timeout as e:
            raise RpcException(errno.ETIMEDOUT, 'Connection timed out: {0}'.format(str(e)))

        if 'error' in data:
            raise RpcException(errno.EINVAL, data['message'])

        return data


@description("Submits a new support ticket")
@accepts(h.all_of(
    h.ref('support-ticket'),
    h.required('subject', 'description', 'category', 'type', 'username', 'password')
))
class SupportSubmitTask(Task):
    @classmethod
    def early_describe(cls):
        return 'Submitting ticket'

    def describe(self, ticket):
        return TaskDescription('Submitting ticket')

    def verify(self, ticket):
        return ['system']

    def run(self, ticket):
        try:
) @description("Returns list of supported backup providers") @accepts() @returns(h.ref('backup-providers')) def supported_providers(self): result = {} for p in list(self.dispatcher.plugins.values()): if p.metadata and p.metadata.get('type') == 'backup': result[p.metadata['method']] = {} return result @accepts(h.all_of( h.ref('backup'), h.required('name', 'provider', 'dataset') )) @description('Creates a backup task') class CreateBackupTask(Task): @classmethod def early_describe(cls): return 'Creating backup task' def describe(self, backup): return TaskDescription('Creating backup task {name}', name=backup.get('name', '') if backup else '') def verify(self, backup): return ['system'] def run(self, backup): if 'id' in backup and self.datastore.exists('backup', ('id', '=', backup['id'])):
    @returns('zfs-pool')
    def get_config(self):
        return self.dispatcher.call_sync('zfs.pool.get_boot_pool')


@description("Provides information on Boot Environments")
class BootEnvironmentsProvider(Provider):
    @query('boot-environment')
    def query(self, filter=None, params=None):
        return bootenvs.query(*(filter or []), **(params or {}))


@description(
    "Creates a clone of the current Boot Environment or of the specified source (optional)"
)
@accepts(str, h.any_of(str, None))
class BootEnvironmentCreate(Task):
    @classmethod
    def early_describe(cls):
        return "Cloning Boot Environment"

    def describe(self, newname, source=None):
        return TaskDescription(
            "Cloning Boot Environment {source} - new name {name}",
            name=newname,
            source=source or ''
        )

    def verify(self, newname, source=None):
        return ['system']

    def run(self, newname, source=None):
        if not CreateClone(newname, bename=source):
            raise TaskException(errno.EIO, 'Cannot create the {0} boot environment'.format(newname))
            'webui_http_port': self.configstore.get('service.nginx.http.port'),
            'webui_http_redirect_https': self.configstore.get('service.nginx.http.redirect_https'),
            'webui_https_certificate': self.configstore.get('service.nginx.https.certificate'),
            'webui_https_port': self.configstore.get('service.nginx.https.port'),
        }


@accepts(h.ref('system-general'))
class SystemGeneralConfigureTask(Task):
    def describe(self):
        return "System General Settings Configure"

    def verify(self, props):
        return ['system']

    def run(self, props):
        if 'hostname' in props:
            netif.set_hostname(props['hostname'])

        if 'language' in props:
            self.configstore.set('system.language', props['language'])

        if 'timezone' in props:
logger = logging.getLogger('LLDPPlugin')


@description('Provides info about LLDP service configuration')
class LLDPProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('service-lldp'))
    def get_config(self):
        return ConfigNode('service.lldp', self.configstore).__getstate__()


@private
@description('Configure LLDP service')
@accepts(h.ref('service-lldp'))
class LLDPConfigureTask(Task):
    def describe(self, share):
        return 'Configuring LLDP service'

    def verify(self, lldp):
        errors = ValidationException()
        node = ConfigNode('service.lldp', self.configstore).__getstate__()
        node.update(lldp)

        # Lazy load pycountry due to extra verbose DEBUG logging
        import pycountry

        if node['country_code'] and node['country_code'] not in pycountry.countries.indices['alpha2']:
            errors.add((0, 'country_code'), 'Invalid ISO-3166 alpha 2 code')

        if errors:
logger = logging.getLogger('SSHPlugin')


@description('Provides info about SSH service configuration')
class SSHProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('service-sshd'))
    def get_config(self):
        return exclude(ConfigNode('service.sshd', self.configstore).__getstate__(), 'keys')


@private
@description('Configure SSH service')
@accepts(h.ref('service-sshd'))
class SSHConfigureTask(Task):
    @classmethod
    def early_describe(cls):
        return 'Configuring SSH service'

    def describe(self, ssh):
        return TaskDescription('Configuring SSH service')

    def verify(self, ssh):
        return ['system']

    def run(self, ssh):
        config = self.dispatcher.call_sync(
            'service.query',
            [('name', '=', 'sshd')],
            {'single': True, 'select': 'config'}
        )
        port = ssh.get('port')