class OpenVpnProvider(Provider):
    """Read-only access to the OpenVPN service configuration."""

    @returns(h.ref('service-openvpn'))
    def get_config(self):
        """Return the raw OpenVPN configuration from the configstore."""
        return ConfigNode('service.openvpn', self.configstore).__getstate__()

    @returns(h.ref('service-openvpn'))
    def get_readable_config(self):
        """Return the OpenVPN config with certificate ids replaced by names.

        'ca', 'cert' and 'key' are stored as crypto.certificates ids; each
        non-empty one is translated into the certificate's name so the result
        is human readable. The three previously copy-pasted lookups are now
        a single loop.
        """
        vpn_config = ConfigNode('service.openvpn', self.configstore).__getstate__()
        for field in ('ca', 'cert', 'key'):
            if vpn_config[field]:
                vpn_config[field] = self.datastore.query(
                    'crypto.certificates',
                    ('id', '=', vpn_config[field]),
                    select=('name')
                )[0]
        return vpn_config
class StatProvider(Provider):
    """Exposes statd statistics to RPC clients."""

    @query('Statistic')
    @generator
    def query(self, filter=None, params=None):
        """Stream the current statd output state, filtered by the given query."""
        current = self.dispatcher.call_sync('statd.output.get_current_state')
        return q.query(current, *(filter or []), stream=True, **(params or {}))

    @returns(h.array(str))
    @generator
    def get_data_sources(self):
        """Return the flat list of statd data source names."""
        return self.dispatcher.call_sync('statd.output.get_data_sources')

    def get_data_sources_tree(self):
        """Return statd data sources organized as a tree."""
        return self.dispatcher.call_sync('statd.output.get_data_sources_tree')

    @accepts(h.one_of(str, h.array(str)), h.ref('GetStatsParams'))
    @returns(h.ref('GetStatsResult'))
    def get_stats(self, data_source, params):
        """Fetch stats for one or several data sources, wrapped in a result dict."""
        samples = self.dispatcher.call_sync('statd.output.get_stats', data_source, params)
        return {'data': list(samples)}

    def normalize(self, name, value):
        """Delegate to the module-level normalize helper."""
        return normalize(name, value)
class NetworkProvider(Provider):
    @returns(h.ref('network-config'))
    def get_config(self):
        """Return global network config augmented with live gateway/DNS state."""
        state = ConfigNode('network', self.configstore).__getstate__()
        state['gateway'] = self.dispatcher.call_sync(
            'networkd.configuration.get_default_routes')
        state['dns'] = self.dispatcher.call_sync(
            'networkd.configuration.get_dns_config')
        return state

    @returns(h.array(str))
    def get_my_ips(self):
        """Collect addresses of usable interfaces (up, non-loopback), mgmt0 excluded."""
        ifaces = self.dispatcher.call_sync(
            'networkd.configuration.query_interfaces')
        ifaces.pop('mgmt0', None)

        addresses = set()
        for name, iface in ifaces.items():
            usable = (
                'LOOPBACK' not in iface['flags']
                and iface['link_state'] == 'LINK_STATE_UP'
                and 'UP' in iface['flags']
            )
            if not usable:
                continue
            # Skip link-layer aliases and empty addresses.
            for alias in iface['aliases']:
                if alias['address'] and alias['type'] != 'LINK':
                    addresses.add(alias['address'])

        return list(addresses)
class PeerSSHProvider(Provider):
    @query('peer')
    @generator
    def query(self, filter=None, params=None):
        """Stream SSH-type peers, applying the caller's filter/params."""
        peers = self.dispatcher.call_sync('peer.query', [('type', '=', 'ssh')])
        return q.query(peers, *(filter or []), stream=True, **(params or {}))

    @private
    @accepts(str)
    @returns(h.ref('peer-status'))
    def get_status(self, id):
        """Probe a peer's SSH port and report its state plus round-trip time.

        Returns a dict with 'state' (UNKNOWN/ONLINE/OFFLINE) and 'rtt' in
        seconds (None when unreachable or unknown).
        """
        peer = self.dispatcher.call_sync(
            'peer.query',
            [('id', '=', id), ('type', '=', 'ssh')],
            {'single': True}
        )
        if not peer:
            # BUG FIX: previously returned a (id, dict) tuple here while all
            # other branches return a bare dict; normalized to the dict shape
            # declared by the 'peer-status' schema.
            return {'state': 'UNKNOWN', 'rtt': None}

        credentials = peer['credentials']
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            start_time = datetime.now()
            s.connect((credentials['address'], credentials.get('port', 22)))
            delta = datetime.now() - start_time
            return {'state': 'ONLINE', 'rtt': delta.total_seconds()}
        except socket.error:
            return {'state': 'OFFLINE', 'rtt': None}
        finally:
            s.close()
class ISCSIProvider(Provider):
    @accepts()
    @returns(h.ref('service-iscsi'))
    def get_config(self):
        """Return iSCSI service config with the portal list attached."""
        config = ConfigNode('service.iscsi', self.configstore).__getstate__()
        config['portals'] = self.datastore.query('iscsi.portals')
        return config
class SNMPProvider(Provider):
    @accepts()
    @returns(h.ref('ServiceSnmp'))
    def get_config(self):
        """Return SNMP config with the v3 password wrapped so it is not exposed as plain text."""
        config = ConfigNode('service.snmp', self.configstore).__getstate__()
        config['v3_password'] = Password(config['v3_password'])
        return config
class SwapProvider(Provider):
    @accepts()
    @returns(h.array(h.ref('SwapMirror')))
    @description("Returns information about swap mirrors present in the system")
    def info(self):
        """List swap mirror descriptors gathered from the system."""
        mirrors = get_swap_info(self.dispatcher)
        return list(mirrors.values())
class SystemUIProvider(Provider):
    @accepts()
    @returns(h.ref('SystemUi'))
    def get_config(self):
        """Assemble web UI settings from the nginx configstore keys."""
        get = self.configstore.get

        protocols = []
        if get('service.nginx.http.enable'):
            protocols.append('HTTP')
        if get('service.nginx.https.enable'):
            protocols.append('HTTPS')

        return {
            'webui_protocol': protocols,
            'webui_listen': get('service.nginx.listen'),
            'webui_http_port': get('service.nginx.http.port'),
            'webui_http_redirect_https': get('service.nginx.http.redirect_https'),
            'webui_https_certificate': get('service.nginx.https.certificate'),
            'webui_https_port': get('service.nginx.https.port')
        }
class UPSProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('ServiceUps'))
    def get_config(self):
        # Return the stored UPS service configuration.
        return ConfigNode('service.ups', self.configstore).__getstate__()

    @accepts()
    @returns(h.array(h.array(str)))
    def drivers(self):
        # Parse NUT's driver.list into {driver_name, description} records.
        driver_list = '/etc/local/nut/driver.list'
        if not os.path.exists(driver_list):
            return []
        drivers = []
        with open(driver_list, 'rb') as f:
            d = f.read()
        # Collapse runs of spaces/tabs so the file can be parsed as
        # space-delimited CSV with quoted fields.
        r = io.StringIO()
        for line in re.sub(r'[ \t]+', ' ', d.decode('utf-8'), flags=re.M).split('\n'):
            r.write(line.strip() + '\n')
        r.seek(0)
        reader = csv.reader(r, delimiter=' ', quotechar='"')
        for row in reader:
            # Skip blank lines and comment lines.
            if len(row) == 0 or row[0].startswith('#'):
                continue
            # When the row carries a trailing '# <comment>' pair, the driver
            # name sits one field further from the end.
            if row[-2] == '#':
                last = -3
            else:
                last = -1
            if row[last].find(' (experimental)') != -1:
                row[last] = row[last].replace(' (experimental)', '').strip()
            drivers.append({
                'driver_name': row[last],
                'description': '{0} ({1})'.format(' '.join(row[0:last]), row[last])
            })
        return drivers

    @accepts()
    @returns(h.array(h.array(str)))
    def get_usb_devices(self):
        # Enumerate USB devices via usbconfig(8), mapping each to its /dev
        # path and the '<...>' description from the tool's output.
        usb_devices_list = []
        try:
            usbconfig_output = system('usbconfig')[0]
            if not usbconfig_output.startswith('No device match'):
                for device in usbconfig_output.rstrip().split('\n'):
                    # First token is e.g. 'ugen0.1:' — drop the trailing colon.
                    device_path = os.path.join('/dev', device.split()[0][:-1])
                    device_description = re.findall(r'<.*?>', device)[0].strip('><')
                    usb_devices_list.append({
                        'device': device_path,
                        'description': device_description
                    })
        except SubprocessException as e:
            # NOTE(review): raising TaskException from a Provider method looks
            # out of place (siblings raise RpcException) — confirm intent.
            raise TaskException(errno.EBUSY, e.err)
        return usb_devices_list
class TFTPProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('service-tftpd'))
    def get_config(self):
        """Return TFTP config with the numeric umask expanded to unix permission form."""
        state = ConfigNode('service.tftpd', self.configstore).__getstate__()
        state['umask'] = get_unix_permissions(state['umask'])
        return state
class SSHProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('service-sshd'))
    def get_config(self):
        """Return sshd config with the host 'keys' stripped out of the result."""
        state = ConfigNode('service.sshd', self.configstore).__getstate__()
        return exclude(state, 'keys')
class WebDAVProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('ServiceWebdav'))
    def get_config(self):
        """Return WebDAV config with the password wrapped in a Password object."""
        config = ConfigNode('service.webdav', self.configstore).__getstate__()
        config['password'] = Password(config['password'])
        return config
class FTPProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('ServiceFtp'))
    def get_config(self):
        """Return FTP config with file/dir masks expanded to unix permission form."""
        state = ConfigNode('service.ftp', self.configstore).__getstate__()
        for key in ('filemask', 'dirmask'):
            state[key] = get_unix_permissions(state[key])
        return state
class SystemTimeProvider(Provider):
    @accepts()
    @returns(h.ref('SystemTime'))
    def get_config(self):
        """Report current time, boot time, uptime (seconds) and the active timezone name."""
        booted = datetime.utcfromtimestamp(psutil.boot_time())
        uptime = (datetime.utcnow() - booted).total_seconds()
        return {
            'system_time': datetime.now(tz=tz.tzlocal()),
            'boot_time': booted,
            'uptime': uptime,
            'timezone': time.tzname[time.daylight],
        }
class DynDNSProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('ServiceDyndns'))
    def get_config(self):
        """Return the stored dynamic DNS service configuration."""
        return ConfigNode('service.dyndns', self.configstore).__getstate__()

    @accepts()
    @returns(h.object())
    def providers(self):
        """Expose the module-level table of supported DynDNS providers."""
        return PROVIDERS
class IPFSServiceProvider(Provider):
    def __init__(self):
        # Client is created lazily in initialize(), once context is available.
        self.ipfs_api = None

    def initialize(self, context):
        """Set up the local IPFS API client after base initialization."""
        super(IPFSServiceProvider, self).initialize(context)
        self.ipfs_api = ipfsapi.Client('127.0.0.1', 5001)

    @private
    @accepts()
    @returns(h.ref('ServiceIpfs'))
    def get_config(self):
        """Return the stored IPFS service configuration."""
        return ConfigNode('service.ipfs', self.configstore).__getstate__()
class SystemGeneralProvider(Provider):
    @accepts()
    @returns(h.ref('SystemGeneral'))
    def get_config(self):
        # Gather general system settings from their individual configstore keys.
        return {
            'hostname': self.configstore.get('system.hostname'),
            'description': self.configstore.get('system.description'),
            'tags': self.configstore.get('system.tags'),
            'language': self.configstore.get('system.language'),
            'timezone': self.configstore.get('system.timezone'),
            'syslog_server': self.configstore.get('system.syslog_server'),
            'console_keymap': self.configstore.get('system.console.keymap')
        }

    @accepts()
    @returns(h.array(h.array(str)))
    def keymaps(self):
        # Parse the keymap index file into (name, description) pairs; only
        # the English ('...:en:...') entries are matched.
        if not os.path.exists(KEYMAPS_INDEX):
            return []

        rv = []
        with open(KEYMAPS_INDEX, 'r', encoding='utf-8', errors='ignore') as f:
            d = f.read()
        fnd = re.findall(r'^(?P<name>[^#\s]+?)\.kbd:en:(?P<desc>.+)$', d, re.M)
        for name, desc in fnd:
            rv.append((name, desc))

        return rv

    @accepts()
    @returns(h.array(str))
    def timezones(self):
        # Walk the zoneinfo tree and return zone names relative to ZONEINFO_DIR.
        result = []
        for root, _, files in os.walk(ZONEINFO_DIR):
            for f in files:
                # Skip metadata files that are not actual timezones.
                if f in ('zone.tab', 'posixrules'):
                    continue

                result.append(
                    os.path.join(root, f).replace(ZONEINFO_DIR + '/', ''))

        return sorted(result)

    @private
    @accepts(str, str)
    @returns(str)
    def cowsay(self, line, cow_file='default'):
        # NOTE(review): system() presumably returns (stdout, stderr); this
        # returns that value as-is despite @returns(str) — confirm with callers.
        if cow_file != 'default' and os.path.exists(cow_file):
            return system('/usr/local/bin/cowsay', '-f', cow_file, '-s', line)
        else:
            return system('/usr/local/bin/cowsay', '-s', line)
class PeerAmazonS3Provider(Provider):
    @query('peer')
    @generator
    def query(self, filter=None, params=None):
        """Stream amazon-s3 peers matching the supplied query."""
        s3_peers = self.dispatcher.call_sync('peer.query', [('type', '=', 'amazon-s3')])
        return q.query(s3_peers, *(filter or []), stream=True, **(params or {}))

    @private
    @accepts(str)
    @returns(h.ref('peer-status'))
    def get_status(self, id):
        """S3 peers cannot be health-checked; always report NOT_SUPPORTED."""
        return {'state': 'NOT_SUPPORTED', 'rtt': None}
class PeerVMwareProvider(Provider):
    @query('Peer')
    @generator
    def query(self, filter=None, params=None):
        """Stream vmware peers with passwords wrapped and status resolved lazily."""
        def extend_query():
            for i in self.datastore.query_stream('peers', ('type', '=', 'vmware')):
                password = q.get(i, 'credentials.password')
                if password:
                    q.set(i, 'credentials.password', Password(password))
                # Status requires a network round-trip, so compute it lazily.
                i['status'] = lazy(self.get_status, i['id'])
                yield i

        return q.query(
            extend_query(),
            *(filter or []),
            stream=True,
            **(params or {})
        )

    @private
    @accepts(str)
    @returns(h.ref('PeerStatus'))
    def get_status(self, id):
        """Connect to the vSphere host behind the peer and time the handshake.

        Returns {'state': 'ONLINE'|'OFFLINE', 'rtt': seconds or None}.
        Raises RpcException(EINVAL) when the peer is not of vmware type.
        """
        si = None
        peer = self.datastore.get_by_id('peers', id)
        if peer['type'] != 'vmware':
            raise RpcException(errno.EINVAL, 'Invalid peer type: {0}'.format(peer['type']))

        try:
            start_time = datetime.now()
            # Certificate validation deliberately disabled — peers are
            # frequently self-signed appliances.
            ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ssl_context.verify_mode = ssl.CERT_NONE
            si = connect.SmartConnect(
                host=q.get(peer, 'credentials.address'),
                user=q.get(peer, 'credentials.username'),
                pwd=unpassword(q.get(peer, 'credentials.password')),
                sslContext=ssl_context
            )
            delta = datetime.now() - start_time
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit. Any connection failure → OFFLINE.
            return {'state': 'OFFLINE', 'rtt': None}
        finally:
            if si:
                connect.Disconnect(si)

        return {'state': 'ONLINE', 'rtt': delta.total_seconds()}
class SMBProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('service-smb'))
    def get_config(self):
        """Return SMB config, expanding present non-null masks to unix permission form."""
        config = ConfigNode('service.smb', self.configstore).__getstate__()
        for mask in ('filemask', 'dirmask'):
            # .get() covers both "key absent" and "key present but None".
            if config.get(mask) is not None:
                config[mask] = get_unix_permissions(config[mask])
        return config

    @returns(bool)
    def ad_enabled(self):
        """True when an enabled winbind (Active Directory) directory exists."""
        return self.datastore.exists(
            'directories',
            ('plugin', '=', 'winbind'),
            ('enabled', '=', True)
        )
class BootPoolProvider(Provider):
    @returns(h.ref('ZfsPool'))
    def get_config(self):
        # Snapshot of the boot pool's topology, health and key properties.
        pool = self.dispatcher.call_sync('zfs.pool.get_boot_pool')

        @lazy
        def collect_disks():
            # Resolve each vdev member partition back to its disk. Lookups can
            # fail (e.g. a detached device); we then fall back to the raw vdev
            # path and a None disk_id.
            disks = []
            for vdev, _ in iterate_vdevs(pool['groups']):
                disk_id = None
                disk = None
                try:
                    disk_id = self.dispatcher.call_sync('disk.partition_to_disk', vdev['path'])
                    disk = self.dispatcher.call_sync(
                        'disk.query',
                        [('id', '=', disk_id), ('online', '=', True)],
                        {'single': True}
                    )
                except RpcException:
                    pass
                disks.append({
                    'disk_id': disk_id,
                    'path': q.get(disk, 'path', vdev['path']),
                    'guid': vdev['guid'],
                    'status': vdev['status']
                })
            return disks

        return {
            'name': pool['id'],
            'guid': pool['guid'],
            'status': pool['status'],
            'scan': pool['scan'],
            'properties': include(
                pool['properties'],
                'size', 'capacity', 'health', 'version', 'delegation',
                'failmode', 'autoreplace', 'dedupratio', 'free', 'allocated',
                'readonly', 'comment', 'expandsize', 'fragmentation', 'leaked'
            ),
            # Deferred: disk resolution only happens when 'disks' is accessed.
            'disks': collect_disks
        }
class VMwareProvider(Provider):
    @generator
    @accepts(str, str, str, bool)
    @returns(h.ref('VmwareDatastore'))
    def get_datastores(self, address, username, password, full=False):
        # Yield one record per datastore on the given vSphere endpoint; with
        # full=True, each record also lists the VMs residing on it.
        # Certificate checking is disabled — targets are commonly self-signed.
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        ssl_context.verify_mode = ssl.CERT_NONE
        try:
            si = connect.SmartConnect(host=address, user=username, pwd=password, sslContext=ssl_context)
            content = si.RetrieveContent()
            vm_view = content.viewManager.CreateContainerView(
                content.rootFolder, [vim.VirtualMachine], True)
        except vmodl.MethodFault as err:
            # NOTE(review): if SmartConnect succeeded but a later call in this
            # block raises, 'si' is never disconnected on this path — confirm
            # whether the session leak matters.
            raise RpcException(errno.EFAULT, err.msg)
        try:
            for datastore in content.viewManager.CreateContainerView(
                    content.rootFolder, [vim.Datastore], True).view:
                vms = []
                if full:
                    # Per-datastore VM listing is expensive; only on request.
                    for vm in vm_view.view:
                        if datastore not in vm.datastore:
                            continue
                        vms.append({
                            'id': vm.config.uuid,
                            'name': vm.summary.config.name,
                            'on': vm.summary.runtime.powerState == 'poweredOn',
                            'snapshottable': can_be_snapshotted(vm)
                        })
                yield {
                    'id': datastore.info.url,
                    'name': datastore.info.name,
                    'free_space': datastore.info.freeSpace,
                    'virtual_machines': vms
                }
        finally:
            connect.Disconnect(si)
class PeerAmazonS3Provider(Provider):
    @query('Peer')
    @generator
    def query(self, filter=None, params=None):
        """Stream amazon-s3 peers, attaching a lazily-evaluated 'status' field."""
        def with_status():
            for peer in self.datastore.query_stream('peers', ('type', '=', 'amazon-s3')):
                peer['status'] = lazy(self.get_status, peer['id'])
                yield peer

        return q.query(with_status(), *(filter or []), stream=True, **(params or {}))

    @private
    @accepts(str)
    @returns(h.ref('peer-status'))
    def get_status(self, id):
        """Status probing is not implemented for S3 peers."""
        return {'state': 'NOT_SUPPORTED', 'rtt': None}
class BackupProvider(Provider):
    @generator
    def query(self, filter=None, params=None):
        """Stream backup task records matching the supplied query."""
        def extend(backup):
            # Extension hook; records currently pass through unchanged.
            return backup

        stream = self.datastore.query_stream('backup', callback=extend)
        return q.query(stream, *(filter or []), stream=True, **(params or {}))

    @description("Returns list of supported backup providers")
    @accepts()
    @returns(h.ref('backup-providers'))
    def supported_providers(self):
        """Map each 'backup'-type plugin's method name to an (empty) descriptor."""
        return {
            p.metadata['method']: {}
            for p in self.dispatcher.plugins.values()
            if p.metadata and p.metadata.get('type') == 'backup'
        }
class NFSProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('ServiceNfs'))
    def get_config(self):
        """Return the stored NFS service configuration."""
        return ConfigNode('service.nfs', self.configstore).__getstate__()

    @private
    def get_arguments(self, label):
        """Build the command line for one of the NFS-related daemons.

        BUG FIX: the previous version chained
        `base + [...] if cond else [...] + ips` without parentheses; Python's
        conditional expression binds looser than `+`, so this parsed as
        `(base + [...]) if cond else ([...] + ips)` and silently dropped whole
        argument groups — including the bind-address flags — depending on the
        condition. Each optional group is now explicitly parenthesized.
        """
        nfs = self.get_config()
        # Every daemon takes repeated '-h <addr>' flags, one per bind address.
        ips = sum([['-h', x] for x in nfs['bind_addresses']], [])

        if label == 'org.freebsd.rpcbind':
            return ['/usr/sbin/rpcbind', '-d'] + ips

        if label == 'org.freebsd.nfsd':
            return (
                ['/usr/sbin/nfsd', '-d', '-t', '-n', nfs['servers']] +
                (['-u'] if nfs['udp'] else []) +
                ips
            )

        if label == 'org.freebsd.mountd':
            return (
                ['/usr/sbin/mountd', '-d', '-l', '-rS'] +
                (['-n'] if nfs['nonroot'] else []) +
                (['-p', nfs['mountd_port']] if nfs['mountd_port'] else []) +
                ips
            )

        if label == 'org.freebsd.statd':
            return (
                ['/usr/sbin/rpc.statd', '-d'] +
                (['-p', nfs['rpcstatd_port']] if nfs['rpcstatd_port'] else []) +
                ips
            )

        if label == 'org.freebsd.lockd':
            return (
                ['/usr/sbin/rpc.lockd', '-d'] +
                (['-p', nfs['rpclockd_port']] if nfs['rpclockd_port'] else []) +
                ips
            )
class SystemAdvancedProvider(Provider):
    @accepts()
    @returns(h.ref('system-advanced'))
    def get_config(self):
        """Collect advanced system settings, keyed by their configstore entries."""
        key_map = {
            'console_cli': 'system.console.cli',
            'console_screensaver': 'system.console.screensaver',
            'serial_console': 'system.serial.console',
            'serial_port': 'system.serial.port',
            'serial_speed': 'system.serial.speed',
            'powerd': 'service.powerd.enable',
            'swapondrive': 'system.swapondrive',
            'debugkernel': 'system.debug.kernel',
            'uploadcrash': 'system.upload_crash',
            'home_directory_root': 'system.home_directory_root',
            'motd': 'system.motd',
            'boot_scrub_internal': 'system.boot_scrub_internal',
            'periodic_notify_user': 'system.periodic.notify_user',
            'graphite_servers': 'system.graphite_servers',
            'freenas_token_lifetime': 'peer.freenas.token_lifetime',
        }
        return {name: self.configstore.get(key) for name, key in key_map.items()}
logger = logging.getLogger('AFPPlugin') @description('Provides info about AFP service configuration') class AFPProvider(Provider): @private @accepts() @returns(h.ref('service-afp')) def get_config(self): return ConfigNode('service.afp', self.configstore).__getstate__() @private @description('Configure AFP service') @accepts(h.ref('service-afp')) class AFPConfigureTask(Task): @classmethod def early_describe(cls): return 'Configuring AFP service' def describe(self, share): return TaskDescription('Configuring AFP service') def verify(self, afp): return ['system'] def run(self, afp): paths = [PosixPath(afp.get(y)) if afp.get(y) else None for y in ('dbpath', 'homedir_path')] for p in paths: if p and not p.exists():
raise RpcException(errno.EINVAL, 'Failed to decode ticket response') except requests.ConnectionError as e: raise RpcException(errno.ENOTCONN, 'Connection failed: {0}'.format(str(e))) except requests.Timeout as e: raise RpcException(errno.ETIMEDOUT, 'Connection timed out: {0}'.format(str(e))) if 'error' in data: raise RpcException(errno.EINVAL, data['message']) return data @description("Submits a new support ticket") @accepts( h.all_of( h.ref('support-ticket'), h.required('subject', 'description', 'category', 'type', 'username', 'password')) ) class SupportSubmitTask(Task): @classmethod def early_describe(cls): return 'Submitting ticket' def describe(self, ticket): return TaskDescription('Submitting ticket') def verify(self, ticket): return ['system'] def run(self, ticket): try:
if config['filemask'] is not None: config['filemask'] = get_unix_permissions(config['filemask']) if 'dirmask' in config: if config['dirmask'] is not None: config['dirmask'] = get_unix_permissions(config['dirmask']) return config @returns(bool) def ad_enabled(self): return self.datastore.exists('directories', ('type', '=', 'winbind'), ('enabled', '=', True)) @private @description('Configure SMB service') @accepts(h.ref('ServiceSmb')) class SMBConfigureTask(Task): @classmethod def early_describe(cls): return 'Configuring SMB service' def describe(self, smb): return TaskDescription('Configuring SMB service') def verify(self, smb): return ['system'] def run(self, smb): node = ConfigNode('service.smb', self.configstore).__getstate__() netbiosname = smb.get('netbiosname') if netbiosname is not None:
@description('Provides information about iSCSI auth groups') class ISCSIAuthProvider(Provider): def query(self, filter=None, params=None): return self.datastore.query('iscsi.auth', *(filter or []), **(params or {})) @description('Provides information about iSCSI portals') class ISCSIPortalProvider(Provider): def query(self, filter=None, params=None): return self.datastore.query('iscsi.portals', *(filter or []), **(params or {})) @private @accepts(h.ref('iscsi-share')) @description("Adds new iSCSI share") class CreateISCSIShareTask(Task): @classmethod def early_describe(cls): return "Creating iSCSI share" def describe(self, share): return TaskDescription("Creating iSCSI share {name}", name=share.get('name', '') if share else '') def verify(self, share): if share['target_type'] == 'FILE': # File extent if not os.path.exists(share['target_path']): raise VerifyException(errno.ENOENT, "Extent file does not exist") elif share['target_type'] == 'ZVOL':
system('sysctl', '{0}={1}'.format(name, str(value))) except SubprocessException as e: # sysctl module compatibility raise OSError(str(e.err)) @description("Provides access to OS tunables") class TunablesProvider(Provider): @query('tunable') def query(self, filter=None, params=None): return self.datastore.query('tunables', *(filter or []), **(params or {})) @description("Adds Tunable") @accepts(h.all_of( h.ref('tunable'), h.required('var', 'value', 'type'), )) class TunableCreateTask(Task): def describe(self, tunable): return "Creating Tunable {0}".format(tunable['var']) def verify(self, tunable): errors = ValidationException() if self.datastore.exists('tunables', ('var', '=', tunable['var'])): errors.add((1, 'var'), 'This variable already exists.', code=errno.EEXIST) if '"' in tunable['value'] or "'" in tunable['value']: errors.add((1, 'value'), 'Quotes are not allowed')
import errno from task import Task, TaskStatus, Provider, TaskException, VerifyException from freenas.dispatcher.rpc import RpcException, description, accepts, returns, private from freenas.dispatcher.rpc import SchemaHelper as h from freenas.utils import normalize class FakeDisksProvider(Provider): def query(self, filter=None, params=None): return self.datastore.query('simulator.disks', *(filter or []), **(params or {})) @description("Creates a Simulated Fake Disk with the parameters provided") @accepts( h.all_of( h.ref('simulator-disk'), h.required('id') ) ) class CreateFakeDisk(Task): def verify(self, disk): return ['system'] def run(self, disk): defpath = os.path.join(self.dispatcher.call_sync('system_dataset.request_directory', 'simulator'), disk['id']) normalize(disk, { 'vendor': 'FreeNAS', 'path': defpath, 'model': 'Virtual Disk', 'serial': self.dispatcher.call_sync('share.iscsi.generate_serial'), 'block_size': 512,
logger = logging.getLogger('WebDAVPlugin') @description('Provides info about WebDAV service configuration') class WebDAVProvider(Provider): @private @accepts() @returns(h.ref('service-webdav')) def get_config(self): return ConfigNode('service.webdav', self.configstore).__getstate__() @private @description('Configure WebDAV service') @accepts(h.ref('service-webdav')) class WebDAVConfigureTask(Task): @classmethod def early_describe(cls): return 'Configuring WebDAV service' def describe(self, webdav): return TaskDescription('Configuring WebDAV service') def verify(self, webdav): return ['system'] def run(self, webdav): node = ConfigNode('service.webdav', self.configstore).__getstate__() for p in ('http_port', 'https_port'):
@query('network-route') @generator def query(self, filter=None, params=None): return self.datastore.query_stream('network.routes', *(filter or []), **(params or {})) @description("Provides access to static host entries database") class HostsProvider(Provider): @query('network-host') @generator def query(self, filter=None, params=None): return self.datastore.query_stream('network.hosts', *(filter or []), **(params or {})) @description("Updates global network configuration settings") @accepts(h.ref('network-config')) class NetworkConfigureTask(Task): @classmethod def early_describe(cls): return "Updating global network settings" def describe(self, settings): return TaskDescription("Updating global network settings") def verify(self, settings): return ['system'] def run(self, settings): node = ConfigNode('network', self.configstore) node.update(settings) dhcp_used = self.datastore.exists('network.interfaces', ('dhcp', '=', True))
logger = logging.getLogger('SSHPlugin') @description('Provides info about SSH service configuration') class SSHProvider(Provider): @private @accepts() @returns(h.ref('ServiceSshd')) def get_config(self): return exclude(ConfigNode('service.sshd', self.configstore).__getstate__(), 'keys') @private @description('Configure SSH service') @accepts(h.ref('ServiceSshd')) class SSHConfigureTask(Task): @classmethod def early_describe(cls): return 'Configuring SSH service' def describe(self, ssh): return TaskDescription('Configuring SSH service') def verify(self, ssh): return ['system'] def run(self, ssh): config = self.dispatcher.call_sync( 'service.query', [('name', '=', 'sshd')], {'single': True, 'select': 'config'}) port = ssh.get('port')
server.starttls() if mail["auth"]: server.login(mail["user"], mail["pass"]) server.sendmail(mail["from"], to, msg) server.quit() except smtplib.SMTPAuthenticationError as e: raise RpcException(errno.EACCES, "Authentication error: {0} {1}".format(e.smtp_code, e.smtp_error)) except Exception as e: logger.error("Failed to send email: {0}".format(str(e)), exc_info=True) raise RpcException(errno.EFAULT, "Email send error: {0}".format(str(e))) except: raise RpcException(errno.EFAULT, "Unexpected error") @accepts(h.ref("mail")) @description("Updates mail configuration") class MailConfigureTask(Task): @classmethod def early_describe(cls): return "Updating mail configuration" def describe(self, mail): return TaskDescription( "Updating {name} mail configuration", name=mail.get("user", "") + "@" + mail.get("server", "") if mail else "", ) def verify(self, mail): errors = ValidationException() node = ConfigNode("mail", self.configstore).__getstate__()
from freenas.dispatcher.rpc import RpcException, SchemaHelper as h, description, accepts, returns
from task import Task, Provider, TaskException, ValidationException

logger = logging.getLogger("SSHPlugin")


@description("Provides info about SSH service configuration")
class SSHProvider(Provider):
    @accepts()
    @returns(h.ref("service-ssh"))
    def get_config(self):
        """Return the sshd configuration as a plain state dict.

        BUG FIX: previously returned the ConfigNode object itself; every
        sibling provider returns __getstate__() so the result matches the
        declared schema and is serializable.
        """
        return ConfigNode("service.sshd", self.configstore).__getstate__()


@description("Configure SSH service")
@accepts(h.ref("service-ssh"))
class SSHConfigureTask(Task):
    def describe(self, share):
        return "Configuring SSH service"

    def verify(self, ssh):
        # Task mutates system-wide configuration; claim the global resource.
        return ["system"]

    def run(self, ssh):
        try:
            node = ConfigNode("service.sshd", self.configstore)
            node.update(ssh)
            # Regenerate the sshd /etc files and notify listeners of the change.
            self.dispatcher.call_sync("etcd.generation.generate_group", "sshd")
            self.dispatcher.dispatch_event("service.ssh.changed", {"operation": "updated", "ids": None})
        except RpcException as e:
            raise TaskException(errno.ENXIO, "Cannot reconfigure SSH: {0}".format(str(e)))
def get_config(self): return ConfigNode('service.rsyncd', self.configstore).__getstate__() @description("Provides access to rsyncd modules database") class RsyncdModuleProvider(Provider): @description("Lists rsyncd modules present in the system") @query('rsyncd-module') @generator def query(self, filter=None, params=None): return self.datastore.query_stream('rsyncd-module', *(filter or []), **(params or {})) @private @description('Configure Rsyncd service') @accepts(h.ref('service-rsyncd')) class RsyncdConfigureTask(Task): @classmethod def early_describe(cls): return 'Configuring Rsyncd service' def describe(self, rsyncd): return TaskDescription('Configuring Rsyncd service') def verify(self, rsyncd): errors = [] if errors: raise ValidationException(errors) return ['system']
logger = logging.getLogger('SSHPlugin') @description('Provides info about SSH service configuration') class SSHProvider(Provider): @private @accepts() @returns(h.ref('service-sshd')) def get_config(self): return exclude(ConfigNode('service.sshd', self.configstore).__getstate__(), 'keys') @private @description('Configure SSH service') @accepts(h.ref('service-sshd')) class SSHConfigureTask(Task): @classmethod def early_describe(cls): return 'Configuring SSH service' def describe(self, ssh): return TaskDescription('Configuring SSH service') def verify(self, ssh): return ['system'] def run(self, ssh): config = self.dispatcher.call_sync( 'service.query', [('name', '=', 'sshd')], {'single': True, 'select': 'config'}) port = ssh.get('port')
'webui_http_port': self.configstore.get( 'service.nginx.http.port', ), 'webui_http_redirect_https': self.configstore.get( 'service.nginx.http.redirect_https', ), 'webui_https_certificate': self.configstore.get( 'service.nginx.https.certificate', ), 'webui_https_port': self.configstore.get( 'service.nginx.https.port', ), } @accepts(h.ref('system-general')) class SystemGeneralConfigureTask(Task): def describe(self): return "System General Settings Configure" def verify(self, props): return ['system'] def run(self, props): if 'hostname' in props: netif.set_hostname(props['hostname']) if 'language' in props: self.configstore.set('system.language', props['language']) if 'timezone' in props:
class SessionProvider(Provider):
    @query('Session')
    @generator
    def query(self, filter=None, params=None):
        # Stream session records matching the caller-supplied query.
        return self.datastore.query_stream('sessions', *(filter or []), **(params or {}))

    @accepts()
    @returns(h.array(h.ref('Session')))
    @description("Returns the logged in and active user sessions" +
                 "Does not include the service sessions in this.")
    def get_live_user_sessions(self):
        live_user_session_ids = []
        for srv in self.dispatcher.ws_servers:
            for conn in srv.connections:
                # The 'uid' check separates actual gui/cli users of the
                # websocket connection from system services such as etcd,
                # statd and so on.
                if hasattr(conn.user, 'uid'):
                    live_user_session_ids.append(conn.session_id)

        return self.datastore.query('sessions', ('id', 'in', live_user_session_ids))

    @pass_sender
    @returns(int)
    def get_my_session_id(self, sender):
        # Session id of the calling connection.
        return sender.session_id

    @description("Returns the logged in user for the current session")
    @returns(str)
    @pass_sender
    def whoami(self, sender):
        return sender.user.name

    @description("Sends a message to given session")
    @accepts(int, str)
    @pass_sender
    def send_to_session(self, id, message, sender):
        # Locate the target connection across all websocket servers.
        target = None
        for srv in self.dispatcher.ws_servers:
            target = first_or_default(lambda s: s.session_id == id, srv.connections)
            if target:
                break

        if not target:
            raise RpcException(errno.ENOENT, 'Session {0} not found'.format(id))

        target.outgoing_events.put(('session.message', {
            'sender_id': sender.session_id,
            'sender_name': sender.user.name if sender.user else None,
            'message': message
        }))

    @description("Sends a message to every active session")
    @accepts(str)
    @pass_sender
    def send_to_all(self, message, sender):
        # Broadcast the message to every connection on every websocket server.
        for srv in self.dispatcher.ws_servers:
            for target in srv.connections:
                target.outgoing_events.put(('session.message', {
                    'sender_id': sender.session_id,
                    'sender_name': sender.user.name if sender.user else None,
                    'message': message
                }))
return self.datastore.query('alert.classes') @description('Provides access to the alerts filters') class AlertsFiltersProvider(Provider): @query('alert-filter') def query(self, filter=None, params=None): return self.datastore.query( 'alert.filters', *(filter or []), **(params or {}) ) @description("Creates an Alert Filter") @accepts(h.all_of( h.ref('alert-filter'), h.required('id') )) class AlertFilterCreateTask(Task): @classmethod def early_describe(cls): return 'Creating alert filter' def describe(self, alertfilter): return TaskDescription('Creating alert filter {name}', name=alertfilter.get('name', '') if alertfilter else '') def verify(self, alertfilter): return [] def run(self, alertfilter): id = self.datastore.insert('alert.filters', alertfilter)
) @description("Returns list of supported backup providers") @accepts() @returns(h.ref('backup-providers')) def supported_providers(self): result = {} for p in list(self.dispatcher.plugins.values()): if p.metadata and p.metadata.get('type') == 'backup': result[p.metadata['method']] = {} return result @accepts(h.all_of( h.ref('backup'), h.required('name', 'provider', 'dataset') )) @description('Creates a backup task') class CreateBackupTask(Task): @classmethod def early_describe(cls): return 'Creating backup task' def describe(self, backup): return TaskDescription('Creating backup task {name}', name=backup.get('name', '') if backup else '') def verify(self, backup): return ['system'] def run(self, backup):
def validate_netbios_name(netbiosname): regex = re.compile(r"^[a-zA-Z0-9\.\-_!@#\$%^&\(\)'\{\}~]{1,15}$") return regex.match(netbiosname) @description('Provides info about CIFS service configuration') class CIFSProvider(Provider): @accepts() @returns(h.ref('service-cifs')) def get_config(self): return ConfigNode('service.cifs', self.configstore) @description('Configure CIFS service') @accepts(h.ref('service-cifs')) class CIFSConfigureTask(Task): def describe(self, cifs): return 'Configuring CIFS service' def verify(self, cifs): errors = [] node = ConfigNode('service.cifs', self.configstore).__getstate__() netbiosname = cifs.get('netbiosname') if netbiosname is not None: for n in netbiosname: if not validate_netbios_name(n): errors.append(('netbiosname', errno.EINVAL, 'Invalid name {0}'.format(n))) else:
            # Tail of a dict literal started before this chunk.
            'name': name,
            'verbose_name': verbose_name,
        }


@description('Provides access to the alerts filters')
class AlertsFiltersProvider(Provider):
    @query('alert-filter')
    def query(self, filter=None, params=None):
        # NOTE(review): this variant reads the 'alerts-filters' collection,
        # while another copy elsewhere uses 'alert.filters' — confirm which
        # collection name is current for this module.
        return self.datastore.query(
            'alerts-filters', *(filter or []), **(params or {})
        )


@accepts(h.ref('alert-filter'))
class AlertFilterCreateTask(Task):
    def describe(self, alertfilter):
        return 'Creating alert filter {0}'.format(alertfilter['name'])

    def verify(self, alertfilter):
        # No resources need to be locked for filter creation.
        return []

    def run(self, alertfilter):
        # Persist the filter, then notify listeners of the new id.
        id = self.datastore.insert('alerts-filters', alertfilter)

        self.dispatcher.dispatch_event('alerts.filters.changed', {
            'operation': 'create',
            'ids': [id]
        })
    # Tail of an import list started before this chunk.
    Provider, Task
)

logger = logging.getLogger('LDAPPlugin')


@description("Provides access to LDAP configuration")
class LDAPProvider(Provider):
    @returns(h.ref('ldap-config'))
    def get_config(self):
        # Stub: not implemented yet.
        pass


@description("Updates LDAP settings")
@accepts(h.ref('ldap-config'))
class LDAPConfigureTask(Task):
    def verify(self, config):
        return ['system']

    def run(self, config):
        # Stub: not implemented yet.
        pass


def _init(dispatcher, plugin):
    # Plugin entry point: registers the ldap-config schema.
    # NOTE(review): chunk ends inside this dict literal.
    plugin.register_schema_definition('ldap-config', {
        'type': 'object',
        'properties': {
            'hostname': {'type': 'string'},
            'binddn': {'type': 'string'},
from freenas.dispatcher.rpc import RpcException, SchemaHelper as h, description, accepts, returns
from task import Task, Provider, TaskException, ValidationException

logger = logging.getLogger('RIAKCSPlugin')


@description('Provides info about RIAK CS service configuration')
class RIAKCSProvider(Provider):
    @accepts()
    @returns(h.ref('service-riak_cs'))
    def get_config(self):
        # __getstate__() turns the ConfigNode into a plain serializable
        # dict — matches the convention used by the other service providers.
        return ConfigNode('service.riak_cs', self.configstore).__getstate__()


@description('Configure RIAK CS service')
@accepts(h.ref('service-riak_cs'))
class RIAKCSConfigureTask(Task):
    # Parameter renamed from the copy-pasted 'share' to 'riakcs' for
    # clarity; the framework passes the task argument positionally.
    def describe(self, riakcs):
        return 'Configuring RIAK CS service'

    def verify(self, riakcs):
        # Merge the requested changes over the current state. No
        # field-level validation exists yet, so `errors` stays empty and
        # the raise below is currently unreachable.
        errors = []
        node = ConfigNode('service.riak_cs', self.configstore).__getstate__()
        node.update(riakcs)

        if errors:
            raise ValidationException(errors)

        return ['system']
        # Tail of an SMB provider get_config (def line is outside this
        # chunk): translate numeric masks into unix-permission structures
        # before returning the serialized config.
        config = ConfigNode('service.smb', self.configstore).__getstate__()
        if 'filemask' in config:
            if config['filemask'] is not None:
                config['filemask'] = get_unix_permissions(config['filemask'])

        if 'dirmask' in config:
            if config['dirmask'] is not None:
                config['dirmask'] = get_unix_permissions(config['dirmask'])

        return config

    @returns(bool)
    def ad_enabled(self):
        # True when an enabled Active Directory (winbind) directory exists.
        return self.datastore.exists('directories', ('plugin', '=', 'winbind'), ('enabled', '=', True))


@private
@description('Configure SMB service')
@accepts(h.ref('ServiceSmb'))
class SMBConfigureTask(Task):
    @classmethod
    def early_describe(cls):
        return 'Configuring SMB service'

    def describe(self, smb):
        return TaskDescription('Configuring SMB service')

    def verify(self, smb):
        return ['system']

    def run(self, smb):
        node = ConfigNode('service.smb', self.configstore).__getstate__()
        netbiosname = smb.get('netbiosname')
        # NOTE(review): chunk ends here; netbiosname validation continues
        # outside this view.
        if netbiosname is not None:
def _init(dispatcher, plugin):
    """Plugin entry point: registers schemas, event handlers, providers
    and task handlers for the system.* namespace, then applies the stored
    hostname."""
    def on_hostname_change(args):
        # Persist kernel-reported hostname changes and republish them.
        if 'hostname' not in args:
            return

        # Ignore hostname changes originating from jails (jid != 0).
        if args.get('jid') != 0:
            return

        dispatcher.configstore.set('system.hostname', args['hostname'])
        dispatcher.call_sync('service.restart', 'mdns')
        dispatcher.dispatch_event('system.general.changed', {
            'operation': 'update',
        })

    # Register schemas
    plugin.register_schema_definition('system-advanced', {
        'type': 'object',
        'properties': {
            'console_cli': {'type': 'boolean'},
            'console_screensaver': {'type': 'boolean'},
            'serial_console': {'type': 'boolean'},
            'serial_port': {'type': 'string'},
            'serial_speed': {'type': 'integer'},
            'powerd': {'type': 'boolean'},
            'swapondrive': {'type': 'integer'},
            'debugkernel': {'type': 'boolean'},
            'uploadcrash': {'type': 'boolean'},
            'motd': {'type': 'string'},
            'boot_scrub_internal': {'type': 'integer'},
            'periodic_notify_user': {'type': 'integer'},
        },
        'additionalProperties': False,
    })

    plugin.register_schema_definition('system-general', {
        'type': 'object',
        'properties': {
            'hostname': {'type': 'string'},
            'language': {'type': 'string'},
            'timezone': {'type': 'string'},
            'console_keymap': {'type': 'string'},
            'syslog_server': {'type': ['string', 'null']},
        },
        'additionalProperties': False,
    })

    plugin.register_schema_definition('system-ui', {
        'type': 'object',
        'properties': {
            'webui_protocol': {
                'type': ['array'],
                'items': {
                    'type': 'string',
                    'enum': ['HTTP', 'HTTPS'],
                },
            },
            'webui_listen': {
                'type': ['array'],
                'items': {'type': 'string'},
            },
            'webui_http_redirect_https': {'type': 'boolean'},
            'webui_http_port': {'type': 'integer'},
            'webui_https_certificate': {'type': ['string', 'null']},
            'webui_https_port': {'type': 'integer'},
        },
        'additionalProperties': False,
    })

    plugin.register_schema_definition('system-time', {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'system_time': {'type': 'string'},
            'boot_time': {'type': 'string'},
            'uptime': {'type': 'string'},
            'timezone': {'type': 'string'}
        }
    })

    plugin.register_schema_definition('power_changed', {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'operation': {'type': 'string', 'enum': ['SHUTDOWN', 'REBOOT']},
        }
    })

    # Register event handler
    plugin.register_event_handler('system.hostname.change', on_hostname_change)

    # Register Event Types
    plugin.register_event_type('power.changed', schema=h.ref('power_changed'))

    # Register providers
    plugin.register_provider("system.advanced", SystemAdvancedProvider)
    plugin.register_provider("system.general", SystemGeneralProvider)
    plugin.register_provider("system.info", SystemInfoProvider)
    plugin.register_provider("system.ui", SystemUIProvider)

    # Register task handlers
    plugin.register_task_handler("system.advanced.update", SystemAdvancedConfigureTask)
    plugin.register_task_handler("system.general.update", SystemGeneralConfigureTask)
    plugin.register_task_handler("system.ui.update", SystemUIConfigureTask)
    plugin.register_task_handler("system.time.update", SystemTimeConfigureTask)
    plugin.register_task_handler("system.shutdown", SystemHaltTask)
    plugin.register_task_handler("system.reboot", SystemRebootTask)

    # Register debug hook
    plugin.register_debug_hook(collect_debug)

    # Set initial hostname
    netif.set_hostname(dispatcher.configstore.get('system.hostname'))
@description("Provides info about DynamicDNS service configuration") class DynDNSProvider(Provider): @accepts() @returns(h.ref("service-dyndns")) def get_config(self): return ConfigNode("service.dyndns", self.configstore) @accepts() @returns(h.object()) def providers(self): return PROVIDERS @description("Configure DynamicDNS service") @accepts(h.ref("service-dyndns")) class DynDNSConfigureTask(Task): def describe(self, share): return "Configuring DynamicDNS service" def verify(self, dyndns): errors = [] node = ConfigNode("service.dyndns", self.configstore) if errors: raise ValidationException(errors) return ["system"] def run(self, dyndns):
        # Tail of an Apache server-status scraping method (def line is
        # outside this chunk): parse table rows into request records.
        for row in table.xpath('./tr[position()>1]'):
            cols = row.getchildren()
            request = cols[12].text
            # Skip the scrape's own request to /server-status.
            if request == 'GET /server-status HTTP/1.1':
                continue

            result.append({
                'pid': cols[1].text,
                'client': cols[10].text,
                'request': cols[12].text,
            })

        return result


@private
@description("Adds new WebDAV share")
@accepts(h.ref('Share'))
class CreateWebDAVShareTask(Task):
    @classmethod
    def early_describe(cls):
        return "Creating WebDAV share"

    def describe(self, share):
        return TaskDescription("Creating WebDAV share {name}", name=share.get('name', '') if share else '')

    def verify(self, share):
        return ['service:webdav']

    def run(self, share):
        # Fill in defaults for optional share properties.
        # NOTE(review): chunk ends inside this call; more defaults may follow.
        normalize(share['properties'], {
            'read_only': False,
            'permission': False,
            # Tail of a NUT service-control helper (def line is outside this
            # chunk): assemble the list of rc.d (service, verb) pairs.
            ('nut_upslog', 'restart'),
        ]

        if ups['mode'] == 'MASTER':
            verbs.append(('nut', 'restart'))

        verbs.append(('nut_upsmon', 'start'))

        try:
            for svc, verb in verbs:
                # The 'one' prefix (onestart/onerestart) forces the action
                # even when the service is not enabled in rc.conf.
                system("/usr/sbin/service", svc, 'one' + verb)
        except SubprocessException as e:
            raise TaskException(errno.EBUSY, e.err)


@private
@description('Configure UPS service')
@accepts(h.ref('service-ups'))
class UPSConfigureTask(Task):
    @classmethod
    def early_describe(cls):
        return 'Configuring UPS service'

    def describe(self, ups):
        return TaskDescription('Configuring UPS service')

    def verify(self, ups):
        return ['system']

    def run(self, ups):
        # Merge the requested changes over the current state.
        # NOTE(review): chunk ends here; the rest of run() is outside
        # this view.
        node = ConfigNode('service.ups', self.configstore).__getstate__()
        node.update(ups)
return TaskDescription("Activating the Boot Environment {name}", name=name) def verify(self, name): be = FindClone(name) if not be: raise VerifyException(errno.ENOENT, 'Boot environment {0} not found'.format(name)) return ['system'] def run(self, name): if not ActivateClone(name): raise TaskException(errno.EIO, 'Cannot activate the {0} boot environment'.format(name)) @description("Renames the given Boot Environment with the alternate name provieded") @accepts(str, h.ref('boot-environment')) class BootEnvironmentUpdate(Task): @classmethod def early_describe(cls): return "Updating Boot Environment" def describe(self, id, be): return TaskDescription("Updating the Boot Environment {name}", name=id) def verify(self, id, be): return ['system'] def run(self, id, updated_params): be = FindClone(id) if not be: raise VerifyException(errno.ENOENT, 'Boot environment {0} not found'.format(id))
        # Tail of a service control method (def line is outside this chunk):
        # drive serviced jobs for stop/start/restart, then publish the change.
        if action in ('stop', 'restart'):
            self.dispatcher.call_sync('serviced.job.stop', i['Label'])

        if action in ('start', 'restart'):
            self.dispatcher.call_sync('serviced.job.start', i['Label'])

        self.dispatcher.dispatch_event('service.changed', {
            'operation': 'update',
            'ids': [service['id']]
        })


@description("Updates configuration for services")
@accepts(str, h.all_of(
    h.ref('service'),
    h.forbidden('id', 'name', 'builtin', 'pid', 'state')
))
class UpdateServiceConfigTask(Task):
    @classmethod
    def early_describe(cls):
        return 'Updating service configuration'

    def describe(self, id, updated_fields):
        svc = self.datastore.get_by_id('service_definitions', id)
        return TaskDescription("Updating configuration of service {name}", name=svc['name'])

    def verify(self, id, updated_fields):
        return ['system']

    # NOTE(review): chunk ends here; the task body is outside this view.
    def run(self, id, updated_fields):
class DeviceInfoProvider(Provider):
    """Enumerates hardware devices: disks, NICs, CPUs, USB and serial ports."""

    @description("Returns list of available device classes")
    @returns(h.array(str))
    def get_classes(self):
        return ["disk", "network", "cpu", "usb", "serial_port"]

    @description("Returns list of devices from given class")
    @accepts(str)
    @returns(
        h.any_of(h.ref('DiskDevice'), h.ref('NetworkDevice'), h.ref('CpuDevice'), h.ref('SerialPortDevice'),
                 h.ref('UsbDevice')))
    def get_devices(self, dev_class):
        # Dispatch to the matching _get_class_* helper; None for an
        # unknown class.
        method = "_get_class_{0}".format(dev_class)
        if hasattr(self, method):
            return getattr(self, method)()

        return None

    def _get_class_disk(self):
        # One record per GEOM DISK provider.
        result = []
        geom.scan()
        for child in geom.class_by_name('DISK').geoms:
            result.append({
                "path": os.path.join("/dev", child.name),
                "name": child.name,
                "mediasize": child.provider.mediasize,
                "description": child.provider.config['descr']
            })

        return result

    def _get_class_multipath(self):
        # MULTIPATH class may be absent entirely; return [] in that case.
        result = []
        geom.scan()
        cls = geom.class_by_name('MULTIPATH')
        if not cls:
            return []

        for child in cls.geoms:
            result.append({
                "path": os.path.join("/dev", child.name),
                "name": child.name,
                "mediasize": child.provider.mediasize,
                "members": [c.provider.name for c in child.consumers]
            })

        return result

    def _get_class_network(self):
        # Physical interfaces only; cloned interfaces are skipped and
        # interfaces without a dev.<drv>.<unit>.%desc sysctl are ignored.
        result = []
        for i in list(netif.list_interfaces().keys()):
            if i.startswith(tuple(netif.CLONED_PREFIXES)):
                continue

            try:
                desc = get_sysctl(re.sub('(\\w+)([0-9]+)', 'dev.\\1.\\2.%desc', i))
                result.append({'name': i, 'description': desc})
            except FileNotFoundError:
                continue

        return result

    def _get_class_serial_port(self):
        # uart devices found in the I/O ports resource manager.
        result = []
        for devices in devinfo.DevInfo().resource_managers['I/O ports'].values():
            for dev in devices:
                if not dev.name.startswith('uart'):
                    continue
                result.append({
                    'name': dev.name,
                    'description': dev.desc,
                    'drivername': dev.drivername,
                    'location': dev.location,
                    'start': hex(dev.start),
                    'size': dev.size
                })

        return result

    def _get_class_cpu(self):
        result = []
        ncpus = get_sysctl('hw.ncpu')
        model = get_sysctl('hw.model').strip('\x00')
        for i in range(0, ncpus):
            freq = None
            temp = None

            with contextlib.suppress(OSError):
                # BUG FIX: a stray trailing comma previously made `freq` a
                # one-element tuple instead of the raw sysctl value.
                freq = get_sysctl('dev.cpu.{0}.freq'.format(i))

            with contextlib.suppress(OSError):
                temp = get_sysctl('dev.cpu.{0}.temperature'.format(i))

            result.append({'name': model, 'freq': freq, 'temperature': temp})

        return result

    def _get_class_usb(self):
        result = []
        context = usb1.USBContext()
        for device in context.getDeviceList():
            result.append({
                'bus': device.getBusNumber(),
                'address': device.getDeviceAddress(),
                'manufacturer': device.getManufacturer(),
                'product': device.getProduct(),
                'vid': device.getVendorID(),
                'pid': device.getProductID(),
                'class': device.getDeviceClass()
            })

        context.exit()
        return result
@private @accepts(h.object()) def update_cache_putter(self, value_dict): for key, value in value_dict.items(): update_cache.put(key, value) @private @accepts(str) @returns(h.any_of(None, str, bool, h.array(str))) def update_cache_getter(self, key): return update_cache.get(key, timeout=1) @description("Set the System Updater Cofiguration Settings") @accepts(h.ref('update')) class UpdateConfigureTask(Task): def describe(self): return "System Updater Configure Settings" def verify(self, props): # TODO: Fix this verify's resource allocation as unique task block = self.dispatcher.resource_graph.get_resource(update_resource_string) if block is not None and block.busy: raise VerifyException( errno.EBUSY, 'An Update Operation (Configuration/ Download/ Applying ' + 'the Updates) is already in the queue, please retry later') return [update_resource_string]
logger = logging.getLogger('LLDPPlugin')


@description('Provides info about LLDP service configuration')
class LLDPProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('service-lldp'))
    def get_config(self):
        # Serialize the config node into a plain dict for the RPC layer.
        return ConfigNode('service.lldp', self.configstore).__getstate__()


@private
@description('Configure LLDP service')
@accepts(h.ref('service-lldp'))
class LLDPConfigureTask(Task):
    def describe(self, share):
        return 'Configuring LLDP service'

    def verify(self, lldp):
        errors = ValidationException()
        node = ConfigNode('service.lldp', self.configstore).__getstate__()
        node.update(lldp)

        # Lazy load pycountry due to extra verbose DEBUG logging
        import pycountry

        # NOTE(review): `countries.indices['alpha2']` relies on an older
        # pycountry internal API — confirm it still exists in the pinned
        # pycountry version.
        if node['country_code'] and node['country_code'] not in pycountry.countries.indices['alpha2']:
            errors.add((0, 'country_code'), 'Invalid ISO-3166 alpha 2 code')

        # NOTE(review): chunk ends mid-statement here.
        if errors:
def _init(dispatcher, plugin):
    """Plugin entry point for the update subsystem: registers schemas,
    providers, task handlers, event types and the nightly check task."""
    def nightly_update_check(args):
        # When the scheduler service resumes, (re)install a hidden calendar
        # task that checks/fetches updates at a random time between 01:00
        # and 06:59.
        if args.get('name') != 'scheduler.management':
            return

        logger.debug('Scheduling a nightly update check task')
        caltask = dispatcher.call_sync(
            'calendar_task.query',
            [('name', '=', 'update.checkfetch')],
            {'single': True}
        ) or {'schedule': {}}

        caltask.update({
            'name': 'update.checkfetch',
            'args': [True],
            'hidden': True,
            'protected': True,
            'description': 'Nightly update check',
        })

        caltask['schedule'].update({
            'hour': str(random.randint(1, 6)),
            'minute': str(random.randint(0, 59)),
        })

        # Update the existing calendar task if present, otherwise create it.
        if caltask.get('id'):
            dispatcher.call_task_sync('calendar_task.update', caltask['id'], caltask)
        else:
            dispatcher.call_task_sync('calendar_task.create', caltask)

    # Register Schemas
    plugin.register_schema_definition('update', {
        'type': 'object',
        'properties': {
            'train': {'type': 'string'},
            'check_auto': {'type': 'boolean'},
            'update_server': {'type': 'string', 'readOnly': True},
        },
    })

    plugin.register_schema_definition('update-progress', h.object(properties={
        'operation': h.enum(str, ['DOWNLOADING', 'INSTALLING']),
        'details': str,
        'indeterminate': bool,
        'percent': int,
        'reboot': bool,
        'pkg_name': str,
        'pkg_version': str,
        'filesize': int,
        'num_files_done': int,
        'num_files_total': int,
        'error': bool,
        'finished': bool,
    }))

    plugin.register_schema_definition('update-ops', {
        'type': 'object',
        'properties': {
            'operation': {
                'type': 'string',
                'enum': ['delete', 'install', 'upgrade']
            },
            'new_name': {'type': ['string', 'null']},
            'new_version': {'type': ['string', 'null']},
            'previous_name': {'type': ['string', 'null']},
            'previous_version': {'type': ['string', 'null']},
        }
    })

    plugin.register_schema_definition('update-info', {
        'type': 'object',
        'properties': {
            'notes': {'type': 'object'},
            'notice': {'type': 'string'},
            'changelog': {
                'type': 'array',
                'items': {'type': 'string'},
            },
            'operations': {'$ref': 'update-ops'},
            'downloaded': {'type': 'boolean'},
        }
    })

    plugin.register_schema_definition('update-train', {
        'type': 'object',
        'properties': {
            'name': {'type': 'string'},
            'description': {'type': 'string'},
            'sequence': {'type': 'string'},
            'current': {'type': 'boolean'},
        }
    })

    # Register providers
    plugin.register_provider("update", UpdateProvider)

    # Register task handlers
    plugin.register_task_handler("update.update", UpdateConfigureTask)
    plugin.register_task_handler("update.check", CheckUpdateTask)
    plugin.register_task_handler("update.download", DownloadUpdateTask)
    plugin.register_task_handler("update.manual", UpdateManualTask)
    plugin.register_task_handler("update.apply", UpdateApplyTask)
    plugin.register_task_handler("update.verify", UpdateVerifyTask)
    plugin.register_task_handler("update.checkfetch", CheckFetchUpdateTask)
    plugin.register_task_handler("update.updatenow", UpdateNowTask)

    # Register Event Types
    plugin.register_event_type('update.in_progress', schema=h.ref('update-progress'))
    plugin.register_event_type('update.changed')

    # Register resources
    plugin.register_resource(Resource(update_resource_string), ['system'])

    # Get the Update Cache (if any) at system boot (and hence in init here)
    # Do this in parallel so that a failed cache generation does not take the
    # entire dispatcher start/restart with it (See Ticket: #12892)
    gevent.spawn(generate_update_cache, dispatcher)

    # Schedule a task to check/dowload for updates
    plugin.register_event_handler('plugin.service_resume', nightly_update_check)
        # Tail of a service control method (def line is outside this chunk):
        # drive serviced jobs for stop/start/restart, then publish the change.
        if action in ('stop', 'restart'):
            self.dispatcher.call_sync('serviced.job.stop', i['Label'])

        if action in ('start', 'restart'):
            self.dispatcher.call_sync('serviced.job.start', i['Label'])

        self.dispatcher.dispatch_event('service.changed', {
            'operation': 'update',
            'ids': [service['id']]
        })


@description("Updates configuration for services")
@accepts(str, h.all_of(h.ref('Service'), h.forbidden('id', 'name', 'builtin', 'pid', 'state')))
class UpdateServiceConfigTask(ProgressTask):
    @classmethod
    def early_describe(cls):
        return 'Updating service configuration'

    def describe(self, id, updated_fields):
        svc = self.datastore.get_by_id('service_definitions', id)
        return TaskDescription("Updating configuration of service {name}", name=svc['name'])

    def verify(self, id, updated_fields):
        return ['system']

    # NOTE(review): chunk ends here; the task body is outside this view.
    def run(self, id, updated_fields):
    # Method of an iSCSI targets provider whose class header is outside
    # this chunk.
    def query(self, filter=None, params=None):
        return self.datastore.query("iscsi.targets", *(filter or []), **(params or {}))


class ISCSIAuthProvider(Provider):
    def query(self, filter=None, params=None):
        return self.datastore.query("iscsi.auth", *(filter or []), **(params or {}))


class ISCSIPortalProvider(Provider):
    def query(self, filter=None, params=None):
        return self.datastore.query("iscsi.portals", *(filter or []), **(params or {}))


@description("Adds new iSCSI share")
@accepts(h.ref("iscsi-share"))
class CreateISCSIShareTask(Task):
    def describe(self, share):
        return "Creating iSCSI share {0}".format(share["name"])

    def verify(self, share):
        # A target path beginning with '/' denotes a file extent; anything
        # else is resolved to a ZVol device path before the existence check.
        if share["target"][0] == "/":
            # File extent
            if not os.path.exists(share["target"]):
                raise VerifyException(errno.ENOENT, "Extent file does not exist")
        else:
            if not os.path.exists(convert_share_target(share["target"])):
                raise VerifyException(errno.ENOENT, "Extent ZVol does not exist")

        return ["service:ctl"]