def run(self, config_path, name, type):
    """Import a share definition from a config file and register it.

    config_path: directory holding the exported share config.
    name: share name to import.
    type: expected share type; must match the stored entry.

    Returns the new share id.
    Raises VerifyException on missing/unreadable/mismatched config,
    TaskException on unsupported type or duplicate share.
    """
    try:
        share = load_config(config_path, f'{type}-{name}', version=CONFIG_VERSION)
    except FileNotFoundError:
        raise VerifyException(
            errno.ENOENT,
            f'There is no share {name} of type {type} at {config_path} to be imported.'
        )
    except ValueError as err:
        raise VerifyException(errno.EINVAL, f'Cannot read configuration file: {err}')

    # The stored type must match what the caller asked to import.
    if share['type'] != type:
        raise VerifyException(
            errno.EINVAL,
            f'Share type {type} does not match configuration file entry type {share["type"]}'
        )

    if not self.dispatcher.call_sync('share.supported_types').get(share['type']):
        raise TaskException(errno.ENXIO, f'Unknown sharing type {share["type"]}')

    if self.datastore.exists(
        'shares',
        ('type', '=', share['type']),
        ('name', '=', share['name'])
    ):
        # BUG FIX: this message was a plain string missing the f-prefix, so the
        # {share["..."]} placeholders were emitted literally instead of interpolated.
        raise TaskException(
            errno.EEXIST,
            f'Share {share["name"]} of type {share["type"]} already exists'
        )

    id = self.run_subtask_sync(f'share.{share["type"]}.import', share)
    self.dispatcher.dispatch_event('share.changed', {
        'operation': 'create',
        'ids': [id]
    })
    return id
def run(self, updated_params):
    """Apply updated directory-service settings and refresh dscached."""
    config = ConfigNode('directory', self.configstore)
    config.update(updated_params)
    self.dispatcher.emit_event('directoryservice.changed', {'operation': 'update'})
    try:
        # Reload config first, then flush any stale cached entries.
        for rpc in ('dscached.management.reload_config', 'dscached.management.flush_cache'):
            self.dispatcher.call_sync(rpc)
    except RpcException as e:
        raise TaskException(
            errno.ENXIO,
            'Cannot reconfigure directory services: {0}'.format(str(e))
        )
def run(self, id, updated_params):
    """Merge updated_params into an existing backup entry and announce it."""
    if not self.datastore.exists('backup', ('id', '=', id)):
        raise TaskException(errno.ENOENT, 'Backup {0} not found'.format(id))

    entry = self.datastore.get_by_id('backup', id)
    entry.update(updated_params)
    self.datastore.update('backup', id, entry)

    self.dispatcher.emit_event('backup.changed', {
        'operation': 'update',
        'ids': [id]
    })
    return id
def run(self, id, updated_params):
    """Update an iSCSI auth group and regenerate/reload the ctl service."""
    if not self.datastore.exists('iscsi.auth', ('id', '=', id)):
        raise TaskException(errno.ENOENT, 'Auth group {0} does not exist'.format(id))

    entry = self.datastore.get_by_id('iscsi.auth', id)
    entry.update(updated_params)
    self.datastore.update('iscsi.auth', id, entry)

    # Push the new config out and make ctl pick it up.
    self.dispatcher.call_sync('etcd.generation.generate_group', 'ctl')
    self.dispatcher.call_sync('service.reload', 'ctl')

    self.dispatcher.dispatch_event('iscsi.auth.changed', {
        'operation': 'update',
        'ids': [id]
    })
def run(self, keytab):
    """Insert a new kerberos keytab entry and regenerate the system keytab."""
    name = keytab['name']
    if self.datastore.exists('kerberos.keytabs', ('name', '=', name)):
        raise TaskException(errno.EEXIST, 'Keytab {0} already exists'.format(name))

    pkey = self.datastore.insert('kerberos.keytabs', keytab)
    generate_keytab(self.datastore)
    self.dispatcher.dispatch_event('kerberos.keytab.changed', {
        'operation': 'create',
        'ids': [pkey]
    })
    return pkey
def run(self, uuid):
    """Delete an rsyncd module, regenerate its config and restart rsyncd."""
    if self.datastore.get_by_id('rsyncd-module', uuid) is None:
        raise TaskException(errno.ENOENT, 'Rsync module {0} does not exist'.format(uuid))

    try:
        self.datastore.delete('rsyncd-module', uuid)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'rsyncd')
        self.dispatcher.call_sync('service.restart', 'rsyncd')
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG, 'Cannot delete rsync module: {0}'.format(str(e)))
    except RpcException as e:
        raise TaskException(errno.ENXIO, 'Cannot regenerate rsyncd {0}'.format(str(e)))

    self.dispatcher.dispatch_event('rsyncd.module.changed', {
        'operation': 'delete',
        'ids': [uuid]
    })
def run(self, id):
    """Delete a cloned network interface and reapply the network config."""
    entry = self.datastore.get_by_id('network.interfaces', id)
    if not entry:
        raise TaskException(errno.ENOENT, 'Interface {0} does not exist'.format(id))

    # Only cloned interface types may be deleted.
    if entry['type'] not in ('VLAN', 'LAGG', 'BRIDGE'):
        raise TaskException(errno.EBUSY, 'Cannot delete physical interface')

    self.datastore.delete('network.interfaces', id)
    try:
        results = self.dispatcher.call_sync(
            'networkd.configuration.configure_network', timeout=60)
        for code, message in results:
            self.add_warning(TaskWarning(code, message))
    except RpcException as e:
        raise TaskException(errno.ENXIO, 'Cannot reconfigure network: {0}'.format(str(e)))

    self.dispatcher.dispatch_event('network.interface.changed', {
        'operation': 'delete',
        'ids': [id]
    })
def run(self, id):
    """Bring up a network interface via networkd, surfacing warnings."""
    entry = self.datastore.get_by_id('network.interfaces', id)
    if not entry:
        raise TaskException(errno.ENOENT, 'Interface {0} does not exist'.format(id))
    if not entry['enabled']:
        raise TaskException(errno.ENXIO, 'Interface {0} is disabled'.format(id))

    try:
        results = self.dispatcher.call_sync('networkd.configuration.up_interface', id)
        for code, message in results:
            self.add_warning(TaskWarning(code, message))
    except RpcException as err:
        raise TaskException(errno.ENXIO, 'Cannot reconfigure interface: {0}'.format(str(err)))

    self.dispatcher.dispatch_event('network.interface.changed', {
        'operation': 'update',
        'ids': [id]
    })
def run(self, props):
    """Set the system clock and/or timezone.

    props: dict that may contain 'system_time' (parseable date string)
    and/or 'timezone' (tz database name).

    Raises TaskException(ENXIO) if the localtime config cannot be regenerated.
    """
    if 'system_time' in props:
        # BUG FIX: time.mktime() expects a struct_time, but parser.parse()
        # returns a datetime — convert with .timetuple() before calling it.
        timestamp = time.mktime(parser.parse(props['system_time']).timetuple())
        bsd.clock_settime(bsd.ClockType.REALTIME, timestamp)

    if 'timezone' in props:
        self.configstore.set('system.timezone', props['timezone'])
        try:
            self.dispatcher.call_sync('etcd.generation.generate_group', 'localtime')
        except RpcException as e:
            raise TaskException(
                errno.ENXIO, 'Cannot reconfigure system time: {0}'.format(str(e)))
def run(self, ssh):
    """Store new SSH settings and regenerate sshd configuration."""
    try:
        ConfigNode('service.sshd', self.configstore).update(ssh)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'sshd')
        self.dispatcher.dispatch_event('service.sshd.changed', {
            'operation': 'updated',
            'ids': None,
        })
    except RpcException as e:
        raise TaskException(errno.ENXIO, 'Cannot reconfigure SSH: {0}'.format(str(e)))
    # Caller uses this marker to reload the service.
    return 'RELOAD'
def run(self, id):
    """Fetch and parse the remote manifest for backup *id*.

    Returns the manifest dict with snapshot 'created_at' fields converted
    from epoch seconds to datetime objects.
    Raises TaskException if the backup, manifest file, or manifest content
    is missing/invalid.
    """
    backup = self.datastore.get_by_id('backup', id)
    if not backup:
        # CONSISTENCY FIX: was RpcException; sibling tasks in this file raise
        # TaskException for the identical "Backup {0} not found" condition.
        raise TaskException(errno.ENOENT, 'Backup {0} not found'.format(id))

    dirlist = self.run_subtask_sync(
        'backup.{0}.list'.format(backup['provider']),
        backup['properties']
    )
    if not any(e['name'] == MANIFEST_FILENAME for e in dirlist):
        raise TaskException(errno.ENOENT, 'No backup found at specified location')

    data = self.download(backup['provider'], backup['properties'], MANIFEST_FILENAME)
    try:
        manifest = loads(data)
    except ValueError:
        raise TaskException(errno.EINVAL, 'Invalid backup manifest')

    # Manifest stores snapshot timestamps as epoch seconds; surface datetimes.
    for snap in manifest.get('snapshots', []):
        snap['created_at'] = datetime.utcfromtimestamp(snap.get('created_at', 0))

    return manifest
def get_usb_devices(self):
    """Return attached USB devices as {'device', 'description'} dicts."""
    devices = []
    try:
        output = system('usbconfig')[0]
        if output.startswith('No device match'):
            return devices
        for line in output.rstrip().split('\n'):
            # "ugen0.1: <desc> ..." -> /dev node (trailing ':' stripped)
            node = os.path.join('/dev', line.split()[0][:-1])
            description = re.findall(r'<.*?>', line)[0].strip('><')
            devices.append({'device': node, 'description': description})
    except SubprocessException as e:
        raise TaskException(errno.EBUSY, e.err)
    return devices
def run(self, id, updated_fields):
    """Update an SMB share in the datastore and the samba registry.

    Performs the registry changes inside a samba transaction so a failure
    rolls back cleanly; on success samba is reloaded and, if the share was
    disabled, its active connections are dropped.
    """
    share = self.datastore.get_by_id('shares', id)
    oldname = share['name']
    # Renames are handled by deleting the old registry entry below.
    newname = updated_fields.get('name', oldname)
    share.update(updated_fields)
    self.datastore.update('shares', id, share)
    path = self.dispatcher.call_sync('share.translate_path', share['id'])
    try:
        smb_conf = smbconf.SambaConfig('registry')
        smb_conf.transaction_start()
        try:
            if oldname != newname:
                del smb_conf.shares[oldname]
            smb_share = smbconf.SambaShare()
            smb_conf.shares[newname] = smb_share
            # Re-fetch so we operate on the registry-backed object.
            smb_share = smb_conf.shares[newname]
            convert_share(self.dispatcher, smb_share, path, share['enabled'], share['properties'])
            smb_share.save()
        except BaseException as err:
            # Any failure inside the transaction cancels it before re-raising.
            smb_conf.transaction_cancel()
            raise TaskException(errno.EBUSY, f'Failed to update samba configuration: {err}')
        else:
            smb_conf.transaction_commit()
            reload_samba()
            # Disabling a share also kicks off its current clients.
            if not share['enabled']:
                drop_share_connections(share['name'])
    except smbconf.SambaConfigException:
        raise TaskException(errno.EFAULT, 'Cannot access samba registry')
    self.dispatcher.dispatch_event('share.smb.changed', {
        'operation': 'update',
        'ids': [id]
    })
def run(self, group):
    """Create a new group, allocating the next free GID when none is given.

    Returns the new group's primary key.
    """
    name = group['name']
    if self.datastore.exists('groups', ('name', '=', name)):
        raise TaskException(
            errno.EEXIST,
            'Group {0} already exists'.format(name)
        )
    if 'gid' in group and self.datastore.exists('groups', ('gid', '=', group['gid'])):
        raise TaskException(
            errno.EEXIST,
            'Group with GID {0} already exists'.format(group['gid'])
        )

    if 'gid' in group:
        gid = group.pop('gid')
    else:
        # Need to get next free GID
        gid = self.dispatcher.call_sync('group.next_gid')

    try:
        normalize_name(group, 'name')
        group['builtin'] = False
        group['gid'] = gid
        group.setdefault('sudo', False)
        gid = self.datastore.insert('groups', group)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'accounts')
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG, 'Cannot add group: {0}'.format(str(e)))
    except RpcException as e:
        raise TaskException(e.code, 'Cannot regenerate groups file: {0}'.format(e.message))

    self.dispatcher.dispatch_event('group.changed', {
        'operation': 'create',
        'ids': [gid]
    })
    return gid
def run(self, fd, logs=True, cores=False):
    """Stream a gzipped tar debug archive into *fd*.

    fd: descriptor wrapper (provides .fd) that receives the tar.gz stream.
    logs: also attach the last 3 days of system logs.
    cores: also attach the user core-dump directory.

    Raises TaskException(EPIPE) if the consumer stops reading.
    """
    try:
        with os.fdopen(fd.fd, 'wb') as f:
            with tarfile.open(fileobj=f, mode='w:gz', dereference=True) as tar:
                plugins = self.dispatcher.call_sync('management.get_plugin_names')
                total = len(plugins)
                done = 0
                # Iterate over plugins
                for plugin in plugins:
                    # Plugin collection covers the first 80% of progress.
                    self.set_progress(done / total * 80, 'Collecting debug info for {0}'.format(plugin))
                    try:
                        hooks = self.dispatcher.call_sync('management.collect_debug', plugin, timeout=600)
                    except RpcException as err:
                        # A failing plugin becomes a warning, not a fatal error.
                        self.add_warning(
                            TaskWarning(err.code, 'Cannot collect debug data for {0}: {1}'.format(plugin, err.message))
                        )
                        continue

                    for hook in hooks:
                        self.process_hook(hook, plugin, tar)

                    done += 1

                if logs:
                    hook = {
                        'type': 'AttachCommandOutput',
                        'name': 'system-log',
                        'command': ['/usr/local/sbin/logctl', '--last', '3d', '--dump'],
                        'shell': False,
                        'decode': False
                    }
                    self.set_progress(90, 'Collecting logs')
                    self.process_hook(hook, 'Logs', tar)

                if cores:
                    hook = {
                        'type': 'AttachDirectory',
                        'name': 'cores',
                        'path': '/var/db/system/cores',
                        'recursive': True
                    }
                    self.set_progress(95, 'Collecting core files')
                    self.process_hook(hook, 'UserCores', tar)
    except BrokenPipeError as err:
        # The receiving end went away (download timed out) — surface as EPIPE.
        raise TaskException(errno.EPIPE, 'The download timed out') from err
class CAInternalCreateTask(Task):
    """Create a self-signed internal certificate authority."""

    def verify(self, certificate):
        if self.datastore.exists('crypto.certificates', ('name', '=', certificate['name'])):
            raise VerifyException(
                errno.EEXIST, 'Certificate with given name already exists')
        return ['system']

    def run(self, certificate):
        try:
            # Fill in defaults for optional fields.
            certificate['key_length'] = certificate.get('key_length', 2048)
            certificate['digest_algorithm'] = certificate.get(
                'digest_algorithm', 'SHA256')
            certificate['lifetime'] = certificate.get('lifetime', 3650)

            key = generate_key(certificate['key_length'])
            cert = create_certificate(certificate)
            cert.set_pubkey(key)
            # NOTE(review): newer pyOpenSSL on Python 3 expects bytes for the
            # extension type/value arguments — confirm against the pinned version.
            cert.add_extensions([
                crypto.X509Extension("basicConstraints", True, "CA:TRUE, pathlen:0"),
                crypto.X509Extension("keyUsage", True, "keyCertSign, cRLSign"),
                crypto.X509Extension("subjectKeyIdentifier", False, "hash", subject=cert),
            ])
            cert.set_serial_number(1)
            cert.sign(key, str(certificate['digest_algorithm']))

            certificate['type'] = 'CA_INTERNAL'
            certificate['certificate'] = crypto.dump_certificate(
                crypto.FILETYPE_PEM, cert)
            certificate['privatekey'] = crypto.dump_privatekey(
                crypto.FILETYPE_PEM, key)
            certificate['serial'] = 1

            self.datastore.insert('crypto.certificates', certificate)
            self.dispatcher.call_sync('etcd.generation.generate_group', 'crypto')
        # BUG FIX: 'except X, e' is Python 2 syntax and a SyntaxError under
        # Python 3 (this file uses f-strings elsewhere, so it targets py3).
        except DatastoreException as e:
            raise TaskException(
                errno.EBADMSG, 'Cannot create internal CA: {0}'.format(str(e)))
        except RpcException as e:
            raise TaskException(
                errno.ENXIO, 'Cannot generate certificate: {0}'.format(str(e)))
def run(self, props):
    """Apply general system settings (hostname, locale, timezone, keymap,
    syslog target) and regenerate the affected configuration."""
    if 'hostname' in props:
        netif.set_hostname(props['hostname'])

    # Straight pass-through settings.
    for key, store_key in (
        ('description', 'system.description'),
        ('tags', 'system.tags'),
        ('language', 'system.language'),
    ):
        if key in props:
            self.configstore.set(store_key, props[key])

    if 'timezone' in props:
        new = props['timezone']
        old = self.configstore.get('system.timezone')
        if new != old:
            # Reschedule calendar tasks into the new timezone and tell the user.
            count = self.run_subtask_sync('calendar_task.change_timezone', new)
            self.add_warning(TaskWarning(
                errno.ENXIO,
                "{0} calendar tasks rescheduled from timezone '{1}' to '{2}'".format(count, old, new)))
            self.configstore.set('system.timezone', new)
            os.environ['TZ'] = new

    if 'console_keymap' in props:
        new = props['console_keymap']
        old = self.configstore.get('system.console.keymap')
        if new != old:
            with open('/dev/console') as fd:
                system('/usr/sbin/kbdcontrol', '-l', props['console_keymap'], file_obj_stdin=fd)
            self.configstore.set('system.console.keymap', new)

    syslog_changed = 'syslog_server' in props
    if syslog_changed:
        self.configstore.set('system.syslog_server', props['syslog_server'])

    try:
        self.dispatcher.call_sync('etcd.generation.generate_group', 'localtime')
        if syslog_changed:
            # Nudge logd to re-read its config.
            self.dispatcher.call_sync('serviced.job.send_signal', 'org.freenas.logd', signal.SIGHUP)
    except RpcException as e:
        raise TaskException(
            errno.ENXIO,
            'Cannot reconfigure system: {0}'.format(str(e),)
        )

    self.dispatcher.dispatch_event('system.general.changed', {
        'operation': 'update',
    })
def run(self, pool):
    """Start a ZFS scrub on *pool*.

    Registers finish/abort event handlers and clears the completion event
    before kicking off the scrub; sets self.started on success.
    Raises TaskException(EFAULT) on libzfs errors.
    """
    self.pool = pool
    self.dispatcher.register_event_handler("fs.zfs.scrub.finish", self.__scrub_finished)
    self.dispatcher.register_event_handler("fs.zfs.scrub.abort", self.__scrub_aborted)
    self.finish_event.clear()
    try:
        zfs = libzfs.ZFS()
        pool = zfs.get(self.pool)
        pool.start_scrub()
        self.started = True
    # BUG FIX: 'except X, err' is Python 2 syntax — SyntaxError under Python 3.
    except libzfs.ZFSException as err:
        raise TaskException(errno.EFAULT, str(err))
def run(self, pool_name, path, type, params=None):
    """Create a ZFS dataset or zvol at *path* inside pool *pool_name*.

    params: optional ZFS properties; the 'sparse' key is consumed here and
    controls sparse-volume creation instead of being passed through.
    Raises TaskException(EFAULT) on libzfs errors.
    """
    self.check_type(type)
    try:
        params = params or {}
        # BUG FIX: previously 'sparse' was only deleted when truthy, so a
        # literal sparse=False leaked through as a (bogus) ZFS property.
        sparse = bool(params.pop('sparse', False))
        zfs = libzfs.ZFS()
        pool = zfs.get(pool_name)
        pool.create(path, params, fstype=self.type, sparse_vol=sparse)
    # BUG FIX: 'except X, err' is Python 2 syntax — SyntaxError under Python 3.
    except libzfs.ZFSException as err:
        raise TaskException(errno.EFAULT, str(err))
def run(self, pool_name, name, properties):
    """Set or inherit ZFS properties on dataset *name*.

    properties: mapping of property name -> {'value': ...}; a value of None
    reverts the property to its inherited value, and unknown property names
    are stored as user (custom) properties.
    Raises TaskException(EFAULT) on libzfs errors.
    """
    try:
        zfs = libzfs.ZFS()
        dataset = zfs.get_dataset(name)
        for k, v in properties.items():
            if k in dataset.properties:
                if v['value'] is None:
                    dataset.properties[k].inherit()
                else:
                    dataset.properties[k].value = v['value']
            else:
                prop = libzfs.ZFSUserProperty(v['value'])
                dataset.properties[k] = prop
    # BUG FIX: 'except X, err' is Python 2 syntax — SyntaxError under Python 3.
    except libzfs.ZFSException as err:
        raise TaskException(errno.EFAULT, str(err))
def get_driver_and_check_capabilities(self, id, clones=False, snapshots=False):
    """Return the driver type of datastore *id*, checking requested features.

    Raises RpcException(ENOENT) when the datastore is missing and
    TaskException(ENOTSUP) when a requested capability is unsupported.
    """
    ds = self.dispatcher.call_sync('vm.datastore.query', [('id', '=', id)], {'single': True})
    if not ds:
        raise RpcException(errno.ENOENT, 'Datastore {0} not found'.format(id))

    wanted = []
    if clones:
        wanted.append('clones')
    if snapshots:
        wanted.append('snapshots')

    for feature in wanted:
        if feature not in ds['capabilities']:
            raise TaskException(
                errno.ENOTSUP,
                'Datastore {0} does not support {1}'.format(ds['name'], feature))

    return ds['type']
def run(self, id):
    """Delete a group and detach it from all member users."""
    try:
        group = self.datastore.get_by_id('groups', id)
        if group is None:
            raise TaskException(errno.ENOENT, 'Group with given ID does not exist')

        # Remove group from users
        # NOTE(review): the query matches users whose 'groups' list contains
        # the group's *gid*, but the removal drops *id* (the primary key) from
        # that list — confirm which identifier user 'groups' entries actually
        # hold; one of the two is likely wrong.
        for i in self.datastore.query_stream(
            'users', ('groups', 'in', group['gid'])):
            i['groups'].remove(id)
            self.datastore.update('users', i['id'], i)

        self.datastore.delete('groups', id)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'accounts')
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG, 'Cannot delete group: {0}'.format(str(e)))

    self.dispatcher.dispatch_event('group.changed', {
        'operation': 'delete',
        'ids': [id]
    })
def run(self, backup):
    """Create a backup definition, initializing its provider properties.

    Returns the new backup id.
    Raises TaskException(EEXIST) if the id or name is already taken.
    """
    if 'id' in backup and self.datastore.exists('backup', ('id', '=', backup['id'])):
        raise TaskException(
            errno.EEXIST, 'Backup with ID {0} already exists'.format(backup['id']))

    if self.datastore.exists('backup', ('name', '=', backup['name'])):
        raise TaskException(
            errno.EEXIST, 'Backup with name {0} already exists'.format(backup['name']))

    normalize(backup, {'properties': {}})

    # CONSISTENCY: use run_subtask_sync like the other tasks in this file
    # instead of the older join_subtasks(run_subtask(...)) single-result unpack.
    backup['properties'] = self.run_subtask_sync(
        'backup.{0}.init'.format(backup['provider']), backup)

    id = self.datastore.insert('backup', backup)
    self.dispatcher.emit_event('backup.changed', {
        'operation': 'create',
        'ids': [id]
    })
    return id
def run(self, id):
    """Delete an SMB share from the datastore and the samba registry.

    The registry change runs inside a samba transaction; on success samba is
    reloaded and the share's active connections are dropped.
    """
    share = self.datastore.get_by_id('shares', id)
    self.datastore.delete('shares', id)
    try:
        smb_conf = smbconf.SambaConfig('registry')
        smb_conf.transaction_start()
        try:
            del smb_conf.shares[share['name']]
        except BaseException as err:
            smb_conf.transaction_cancel()
            # BUG FIX: the message was passed as a bare template plus a stray
            # positional argument ('...{0}', err) — interpolate before raising.
            raise TaskException(
                errno.EBUSY, 'Failed to update samba configuration: {0}'.format(err))
        else:
            smb_conf.transaction_commit()
            reload_samba()
            drop_share_connections(share['name'])
    except smbconf.SambaConfigException:
        raise TaskException(errno.EFAULT, 'Cannot access samba registry')

    self.dispatcher.dispatch_event('share.smb.changed', {
        'operation': 'delete',
        'ids': [id]
    })
def run(self, id, updated_fields):
    """Update a VM datastore, delegating to its driver-specific subtask."""
    ds = self.datastore.get_by_id('vm.datastores', id)
    if not ds:
        raise TaskException(errno.ENOENT, 'Datastore {0} not found'.format(id))

    # Let the driver apply the change first; then persist our copy.
    self.run_subtask_sync_with_progress(
        'vm.datastore.{0}.update'.format(ds['type']), id, updated_fields)
    ds.update(updated_fields)
    self.datastore.update('vm.datastores', id, ds)

    self.dispatcher.emit_event('vm.datastore.changed', {
        'operation': 'update',
        'ids': [id]
    })
    return id
def run(self, id, updated_fields):
    """Replace the certificate body of an existing entry with a signed cert.

    Marks the entry as CERT_EXISTING and regenerates the crypto config group.
    Raises TaskException(EBADMSG) on datastore errors.
    """
    try:
        certificate = self.datastore.get_by_id('crypto.certificates', id)
        certificate['certificate'] = updated_fields['certificate']
        certificate['type'] = 'CERT_EXISTING'
        self.datastore.update('crypto.certificates', id, certificate)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'crypto')
    # BUG FIX: 'except X, e' is Python 2 syntax — SyntaxError under Python 3.
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG, 'Cannot update CSR: {0}'.format(str(e)))
def run(self, id):
    """Delete boot environment *id*, waiting for the deletion event."""
    if not FindClone(id):
        raise TaskException(errno.ENOENT, 'Boot environment {0} not found'.format(id))

    def delete_clone():
        # Executed by the dispatcher; failure surfaces as a task error.
        if not DeleteClone(id):
            raise TaskException(
                errno.EIO, 'Cannot delete the {0} boot environment'.format(id))

    self.dispatcher.exec_and_wait_for_event(
        'boot.environment.changed',
        lambda args: args['operation'] == 'delete' and id in args['ids'],
        delete_clone,
        600
    )
def run(self, group):
    """Insert a group using an explicit or freshly allocated primary key.

    Raises TaskException(EBADMSG) on datastore errors.
    """
    if 'id' in group:
        gid = group.pop('id')
    else:
        # Need to get next free GID
        # NOTE(review): the create task elsewhere in this file calls
        # 'group.next_gid' (singular) — confirm 'groups.next_gid' is correct.
        gid = self.dispatcher.call_sync('groups.next_gid')

    try:
        group['builtin'] = False
        self.datastore.insert('groups', group, pkey=gid)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'accounts')
    # BUG FIX: 'except X, e' is Python 2 syntax — SyntaxError under Python 3.
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG, 'Cannot add group: {0}'.format(str(e)))
def run(self, id, updated_fields):
    """Merge updated_fields into an existing alert filter."""
    try:
        entry = self.datastore.get_by_id('alert.filters', id)
        entry.update(updated_fields)
        self.datastore.update('alert.filters', id, entry)
    except DatastoreException as e:
        raise TaskException(
            errno.EBADMSG,
            'Cannot update alert filter: {0}'.format(str(e))
        )

    self.dispatcher.dispatch_event('alert.filter.changed', {
        'operation': 'update',
        'ids': [id],
    })
def run(self, iscsi):
    """Apply iSCSI service settings and regenerate/apply the ctl config.

    Raises TaskException(ENXIO) if any RPC step fails.
    """
    try:
        node = ConfigNode('service.iscsi', self.configstore)
        # BUG FIX: the updated settings were silently dropped — the ConfigNode
        # was created but update() was never called (compare the SSH and
        # directory-service tasks in this file, which call node.update(...)).
        node.update(iscsi)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'services')
        self.dispatcher.call_sync('etcd.generation.generate_group', 'ctl')
        self.dispatcher.call_sync('service.apply_state', 'iscsi')
        self.dispatcher.dispatch_event('service.iscsi.changed', {
            'operation': 'updated',
            'ids': None,
        })
    except RpcException as e:
        raise TaskException(errno.ENXIO, 'Cannot reconfigure iSCSI: {0}'.format(str(e)))