def run(self, webdav):
    """Reconfigure the WebDAV service; returns 'RESTART' to bounce it."""
    # Validate against the merged (current + requested) configuration.
    state = ConfigNode('service.webdav', self.configstore).__getstate__()
    state.update(webdav)

    if state['http_port'] == state['https_port']:
        raise TaskException(errno.EINVAL, 'HTTP and HTTPS ports cannot be the same')

    if 'HTTPS' in state['protocol'] and not state['certificate']:
        raise TaskException(errno.EINVAL, 'SSL protocol specified without choosing a certificate')

    if state['certificate']:
        cert = self.dispatcher.call_sync(
            'crypto.certificate.query',
            [('name', '=', state['certificate'])],
            {'single': True}
        )
        if not cert:
            raise TaskException(errno.ENOENT, 'SSL Certificate not found.')

    try:
        # Persist only the requested changes, then regenerate configs.
        ConfigNode('service.webdav', self.configstore).update(webdav)
        for group in ('services', 'webdav'):
            self.dispatcher.call_sync('etcd.generation.generate_group', group)
        self.dispatcher.dispatch_event('service.webdav.changed', {
            'operation': 'updated',
            'ids': None,
        })
    except RpcException as e:
        raise TaskException(
            errno.ENXIO,
            'Cannot reconfigure WebDAV: {0}'.format(str(e))
        )

    return 'RESTART'
def run(self, key_type, key_length):
    """
    Generate OpenVPN key material.

    key_type -- 'dh-parameters' creates DH parameters of key_length bits;
                any other value creates a TLS auth key.
    key_length -- DH parameter size in bits (unused for TLS auth keys).
    """
    try:
        if key_type == 'dh-parameters':
            # openssl prints the PEM-encoded parameters on stdout
            dhparams = system('/usr/bin/openssl', 'dhparam', str(key_length))[0]
            self.configstore.set('service.openvpn.dh', dhparams)
        else:
            tls_auth_key = system('/usr/local/sbin/openvpn', '--genkey', '--secret', '/dev/stdout')[0]
            self.configstore.set('service.openvpn.tls_auth', tls_auth_key)

        # Both branches performed identical regeneration/dispatch; hoisted here.
        self.dispatcher.call_sync('etcd.generation.generate_group', 'openvpn')
        self.dispatcher.dispatch_event('service.openvpn.changed', {
            'operation': 'update',
            'ids': None,
        })
    except RpcException as e:
        raise TaskException(
            errno.ENXIO,
            'Cannot reconfigure OpenVPN: {0}'.format(str(e)))
    except SubprocessException as e:
        # Fixed typo in user-facing message ('Cannont' -> 'Cannot').
        raise TaskException(
            errno.ENOENT,
            'Cannot create requested key - check your system setup {0}'.format(e))
def run(self, id):
    """Remove a tunable and regenerate the config group it belongs to."""
    tunable = self.datastore.get_by_id('tunables', id)
    if tunable is None:
        raise TaskException(errno.ENOENT, 'Tunable with given ID does not exist')

    try:
        self.datastore.delete('tunables', id)
        self.dispatcher.dispatch_event('tunable.changed', {
            'operation': 'delete',
            'ids': [id]
        })

        kind = tunable['type']
        if kind == 'LOADER':
            # Loader regeneration can be slow, hence the generous timeout.
            self.dispatcher.call_sync('etcd.generation.generate_group', 'loader', timeout=600)
        elif kind == 'RC':
            self.dispatcher.call_sync('etcd.generation.generate_group', 'services')
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG, 'Cannot delete Tunable: {0}'.format(str(e)))
    except RpcException as e:
        raise TaskException(errno.ENXIO, 'Cannot generate tunable: {0}'.format(str(e)))
def run(self, pool, new_vdevs, updated_vdevs):
    """
    Extend a ZFS pool: add new top-level vdevs and/or attach devices to
    existing vdevs, then wait for the resulting resilver to finish.

    Raises TaskException (ENOENT) when a target vdev GUID is unknown and
    (EFAULT) on any libzfs failure.
    """
    try:
        self.pool = pool
        zfs = libzfs.ZFS()
        pool = zfs.get(pool)

        if new_vdevs:
            nvroot = convert_topology(zfs, new_vdevs)
            pool.attach_vdevs(nvroot)

        if updated_vdevs:
            for i in updated_vdevs:
                # int() instead of the Python 2-only long()
                vdev = pool.vdev_by_guid(int(i['target_guid']))
                if not vdev:
                    raise TaskException(
                        errno.ENOENT,
                        'Vdev with GUID {0} not found'.format(i['target_guid']))

                new_vdev = libzfs.ZFSVdev(zfs, i['vdev']['type'])
                new_vdev.path = i['vdev']['path']
                vdev.attach(new_vdev)

            # Wait for resilvering process to complete
            self.started = True
            self.dispatcher.test_or_wait_for_event(
                'fs.zfs.resilver.finished',
                lambda args: args['guid'] == str(pool.guid),
                lambda: pool.scrub.state == libzfs.ScanState.SCANNING and
                    pool.scrub.function == libzfs.ScanFunction.RESILVER)
    except libzfs.ZFSException as err:
        # Python 3 exception syntax; the original used Python 2's
        # 'except X, err' which is a SyntaxError under Python 3.
        raise TaskException(errno.EFAULT, str(err))
def run(self, tunable):
    """Create a tunable; applies SYSCTL values immediately. Returns the pkey."""
    if self.datastore.exists('tunables', ('var', '=', tunable['var'])):
        raise TaskException(errno.EEXIST, 'This variable already exists.')

    try:
        # Tunables are enabled unless the caller says otherwise.
        tunable.setdefault('enabled', True)

        if tunable['enabled'] and tunable['type'] == 'SYSCTL':
            sysctl_set(tunable['var'], tunable['value'])

        pkey = self.datastore.insert('tunables', tunable)
        self.dispatcher.dispatch_event('tunable.changed', {
            'operation': 'create',
            'ids': [pkey]
        })

        if tunable['enabled']:
            kind = tunable['type']
            if kind == 'LOADER':
                self.dispatcher.call_sync('etcd.generation.generate_group', 'loader', timeout=3600)
            elif kind == 'RC':
                self.dispatcher.call_sync('etcd.generation.generate_group', 'services')
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG, 'Cannot create Tunable: {0}'.format(str(e)))
    except RpcException as e:
        raise TaskException(errno.ENXIO, 'Cannot generate tunable: {0}'.format(str(e)))
    except OSError as e:
        raise TaskException(errno.ENXIO, 'Failed to set sysctl: {0}'.format(str(e)))

    return pkey
def run(self, id):
    """Renew the DHCP lease on the given network interface."""
    iface = self.datastore.get_by_id('network.interfaces', id)
    if not iface:
        raise TaskException(errno.ENOENT, 'Interface {0} does not exist'.format(id))

    if not iface['enabled']:
        raise TaskException(errno.ENXIO, 'Interface {0} is disabled'.format(id))

    if not iface['dhcp']:
        raise TaskException(
            errno.EINVAL,
            'Cannot renew a lease on interface that is not configured for DHCP'
        )

    try:
        self.dispatcher.call_sync('networkd.configuration.renew_lease', id)
    except RpcException as err:
        # Propagate networkd's own error code/message verbatim.
        raise TaskException(err.code, err.message, err.extra)

    self.dispatcher.dispatch_event('network.interface.changed', {
        'operation': 'update',
        'ids': [id]
    })
def run(self, id, delete_dataset=False):
    """
    Delete a share, its on-disk config file, and optionally the backing
    dataset (only valid for DATASET/ZVOL shares).
    """
    share = self.datastore.get_by_id('shares', id)
    # Bug fix: check existence before dereferencing the record. The original
    # read share['target_type'] first and crashed with TypeError when the
    # share did not exist.
    if not share:
        raise TaskException(errno.ENOENT, 'Share not found')

    target_type = share['target_type']
    dataset = None

    if target_type in ('DATASET', 'ZVOL'):
        dataset = share['target_path']
    elif delete_dataset:
        raise TaskException(errno.EINVAL, 'Cannot delete dataset for non-dataset share')

    path = self.dispatcher.call_sync('share.get_directory_path', share['id'])
    try:
        delete_config(
            path,
            '{0}-{1}'.format(share['type'], share['name'])
        )
    except OSError:
        # Config file may already be gone; deletion is best-effort.
        pass

    self.run_subtask_sync('share.{0}.delete'.format(share['type']), id)
    self.dispatcher.dispatch_event('share.changed', {
        'operation': 'delete',
        'ids': [id]
    })

    if dataset and delete_dataset:
        self.run_subtask_sync('volume.dataset.delete', dataset)
def run(self, ftp):
    """Reconfigure the FTP service; returns 'RESTART'."""
    # Masks arrive as strings and are stored as integers.
    if ftp.get('filemask'):
        ftp['filemask'] = get_integer(ftp['filemask'])

    if ftp.get('dirmask'):
        ftp['dirmask'] = get_integer(ftp['dirmask'])

    if ftp.get('anonymous_path'):
        if not os.path.exists(ftp['anonymous_path']):
            # Fixed grammar in user-facing message ('does not exists').
            raise TaskException(
                errno.ENOENT,
                'Directory {0} does not exist'.format(ftp['anonymous_path']))

    try:
        node = ConfigNode('service.ftp', self.configstore)
        node.update(ftp)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'ftp')
        self.dispatcher.dispatch_event('service.ftp.changed', {
            'operation': 'updated',
            'ids': None,
        })
    except RpcException as e:
        raise TaskException(errno.ENXIO, 'Cannot reconfigure FTP: {0}'.format(str(e)))

    return 'RESTART'
def run(self, ipfs):
    # Reconfigure the IPFS service; returns 'RESTART' so the caller restarts it.
    # When the storage path changes, the new directory is created (root-owned)
    # and the old directory's *contents* are migrated into it.
    try:
        node = ConfigNode('service.ipfs', self.configstore)
        old_path = node['path'].value

        if 'path' in ipfs and ipfs['path'] != old_path:
            if not os.path.exists(ipfs['path']):
                os.makedirs(ipfs['path'])
            # jkh says that the ipfs path should be owned by root
            os.chown(ipfs['path'], 0, 0)

            # Only move the contents and not the entire folder
            # there could be other stuff in that folder
            # (a careless user might have merged this with his other files)
            # also this folder could be a dataset in which case a simple move will fail
            # so lets just move the internal contents of this folder over
            if old_path is not None and os.path.exists(old_path):
                try:
                    for item in os.listdir(old_path):
                        shutil.move(old_path + '/' + item, ipfs['path'])
                except shutil.Error as serr:
                    raise TaskException(
                        errno.EIO,
                        "Migrating ipfs path resulted in error: {0}".format(serr))

        node.update(ipfs)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'ipfs')
        self.dispatcher.dispatch_event('service.ipfs.changed', {
            'operation': 'updated',
            'ids': None,
        })
    except RpcException as e:
        raise TaskException(
            errno.ENXIO,
            'Cannot reconfigure IPFS: {0}'.format(str(e))
        )

    return 'RESTART'
def run(self, id, updated_fields):
    """Update a group record and regenerate the accounts files."""
    if 'name' in updated_fields:
        new_name = updated_fields['name']
        if self.datastore.exists('groups', ('name', '=', new_name), ('id', '!=', id)):
            raise TaskException(
                errno.EEXIST,
                'Group {0} already exists'.format(new_name)
            )

    try:
        normalize_name(updated_fields, 'name')
        group = self.datastore.get_by_id('groups', id)
        if group is None:
            raise TaskException(errno.ENOENT, 'Group {0} does not exist'.format(id))

        group.update(updated_fields)
        self.datastore.update('groups', id, group)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'accounts')
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG, 'Cannot update group: {0}'.format(str(e)))
    except RpcException as e:
        raise TaskException(e.code, 'Cannot regenerate groups file: {0}'.format(e.message))

    self.dispatcher.dispatch_event('group.changed', {
        'operation': 'update',
        'ids': [id]
    })
def run(self, name):
    """Activate the named boot environment, failing loudly if it is missing."""
    if not FindClone(name):
        raise TaskException(errno.ENOENT, 'Boot environment {0} not found'.format(name))

    if not ActivateClone(name):
        raise TaskException(errno.EIO, 'Cannot activate the {0} boot environment'.format(name))
def run(self, id, updated_params):
    """
    Update a directory service entry. Renames also update the configstore
    search order and trigger a dscached config reload.
    """
    directory = self.datastore.get_by_id('directories', id)
    old_name = None

    if directory['immutable']:
        raise TaskException(
            errno.EPERM,
            'Directory {0} is immutable'.format(directory['name']))

    if 'name' in updated_params:
        old_name = directory['name']
        if self.datastore.exists('directories', ('name', '=', updated_params['name'])):
            # Bug fix: report the conflicting *new* name; the original printed
            # the directory's current name, which is confusing on rename.
            raise TaskException(
                errno.EEXIST,
                'Directory {0} already exists'.format(updated_params['name']))

    directory.update(updated_params)
    self.datastore.update('directories', id, directory)
    self.dispatcher.call_sync('dscached.management.configure_directory', id)
    self.dispatcher.dispatch_event('directory.changed', {
        'operation': 'update',
        'ids': [id]
    })

    # Keep the search order in sync after a rename.
    if old_name:
        node = ConfigNode('directory', self.configstore)
        search_order = node['search_order'].value
        if old_name in search_order:
            search_order.remove(old_name)
        search_order.append(directory['name'])
        node['search_order'] = search_order
        self.dispatcher.call_sync('dscached.management.reload_config')
def run(self, dataset):
    # Incrementally index a dataset by diffing two of its snapshots:
    # '...:ref' (taken by the previous run) against '...:now' (taken here).
    self.join_subtasks(self.run_subtask('volume.snapshot.create', {
        'dataset': dataset,
        'name': 'org.freenas.indexer:now',
        'hidden': True
    }))

    prev = self.dispatcher.call_sync(
        'volume.snapshot.query',
        [
            ('dataset', '=', dataset),
            ('name', '=', 'org.freenas.indexer:ref')
        ],
        {'single': True}
    )

    # Without a reference snapshot there is nothing to diff against.
    if not prev:
        raise TaskException(errno.ENOENT, 'Reference snapshot not found')

    zfs = libzfs.ZFS()
    ds = zfs.get_dataset(dataset)
    if not ds:
        raise TaskException(errno.ENOENT, 'Dataset {0} not found'.format(dataset))

    # Feed every path that changed between the two snapshots to the indexer.
    for rec in ds.diff('{0}@org.freenas.indexer:ref'.format(dataset), '{0}@org.freenas.indexer:now'.format(dataset)):
        collect(self.datastore, rec.path)

    # Drop the old reference and promote 'now' to be the new reference.
    self.join_subtasks(self.run_subtask('volume.snapshot.delete', '{0}@org.freenas.indexer:ref'.format(dataset)))
    self.join_subtasks(self.run_subtask('volume.snapshot.update', '{0}@org.freenas.indexer:now'.format(dataset), {
        'name': 'org.freenas.indexer:ref',
    }))
def open_ssh_connection(dispatcher, backup):
    """
    Open an authenticated SSH transport to the peer referenced by
    backup['peer'] and return it. Tries key auth first when a private key
    is configured, otherwise falls back to password auth.
    """
    peer = dispatcher.call_sync('peer.query', [('id', '=', backup['peer'])], {'single': True})
    if not peer:
        raise TaskException(errno.ENOENT, 'Cannot find peer {0}'.format(backup['peer']))

    if peer['type'] != 'ssh':
        raise TaskException(errno.EINVAL, 'Invalid peer type: {0}'.format(peer['type']))

    creds = peer['credentials']
    try:
        session = transport.Transport(creds['address'], creds.get('port', 22))
        # Large window / rekey limits keep bulk transfers from stalling.
        session.window_size = 1024 * 1024 * 1024
        session.packetizer.REKEY_BYTES = pow(2, 48)
        session.packetizer.REKEY_PACKETS = pow(2, 48)
        session.start_client()

        if creds.get('privkey'):
            if try_key_auth(session, creds):
                return session
            raise Exception('Cannot authenticate using keys')

        session.auth_password(creds['username'], creds['password'])
        return session
    except socket.gaierror as err:
        raise Exception('Connection error: {0}'.format(err.strerror))
    except ssh_exception.BadAuthenticationType as err:
        raise Exception('Cannot authenticate: {0}'.format(str(err)))
def run(self, ssh):
    """Reconfigure the SSH service; returns 'RELOAD'."""
    current = self.dispatcher.call_sync('service.query', [('name', '=', 'sshd')], {
        'single': True,
        'select': 'config'
    })

    # Reject a port change onto a port some other process already holds.
    port = ssh.get('port')
    if port and port != current['port'] and is_port_open(port):
        raise TaskException(
            errno.EBUSY,
            'Port number : {0} is already in use'.format(port))

    try:
        ConfigNode('service.sshd', self.configstore).update(ssh)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'sshd')
        self.dispatcher.dispatch_event('service.sshd.changed', {
            'operation': 'updated',
            'ids': None,
        })
    except RpcException as e:
        raise TaskException(errno.ENXIO, 'Cannot reconfigure SSH: {0}'.format(str(e)))

    return 'RELOAD'
def run(self, id, updated_fields):
    """
    Update an alert filter. A special 'index' key in updated_fields
    repositions the filter in the global ordering rather than being stored
    on the record itself.
    """
    alertfilter = self.datastore.get_by_id('alert.filters', id)
    order = self.configstore.get('alert.filter.order')
    if not alertfilter:
        # Bug fix: tasks report failures via TaskException; the original
        # raised RpcException here, inconsistent with this and every other
        # task in the file.
        raise TaskException(errno.ENOENT, 'Alert filter doesn\'t exist')

    if 'id' in updated_fields and updated_fields['id'] != alertfilter['id']:
        raise TaskException(errno.EINVAL, 'Cannot change alert filter id')

    try:
        if 'index' in updated_fields:
            # Reposition in the ordering; 'index' is not persisted on the record.
            index = updated_fields.pop('index')
            order.remove(id)
            order.insert(index, id)
            self.configstore.set('alert.filter.order', order)

        alertfilter.update(updated_fields)
        self.datastore.update('alert.filters', id, alertfilter)
    except DatastoreException as e:
        raise TaskException(
            errno.EBADMSG,
            'Cannot update alert filter: {0}'.format(str(e))
        )

    self.dispatcher.dispatch_event('alert.filter.changed', {
        'operation': 'update',
        'ids': order,
    })
def run(self, id, updated_params):
    """Update an iSCSI target and reload the CTL configuration."""
    if not self.datastore.exists('iscsi.targets', ('id', '=', id)):
        raise TaskException(errno.ENOENT, 'Target {0} does not exist'.format(id))

    if 'id' in updated_params:
        updated_params['id'] = updated_params['id'].lower()

    if 'extents' in updated_params:
        # Every extent must reference an existing iSCSI share, and LUN
        # numbers must be unique within the target.
        seen_numbers = set()
        for extent in updated_params['extents']:
            if not self.datastore.exists('shares', ('type', '=', 'iscsi'), ('name', '=', extent['name'])):
                raise TaskException(errno.ENOENT, "Share {0} not found".format(extent['name']))

            if extent['number'] in seen_numbers:
                raise TaskException(errno.EEXIST, "LUN number {0} used twice".format(extent['number']))

            seen_numbers.add(extent['number'])

    target = self.datastore.get_by_id('iscsi.targets', id)
    target.update(updated_params)
    self.datastore.update('iscsi.targets', id, target)
    self.dispatcher.call_sync('etcd.generation.generate_group', 'ctl')
    self.dispatcher.call_sync('service.reload', 'ctl')

    self.dispatcher.dispatch_event('iscsi.target.changed', {
        'operation': 'update',
        'ids': [id]
    })
def run(self, dc):
    # Reconfigure the Domain Controller service, which runs inside a dedicated
    # 'zentyal_domain_controller' virtual machine.
    self.set_progress(0, 'Checking Domain Controller service state')
    node = ConfigNode('service.dc', self.configstore).__getstate__()
    node.update(dc)

    if not node.get('volume'):
        raise TaskException(
            errno.ENXIO,
            'Domain controller service is hosted by the virtual machine.'
            'Please provide the valid zfs pool name for the virtual machine volume creation.'
        )
    else:
        try:
            # Probe whether the DC virtual machine already exists.
            self.dispatcher.call_sync(
                'service.dc.check_dc_vm_availability')
        except RpcException:
            # VM is unavailable - create it from the zentyal template.
            dc['vm_id'] = self.run_subtask_sync(
                'vm.create', {
                    'name': 'zentyal_domain_controller',
                    'template': {
                        'name': 'zentyal-4.2'
                    },
                    'target': node['volume'],
                    'config': {
                        'autostart': True
                    }
                },
                progress_callback=lambda p, m, e=None: self.chunk_progress(
                    5, 100, 'Creating Domain Controller virtual machine: ', p, m, e))
        finally:
            # Whether the VM existed or was just created, align its autostart
            # flag with the service's 'enable' state.
            vm_config = self.dispatcher.call_sync(
                'vm.query', [('id', '=', dc.get('vm_id', node['vm_id']))], {
                    'select': 'config',
                    'single': True
                })
            if not node['enable'] and vm_config['autostart']:
                vm_config['autostart'] = False
            elif node['enable'] and not vm_config['autostart']:
                vm_config['autostart'] = True
            self.run_subtask_sync(
                'vm.update',
                dc['vm_id'] if dc.get('vm_id') else node['vm_id'],
                {'config': vm_config})

    try:
        # Persist the requested settings and announce the change.
        node = ConfigNode('service.dc', self.configstore)
        node.update(dc)
        self.dispatcher.dispatch_event('service.dc.changed', {
            'operation': 'update',
            'ids': None,
        })
    except RpcException as e:
        raise TaskException(
            errno.ENXIO,
            'Cannot reconfigure DC vm service: {0}'.format(str(e)))
def run(self, nfs):
    """Reconfigure the NFS service; returns 'RESTART'."""
    current = self.dispatcher.call_sync('service.query', [('name', '=', 'nfs')], {'single': True})['config']

    # Reject port changes onto ports already held by other processes.
    for option in ('mountd_port', 'rpcstatd_port', 'rpclockd_port'):
        port = nfs.get(option)
        if port and port != current[option] and is_port_open(port, 'inet'):
            raise TaskException(
                errno.EBUSY,
                'Port number : {0} is already in use'.format(port))

    try:
        node = ConfigNode('service.nfs', self.configstore)
        node.update(nfs)
        for group in ('services', 'nfs'):
            self.dispatcher.call_sync('etcd.generation.generate_group', group)
        self.dispatcher.dispatch_event('service.nfs.changed', {
            'operation': 'updated',
            'ids': None,
        })
    except RpcException as e:
        raise TaskException(errno.ENXIO, 'Cannot reconfigure NFS: {0}'.format(str(e)))

    return 'RESTART'
def run(self, rsyncd):
    """Reconfigure the rsyncd service."""
    current = ConfigNode('service.rsyncd', self.configstore).__getstate__()

    if rsyncd.get('port') and is_port_open(rsyncd['port']):
        state = self.dispatcher.call_sync(
            'service.query',
            [('name', '=', 'rsyncd')],
            {'single': True, 'select': 'state'}
        )
        # An open port is acceptable only when it is rsyncd itself listening
        # on its currently-configured port.
        held_by_us = state == "RUNNING" and rsyncd['port'] == current['port']
        if not held_by_us:
            raise TaskException(errno.EINVAL, 'Provided port is already in use')

    try:
        ConfigNode('service.rsyncd', self.configstore).update(rsyncd)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'rsyncd')
        self.dispatcher.dispatch_event('service.rsyncd.changed', {
            'operation': 'updated',
            'ids': None,
        })
    except RpcException as e:
        raise TaskException(
            errno.ENXIO,
            'Cannot reconfigure Rsyncd: {0}'.format(str(e))
        )
def run(self, uuid, updated_fields):
    """Update an rsyncd module definition, regenerate config, restart rsyncd."""
    module = self.datastore.get_by_id('rsyncd-module', uuid)
    if module is None:
        raise TaskException(errno.ENOENT, 'Rsync module {0} does not exist'.format(uuid))

    module.update(updated_fields)

    # '/' or ']' in a module name would corrupt rsyncd.conf section syntax.
    if re.search(r'[/\]]', module['name']):
        raise TaskException(errno.EINVAL, 'The name cannot contain slash or a closing square bracket.')

    try:
        self.datastore.update('rsyncd-module', uuid, module)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'rsyncd')
        self.dispatcher.call_sync('service.restart', 'rsyncd')
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG, 'Cannot update rsync module: {0}'.format(str(e)))
    except RpcException as e:
        raise TaskException(errno.ENXIO, 'Cannot regenerate rsyncd {0}'.format(str(e)))

    self.dispatcher.dispatch_event('rsyncd.module.changed', {
        'operation': 'update',
        'ids': [uuid]
    })
def run(self, config_path, name, type):
    """
    Import a share from an on-disk configuration file and return the id of
    the newly created share record.
    """
    try:
        share = load_config(config_path, f'{type}-{name}', version=CONFIG_VERSION)
    except FileNotFoundError:
        raise VerifyException(
            errno.ENOENT,
            f'There is no share {name} of type {type} at {config_path} to be imported.'
        )
    except ValueError as err:
        raise VerifyException(errno.EINVAL, f'Cannot read configuration file: {err}')

    if share['type'] != type:
        raise VerifyException(
            errno.EINVAL,
            f'Share type {type} does not match configuration file entry type {share["type"]}'
        )

    if not self.dispatcher.call_sync('share.supported_types').get(share['type']):
        raise TaskException(errno.ENXIO, f'Unknown sharing type {share["type"]}')

    if self.datastore.exists(
        'shares',
        ('type', '=', share['type']),
        ('name', '=', share['name'])
    ):
        # Bug fix: this message was missing its f-prefix, so users saw the
        # literal {share["name"]} placeholders instead of the values.
        raise TaskException(errno.EEXIST, f'Share {share["name"]} of type {share["type"]} already exists')

    id = self.run_subtask_sync(f'share.{share["type"]}.import', share)
    self.dispatcher.dispatch_event('share.changed', {
        'operation': 'create',
        'ids': [id]
    })

    return id
def run(self, afp):
    """Reconfigure the AFP service; returns 'RELOAD'."""
    # Both configurable paths must exist and be directories.
    for key in ('dbpath', 'homedir_path'):
        value = afp.get(key)
        if not value:
            continue
        p = PosixPath(value)
        if not p.exists():
            raise TaskException(errno.ENOENT, 'Path : {0} does not exist'.format(p.as_posix()))
        if not p.is_dir():
            raise TaskException(errno.ENOTDIR, 'Path : {0} is not a directory'.format(p.as_posix()))

    if afp.get('guest_user'):
        if not self.dispatcher.call_sync('user.query', [('username', '=', afp['guest_user'])], {'single': True}):
            raise TaskException(errno.EINVAL, 'User: {0} does not exist'.format(afp['guest_user']))

    try:
        node = ConfigNode('service.afp', self.configstore)
        node.update(afp)
        for group in ('services', 'afp'):
            self.dispatcher.call_sync('etcd.generation.generate_group', group)
        self.dispatcher.dispatch_event('service.afp.changed', {
            'operation': 'updated',
            'ids': None,
        })
    except RpcException as e:
        raise TaskException(
            errno.ENXIO,
            'Cannot reconfigure AFP: {0}'.format(str(e))
        )

    return 'RELOAD'
def run(self, id):
    """
    Delete an SMB share: remove the datastore record, drop the share from
    the samba registry inside a transaction, then reload samba and kick
    active connections to the share.
    """
    share = self.datastore.get_by_id('shares', id)
    self.datastore.delete('shares', id)

    try:
        smb_conf = smbconf.SambaConfig('registry')
        smb_conf.transaction_start()
        try:
            del smb_conf.shares[share['name']]
        except BaseException as err:
            smb_conf.transaction_cancel()
            # Bug fix: the original passed err as a stray third argument so
            # the {0} placeholder was never expanded in the message.
            raise TaskException(
                errno.EBUSY,
                'Failed to update samba configuration: {0}'.format(err))
        else:
            smb_conf.transaction_commit()
            reload_samba()
            drop_share_connections(share['name'])
    except smbconf.SambaConfigException:
        raise TaskException(errno.EFAULT, 'Cannot access samba registry')

    self.dispatcher.dispatch_event('share.smb.changed', {
        'operation': 'delete',
        'ids': [id]
    })
def run(self, id, delete_params=None):
    # Delete a user account. delete_params may request removal of the home
    # dataset ('delete_home_directory') and/or the user's own primary group
    # ('delete_own_group'); anything left behind produces a TaskWarning.
    subtasks = []
    try:
        user = self.datastore.get_by_id('users', id)
        if user is None:
            raise TaskException(
                errno.ENOENT,
                'User with UID {0} does not exist'.format(id))

        # Home directory: delete only when explicitly requested AND backed by
        # its own dataset; otherwise warn that it is left behind.
        if (delete_params and delete_params.get('delete_home_directory')
                and user['home'] not in (None, '/nonexistent')
                and os.path.exists(user['home'])):
            homedir_dataset = self.dispatcher.call_sync(
                'volume.dataset.query',
                [('mountpoint', '=', user['home'])],
                {'single': True})
            if homedir_dataset:
                subtasks.append(
                    self.run_subtask('volume.dataset.delete', homedir_dataset['id']))
        elif user['home'] not in (None, '/nonexistent') and os.path.exists(
                user['home']):
            self.add_warning(
                TaskWarning(
                    errno.EBUSY,
                    'Home directory {} left behind, you need to delete it separately'
                    .format(user['home'])))

        # Primary group: uid == gid means it is the user's personal group.
        group = self.datastore.get_by_id('groups', user['group'])
        if group and user['uid'] == group['gid']:
            if delete_params and delete_params.get('delete_own_group'):
                subtasks.append(
                    self.run_subtask('group.delete', user['group']))
            else:
                self.add_warning(
                    TaskWarning(
                        errno.EBUSY,
                        'Group {0} ({1}) left behind, you need to delete it separately'
                        .format(group['name'], group['gid'])))

        self.join_subtasks(*subtasks)

        # Best-effort removal of the samba password database entry.
        if user.get('smbhash'):
            try:
                system('/usr/local/bin/pdbedit', '-x', user['username'])
            except SubprocessException as e:
                pass

        self.datastore.delete('users', id)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'accounts')
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG, 'Cannot delete user: {0}'.format(str(e)))

    self.dispatcher.dispatch_event('user.changed', {
        'operation': 'delete',
        'ids': [id]
    })
def run(self, peer, initial_credentials):
    """
    Create a peer record and return its datastore id.

    initial_credentials is accepted for interface compatibility but is not
    used by this implementation.
    """
    if 'name' not in peer:
        raise TaskException(errno.EINVAL, 'Name has to be specified')

    if self.datastore.exists('peers', ('name', '=', peer['name'])):
        # EEXIST for duplicates, matching the convention used by the other
        # create tasks in this file (the original raised EINVAL here).
        raise TaskException(errno.EEXIST, 'Peer entry {0} already exists'.format(peer['name']))

    return self.datastore.insert('peers', peer)
def run(self, props):
    # Update the web UI (nginx) configuration: enabled protocols, listen
    # addresses, ports, HTTP->HTTPS redirect and the HTTPS certificate,
    # then regenerate the nginx config and reload the service.
    webui_protocol = props.get('webui_protocol', [])
    if webui_protocol:
        self.configstore.set(
            'service.nginx.http.enable',
            True if 'HTTP' in webui_protocol else False,
        )
        self.configstore.set(
            'service.nginx.https.enable',
            True if 'HTTPS' in webui_protocol else False,
        )

    if 'webui_listen' in props:
        self.configstore.set('service.nginx.listen', props['webui_listen'])

    if 'webui_http_port' in props:
        self.configstore.set('service.nginx.http.port', props['webui_http_port'])

    if 'webui_http_redirect_https' in props:
        self.configstore.set('service.nginx.http.redirect_https', props['webui_http_redirect_https'])

    if 'webui_https_certificate' in props:
        # Only accept an existing certificate that is not a bare CSR.
        if not self.dispatcher.call_sync('crypto.certificate.query', [
            ('type', '!=', 'CERT_CSR'),
            ('id', '=', props['webui_https_certificate'])
        ], {'count': True}):
            raise TaskException(
                errno.ENOENT,
                'Certificate id : {0} does not exist'.format(
                    props['webui_https_certificate']))
        else:
            self.configstore.set('service.nginx.https.certificate', props['webui_https_certificate'])

    if 'webui_https_port' in props:
        self.configstore.set('service.nginx.https.port', props['webui_https_port'])

    # NOTE(review): this validation runs after the settings above were
    # already written, so failing here leaves a partially updated configstore.
    if self.configstore.get('service.nginx.https.enable', False) and not self.configstore.get(
            'service.nginx.https.certificate', False):
        raise TaskException(
            errno.EINVAL,
            'HTTPS protocol specified for UI without certificate')

    try:
        self.dispatcher.call_sync('etcd.generation.generate_group', 'nginx')
        self.dispatcher.call_sync('service.reload', 'nginx')
    except RpcException as e:
        raise TaskException(
            errno.ENXIO,
            'Cannot reconfigure system UI: {0}'.format(str(e)))

    self.dispatcher.dispatch_event('system.ui.changed', {
        'operation': 'update',
        'ids': ['system.ui'],
    })
def run(self, share):
    """
    Create an SMB share: persist the record, register the share in the
    samba registry inside a transaction, then reload samba. Returns the
    new share id.
    """
    # Fill in defaults for any properties the caller did not supply.
    normalize(share['properties'], {
        'read_only': False,
        'guest_ok': False,
        'guest_only': False,
        'browseable': True,
        'recyclebin': False,
        'show_hidden_files': False,
        'previous_versions': True,
        'vfs_objects': [],
        'hosts_allow': [],
        'hosts_deny': [],
        'users_allow': [],
        'users_deny': [],
        'groups_allow': [],
        'groups_deny': [],
        'full_audit_prefix': '%u|%I|%m|%S',
        'full_audit_priority': 'notice',
        'full_audit_failure': 'connect',
        'full_audit_success': 'open mkdir unlink rmdir rename',
        'case_sensitive': 'AUTO',
        'allocation_roundup_size': 1048576,
        'ea_support': True,
        'store_dos_attributes': True,
        'map_archive': True,
        'map_hidden': False,
        'map_readonly': True,
        'map_system': False,
        'fruit_metadata': 'STREAM'
    })

    id = self.datastore.insert('shares', share)
    path = self.dispatcher.call_sync('share.translate_path', id)

    try:
        smb_conf = smbconf.SambaConfig('registry')
        smb_conf.transaction_start()
        try:
            smb_share = smbconf.SambaShare()
            convert_share(self.dispatcher, smb_share, path, share['enabled'], share['properties'])
            smb_conf.shares[share['name']] = smb_share
        except BaseException as err:
            smb_conf.transaction_cancel()
            # Bug fix: format the error into the message; the original passed
            # err as a stray third argument and the {0} was never expanded.
            raise TaskException(errno.EBUSY, 'Failed to update samba configuration: {0}'.format(err))
        else:
            smb_conf.transaction_commit()
            reload_samba()
    except smbconf.SambaConfigException:
        raise TaskException(errno.EFAULT, 'Cannot access samba registry')

    self.dispatcher.dispatch_event('share.smb.changed', {
        'operation': 'create',
        'ids': [id]
    })

    return id
def doit():
    """Clone `source` into a new boot environment named `newname`."""
    try:
        created = CreateClone(newname, bename=source)
    except KeyError:
        # CreateClone signals a name collision with KeyError.
        raise TaskException(
            errno.EEXIST,
            f'Boot environment {newname} already exists')
    if not created:
        raise TaskException(
            errno.EIO,
            f'Cannot create the {newname} boot environment')
def run(self, name, updated_fields):
    """
    Update a static route. Effective values are the updated fields layered
    over the stored route; the resulting network/netmask must describe a
    valid CIDR network, and a gateway inside the destination network only
    produces a warning.
    """
    if not self.datastore.exists('network.routes', ('id', '=', name)):
        raise TaskException(errno.ENOENT, 'Route {0} does not exist'.format(name))

    # Fetch once and reuse (the original queried the same record twice).
    route = self.datastore.get_one('network.routes', ('id', '=', name))
    net = updated_fields.get('network', route['network'])
    netmask = updated_fields.get('netmask', route['netmask'])
    type = updated_fields.get('type', route['type'])
    gateway = updated_fields.get('gateway', route['gateway'])

    max_cidr = 32 if type == 'INET' else 128
    if not (0 <= netmask <= max_cidr):
        # Bug fix: report the value actually being validated; the original
        # printed the old stored netmask even when the new one was at fault.
        raise TaskException(
            errno.EINVAL,
            'Netmask value {0} is not valid. Allowed values are 0-{1} (CIDR).'.format(netmask, max_cidr))

    # Build the CIDR string explicitly instead of abusing os.path.join,
    # and parse it only once.
    cidr = '{0}/{1}'.format(net, netmask)
    try:
        network = ipaddress.ip_network(cidr)
    except ValueError:
        raise TaskException(
            errno.EINVAL,
            '{0} would have host bits set. Change network or netmask to represent a valid network'.format(cidr))

    if ipaddress.ip_address(gateway) in network:
        self.add_warning(
            TaskWarning(
                errno.EINVAL,
                'Gateway {0} is in the destination network {1}.'.format(gateway, network.exploded)))

    route.update(updated_fields)
    self.datastore.update('network.routes', name, route)

    try:
        for code, message in self.dispatcher.call_sync('networkd.configuration.configure_routes'):
            self.add_warning(TaskWarning(code, message))
    except RpcException as e:
        raise TaskException(
            errno.ENXIO,
            'Cannot reconfigure routes: {0}'.format(str(e)))

    self.dispatcher.dispatch_event('network.route.changed', {
        'operation': 'update',
        'ids': [route['id']]
    })