def run(self, id):
    """Delete a user account.

    Warns when the user's own primary group is left behind, removes the
    Samba password entry (best effort), deletes the datastore record,
    regenerates the accounts files and emits a 'user.changed' event.
    """
    try:
        user = self.datastore.get_by_id('users', id)
        group = self.datastore.get_by_id('groups', user['group'])
        if group and user['uid'] == group['gid']:
            # The user's own group is not removed here; tell the caller.
            self.add_warning(TaskWarning(
                errno.EBUSY,
                'Group {0} ({1}) left behind, you need to delete it separately'.format(
                    group['name'], group['gid'])
            ))

        if user.get('smbhash'):
            # Best effort: a stale Samba entry is not fatal.
            try:
                system('/usr/local/bin/pdbedit', '-x', user['username'])
            except SubprocessException:
                # Fixed: dropped unused `as e` binding on swallowed exception.
                pass

        self.datastore.delete('users', id)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'accounts')
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG, 'Cannot delete user: {0}'.format(str(e)))

    self.dispatcher.dispatch_event('user.changed', {
        'operation': 'delete',
        'ids': [id]
    })
def run(self, key_type, key_length):
    """Generate OpenVPN cryptographic material.

    For 'dh-parameters' creates DH parameters of ``key_length`` bits;
    any other type generates a TLS auth key. The result is stored in the
    configstore, config files regenerated and an update event dispatched.
    """
    try:
        if key_type == 'dh-parameters':
            dhparams = system('/usr/bin/openssl', 'dhparam', str(key_length))[0]
            self.configstore.set('service.openvpn.dh', dhparams)
        else:
            tls_auth_key = system('/usr/local/sbin/openvpn', '--genkey', '--secret', '/dev/stdout')[0]
            self.configstore.set('service.openvpn.tls_auth', tls_auth_key)

        # Common tail for both branches (was duplicated).
        self.dispatcher.call_sync('etcd.generation.generate_group', 'openvpn')
        self.dispatcher.dispatch_event('service.openvpn.changed', {
            'operation': 'update',
            'ids': None,
        })
    except RpcException as e:
        raise TaskException(errno.ENXIO, 'Cannot reconfigure OpenVPN: {0}'.format(str(e)))
    except SubprocessException as e:
        # Fixed typo in user-facing message ('Cannont').
        raise TaskException(
            errno.ENOENT,
            'Cannot create requested key - check your system setup {0}'.format(e))
def restart(self, service):
    """Restart a running service via its restart RPC hook or rc script(s)."""
    svc = self.datastore.get_one('service_definitions', ('name', '=', service))
    if not svc:
        raise RpcException(errno.ENOENT, 'Service {0} not found'.format(service))

    # Only query status once we know the service exists (was queried first).
    status = self.query([('name', '=', service)], {'single': True})
    if status['state'] != 'RUNNING':
        return

    hook_rpc = svc.get('restart_rpc')
    if hook_rpc:
        # Delegate to the service-specific restart hook; failures are
        # deliberately ignored (best effort).
        try:
            self.dispatcher.call_sync(hook_rpc)
        except RpcException:
            pass
        return

    rc_scripts = svc['rcng']['rc-scripts']
    try:
        # Idiom: isinstance instead of `type(x) is ...`.
        if isinstance(rc_scripts, str):
            system("/usr/sbin/service", rc_scripts, 'onerestart')
        elif isinstance(rc_scripts, list):
            for script in rc_scripts:
                system("/usr/sbin/service", script, 'onerestart')
    except SubprocessException:
        pass
def run(self, name, action):
    """Invoke ``action`` on a service via its RPC hook or rc script(s).

    Dispatches a 'services.changed' update event when rc scripts were used.
    """
    service = self.datastore.get_one('service_definitions', ('name', '=', name))
    hook_rpc = service.get('{0}_rpc'.format(action))
    if hook_rpc:
        try:
            return self.dispatcher.call_sync(hook_rpc)
        except RpcException as e:
            raise TaskException(errno.EBUSY, 'Hook {0} for {1} failed: {2}'.format(
                action, name, e
            ))

    rc_scripts = service['rcng'].get('rc-scripts')
    # On reload, only scripts listed under 'reload' are driven.
    reload_scripts = service['rcng'].get('reload', rc_scripts)
    try:
        # Idiom: isinstance instead of `type(x) is ...`.
        if isinstance(rc_scripts, str):
            system("/usr/sbin/service", rc_scripts, 'one' + action)
        elif isinstance(rc_scripts, list):
            for script in rc_scripts:
                if action == 'reload' and script not in reload_scripts:
                    continue
                system("/usr/sbin/service", script, 'one' + action)
    except SubprocessException as e:
        raise TaskException(errno.EBUSY, e.err)

    self.dispatcher.dispatch_event('services.changed', {
        'operation': 'update',
        'ids': [service['id']]
    })
def reload(self, service):
    """Reload a running service's rc script(s), best effort per script."""
    svc = self.datastore.get_one('service_definitions', ('name', '=', service))
    if not svc:
        raise RpcException(errno.ENOENT, 'Service {0} not found'.format(service))

    # Query status only after the existence check.
    status = self.query([('name', '=', service)], {'single': True})
    if status['state'] != 'RUNNING':
        return

    rc_scripts = svc['rcng']['rc-scripts']
    # Only scripts listed under 'reload' are reloaded (defaults to all).
    reload_scripts = svc['rcng'].get('reload', rc_scripts)

    # Idiom: isinstance instead of `type(x) is ...`.
    if isinstance(rc_scripts, str):
        try:
            system("/usr/sbin/service", rc_scripts, 'onereload')
        except SubprocessException:
            pass
    elif isinstance(rc_scripts, list):
        for script in rc_scripts:
            if script not in reload_scripts:
                continue
            try:
                system("/usr/sbin/service", script, 'onereload')
            except SubprocessException:
                pass
def ups_signal(kwargs):
    """React to a UPS event: shut down on battery, or e-mail a notification."""
    config = dispatcher.call_sync('service.ups.get_config')
    signal_name = kwargs.get('name')
    notify_type = kwargs.get('type')

    if signal_name == 'ONBATT':
        # Only shut down when configured to do so as soon as we hit battery.
        if config['shutdown_mode'] == 'BATT':
            logger.warn('Issuing shutdown from UPS signal')
            system('/usr/local/sbin/upsmon', '-c', 'fsd')
    elif signal_name in ('EMAIL', 'COMMBAD', 'COMMOK'):
        if not (config['email_notify'] and config['email_recipients']):
            logger.debug('Email not configured for UPS')
        else:
            # Expand %d (date) and %h (hostname) in the configured subject.
            subject = config['email_subject'].replace('%d', time.asctime()).replace('%h', socket.gethostname())
            dispatcher.call_sync('mail.send', {
                'to': config['email_recipients'],
                'subject': subject,
                'message': 'UPS Status: {0}\n'.format(
                    notify_type,
                ),
            })
    else:
        logger.info('Unhandled UPS Signal: %s', signal_name)
def run(self, key_type, key_length):
    """Generate OpenVPN key material (DH parameters or TLS auth key).

    Stores the result in the configstore, regenerates the OpenVPN config
    group and dispatches a 'service.openvpn.changed' update event.
    """
    try:
        if key_type == 'dh-parameters':
            dhparams = system('/usr/bin/openssl', 'dhparam', str(key_length))[0]
            self.configstore.set('service.openvpn.dh', dhparams)
        else:
            tls_auth_key = system('/usr/local/sbin/openvpn', '--genkey', '--secret', '/dev/stdout')[0]
            self.configstore.set('service.openvpn.tls_auth', tls_auth_key)

        # Shared tail, previously duplicated in both branches.
        self.dispatcher.call_sync('etcd.generation.generate_group', 'openvpn')
        self.dispatcher.dispatch_event('service.openvpn.changed', {
            'operation': 'update',
            'ids': None,
        })
    except RpcException as e:
        raise TaskException(errno.ENXIO, 'Cannot reconfigure OpenVPN: {0}'.format(str(e)))
    except SubprocessException as e:
        # Fixed typo in user-facing message ('Cannont').
        raise TaskException(
            errno.ENOENT,
            'Cannot create requested key - check your system setup {0}'.format(e))
def find_dumps(dispatcher):
    """Try to save a kernel crash dump from every available disk's p1 partition."""
    logger.warning('Finding and saving crash dumps')
    for candidate in get_available_disks(dispatcher):
        # Best effort: a disk without a dump simply makes savecore fail.
        try:
            system('/sbin/savecore', '/data/crash', '{0}p1'.format(candidate))
        except SubprocessException:
            continue
def run(self, disk, erase_method=None):
    """Erase a disk.

    erase_method: 'QUICK' (default) wipes the first and last megabyte;
    'ZEROS' / 'RANDOM' overwrite the whole media. ZFS labels and any
    partition table are destroyed first.
    """
    diskinfo = self.dispatcher.call_sync("disks.get_disk_config", disk)
    try:
        system('/sbin/zpool', 'labelclear', '-f', disk)
        if diskinfo.get('partitions'):
            system('/sbin/gpart', 'destroy', '-F', disk)
    except SubprocessException as err:
        raise TaskException(errno.EFAULT, 'Cannot erase disk: {0}'.format(err.err))

    if not erase_method:
        erase_method = 'QUICK'

    zeros = b'\0' * (1024 * 1024)
    fd = os.open(disk, os.O_WRONLY)
    try:
        if erase_method == 'QUICK':
            # Wipe the first and last MB (partition tables, ZFS labels).
            os.write(fd, zeros)
            os.lseek(fd, diskinfo['mediasize'] - len(zeros), os.SEEK_SET)
            os.write(fd, zeros)
        elif erase_method in ('ZEROS', 'RANDOM'):
            # Attributes exposed for progress reporting.
            self.mediasize = diskinfo['mediasize']
            self.remaining = self.mediasize
            self.started = True
            while self.remaining > 0:
                garbage = zeros if erase_method == 'ZEROS' else os.urandom(1024 * 1024)
                amount = min(len(garbage), self.remaining)
                os.write(fd, garbage[:amount])
                self.remaining -= amount
    finally:
        # Fix: close the descriptor even when a write fails (fd leak).
        os.close(fd)
def verify(self, id, updated_fields, force=False):
    """Validate an NTP server update; returns affected resources ['system'].

    Collects field-level errors instead of failing on the first problem.
    force: skip the reachability failure for 'address'.
    """
    ntp = self.datastore.get_by_id('ntpservers', id)
    if ntp is None:
        # Fixed grammar in user-facing message ('does not exists').
        raise VerifyException(errno.ENOENT, 'NTP Server with given ID does not exist')

    errors = []
    try:
        if 'address' in updated_fields:
            # Probe the server without setting the clock (-q = query only).
            system('ntpdate', '-q', updated_fields['address'])
    except SubprocessException:
        if not force:
            errors.append((
                'address', errno.EINVAL,
                'Server could not be reached. Check "Force" to continue regardless.'))

    minpoll = updated_fields.get('minpoll', ntp.get('minpoll'))
    maxpoll = updated_fields.get('maxpoll', ntp.get('maxpoll'))
    if minpoll is not None and maxpoll is not None and not maxpoll > minpoll:
        errors.append(('maxpoll', errno.EINVAL, 'Max Poll should be higher than Min Poll'))

    if errors:
        raise ValidationException(errors)

    return ['system']
def run(self, id, delete_params=None):
    """Delete a user account.

    delete_params: optional dict honouring 'delete_home_directory' and
    'delete_own_group'; when unset, the home directory and the user's own
    primary group are left behind with a warning. Finishes by removing the
    Samba entry (best effort), deleting the record, regenerating accounts
    files and emitting a 'user.changed' delete event.
    """
    subtasks = []
    try:
        user = self.datastore.get_by_id('users', id)
        if user is None:
            raise TaskException(
                errno.ENOENT,
                'User with UID {0} does not exist'.format(id))

        if (delete_params and delete_params.get('delete_home_directory')
                and user['home'] not in (None, '/nonexistent')
                and os.path.exists(user['home'])):
            # Only delete the home directory when it is a dedicated dataset.
            homedir_dataset = self.dispatcher.call_sync(
                'volume.dataset.query',
                [('mountpoint', '=', user['home'])], {'single': True})
            if homedir_dataset:
                subtasks.append(
                    self.run_subtask('volume.dataset.delete',
                                     homedir_dataset['id']))
        elif user['home'] not in (None, '/nonexistent') and os.path.exists(
                user['home']):
            self.add_warning(
                TaskWarning(
                    errno.EBUSY,
                    'Home directory {} left behind, you need to delete it separately'
                    .format(user['home'])))

        group = self.datastore.get_by_id('groups', user['group'])
        if group and user['uid'] == group['gid']:
            # The user owns its primary group.
            if delete_params and delete_params.get('delete_own_group'):
                subtasks.append(
                    self.run_subtask('group.delete', user['group']))
            else:
                self.add_warning(
                    TaskWarning(
                        errno.EBUSY,
                        'Group {0} ({1}) left behind, you need to delete it separately'
                        .format(group['name'], group['gid'])))

        self.join_subtasks(*subtasks)

        if user.get('smbhash'):
            # Best effort: ignore a failing pdbedit removal.
            try:
                system('/usr/local/bin/pdbedit', '-x', user['username'])
            except SubprocessException as e:
                pass

        self.datastore.delete('users', id)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'accounts')
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG,
                            'Cannot delete user: {0}'.format(str(e)))

    self.dispatcher.dispatch_event('user.changed', {
        'operation': 'delete',
        'ids': [id]
    })
def create_swap(dispatcher, disks):
    """Create mirrored swap devices ('swap0', 'swap1', ...) from disk pairs."""
    partitions = [get_swap_partition(dispatcher, d) for d in disks]
    partitions = [p for p in partitions if p is not None]

    # Consume the partitions two at a time; an odd leftover is ignored.
    for idx, (disk_a, disk_b) in enumerate(zip(*[iter(partitions)] * 2)):
        name = 'swap{0}'.format(idx)
        logger.info('Creating swap partition %s from disks: %s, %s', name, disk_a, disk_b)
        system('/sbin/gmirror', 'label', '-b', 'prefer', name, disk_a, disk_b)
        system('/sbin/swapon', '/dev/mirror/{0}'.format(name))
def run(self, disk):
    """Install the GRUB boot loader (with ZFS/GPT modules) onto ``disk``."""
    try:
        disk = os.path.join('/dev', disk)
        system('/usr/local/sbin/grub-install', "--modules='zfs part_gpt'", disk)
    except SubprocessException as err:
        # Fixed legacy `except X, err` syntax (removed in Python 3).
        raise TaskException(errno.EFAULT, 'Cannot install GRUB: {0}'.format(err.err))
def run(self, disk, erase_method=None):
    """Quick-erase ``disk``: clear ZFS labels and destroy any partition table.

    NOTE(review): ``erase_method`` is accepted but unused in this variant —
    confirm against callers before removing it.
    """
    try:
        system('/sbin/zpool', 'labelclear', '-f', disk)
        generate_disk_cache(self.dispatcher, disk)
        if self.dispatcher.call_sync("disks.get_disk_config", disk)['partitions']:
            system('/sbin/gpart', 'destroy', '-F', disk)
    except SubprocessException as err:
        # Fixed legacy `except X, err` syntax (removed in Python 3).
        raise TaskException(errno.EFAULT, 'Cannot erase disk: {0}'.format(err.err))
def run(self, uid, updated_fields):
    """Update a user record, its Samba hash and home directory.

    uid: numeric user id / datastore key.
    updated_fields: partial user document merged in; a plaintext 'password'
        entry is hashed and never stored as-is.
    Raises TaskException on subprocess, datastore or RPC failures, and when
    the home directory is outside the volumes root.
    """
    try:
        user = self.datastore.get_by_id('users', uid)
        home_before = user.get('home')
        user.update(updated_fields)
        password = user.pop('password', None)
        if password:
            user['unixhash'] = crypted_password(password)

        self.datastore.update('users', uid, user)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'accounts')

        if password:
            # Feed the password to smbpasswd on stdin, then read back the
            # generated hash with pdbedit and persist it.
            system(
                '/usr/local/bin/smbpasswd', '-D', '0', '-s', '-a', user['username'],
                stdin='{0}\n{1}\n'.format(password, password).encode('utf8'))
            user['smbhash'] = system('/usr/local/bin/pdbedit', '-d', '0', '-w', user['username'])[0]
            self.datastore.update('users', uid, user)
    except SubprocessException as e:
        raise TaskException(
            errno.ENXIO,
            'Could not generate samba password. stdout: {0}\nstderr: {1}'.format(e.out, e.err))
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG, 'Cannot update user: {0}'.format(str(e)))
    except RpcException as e:
        raise TaskException(errno.ENXIO, 'Cannot regenerate users file, etcd service is offline')

    volumes_root = self.dispatcher.call_sync('volumes.get_volumes_root')
    if user['home'].startswith(volumes_root):
        if not os.path.exists(user['home']):
            # Home lives on a volume: validate the path, then either move
            # the previous home into place or create a fresh directory.
            try:
                self.dispatcher.call_sync('volumes.decode_path', user['home'])
            except RpcException as err:
                raise TaskException(err.code, err.message)
            if (home_before != '/nonexistent' and
                    home_before != user['home'] and
                    os.path.exists(home_before)):
                system('mv', home_before, user['home'])
            else:
                os.makedirs(user['home'])
                os.chown(user['home'], uid, user['group'])
                os.chmod(user['home'], 0o755)
        elif user['home'] != home_before:
            # Existing directory, changed path: fix ownership/permissions.
            os.chown(user['home'], uid, user['group'])
            os.chmod(user['home'], 0o755)
    elif not user['builtin'] and user['home'] not in (None, '/nonexistent'):
        raise TaskException(
            errno.ENOENT,
            "Invalid mountpoint specified for home directory: {0}.".format(user['home']) +
            " Use '{0}' instead as the mountpoint".format(volumes_root)
        )

    self.dispatcher.dispatch_event('users.changed', {
        'operation': 'update',
        'ids': [uid]
    })
def create_swap(dispatcher, disks):
    """Create mirrored swap devices and register each as a dump device."""
    candidates = (get_swap_partition(dispatcher, d) for d in disks)
    partitions = [p for p in candidates if p is not None]

    # Walk the partitions pairwise; a trailing unpaired one is skipped.
    for disk_a, disk_b in zip(*[iter(partitions)] * 2):
        name = get_swap_name()
        mirror_path = '/dev/mirror/{0}'.format(name)
        logger.info('Creating swap partition %s from disks: %s, %s', name, disk_a, disk_b)
        system('/sbin/gmirror', 'label', '-b', 'prefer', name, disk_a, disk_b)
        system('/sbin/swapon', mirror_path)
        configure_dumpdev(mirror_path)
def run(self, props):
    """Apply general system settings.

    Handles hostname, description, tags, language, timezone (rescheduling
    calendar tasks), console keymap and syslog server, then regenerates the
    affected config files and emits a 'system.general.changed' event.
    """
    if 'hostname' in props:
        netif.set_hostname(props['hostname'])

    if 'description' in props:
        self.configstore.set('system.description', props['description'])

    if 'tags' in props:
        self.configstore.set('system.tags', props['tags'])

    if 'language' in props:
        self.configstore.set('system.language', props['language'])

    if 'timezone' in props:
        new = props['timezone']
        old = self.configstore.get('system.timezone')
        if new != old:
            # Reschedule calendar tasks so their wall-clock times survive
            # the timezone switch; report how many were moved.
            count = self.run_subtask_sync('calendar_task.change_timezone', new)
            self.add_warning(
                TaskWarning(
                    errno.ENXIO,
                    "{0} calendar tasks rescheduled from timezone '{1}' to '{2}'"
                    .format(count, old, new)))
            self.configstore.set('system.timezone', new)
            # Make the new timezone effective in this process as well.
            os.environ['TZ'] = new

    if 'console_keymap' in props:
        new = props['console_keymap']
        old = self.configstore.get('system.console.keymap')
        if new != old:
            # kbdcontrol acts on the console it reads from.
            with open('/dev/console') as fd:
                system('/usr/sbin/kbdcontrol', '-l', props['console_keymap'], file_obj_stdin=fd)
            self.configstore.set('system.console.keymap', new)

    syslog_changed = False
    if 'syslog_server' in props:
        self.configstore.set('system.syslog_server', props['syslog_server'])
        syslog_changed = True

    try:
        self.dispatcher.call_sync('etcd.generation.generate_group', 'localtime')
        if syslog_changed:
            # Tell logd to re-read its configuration.
            self.dispatcher.call_sync('serviced.job.send_signal', 'org.freenas.logd', signal.SIGHUP)
    except RpcException as e:
        raise TaskException(
            errno.ENXIO, 'Cannot reconfigure system: {0}'.format(str(e), ))

    self.dispatcher.dispatch_event('system.general.changed', {
        'operation': 'update',
    })
def addServer(self, ctx, form, data):
    # Register a new monitored server: generate a key when none was given,
    # append a target stanza to the smokeping config, e-mail a notification
    # and persist the server plus a default membership (Python 2 / Twisted).
    name = self.alphaOnly(data['name'].encode())
    if not data['key']:
        # Derive a key from the name: xor each char with random bits, SHA-1,
        # take four 4-char groups, then ROT-15 the a-z characters.
        # NOTE(review): random.randint is not cryptographically secure --
        # confirm whether these keys need to be unguessable (use os.urandom).
        l = sha.sha(''.join([chr(ord(i)^random.randint(0, 128)) for i in name])).hexdigest()
        k = "%s-%s-%s-%s" % (l[:4], l[4:8], l[8:12], l[12:16])
        alphabet = [chr(i) for i in range(ord('a'), ord('z')+1)]
        rotAlpha = alphabet[15:] + alphabet[:15]
        rotDict = dict(zip(alphabet, rotAlpha))
        newStr = ""
        for x in k:
            if x in rotDict:
                newStr += rotDict[x]
            else:
                newStr += x
        data['key'] = unicode(newStr)
    l = open('/etc/smokeping/config', 'r').read()
    if "++ %s" % name in l:
        print "Already in smokeping.."
    else:
        # Append a new smokeping target stanza for this host.
        l = open('/etc/smokeping/config', 'at')
        conf = """\n++ %(n)s
menu = %(n)s
title = %(n)s
host = %(h)s
alerts = bigloss,rttdetect\n\n""" % {'n': name, 'h': data['hostname'].encode()}
        l.write(conf)
        l.close()
    # NOTE(review): shell-injection risk -- name/hostname/key are interpolated
    # into a shell pipeline. `name` is filtered by alphaOnly, but hostname and
    # key are not; escape them or send the mail via a library instead.
    system.system('echo "New server: %s %s %s" | mail -s "New server %s" [email protected]' % (
        name,
        data['hostname'].encode(),
        data['key'],
        name
    ))

    def returnPage(_):
        # Redirect back to the servers listing.
        return url.root.child('Thebe').child('Servers')

    def addMember(svr):
        # Grant the default membership (id 1) on the freshly created server.
        return self.enamel.storage.addServerMembership(1, svr[0]).addBoth(returnPage)

    def added(_):
        return self.enamel.storage.getServerByName(name).addBoth(addMember)

    # addCallbacks(added, added): proceed the same way on success or failure.
    return self.enamel.storage.addServer(
        name,
        data['hostname'].encode(),
        data['key'].encode(),
    ).addCallbacks(added, added)
def configure_standby(mode):
    """Set the ATA standby (spindown) timeout via ataidle.

    NOTE(review): relies on ``disk`` and ``id`` from the enclosing scope --
    confirm this remains defined where the closure is created.
    """
    try:
        system(
            '/usr/local/sbin/ataidle',
            '-I',
            mode,
            disk['path']
        )
    except SubprocessException as err:
        # Fix: logging uses %-style placeholders; the original '{0}'/'{1}'
        # braces were passed as args and never substituted into the message.
        logger.warning('Cannot configure standby mode for disk %s: %s', id, err.err)
def clear_swap(dispatcher):
    """Tear down every swap mirror currently known to the system."""
    logger.info('Clearing all swap mirrors in system')
    for swap in list(get_swap_info(dispatcher).values()):
        mirror_name = swap['name']
        logger.debug('Clearing swap mirror %s', mirror_name)
        # swapoff may fail (e.g. mirror not active); destroy regardless.
        try:
            system('/sbin/swapoff', os.path.join('/dev/mirror', mirror_name))
        except SubprocessException:
            pass
        system('/sbin/gmirror', 'destroy', mirror_name)
def create_swap(dispatcher, disks):
    """Create mirrored swap devices ('swap0', 'swap1', ...) from disk pairs."""
    # Idiom: comprehension instead of filter(map(lambda ...)) chains.
    disks = [p for p in (get_swap_partition(dispatcher, d) for d in disks) if p is not None]
    # Consume the partitions two at a time; an odd leftover is ignored.
    for idx, pair in enumerate(zip(*[iter(disks)] * 2)):
        name = 'swap{0}'.format(idx)
        disk_a, disk_b = pair
        logger.info('Creating swap partition %s from disks: %s, %s', name, disk_a, disk_b)
        system('/sbin/gmirror', 'label', '-b', 'prefer', name, disk_a, disk_b)
        system('/sbin/swapon', '/dev/mirror/{0}'.format(name))
def run(self, id, updated_fields):
    """Update a user: persist changes, hash a new password, manage home dir.

    Regenerates the accounts files and emits a 'user.changed' update event.
    Raises TaskException when the user is missing or a backend call fails.
    """
    normalize_name(updated_fields, 'username')
    try:
        user = self.datastore.get_by_id('users', id)
        if not user:
            raise TaskException(errno.ENOENT, 'User {0} not found'.format(id))

        # Keep a pristine copy (presumably for rollback -- TODO confirm).
        self.original_user = copy.deepcopy(user)

        home_before = user.get('home')
        user.update(updated_fields)
        password = user.pop('password', None)
        if password:
            # Never store the plaintext password; derive both hashes.
            user.update({
                'unixhash': crypted_password(password),
                'nthash': nt_password(password),
                'password_changed_at': datetime.utcnow()
            })

        self.datastore.update('users', user['id'], user)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'accounts')
    except SubprocessException as e:
        raise TaskException(
            errno.ENXIO,
            'Could not generate samba password. stdout: {0}\nstderr: {1}'.format(e.out, e.err))
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG, 'Cannot update user: {0}'.format(str(e)))
    except RpcException as e:
        raise TaskException(e.code, 'Cannot regenerate users file: {0}'.format(e.message))

    group = self.datastore.get_by_id('groups', user['group'])
    if user['home'] not in (None, '/nonexistent') and not user['builtin']:
        user['home'] = os.path.normpath(user['home'])
        if not os.path.exists(user['home']):
            # Validate the target path, then either move the previous home
            # into place or create a fresh directory.
            try:
                self.dispatcher.call_sync('volume.decode_path', user['home'])
            except RpcException as err:
                raise TaskException(err.code, err.message)
            if (home_before != '/nonexistent' and
                    home_before != user['home'] and
                    os.path.exists(home_before)):
                system('mv', home_before, user['home'])
            else:
                os.makedirs(user['home'])
                os.chown(user['home'], user['uid'], group['gid'])
                os.chmod(user['home'], 0o755)
        elif user['home'] != home_before:
            # Existing directory, changed path: fix ownership/permissions.
            os.chown(user['home'], user['uid'], group['gid'])
            os.chmod(user['home'], 0o755)

    self.dispatcher.dispatch_event('user.changed', {
        'operation': 'update',
        'ids': [user['id']]
    })
def run(self, id, updated_fields, force=False):
    """Update an NTP server entry.

    Validates address uniqueness and reachability (bypass with ``force``)
    and the min/max poll ranges, then persists, regenerates ntpd config and
    restarts the service. Returns the entry id.
    """
    ntp = self.datastore.get_by_id('ntpservers', id)
    if ntp is None:
        raise TaskException(errno.ENOENT, 'NTP Server with given ID does not exist')

    if 'address' in updated_fields:
        if updated_fields['address'] != ntp['address'] and self.datastore.exists(
                'ntpservers', ('address', '=', updated_fields['address'])):
            raise TaskException(
                errno.ENXIO, 'NTP Server with given address already exists')
        else:
            # Probe the server without setting the clock (-q = query only).
            try:
                system('ntpdate', '-q', updated_fields['address'])
            except SubprocessException:
                if not force:
                    raise TaskException(
                        errno.EINVAL,
                        'Server could not be reached. Check "Force" to continue regardless.'
                    )

    minpoll = updated_fields.get('minpoll', ntp.get('minpoll'))
    maxpoll = updated_fields.get('maxpoll', ntp.get('maxpoll'))
    if minpoll is not None and maxpoll is not None and not maxpoll > minpoll:
        raise TaskException(errno.EINVAL, 'Max Poll should be higher than Min Poll')
    # Fix: guard against None before the range comparison (raised TypeError
    # in Python 3 when the field was absent from both the update and record).
    if minpoll is not None and (minpoll > 17 or minpoll < 4):
        raise TaskException(errno.EINVAL, 'Min Poll range should be between 4 and 17')
    if maxpoll is not None and (maxpoll > 17 or maxpoll < 4):
        raise TaskException(errno.EINVAL, 'Max Poll range should be between 4 and 17')

    try:
        ntp.update(updated_fields)
        self.datastore.update('ntpservers', id, ntp)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'ntpd')
        self.dispatcher.call_sync('service.restart', 'ntpd')
        self.dispatcher.dispatch_event('ntp_server.changed', {
            'operation': 'update',
            'ids': [id]
        })
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG, 'Cannot update NTP Server: {0}'.format(str(e)))
    except RpcException as e:
        # Fix: copy-pasted message said 'Cannot generate certificate'.
        raise TaskException(
            errno.ENXIO, 'Cannot update NTP Server: {0}'.format(str(e)))

    return id
def _init(dispatcher, plugin):
    """Plugin entry point: register the IPMI schema, provider and task,
    load the ipmi(4) kernel module and probe which LAN channels respond.
    """
    plugin.register_schema_definition(
        'Ipmi', {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'id': {
                    'type': 'integer'
                },
                'password': {
                    'type': 'string',
                    'maxLength': 20  # IPMI 2.0 passwords cap at 20 chars
                },
                'dhcp': {
                    'type': 'boolean'
                },
                'address': {
                    '$ref': 'Ipv4Address'
                },
                'netmask': {
                    'type': 'integer'
                },
                'gateway': {
                    'anyOf': [{
                        '$ref': 'Ipv4Address'
                    }, {
                        'type': 'null'
                    }]
                },
                'vlan_id': {
                    'type': ['integer', 'null']
                }
            }
        })

    plugin.register_provider('ipmi', IPMIProvider)
    plugin.register_task_handler('ipmi.update', ConfigureIPMITask)

    # Load ipmi kernel module
    try:
        kld.kldload('/boot/kernel/ipmi.ko')
    except OSError as err:
        # EEXIST just means the module is already loaded; anything else
        # means IPMI cannot be used on this machine.
        if err.errno != errno.EEXIST:
            logger.warning('Cannot load IPMI module: %s', str(err))
            logger.warning('IPMI unavailable')
            return

    # Scan available channels (1..16); a failing ipmitool probe means the
    # channel does not exist.
    for i in range(1, 17):
        try:
            system('/usr/local/bin/ipmitool', 'lan', 'print', str(i))
        except SubprocessException:
            continue

        channels.append(i)
def sysctl_set(name, value):
    """Set a sysctl OID, raising OSError when the assignment fails."""
    # Make sure sysctl name exists
    sysctl.sysctlnametomib(name)

    # FIXME: sysctl module doesn't handle types very well
    # sysctl.sysctl(mib, new=value)
    assignment = '{0}={1}'.format(name, str(value))
    try:
        system('sysctl', assignment)
    except SubprocessException as exc:
        # sysctl module compatibility
        raise OSError(str(exc.err))
def remove_swap(dispatcher, disks):
    """Disable and destroy swap mirrors using any of ``disks``, then rebalance."""
    affected = set(disks)
    for swap in get_swap_info(dispatcher).values():
        if not (affected & set(swap['disks'])):
            continue
        system('/sbin/swapoff', os.path.join('/dev/mirror', swap['name']))
        system('/sbin/gmirror', 'destroy', swap['name'])

    # Try to create new swap partitions, as at this stage we
    # might have two unused data disks
    if affected:
        rearrange_swap(dispatcher)
def remove_swap(dispatcher, disks):
    """Disable and destroy swap mirrors backed by the given disks, then rebalance."""
    affected = set(disks)
    for swap in list(get_swap_info(dispatcher).values()):
        if affected.intersection(swap['disks']):
            system('/sbin/swapoff', os.path.join('/dev/mirror', swap['name']))
            system('/sbin/gmirror', 'destroy', swap['name'])

    # Try to create new swap partitions, as at this stage we
    # might have two unused data disks
    if affected:
        rearrange_swap(dispatcher)
def service_stop(self):
    """Stop the UPS rc scripts; include 'nut' itself only in MASTER mode."""
    ups = self.get_config()
    scripts = ['nut_upslog', 'nut_upsmon']
    if ups['mode'] == 'MASTER':
        scripts.append('nut')
    try:
        for script in scripts:
            system("/usr/sbin/service", script, 'onestop')
    except SubprocessException as e:
        raise TaskException(errno.EBUSY, e.err)
def create_swap(dispatcher, disks):
    """Best-effort creation of mirrored swap (plus dump device) from disk pairs."""
    partitions = [p for p in (get_swap_partition(dispatcher, d) for d in disks) if p is not None]
    for disk_a, disk_b in zip(*[iter(partitions)] * 2):
        name = get_swap_name()
        mirror_path = '/dev/mirror/{0}'.format(name)
        try:
            logger.info('Creating swap partition %s from disks: %s, %s', name, disk_a, disk_b)
            system('/sbin/gmirror', 'create', '-b', 'prefer', name, disk_a, disk_b)
            system('/sbin/swapon', mirror_path)
            configure_dumpdev(mirror_path)
        except BaseException as err:
            # Deliberately broad: failure to build one mirror must not stop
            # the remaining pairs.
            logger.warning('Failed to create swap from disks {0}, {1}: {2}'.format(disk_a, disk_b, err))
def remove_swap(dispatcher, disks):
    """Disable and destroy swap mirrors touching any of ``disks`` (best effort)."""
    affected = set(disks)
    logger.log(TRACE, 'Remove swap request: disks={0}'.format(','.join(affected)))
    for swap in list(get_swap_info(dispatcher).values()):
        if not (affected & set(swap['disks'])):
            continue
        try:
            logger.log(TRACE, 'Removing swap mirror {0}'.format(swap['name']))
            system('/sbin/swapoff', os.path.join('/dev/mirror', swap['name']))
            system('/sbin/gmirror', 'destroy', swap['name'])
        except SubprocessException as err:
            # Keep going: losing one mirror should not abort the cleanup.
            logger.warn('Failed to disable swap on {0}: {1}'.format(swap['name'], err.err.strip()))
            logger.warn('Continuing without {0}'.format(swap['name']))
def create_swap(dispatcher, disks):
    """Mirror swap partitions pairwise; enable swap and dump on each mirror."""
    parts = [get_swap_partition(dispatcher, d) for d in disks]
    parts = [p for p in parts if p is not None]

    # zip(it, it) yields consecutive non-overlapping pairs.
    it = iter(parts)
    for disk_a, disk_b in zip(it, it):
        name = get_swap_name()
        device = '/dev/mirror/{0}'.format(name)
        logger.info('Creating swap partition %s from disks: %s, %s', name, disk_a, disk_b)
        system('/sbin/gmirror', 'label', '-b', 'prefer', name, disk_a, disk_b)
        system('/sbin/swapon', device)
        configure_dumpdev(device)
def service_status(self):
    """Run 'onestatus' on each UPS rc script ('nut' only in MASTER mode)."""
    config = self.get_config().__getstate__()
    scripts = ['nut'] if config['mode'] == 'MASTER' else []
    scripts.extend(['nut_upslog', 'nut_upsmon'])
    try:
        for script in scripts:
            system("/usr/sbin/service", script, 'onestatus')
    except SubprocessException as e:
        raise TaskException(errno.EBUSY, e.err)
def configure_disk(self, id):
    """Apply power-management / acoustic settings to a disk via ataidle."""
    disk = self.datastore.get_by_id('disks', id)
    acoustic_value = getattr(AcousticLevel, disk.get('acoustic_level', 'DISABLED')).value
    apm_value = disk.get('apm_mode', 0)
    system('/usr/local/sbin/ataidle', '-P', str(apm_value), '-A', str(acoustic_value), disk['path'])

    if disk.get('standby_mode'):
        # Apply the idle timeout a minute later, once the disk has settled.
        standby_value = str(disk['standby_mode'])
        gevent.spawn_later(
            60,
            lambda: system('/usr/local/sbin/ataidle', '-I', standby_value, disk['path']))
def run(self, name, action):
    """Run 'one<action>' on a service's rc script(s) (legacy Python 2 variant)."""
    service = self.datastore.get_one('service_definitions', ('name', '=', name))
    rc_scripts = service['rcng'].get('rc-scripts')
    try:
        # NOTE(review): `unicode` keeps this block Python 2 only -- confirm
        # before porting; left unchanged to preserve behavior.
        if type(rc_scripts) is unicode:
            system("/usr/sbin/service", rc_scripts, 'one' + action)

        if type(rc_scripts) is list:
            for i in rc_scripts:
                system("/usr/sbin/service", i, 'one' + action)
    except SubprocessException as e:
        # Fixed legacy `except X, e` syntax (valid since Python 2.6).
        raise TaskException(errno.EBUSY, e.err)
def find_dumps(dispatcher):
    """Save crash dumps from the swap partition of every online available disk."""
    logger.warning('Finding and saving crash dumps')
    online_disks = dispatcher.call_sync(
        'disk.query',
        [('id', 'in', get_available_disks(dispatcher)), ('online', '=', True)]
    )
    for disk in online_disks:
        swap_path = q.get(disk, 'status.swap_partition_path')
        if not swap_path:
            continue
        # Best effort: savecore fails harmlessly when there is no dump.
        try:
            system('/sbin/savecore', '/data/crash', swap_path)
        except SubprocessException:
            continue
def run(self, user):
    """Create a user account, its Samba password and home directory.

    Allocates the next free UID when the document has no 'id'.
    """
    if 'id' not in user:
        # Need to get next free UID
        uid = self.dispatcher.call_sync('users.next_uid')
    else:
        uid = user.pop('id')

    try:
        user['builtin'] = False
        user['unixhash'] = user.get('unixhash', '*')
        user['full_name'] = user.get('full_name', 'User &')
        user['shell'] = user.get('shell', '/bin/sh')
        user['home'] = user.get('home', os.path.join('/home', user['username']))
        user.setdefault('groups', [])
        user.setdefault('attributes', {})
        password = user.pop('password', None)
        if password:
            user['unixhash'] = crypted_password(password)

        self.datastore.insert('users', user, pkey=uid)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'accounts')

        if password:
            # Feed the password to smbpasswd on stdin, then read back and
            # store the generated hash.
            system('smbpasswd', '-D', '0', '-s', '-a', user['username'],
                   stdin='{0}\n{1}\n'.format(password, password))
            user['smbhash'] = system('pdbedit', '-d', '0', '-w', user['username'])[0]
            self.datastore.update('users', uid, user)

        if user['home'].startswith('/mnt') and not os.path.exists(user['home']):
            os.makedirs(user['home'])
            # Fix: 'id' was popped (or never set) above, so user['id'] raised
            # KeyError; use the uid computed earlier.
            os.chown(user['home'], uid, user['group'])
            # Fix: 0755 -> 0o755 octal literal (Python 3 compatible).
            os.chmod(user['home'], 0o755)
    except SubprocessException as e:
        raise TaskException(
            errno.ENXIO,
            'Could not generate samba password. stdout: {0}\nstderr: {1}'.
            format(e.out, e.err))
    except DuplicateKeyException as e:
        # Fixed legacy `except X, e` syntax (valid since Python 2.6).
        raise TaskException(errno.EBADMSG, 'Cannot add user: {0}'.format(str(e)))
def run(self, id, delete_params=None):
    """Delete a user account.

    delete_params: optional dict honouring 'delete_home_directory' and
    'delete_own_group'; otherwise the home directory and the user's own
    primary group are left behind with a warning. Finishes by removing the
    Samba entry (best effort), deleting the record, regenerating accounts
    files and emitting a 'user.changed' delete event.
    """
    subtasks = []
    try:
        user = self.datastore.get_by_id('users', id)
        if user is None:
            raise TaskException(errno.ENOENT, 'User with UID {0} does not exist'.format(id))

        if (delete_params and delete_params.get('delete_home_directory')
                and user['home'] not in (None, '/nonexistent')
                and os.path.exists(user['home'])):
            # Only delete the home directory when it is a dedicated dataset.
            homedir_dataset = self.dispatcher.call_sync(
                'volume.dataset.query',
                [('mountpoint', '=', user['home'])],
                {'single': True}
            )
            if homedir_dataset:
                subtasks.append(self.run_subtask('volume.dataset.delete', homedir_dataset['id']))
        elif user['home'] not in (None, '/nonexistent') and os.path.exists(user['home']):
            self.add_warning(TaskWarning(
                errno.EBUSY,
                'Home directory {} left behind, you need to delete it separately'.format(user['home']))
            )

        group = self.datastore.get_by_id('groups', user['group'])
        if group and user['uid'] == group['gid']:
            # The user owns its primary group.
            if delete_params and delete_params.get('delete_own_group'):
                subtasks.append(self.run_subtask('group.delete', user['group']))
            else:
                self.add_warning(TaskWarning(
                    errno.EBUSY,
                    'Group {0} ({1}) left behind, you need to delete it separately'.format(group['name'], group['gid']))
                )

        self.join_subtasks(*subtasks)

        if user.get('smbhash'):
            # Best effort: ignore a failing pdbedit removal.
            try:
                system('/usr/local/bin/pdbedit', '-x', user['username'])
            except SubprocessException as e:
                pass

        self.datastore.delete('users', id)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'accounts')
    except DatastoreException as e:
        raise TaskException(errno.EBADMSG, 'Cannot delete user: {0}'.format(str(e)))

    self.dispatcher.dispatch_event('user.changed', {
        'operation': 'delete',
        'ids': [id]
    })
def mount(name, properties):
    """Mount an NFS export under /nfs/<name>, unmounting a stale NFS mount first.

    properties: dict with at least 'address' and 'path' keys.
    """
    path = os.path.join('/nfs', name)
    if not os.path.isdir(path):
        os.makedirs(path)

    try:
        stat = bsd.statfs(path)
        if stat.fstype == 'nfs':
            # Already mounted as NFS -- unmount before remounting.
            umount(name)
    except Exception:
        # Fix: bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        # statfs/umount failures remain deliberately non-fatal.
        pass

    # XXX: Couldn't figure out how to do that with py-bsd's nmount
    system('/sbin/mount_nfs', '-osoft,intr,retrans=1,timeout=100',
           '{address}:{path}'.format(**properties), path)
def run(self, disk):
    """(Re)create a GPT boot layout on ``disk``: bios-boot + freebsd-zfs."""
    try:
        system('/sbin/gpart', 'destroy', '-F', disk)
    except SubprocessException:
        # ignore -- the disk may not have a partition table yet
        pass

    try:
        system('/sbin/gpart', 'create', '-s', 'gpt', disk)
        system('/sbin/gpart', 'add', '-t', 'bios-boot', '-i', '1', '-s', '512k', disk)
        system('/sbin/gpart', 'add', '-t', 'freebsd-zfs', '-i', '2', '-a', '4k', disk)
        system('/sbin/gpart', 'set', '-a', 'active', disk)
    except SubprocessException as err:
        # Fixed legacy `except X, err` syntax (removed in Python 3).
        raise TaskException(errno.EFAULT, 'Cannot format disk: {0}'.format(err.err))
def run(self, uid, updated_fields):
    """Update a user record, Samba hash and home directory (legacy variant).

    Home-directory changes are ignored for builtin users.
    """
    try:
        user = self.datastore.get_by_id('users', uid)

        # Ignore home changes for builtin users
        if 'home' in updated_fields and user.get('builtin'):
            updated_fields.pop('home')

        home_before = user.get('home')
        user.update(updated_fields)
        password = user.pop('password', None)
        if password:
            user['unixhash'] = crypted_password(password)

        self.datastore.update('users', uid, user)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'accounts')

        if password:
            # Feed the password to smbpasswd on stdin, then read back and
            # store the generated hash.
            system('smbpasswd', '-D', '0', '-s', '-a', user['username'],
                   stdin='{0}\n{1}\n'.format(password, password))
            user['smbhash'] = system('pdbedit', '-d', '0', '-w', user['username'])[0]
            self.datastore.update('users', uid, user)

        if user['home'].startswith('/mnt') and not os.path.exists(user['home']):
            if (home_before != '/nonexistent' and
                    home_before != user['home'] and
                    os.path.exists(home_before)):
                system('mv', home_before, user['home'])
            else:
                os.makedirs(user['home'])
                # NOTE(review): user['id'] is assumed present on records
                # fetched from the datastore -- confirm; `uid` would be safer.
                os.chown(user['home'], user['id'], user['group'])
                # Fix: 0755 -> 0o755 octal literal (Python 3 compatible).
                os.chmod(user['home'], 0o755)
    except SubprocessException as e:
        raise TaskException(
            errno.ENXIO,
            'Could not generate samba password. stdout: {0}\nstderr: {1}'.
            format(e.out, e.err))
    except DatastoreException as e:
        # Fixed legacy `except X, e` syntax (valid since Python 2.6).
        raise TaskException(errno.EBADMSG, 'Cannot update user: {0}'.format(str(e)))
def configure_disk(self, id):
    """Push APM and acoustic-level settings to a disk; schedule standby later."""
    disk = self.datastore.get_by_id('disks', id)
    acoustic = getattr(AcousticLevel, disk.get('acoustic_level', 'DISABLED')).value
    apm = disk.get('apm_mode', 0)
    system(
        '/usr/local/sbin/ataidle',
        '-P', str(apm),
        '-A', str(acoustic),
        disk['path']
    )

    standby = disk.get('standby_mode')
    if standby:
        # Apply the idle timeout a minute later, once the disk has settled.
        standby_mode = str(standby)
        gevent.spawn_later(60, lambda: system(
            '/usr/local/sbin/ataidle', '-I', standby_mode, disk['path']
        ))
def run(self, props):
    """Apply general system settings.

    Handles hostname, description, tags, language, timezone (rescheduling
    calendar tasks), console keymap and syslog server, then regenerates the
    affected config files and emits a 'system.general.changed' event.
    """
    if 'hostname' in props:
        netif.set_hostname(props['hostname'])

    if 'description' in props:
        self.configstore.set('system.description', props['description'])

    if 'tags' in props:
        self.configstore.set('system.tags', props['tags'])

    if 'language' in props:
        self.configstore.set('system.language', props['language'])

    if 'timezone' in props:
        new = props['timezone']
        old = self.configstore.get('system.timezone')
        if new != old:
            # Reschedule calendar tasks so their wall-clock times survive
            # the timezone switch; report how many were moved.
            count = self.run_subtask_sync('calendar_task.change_timezone', new)
            self.add_warning(TaskWarning(
                errno.ENXIO,
                "{0} calendar tasks rescheduled from timezone '{1}' to '{2}'".format(count, old, new)))
            self.configstore.set('system.timezone', new)
            # Make the new timezone effective in this process as well.
            os.environ['TZ'] = new

    if 'console_keymap' in props:
        new = props['console_keymap']
        old = self.configstore.get('system.console.keymap')
        if new != old:
            # kbdcontrol acts on the console it reads from.
            with open ('/dev/console') as fd:
                system('/usr/sbin/kbdcontrol', '-l', props['console_keymap'], file_obj_stdin=fd)
            self.configstore.set('system.console.keymap', new)

    syslog_changed = False
    if 'syslog_server' in props:
        self.configstore.set('system.syslog_server', props['syslog_server'])
        syslog_changed = True

    try:
        self.dispatcher.call_sync('etcd.generation.generate_group', 'localtime')
        if syslog_changed:
            # Tell logd to re-read its configuration.
            self.dispatcher.call_sync('serviced.job.send_signal', 'org.freenas.logd', signal.SIGHUP)
    except RpcException as e:
        raise TaskException(
            errno.ENXIO,
            'Cannot reconfigure system: {0}'.format(str(e),)
        )

    self.dispatcher.dispatch_event('system.general.changed', {
        'operation': 'update',
    })
def service_status(self):
    """Return ``'RUNNING'`` when every relevant UPS rc script reports status.

    :raises RpcException: ENOENT when any rc script's status check fails
    """
    config = self.get_config()

    # The 'nut' daemon is only checked in MASTER mode; the logger and
    # monitor scripts are always checked.
    scripts = (['nut'] if config['mode'] == 'MASTER' else []) + ['nut_upslog', 'nut_upsmon']

    try:
        for script in scripts:
            system("/usr/sbin/service", script, 'onestatus')
    except SubprocessException:
        raise RpcException(errno.ENOENT, "UPS service is not running")

    return 'RUNNING'
def remove_swap(dispatcher, disks):
    """Disable and destroy any swap mirror that uses one of ``disks``.

    Failures are logged and skipped so the remaining mirrors are still
    processed (best-effort teardown).
    """
    disks = set(disks)
    logger.log(TRACE, 'Remove swap request: disks={0}'.format(','.join(disks)))
    if not disks:
        return

    for swap in list(get_swap_info(dispatcher).values()):
        # Skip mirrors that share no member disk with the request
        if disks.isdisjoint(swap['disks']):
            continue
        try:
            logger.log(TRACE, 'Removing swap mirror {0}'.format(swap['name']))
            system('/sbin/swapoff', os.path.join('/dev/mirror', swap['name']))
            system('/sbin/gmirror', 'destroy', swap['name'])
        except SubprocessException as err:
            logger.warn('Failed to disable swap on {0}: {1}'.format(swap['name'], err.err.strip()))
            logger.warn('Continuing without {0}'.format(swap['name']))
def remove_swap(dispatcher, disks):
    """Disable and destroy swap mirrors using ``disks``, then rebalance swap.

    Teardown is best-effort: a failing mirror is logged and skipped.
    """
    disks = set(disks)
    for swap in list(get_swap_info(dispatcher).values()):
        if disks & set(swap['disks']):
            try:
                system('/sbin/swapoff', os.path.join('/dev/mirror', swap['name']))
                system('/sbin/gmirror', 'destroy', swap['name'])
            except SubprocessException as err:
                logger.warn('Failed to disable swap on {0}: {1}'.format(swap['name'], err.err.strip()))
                logger.warn('Continuing without {0}'.format(swap['name']))

    # Try to create new swap partitions, as at this stage we
    # might have two unused data disks
    # (idiomatic truthiness test instead of len(disks) > 0)
    if disks:
        rearrange_swap(dispatcher)
def run(self, id, action):
    """Perform ``action`` (start/stop/restart/reload) on a service.

    :param id: service_definitions datastore id
    :param action: verb applied via an RPC hook or the rc script(s)
    :raises TaskException: unknown service, unsupported operation,
        or a failing hook / rc script
    """
    if not self.datastore.exists('service_definitions', ('id', '=', id)):
        raise TaskException(errno.ENOENT, 'Service {0} not found'.format(id))

    service = self.datastore.get_by_id('service_definitions', id)
    state, pid = get_status(self.dispatcher, self.datastore, service)
    hook_rpc = service.get('{0}_rpc'.format(action))
    name = service['name']

    # No-op when the service is already in the requested state
    if state == 'RUNNING' and action == 'start':
        return
    if state == 'STOPPED' and action == 'stop':
        return

    # A service may provide a dedicated RPC hook that replaces rc handling
    if hook_rpc:
        try:
            return self.dispatcher.call_sync(hook_rpc)
        except RpcException as e:
            raise TaskException(
                errno.EBUSY,
                'Hook {0} for {1} failed: {2}'.format(action, name, e))

    if 'rcng' not in service:
        raise TaskException(errno.ENOTSUP, 'Operation not supported by the service')

    rc_scripts = service['rcng'].get('rc-scripts')
    # Scripts that support 'reload'; defaults to all rc scripts
    reload_scripts = service['rcng'].get('reload', rc_scripts)

    try:
        # isinstance() instead of 'type(x) is ...' type checks
        if isinstance(rc_scripts, str):
            system("/usr/sbin/service", rc_scripts, 'one' + action)
        if isinstance(rc_scripts, list):
            for i in rc_scripts:
                if action == 'reload' and i not in reload_scripts:
                    continue
                system("/usr/sbin/service", i, 'one' + action)
    except SubprocessException as e:
        raise TaskException(errno.EBUSY, e.err)

    self.dispatcher.dispatch_event('service.changed', {
        'operation': 'update',
        'ids': [service['id']]
    })
async def create(cls):
    """Create, configure and connect the app-wide websocketClient instance."""
    # Register the new client on the running app
    self = App.get_running_app().websocket_client = websocketClient()
    self.app = App.get_running_app()

    # Configuration file and system helper
    self.config = self.app.config
    self.system = system()

    # Run-state flags
    self._keep_running = True
    self._switch_device = False
    self.connected = False
    self.connection = None

    # Timing parameters (seconds)
    self.reply_timeout = 60
    self.ping_timeout = 60
    self.sleep_time = 10

    # Bookkeeping for background threads and asyncio tasks
    self.thread_list = {}
    self.task_list = {}

    # Station and websocket endpoint from the configuration file
    self.station = int(self.config['Station']['StationID'])
    self.url = 'wss://ws.weatherflow.com/swd/data?token=' + self.config['Keys']['WeatherFlow']

    # Attach the observation parser to the app
    self.app.obsParser = obsParser()

    # Open the websocket connection before handing the client back
    await self.__async__connect()
    return self
def query(self, filter=None, params=None):
    """Query the configuration of all IPMI LAN channels.

    :param filter: optional query filter clauses passed through to q.query
    :param params: optional query params passed through to q.query
    :raises RpcException: ENXIO when IPMI is unavailable, EFAULT when
        ipmitool fails for a channel
    """
    if not self.is_ipmi_loaded():
        raise RpcException(errno.ENXIO, 'The IPMI device could not be found')

    result = []
    for channel in self.channels():
        try:
            out, err = system('/usr/local/bin/ipmitool', 'lan', 'print', str(channel))
        except SubprocessException as e:
            raise RpcException(
                errno.EFAULT,
                'Cannot receive IPMI configuration: {0}'.format(e.err.strip()))

        # Parse 'key : value' attribute lines, keeping only the mapped keys
        raw = {k.strip(): v.strip() for k, v in RE_ATTRS.findall(out)}
        ret = {IPMI_ATTR_MAP[k]: v for k, v in raw.items() if k in IPMI_ATTR_MAP}
        ret['id'] = channel
        # Normalize ipmitool's textual values into None/bool
        ret['vlan_id'] = None if ret.get('vlan_id') == 'Disabled' else ret.get('vlan_id')
        ret['dhcp'] = ret.get('dhcp') == 'DHCP Address'
        result.append(ret)

    return q.query(result, *(filter or []), stream=True, **(params or {}))
def service_restart(self):
    """Restart the UPS rc scripts, quieting the monitor during the bounce.

    :raises TaskException: EBUSY when any rc script invocation fails
    """
    config = self.get_config()

    # Stop the monitor first so it won't trigger signals while nut restarts
    actions = [
        ('nut_upsmon', 'stop'),
        ('nut_upslog', 'restart'),
    ]
    if config['mode'] == 'MASTER':
        actions.append(('nut', 'restart'))
    actions.append(('nut_upsmon', 'start'))

    try:
        for name, verb in actions:
            system("/usr/sbin/service", name, 'one' + verb)
    except SubprocessException as e:
        raise TaskException(errno.EBUSY, e.err)
def run(self, user, command):
    """Run a shell command as the given user via su and print its stdout.

    :param user: account name to run the command as
    :param command: shell command string passed to /bin/sh -c
    :raises TaskException: EFAULT when the command fails
    """
    try:
        out, err = system('/usr/bin/su', '-m', user, '-c', '/bin/sh', '-c', command)
    except SubprocessException as e:
        # Surface the subprocess stderr instead of a bare 'Command failed'
        # so the caller can see why the command failed
        raise TaskException(errno.EFAULT, 'Command failed: {0}'.format(e.err))
    print(out)