def setacl_nfs4(self, job, data):
    job.set_progress(0, 'Preparing to set acl.')
    options = data['options']
    dacl = data.get('dacl', [])

    if osc.IS_LINUX or not os.pathconf(data['path'], 64):
        raise CallError(
            f"NFSv4 ACLs are not supported on path {data['path']}",
            errno.EOPNOTSUPP
        )

    self._common_perm_path_validate(data['path'])

    if dacl and options['stripacl']:
        raise CallError(
            'Setting ACL and stripping ACL are not permitted simultaneously.',
            errno.EINVAL
        )

    uid = -1 if data.get('uid', None) is None else data['uid']
    gid = -1 if data.get('gid', None) is None else data['gid']
    if options['stripacl']:
        a = acl.ACL(file=data['path'])
        a.strip()
        a.apply(data['path'])
    else:
        inheritable_is_present = False
        cleaned_acl = []
        lockace_is_present = False
        for entry in dacl:
            ace = {
                'tag': (acl.ACLWho(entry['tag'])).name,
                'id': entry['id'],
                'type': entry['type'],
                'perms': self.__convert_to_adv_permset(entry['perms']['BASIC']) if 'BASIC' in entry['perms'] else entry['perms'],
                'flags': self.__convert_to_adv_flagset(entry['flags']['BASIC']) if 'BASIC' in entry['flags'] else entry['flags'],
            }
            if ace['flags'].get('INHERIT_ONLY') and not ace['flags'].get(
                    'DIRECTORY_INHERIT', False) and not ace['flags'].get('FILE_INHERIT', False):
                raise CallError(
                    'Invalid flag combination. DIRECTORY_INHERIT or FILE_INHERIT must be set if INHERIT_ONLY is set.',
                    errno.EINVAL
                )
            if ace['tag'] == 'EVERYONE' and self.__convert_to_basic_permset(ace['perms']) == 'NOPERMS':
                lockace_is_present = True
            elif ace['flags'].get('DIRECTORY_INHERIT') or ace['flags'].get('FILE_INHERIT'):
                inheritable_is_present = True

            cleaned_acl.append(ace)

        if not inheritable_is_present:
            raise CallError('At least one inheritable ACL entry is required', errno.EINVAL)

        if options['canonicalize']:
            cleaned_acl = self.canonicalize_acl_order(cleaned_acl)

        if not lockace_is_present:
            locking_ace = {
                'tag': 'EVERYONE',
                'id': None,
                'type': 'ALLOW',
                'perms': self.__convert_to_adv_permset('NOPERMS'),
                'flags': self.__convert_to_adv_flagset('INHERIT')
            }
            cleaned_acl.append(locking_ace)

        a = acl.ACL()
        a.__setstate__(cleaned_acl)
        a.apply(data['path'])

    if not options['recursive']:
        os.chown(data['path'], uid, gid)
        job.set_progress(100, 'Finished setting NFS4 ACL.')
        return

    job.set_progress(10, f'Recursively setting ACL on {data["path"]}.')
    self._winacl(data['path'], 'clone', uid, gid, options)
    job.set_progress(100, 'Finished setting NFS4 ACL.')
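
# Illustrative only -- a hypothetical `dacl` entry in the shape this job
# consumes. 'EVERYONE', 'ALLOW', 'NOPERMS' and 'INHERIT' all appear in the
# code above; any other tag or permset name would be an assumption about
# the schema.
#
#   example_ace = {
#       'tag': 'EVERYONE',              # mapped through acl.ACLWho
#       'id': None,
#       'type': 'ALLOW',
#       'perms': {'BASIC': 'NOPERMS'},  # BASIC sets expand via __convert_to_adv_permset
#       'flags': {'BASIC': 'INHERIT'},  # BASIC sets expand via __convert_to_adv_flagset
#   }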
def common_load_dataset_checks(self, ds):
    self.common_encryption_checks(ds)
    if ds.key_loaded:
        raise CallError(f'{ds.name} key is already loaded')
def get_quota(self, ds, quota_type):
    if quota_type == 'dataset':
        dataset = self.query([('id', '=', ds)], {'get': True})
        return [{
            'quota_type': 'DATASET',
            'id': ds,
            'name': ds,
            'quota': int(dataset['properties']['quota']['rawvalue']),
            'refquota': int(dataset['properties']['refquota']['rawvalue']),
            'used_bytes': int(dataset['properties']['used']['rawvalue']),
        }]

    quota_list = []
    quota_get = subprocess.run(
        [
            'zfs', f'{quota_type}space', '-H', '-n', '-p', '-o',
            'name,used,quota,objquota,objused', ds
        ],
        capture_output=True,
        check=False,
    )
    if quota_get.returncode != 0:
        raise CallError(
            f'Failed to get {quota_type} quota for {ds}: [{quota_get.stderr.decode()}]'
        )

    for quota in quota_get.stdout.decode().splitlines():
        m = quota.split('\t')
        if len(m) != 5:
            self.logger.debug('Invalid %s quota: %s', quota_type.lower(), quota)
            continue

        entry = {
            'quota_type': quota_type.upper(),
            'id': int(m[0]),
            'name': None,
            'quota': int(m[2]),
            'used_bytes': int(m[1]),
            'used_percent': 0,
            'obj_quota': int(m[3]) if m[3] != '-' else 0,
            'obj_used': int(m[4]) if m[4] != '-' else 0,
            'obj_used_percent': 0,
        }
        if entry['quota'] > 0:
            entry['used_percent'] = entry['used_bytes'] / entry['quota'] * 100

        if entry['obj_quota'] > 0:
            entry['obj_used_percent'] = entry['obj_used'] / entry['obj_quota'] * 100

        try:
            if entry['quota_type'] == 'USER':
                entry['name'] = (
                    self.middleware.call_sync('user.get_user_obj', {'uid': entry['id']})
                )['pw_name']
            else:
                entry['name'] = (
                    self.middleware.call_sync('group.get_group_obj', {'gid': entry['id']})
                )['gr_name']
        except Exception:
            self.logger.debug('Unable to resolve %s id %d to name',
                              quota_type.lower(), entry['id'])

        quota_list.append(entry)

    return quota_list
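
# Worked example (invented values): one tab-separated line of
# `zfs userspace -H -n -p -o name,used,quota,objquota,objused` maps to an
# entry as follows:
#
#   '1000\t2147483648\t10737418240\t-\t532'
#       -> {'quota_type': 'USER', 'id': 1000, 'name': None,
#           'quota': 10737418240, 'used_bytes': 2147483648,
#           'used_percent': 20.0,   # 2147483648 / 10737418240 * 100
#           'obj_quota': 0, 'obj_used': 532, 'obj_used_percent': 0}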
async def do_update(self, pk, data):
    """
    Update attributes of an existing user.
    """
    user = await self._get_instance(pk)

    verrors = ValidationErrors()

    if 'group' in data:
        group = await self.middleware.call('datastore.query', 'account.bsdgroups', [
            ('id', '=', data['group'])
        ])
        if not group:
            verrors.add('user_update.group', f'Group {data["group"]} not found', errno.ENOENT)
        group = group[0]
    else:
        group = user['group']
        user['group'] = group['id']

    await self.__common_validation(verrors, data, 'user_update', pk=pk)

    try:
        st = os.stat(user.get("home", "/nonexistent")).st_mode
        old_mode = f'{stat.S_IMODE(st):03o}'
    except FileNotFoundError:
        old_mode = None

    home = data.get('home') or user['home']
    has_home = home != '/nonexistent'
    # root user (uid 0) is an exception to the rule
    if data.get('sshpubkey') and not home.startswith('/mnt') and user['uid'] != 0:
        verrors.add('user_update.sshpubkey',
                    'Home directory is not writable, leave this blank')

    # Do not allow attributes to be changed for builtin user
    if user['builtin']:
        for i in ('group', 'home', 'home_mode', 'uid', 'username', 'smb'):
            if i in data and data[i] != user[i]:
                verrors.add(f'user_update.{i}', 'This attribute cannot be changed')

    if not user['smb'] and data.get('smb') and not data.get('password'):
        # Changing from non-smb user to smb user requires re-entering password.
        verrors.add('user_update.smb',
                    'Password must be changed in order to enable SMB authentication')

    verrors.check()

    must_change_pdb_entry = False
    for k in ('username', 'password', 'locked'):
        new_val = data.get(k)
        old_val = user.get(k)
        if new_val is not None and old_val != new_val:
            if k == 'username':
                try:
                    await self.middleware.call("smb.remove_passdb_user", old_val)
                except Exception:
                    self.logger.debug("Failed to remove passdb entry for user [%s]",
                                      old_val, exc_info=True)

            must_change_pdb_entry = True

    if user['smb'] is True and data.get('smb') is False:
        try:
            must_change_pdb_entry = False
            await self.middleware.call("smb.remove_passdb_user", user['username'])
        except Exception:
            self.logger.debug("Failed to remove passdb entry for user [%s]",
                              user['username'], exc_info=True)

    if user['smb'] is False and data.get('smb') is True:
        must_change_pdb_entry = True

    # Copy the home directory if it changed
    if (
        has_home and
        'home' in data and
        data['home'] != user['home'] and
        not data['home'].startswith(f'{user["home"]}/')
    ):
        home_copy = True
        home_old = user['home']
    else:
        home_copy = False

    # After this point user dict has values from data
    user.update(data)

    if home_copy and not os.path.isdir(user['home']):
        try:
            os.makedirs(user['home'])
            mode_to_set = user.get('home_mode')
            if not mode_to_set:
                mode_to_set = '700' if old_mode is None else old_mode

            perm_job = await self.middleware.call('filesystem.setperm', {
                'path': user['home'],
                'uid': user['uid'],
                'gid': group['bsdgrp_gid'],
                'mode': mode_to_set,
                'options': {'stripacl': True},
            })
            await perm_job.wait()
        except OSError:
            self.logger.warn('Failed to chown homedir', exc_info=True)
        if not os.path.isdir(user['home']):
            raise CallError(f'{user["home"]} is not a directory')

    home_mode = user.pop('home_mode', None)
    if user['builtin']:
        home_mode = None

    try:
        update_sshpubkey_args = [
            home_old if home_copy else user['home'], user, group['bsdgrp_group'],
        ]
        await self.update_sshpubkey(*update_sshpubkey_args)
    except PermissionError as e:
        self.logger.warn('Failed to update authorized keys', exc_info=True)
        raise CallError(f'Failed to update authorized keys: {e}')
    else:
        if user['uid'] == 0:
            if await self.middleware.call('failover.licensed'):
                try:
                    await self.middleware.call('failover.call_remote',
                                               'user.update_sshpubkey',
                                               update_sshpubkey_args)
                except Exception:
                    self.logger.error('Failed to sync root ssh pubkey to standby node',
                                      exc_info=True)

    if home_copy:
        """
        Background copy of user home directory to new path as the user
        in question.
        """
        await self.middleware.call('user.do_home_copy', home_old, user['home'],
                                   user['username'], home_mode, user['uid'])
    elif has_home and home_mode is not None:
        """
        A non-recursive call to set permissions should return almost
        immediately.
        """
        perm_job = await self.middleware.call('filesystem.setperm', {
            'path': user['home'],
            'mode': home_mode,
            'options': {'stripacl': True},
        })
        await perm_job.wait()

    user.pop('sshpubkey', None)
    await self.__set_password(user)

    if 'groups' in user:
        groups = user.pop('groups')
        await self.__set_groups(pk, groups)

    user = await self.user_compress(user)
    await self.middleware.call('datastore.update', 'account.bsdusers', pk, user,
                               {'prefix': 'bsdusr_'})

    await self.middleware.call('service.reload', 'user')

    if user['smb'] and must_change_pdb_entry:
        await self.__set_smbpasswd(user['username'])

    return pk
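
# Illustrative only -- a hypothetical call into this method using keys it
# actually reads ('home', 'home_mode', 'groups'); the endpoint name
# 'user.update' and all values are assumptions, not taken from the schema.
#
#   pk = await self.middleware.call('user.update', 42, {
#       'home': '/mnt/tank/homes/bob',
#       'home_mode': '700',
#       'groups': [10, 15],
#   })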
def upgrade(self, pool):
    try:
        with libzfs.ZFS() as zfs:
            zfs.get(pool).upgrade()
    except libzfs.ZFSException as e:
        raise CallError(str(e))
async def legacy_validate(self, keytab):
    err = await self._validate({'file': keytab})
    try:
        err.check()
    except Exception as e:
        raise CallError(e)
    return pk

@accepts(Int('id'), Dict('options', Bool('delete_users', default=False)))
async def do_delete(self, pk, options=None):
    """
    Delete group `id`.

    The `delete_users` option deletes all users that have this group as
    their primary group.
    """
    group = await self._get_instance(pk)
    if group['smb'] and (g := (await self.middleware.call('smb.groupmap_list')).get(group['group'])):
        await self.middleware.call('smb.groupmap_delete', {"sid": g['SID']})

    if group['builtin']:
        raise CallError('A built-in group cannot be deleted.', errno.EACCES)

    nogroup = await self.middleware.call('datastore.query', 'account.bsdgroups',
                                         [('group', '=', 'nogroup')],
                                         {'prefix': 'bsdgrp_', 'get': True})

    for i in await self.middleware.call('datastore.query', 'account.bsdusers',
                                        [('group', '=', group['id'])],
                                        {'prefix': 'bsdusr_'}):
        if options['delete_users']:
            await self.middleware.call('datastore.delete', 'account.bsdusers', i['id'])
        else:
            await self.middleware.call('datastore.update', 'account.bsdusers', i['id'],
                                       {'group': nogroup['id']}, {'prefix': 'bsdusr_'})

    await self.middleware.call('datastore.delete', 'account.bsdgroups', pk)

    await self.middleware.call('service.reload', 'user')
async def validate_k8s_setup(self):
    k8s_config = await self.middleware.call('kubernetes.config')
    if not k8s_config['dataset']:
        raise CallError('Please configure a pool for Kubernetes.')
    if not await self.middleware.call('service.started', 'kubernetes'):
        raise CallError('Kubernetes service is not running.')
async def do_create(self, data):
    verrors = ValidationErrors()

    if (
        not data.get('group') and not data.get('group_create')
    ) or (
        data.get('group') is not None and data.get('group_create')
    ):
        verrors.add('group', 'You need to either provide a group or group_create',
                    errno.EINVAL)

    await self.__common_validation(verrors, data)

    if data.get('sshpubkey') and not data['home'].startswith('/mnt'):
        verrors.add('sshpubkey', 'Home directory is not writable, leave this blank')

    if verrors:
        raise verrors

    groups = data.pop('groups') or []
    create = data.pop('group_create')

    if create:
        group = await self.middleware.call('group.query', [('group', '=', data['username'])])
        if group:
            group = group[0]
        else:
            group = await self.middleware.call('group.create', {'name': data['username']})
            group = (await self.middleware.call('group.query', [('id', '=', group)]))[0]
        data['group'] = group['id']
    else:
        group = await self.middleware.call('group.query', [('id', '=', data['group'])])
        if not group:
            raise CallError(f'Group {data["group"]} not found')
        group = group[0]

    # Is this a new directory or not? Let's not nuke existing directories,
    # e.g. /, /root, /mnt/tank/my-dataset, etc ;).
    new_homedir = False
    home_mode = data.pop('home_mode')
    if data['home'] != '/nonexistent':
        try:
            os.makedirs(data['home'], mode=int(home_mode, 8))
            os.chown(data['home'], data['uid'], group['gid'])
        except FileExistsError:
            if not os.path.isdir(data['home']):
                raise CallError(
                    'Path for home directory already '
                    'exists and is not a directory',
                    errno.EEXIST
                )
        except OSError as oe:
            raise CallError(
                'Failed to create the home directory '
                f'({data["home"]}) for user: {oe}'
            )
        else:
            new_homedir = True
        if os.stat(data['home']).st_dev == os.stat('/mnt').st_dev:
            raise CallError(
                f'Path for the home directory ({data["home"]}) '
                'must be under a volume or dataset'
            )

    if not data.get('uid'):
        data['uid'] = await self.get_next_uid()

    pk = None  # Make sure pk exists to rollback in case of an error
    try:
        password = await self.__set_password(data)

        sshpubkey = data.pop('sshpubkey', None)  # datastore does not have sshpubkey
        pk = await self.middleware.call('datastore.insert', 'account.bsdusers', data,
                                        {'prefix': 'bsdusr_'})

        await self.__set_groups(pk, groups)
    except Exception:
        if pk is not None:
            await self.middleware.call('datastore.delete', 'account.bsdusers', pk)
        if new_homedir:
            # Be as atomic as possible when creating the user if
            # commands failed to execute cleanly.
            shutil.rmtree(data['home'])
        raise

    await self.middleware.call('service.reload', 'user')

    await self.__set_smbpasswd(data['username'], password)

    if os.path.exists(data['home']):
        for f in os.listdir(SKEL_PATH):
            if f.startswith('dot'):
                dest_file = os.path.join(data['home'], f[3:])
            else:
                dest_file = os.path.join(data['home'], f)
            if not os.path.exists(dest_file):
                shutil.copyfile(os.path.join(SKEL_PATH, f), dest_file)
                os.chown(dest_file, data['uid'], group['gid'])

    data['sshpubkey'] = sshpubkey
    await self.__update_sshpubkey(data, group['group'])

    return pk
async def get_pagesize(self):
    cp = await run(['getconf', 'PAGESIZE'], check=False)
    if cp.returncode:
        raise CallError(f'Unable to retrieve pagesize value: {cp.stderr.decode()}')
    return int(cp.stdout.decode().strip())
async def migrate(self, _from, _to):
    config = await self.config()

    await self.__setup_datasets(_to, config['uuid'])

    if _from:
        path = '/tmp/system.new'
        if not os.path.exists('/tmp/system.new'):
            os.mkdir('/tmp/system.new')
        else:
            # Make sure we clean up any previous attempts
            await run('umount', '-R', path, check=False)
    else:
        path = SYSDATASET_PATH
    await self.__mount(_to, config['uuid'], path=path)

    restart = ['collectd', 'rrdcached', 'syslogd']

    if await self.middleware.call('service.started', 'cifs'):
        restart.insert(0, 'cifs')

    restart.extend(['open-vm-tools', 'webdav'])

    try:
        if osc.IS_LINUX:
            await self.middleware.call('cache.put', 'use_syslog_dataset', False)
            await self.middleware.call('service.restart', 'syslogd')

        if await self.middleware.call('service.started', 'glusterd'):
            restart.insert(0, 'glusterd')

        # Middleware itself will log to the syslog dataset.
        # This may be prone to a race condition since we don't wait for the
        # workers to stop logging; however, all the work done before the
        # umount seems to make it seamless.
        await self.middleware.call('core.stop_logging')

        for i in restart:
            await self.middleware.call('service.stop', i)

        if _from:
            cp = await run('rsync', '-az', f'{SYSDATASET_PATH}/', '/tmp/system.new',
                           check=False)
            if cp.returncode == 0:
                await self.__umount(_from, config['uuid'])
                await self.__umount(_to, config['uuid'])
                await self.__mount(_to, config['uuid'], SYSDATASET_PATH)
                proc = await Popen(
                    f'zfs list -H -o name {_from}/.system|xargs zfs destroy -r',
                    shell=True
                )
                await proc.communicate()

                os.rmdir('/tmp/system.new')
            else:
                raise CallError(
                    f'Failed to rsync from {SYSDATASET_PATH}: {cp.stderr.decode()}'
                )
    finally:
        if osc.IS_LINUX:
            await self.middleware.call('cache.pop', 'use_syslog_dataset')

        restart.reverse()
        for i in restart:
            await self.middleware.call('service.start', i)

    await self.__nfsv4link(config)
def send_raw(self, job, message, config=None):
    interval = message.get('interval')
    if interval is None:
        interval = timedelta()
    else:
        interval = timedelta(seconds=interval)

    sw_name = self.middleware.call_sync('system.info')['version'].split('-', 1)[0]

    channel = message.get('channel')
    if not channel:
        channel = sw_name.lower()
    if interval > timedelta():
        channelfile = '/tmp/.msg.%s' % (channel)
        last_update = datetime.now() - interval
        try:
            last_update = datetime.fromtimestamp(os.stat(channelfile).st_mtime)
        except OSError:
            pass
        timediff = datetime.now() - last_update
        if (timediff >= interval) or (timediff < timedelta()):
            # Make sure mtime is modified
            # We could use os.utime but this is simpler!
            with open(channelfile, 'w') as f:
                f.write('!')
        else:
            raise CallError('This message was already sent in the given interval')

    if not config:
        config = self.middleware.call_sync('mail.config')
    verrors = self.__password_verify(config['pass'], 'mail-config.pass')
    if verrors:
        raise verrors
    to = message.get('to')
    if not to:
        to = [
            self.middleware.call_sync('user.query', [('username', '=', 'root')],
                                      {'get': True})['email']
        ]
        if not to[0]:
            raise CallError('Email address for root is not configured')

    if message.get('attachments'):
        job.check_pipe("input")

        def read_json():
            f = job.pipes.input.r
            data = b''
            i = 0
            while True:
                read = f.read(1048576)  # 1MiB
                if read == b'':
                    break
                data += read
                i += 1
                if i > 50:
                    raise ValueError('Attachments bigger than 50MB not allowed yet')
            if data == b'':
                return None
            return json.loads(data)

        attachments = read_json()
    else:
        attachments = None

    if 'html' in message or attachments:
        msg = MIMEMultipart()
        msg.preamble = message['text']
        if 'html' in message:
            msg2 = MIMEMultipart('alternative')
            msg2.attach(MIMEText(message['text'], 'plain', _charset='utf-8'))
            msg2.attach(MIMEText(message['html'], 'html', _charset='utf-8'))
            msg.attach(msg2)
        if attachments:
            for attachment in attachments:
                m = Message()
                m.set_payload(attachment['content'])
                for header in attachment.get('headers'):
                    m.add_header(header['name'], header['value'],
                                 **(header.get('params') or {}))
                msg.attach(m)
    else:
        msg = MIMEText(message['text'], _charset='utf-8')

    msg['Subject'] = message['subject']

    msg['From'] = config['fromemail']
    msg['To'] = ', '.join(to)
    if message.get('cc'):
        msg['Cc'] = ', '.join(message.get('cc'))
    msg['Date'] = formatdate()

    local_hostname = socket.gethostname()

    msg['Message-ID'] = "<%s-%s.%s@%s>" % (
        sw_name.lower(),
        datetime.utcnow().strftime("%Y%m%d.%H%M%S.%f"),
        base64.urlsafe_b64encode(os.urandom(3)),
        local_hostname
    )

    extra_headers = message.get('extra_headers') or {}
    for key, val in list(extra_headers.items()):
        if key in msg:
            msg.replace_header(key, val)
        else:
            msg[key] = val

    syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_MAIL)
    try:
        server = self._get_smtp_server(config, message['timeout'],
                                       local_hostname=local_hostname)
        # NOTE: Don't do this.
        #
        # If smtplib.SMTP* tells you to run connect() first, it's because the
        # mailserver it tried connecting to via the outgoing server argument
        # was unreachable and it tried to connect to 'localhost' and barfed.
        # This is because FreeNAS doesn't run a full MTA.
        # else:
        #     server.connect()
        headers = '\n'.join([f'{k}: {v}' for k, v in msg._headers])
        syslog.syslog(f"sending mail to {', '.join(to)}\n{headers}")
        server.sendmail(config['fromemail'], to, msg.as_string())
        server.quit()
    except Exception as e:
        # Don't spam syslog with these messages. They should only end up in the
        # test-email pane.
        # We are only interested in ValueError, not subclasses.
        if e.__class__ is ValueError:
            raise CallError(str(e))
        syslog.syslog(f'Failed to send email to {", ".join(to)}: {str(e)}')
        if isinstance(e, smtplib.SMTPAuthenticationError):
            raise CallError(f'Authentication error ({e.smtp_code}): {e.smtp_error}',
                            errno.EAUTH)
        self.logger.warn('Failed to send email: %s', str(e), exc_info=True)
        if message['queue']:
            with MailQueue() as mq:
                mq.append(msg)
        raise CallError(f'Failed to send email: {e}')
    return True
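
# Illustrative only -- a hypothetical `message` payload built from the keys
# this method reads ('subject', 'text', 'to', 'interval', 'channel',
# 'timeout', 'queue'); all values are invented.
#
#   message = {
#       'subject': 'Nightly report',
#       'text': 'All pools healthy.',
#       'to': ['admin@example.com'],
#       'interval': 3600,   # suppress duplicates sent within the hour
#       'channel': 'reports',
#       'timeout': 300,
#       'queue': True,      # re-queue the message if sending fails
#   }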
def setperm(self, job, data):
    """
    Remove extended ACL from specified path. If `mode` is specified then
    the mode will be applied to the path and files and subdirectories
    depending on which `options` are selected. Mode should be formatted
    as string representation of octal permissions bits.

    `uid` the desired UID of the file user. If set to None (the default),
    then user is not changed.

    `gid` the desired GID of the file group. If set to None (the default),
    then group is not changed.

    `stripacl` setperm will fail if an extended ACL is present on `path`,
    unless `stripacl` is set to True.

    `recursive` remove ACLs recursively, but do not traverse dataset
    boundaries.

    `traverse` remove ACLs from child datasets.

    If no `mode` is set, and `stripacl` is True, then non-trivial ACLs
    will be converted to trivial ACLs. An ACL is trivial if it can be
    expressed as a file mode without losing any access rules.
    """
    job.set_progress(0, 'Preparing to set permissions.')
    options = data['options']
    mode = data.get('mode', None)

    uid = -1 if data['uid'] is None else data['uid']
    gid = -1 if data['gid'] is None else data['gid']

    self._common_perm_path_validate(data['path'])

    acl_is_trivial = self.middleware.call_sync('filesystem.acl_is_trivial', data['path'])
    if not acl_is_trivial and not options['stripacl']:
        raise CallError(
            f'Non-trivial ACL present on [{data["path"]}]. Option "stripacl" required to change permission.',
            errno.EINVAL
        )

    if mode is not None:
        mode = int(mode, 8)

    a = acl.ACL(file=data['path'])
    a.strip()
    a.apply(data['path'])

    if mode:
        os.chmod(data['path'], mode)

    if uid != -1 or gid != -1:
        os.chown(data['path'], uid, gid)

    if not options['recursive']:
        job.set_progress(100, 'Finished setting permissions.')
        return

    action = 'clone' if mode else 'strip'
    job.set_progress(10, f'Recursively setting permissions on {data["path"]}.')
    self._winacl(data['path'], action, uid, gid, options)
    job.set_progress(100, 'Finished setting permissions.')
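
# Illustrative only -- a hypothetical payload for the 'filesystem.setperm'
# job described in the docstring above (the endpoint name also appears
# elsewhere in this codebase); path and ids are invented.
#
#   data = {
#       'path': '/mnt/tank/share',
#       'mode': '755',
#       'uid': 1000,
#       'gid': 1000,
#       'options': {'stripacl': True, 'recursive': True, 'traverse': False},
#   }
#   middleware.call_sync('filesystem.setperm', data)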
def listdir(self, path, filters=None, options=None):
    """
    Get the contents of a directory.

    Each entry of the list consists of:
      name(str): name of the file
      path(str): absolute path of the entry
      realpath(str): absolute real path of the entry (if SYMLINK)
      type(str): DIRECTORY | FILE | SYMLINK | OTHER
      size(int): size of the entry
      mode(int): file mode/permission
      uid(int): user id of entry owner
      gid(int): group id of entry owner
      acl(bool): extended ACL is present on file
    """
    if not os.path.exists(path):
        raise CallError(f'Directory {path} does not exist', errno.ENOENT)

    if not os.path.isdir(path):
        raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)

    rv = []
    for entry in os.scandir(path):
        if entry.is_symlink():
            etype = 'SYMLINK'
        elif entry.is_dir():
            etype = 'DIRECTORY'
        elif entry.is_file():
            etype = 'FILE'
        else:
            etype = 'OTHER'

        data = {
            'name': entry.name,
            'path': entry.path,
            'realpath': os.path.realpath(entry.path) if etype == 'SYMLINK' else entry.path,
            'type': etype,
        }
        try:
            stat = entry.stat()
            data.update({
                'size': stat.st_size,
                'mode': stat.st_mode,
                'acl': False if self.acl_is_trivial(data["realpath"]) else True,
                'uid': stat.st_uid,
                'gid': stat.st_gid,
            })
        except FileNotFoundError:
            data.update({'size': None, 'mode': None, 'acl': None, 'uid': None, 'gid': None})

        rv.append(data)

    return filter_list(rv, filters=filters or [], options=options or {})
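
# Illustrative only -- one entry of the returned list, with invented values:
#
#   {
#       'name': 'notes.txt',
#       'path': '/mnt/tank/docs/notes.txt',
#       'realpath': '/mnt/tank/docs/notes.txt',
#       'type': 'FILE',
#       'size': 2048,
#       'mode': 33188,   # st_mode of a regular file with permissions 0o644
#       'acl': False,
#       'uid': 1000,
#       'gid': 1000,
#   }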
async def _kinit(self):
    """
    There are two ways of performing the kinit:
    1) username / password combination. In this case, password must be
       written to file or received via STDIN
    2) kerberos keytab.

    For now we only check for kerberos realms explicitly configured in
    AD and LDAP.
    """
    ad = await self.middleware.call('activedirectory.config')
    ldap = await self.middleware.call('ldap.config')
    await self.middleware.call('etc.generate', 'kerberos')

    if ad['enable']:
        if ad['kerberos_principal']:
            ad_kinit = await run(
                ['kinit', '--renewable', '-k', ad['kerberos_principal']],
                check=False
            )
            if ad_kinit.returncode != 0:
                raise CallError(
                    f"kinit for domain [{ad['domainname']}] with principal "
                    f"[{ad['kerberos_principal']}] failed: {ad_kinit.stderr.decode()}"
                )
        else:
            principal = f'{ad["bindname"]}@{ad["domainname"].upper()}'
            ad_kinit = await Popen(
                ['kinit', '--renewable', '--password-file=STDIN', principal],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE
            )
            output = await ad_kinit.communicate(input=ad['bindpw'].encode())
            if ad_kinit.returncode != 0:
                raise CallError(
                    f"kinit for domain [{ad['domainname']}] with password failed: "
                    f"{output[1].decode()}"
                )

    if ldap['enable'] and ldap['kerberos_realm']:
        if ldap['kerberos_principal']:
            ldap_kinit = await run(
                ['kinit', '--renewable', '-k', ldap['kerberos_principal']],
                check=False
            )
            if ldap_kinit.returncode != 0:
                raise CallError(
                    f"kinit for realm {ldap['kerberos_realm']} with keytab failed: "
                    f"{ldap_kinit.stderr.decode()}"
                )
        else:
            krb_realm = await self.middleware.call(
                'kerberos.realm.query',
                [('id', '=', ldap['kerberos_realm'])],
                {'get': True}
            )
            bind_cn = (ldap['binddn'].split(','))[0].split("=")
            principal = f'{bind_cn[1]}@{krb_realm["realm"]}'
            ldap_kinit = await Popen(
                ['kinit', '--renewable', '--password-file=STDIN', principal],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE
            )
            output = await ldap_kinit.communicate(input=ldap['bindpw'].encode())
            if ldap_kinit.returncode != 0:
                raise CallError(
                    f"kinit for realm {krb_realm['realm']} with password failed: "
                    f"{output[1].decode()}"
                )
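
# Worked example (invented values) of the binddn-to-principal parsing in the
# LDAP branch above. With ldap['binddn'] == 'cn=admin,dc=example,dc=org' and
# krb_realm['realm'] == 'EXAMPLE.ORG':
#
#   ldap['binddn'].split(',')[0]   -> 'cn=admin'
#   .split('=')                    -> ['cn', 'admin']
#   principal                      -> 'admin@EXAMPLE.ORG'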
            'system.is_enterprise_ix_hardware')
        context = SMARTCTX(devices=devices, enterprise_hardware=hardware)
        if disks := await self.middleware.call('disk.query', [['name', '=', disk]]):
            smartoptions = disks[0]['smartoptions']
        else:
            self.middleware.logger.warning("No database row found for disk %r", disk)
            smartoptions = ''
        smartctl_args = await get_smartctl_args(context, disk, smartoptions)
        if smartctl_args is None:
            raise CallError(f'S.M.A.R.T. is unavailable for disk {disk}')
        cp = await smartctl(smartctl_args + args, check=False, stderr=subprocess.STDOUT,
                            encoding='utf8', errors='ignore')
        if (cp.returncode & 0b11) != 0:
            raise CallError(f'smartctl failed for disk {disk}:\n{cp.stdout}')
    except CallError:
        if options['silent']:
            return None
        raise
async def _get_cached_klist(self):
    """
    Try to retrieve cached kerberos tgt info. If it hasn't been cached,
    perform klist, parse it, put it in cache, then return it.
    """
    if await self.middleware.call('cache.has_key', 'KRB_TGT_INFO'):
        return (await self.middleware.call('cache.get', 'KRB_TGT_INFO'))

    ad = await self.middleware.call('activedirectory.config')
    ldap = await self.middleware.call('ldap.config')
    ad_TGT = []
    ldap_TGT = []

    if not ad['enable'] and not ldap['enable']:
        return {'ad_TGT': ad_TGT, 'ldap_TGT': ldap_TGT}
    if not ad['enable'] and not ldap['kerberos_realm']:
        return {'ad_TGT': ad_TGT, 'ldap_TGT': ldap_TGT}

    if not await self.status():
        await self.start()

    try:
        klist = await asyncio.wait_for(
            run(['klist', '-v'], check=False, stdout=subprocess.PIPE),
            timeout=10.0
        )
    except Exception as e:
        await self.stop()
        raise CallError(f"Attempt to list kerberos tickets failed with error: {e}")

    if klist.returncode != 0:
        await self.stop()
        raise CallError(f'klist failed with error: {klist.stderr.decode()}')

    klist_output = klist.stdout.decode()
    tkts = klist_output.split('\n\n')
    for tkt in tkts:
        s = tkt.splitlines()
        if len(s) > 4:
            for entry in s:
                if "Auth time" in entry:
                    issued = time.strptime(
                        (entry.split('Auth time: '))[1].lstrip().replace('  ', ' '),
                        '%b %d %H:%M:%S %Y'
                    )
                elif "End time" in entry:
                    expires = time.strptime(
                        (entry.split('End time: '))[1].lstrip().replace('  ', ' '),
                        '%b %d %H:%M:%S %Y'
                    )
                elif "Server" in entry:
                    server = (entry.split('Server: '))[1]
                elif "Client" in entry:
                    client = (entry.split('Client: '))[1]
                elif 'Ticket etype' in entry:
                    etype = (entry.split('Ticket etype: '))[1]
                elif 'Ticket flags' in entry:
                    flags = (entry.split('Ticket flags: '))[1].split(',')

            if ad['enable'] and ad['kerberos_realm'] and ad['domainname'] in client:
                ad_TGT.append({
                    'issued': issued,
                    'expires': expires,
                    'client': client,
                    'server': server,
                    'etype': etype,
                    'flags': flags,
                })
            elif ldap['enable'] and ldap['kerberos_realm']:
                if ldap['kerberos_realm']['krb_realm'] in client:
                    ldap_TGT.append({
                        'issued': issued,
                        'expires': expires,
                        'client': client,
                        'server': server,
                        'etype': etype,
                        'flags': flags,
                    })

    if ad_TGT or ldap_TGT:
        await self.middleware.call('cache.put', 'KRB_TGT_INFO',
                                   {'ad_TGT': ad_TGT, 'ldap_TGT': ldap_TGT})

    return {'ad_TGT': ad_TGT, 'ldap_TGT': ldap_TGT}
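
# Illustrative only -- the shape of the cached/returned value. All field
# values below are invented; 'issued' and 'expires' are time.struct_time
# objects produced by time.strptime above.
#
#   {
#       'ad_TGT': [{
#           'client': 'SMBUSER@AD.EXAMPLE.ORG',
#           'server': 'krbtgt/AD.EXAMPLE.ORG@AD.EXAMPLE.ORG',
#           'etype': 'aes256-cts-hmac-sha1-96',
#           'flags': ['forwardable', ' renewable', ' initial'],
#           'issued': ..., 'expires': ...,
#       }],
#       'ldap_TGT': [],
#   }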
def debug(self, job):
    """
    Download a debug file.
    """
    job.set_progress(0, 'Generating debug file')
    debug_job = self.middleware.call_sync(
        'system.debug_generate',
        job_on_progress_cb=lambda encoded: job.set_progress(
            int(encoded['progress']['percent'] * 0.9),
            encoded['progress']['description']
        )
    )

    standby_debug = None
    if self.middleware.call_sync('failover.licensed'):
        try:
            standby_debug = self.middleware.call_sync(
                'failover.call_remote', 'system.debug_generate', [], {'job': True}
            )
        except Exception:
            self.logger.warn('Failed to get debug from standby node', exc_info=True)
        else:
            remote_ip = self.middleware.call_sync('failover.remote_ip')
            url = self.middleware.call_sync(
                'failover.call_remote', 'core.download',
                ['filesystem.get', [standby_debug], 'debug.txz'],
            )[1]

            url = f'http://{remote_ip}:6000{url}'
            # no reason to honor proxy settings in this
            # method since we're downloading the debug
            # archive directly across the heartbeat
            # interface which is point-to-point
            proxies = {'http': '', 'https': ''}
            standby_debug = io.BytesIO()
            with requests.get(url, stream=True, proxies=proxies) as r:
                for i in r.iter_content(chunk_size=1048576):
                    if standby_debug.tell() > DEBUG_MAX_SIZE * 1048576:
                        raise CallError(
                            f'Standby debug file is bigger than {DEBUG_MAX_SIZE}MiB.'
                        )
                    standby_debug.write(i)

    debug_job.wait_sync()
    if debug_job.error:
        raise CallError(debug_job.error)

    job.set_progress(90, 'Preparing debug file for streaming')

    if standby_debug:
        # Debug file cannot be big on HA because we put both debugs in memory
        # so they can be downloaded at once.
        try:
            if os.stat(debug_job.result).st_size > DEBUG_MAX_SIZE * 1048576:
                raise CallError(f'Debug file is bigger than {DEBUG_MAX_SIZE}MiB.')
        except FileNotFoundError:
            raise CallError('Debug file was not found, try again.')

        network = self.middleware.call_sync('network.configuration.config')
        node = self.middleware.call_sync('failover.node')

        tario = io.BytesIO()
        with tarfile.open(fileobj=tario, mode='w') as tar:
            if node == 'A':
                my_hostname = network['hostname']
                remote_hostname = network['hostname_b']
            else:
                my_hostname = network['hostname_b']
                remote_hostname = network['hostname']

            tar.add(debug_job.result, f'{my_hostname}.txz')

            tarinfo = tarfile.TarInfo(f'{remote_hostname}.txz')
            tarinfo.size = standby_debug.tell()
            standby_debug.seek(0)
            tar.addfile(tarinfo, fileobj=standby_debug)

        tario.seek(0)
        shutil.copyfileobj(tario, job.pipes.output.w)
    else:
        with open(debug_job.result, 'rb') as f:
            shutil.copyfileobj(f, job.pipes.output.w)
    job.pipes.output.w.close()
def reattach_device(self):
    # stderr must be piped, otherwise communicate() returns None for it
    # and the decode() below fails.
    cp = subprocess.Popen(
        ['virsh', '-c', LIBVIRT_URI, 'nodedev-reattach', self.passthru_device()],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    stderr = cp.communicate()[1]
    if cp.returncode:
        raise CallError(
            f'Unable to re-attach {self.passthru_device()} PCI device: {stderr.decode()}'
        )
def getacl_posix1e(self, path, simplified):
    st = os.stat(path)
    ret = {
        'uid': st.st_uid,
        'gid': st.st_gid,
        'acl': [],
        'flags': {
            'setuid': bool(st.st_mode & pystat.S_ISUID),
            'setgid': bool(st.st_mode & pystat.S_ISGID),
            'sticky': bool(st.st_mode & pystat.S_ISVTX),
        },
        'acltype': ACLType.POSIX1E.name
    }

    gfacl = subprocess.run(['getfacl', '-c', '-n', path],
                           check=False, capture_output=True)
    if gfacl.returncode != 0:
        raise CallError(
            f"Failed to get POSIX1e ACL on path [{path}]: {gfacl.stderr.decode()}"
        )

    # Linux getfacl output adds an extra line if the path is absolute and
    # an extra newline at the end; drop the trailing line.
    entries = gfacl.stdout.decode().splitlines()
    entries = entries[:-1]

    for entry in entries:
        if entry.startswith("#"):
            continue
        ace = {
            "default": False,
            "tag": None,
            "id": -1,
            "perms": {
                "READ": False,
                "WRITE": False,
                "EXECUTE": False,
            }
        }

        tag, id, perms = entry.rsplit(":", 2)
        ace['perms'].update({
            "READ": perms[0].casefold() == "r",
            "WRITE": perms[1].casefold() == "w",
            "EXECUTE": perms[2].casefold() == "x",
        })
        if tag.startswith('default'):
            ace['default'] = True
            tag = tag[8:]

        ace['tag'] = tag.upper()
        if id.isdigit():
            ace['id'] = int(id)
        elif ace['tag'] != 'OTHER':
            ace['tag'] += '_OBJ'
        ret['acl'].append(ace)

    return ret
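
# Worked example (invented values): the getfacl line
#
#   'default:user:1000:r-x'
#
# is split by rsplit(':', 2) into tag='default:user', id='1000', perms='r-x',
# producing:
#
#   {'default': True, 'tag': 'USER', 'id': 1000,
#    'perms': {'READ': True, 'WRITE': False, 'EXECUTE': True}}
#
# while 'group::rw-' (no numeric id) becomes tag 'GROUP_OBJ'.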
async def do_create(self, data): """ Create a new user. If `uid` is not provided it is automatically filled with the next one available. `group` is required if `group_create` is false. `password` is required if `password_disabled` is false. Available choices for `shell` can be retrieved with `user.shell_choices`. `attributes` is a general-purpose object for storing arbitrary user information. `smb` specifies whether the user should be allowed access to SMB shares. User willl also automatically be added to the `builtin_users` group. """ verrors = ValidationErrors() if ( not data.get('group') and not data.get('group_create') ) or ( data.get('group') is not None and data.get('group_create') ): verrors.add( 'user_create.group', f'Enter either a group name or create a new group to ' 'continue.', errno.EINVAL ) await self.__common_validation(verrors, data, 'user_create') if data.get('sshpubkey') and not data['home'].startswith('/mnt'): verrors.add( 'user_create.sshpubkey', 'The home directory is not writable. Leave this field blank.' ) verrors.check() groups = data.pop('groups') create = data.pop('group_create') if create: group = await self.middleware.call('group.query', [('group', '=', data['username'])]) if group: group = group[0] else: group = await self.middleware.call('group.create', {'name': data['username'], 'smb': False}) group = (await self.middleware.call('group.query', [('id', '=', group)]))[0] data['group'] = group['id'] else: group = await self.middleware.call('group.query', [('id', '=', data['group'])]) if not group: raise CallError(f'Group {data["group"]} not found') group = group[0] if data['smb']: groups.append((await self.middleware.call('group.query', [('group', '=', 'builtin_users')], {'get': True}))['id']) # Is this a new directory or not? Let's not nuke existing directories, # e.g. /, /root, /mnt/tank/my-dataset, etc ;). new_homedir = False home_mode = data.pop('home_mode') if data['home'] and data['home'] != '/nonexistent': try: try: os.makedirs(data['home'], mode=int(home_mode, 8)) new_homedir = True await self.middleware.call('filesystem.setperm', { 'path': data['home'], 'mode': home_mode, 'uid': data['uid'], 'gid': group['gid'], 'options': {'stripacl': True} }) except FileExistsError: if not os.path.isdir(data['home']): raise CallError( 'Path for home directory already ' 'exists and is not a directory', errno.EEXIST ) # If it exists, ensure the user is owner. await self.middleware.call('filesystem.chown', { 'path': data['home'], 'uid': data['uid'], 'gid': group['gid'], }) except OSError as oe: raise CallError( 'Failed to create the home directory ' f'({data["home"]}) for user: {oe}' ) except Exception: if new_homedir: shutil.rmtree(data['home']) raise if not data.get('uid'): data['uid'] = await self.get_next_uid() pk = None # Make sure pk exists to rollback in case of an error data = await self.user_compress(data) try: await self.__set_password(data) sshpubkey = data.pop('sshpubkey', None) # datastore does not have sshpubkey pk = await self.middleware.call('datastore.insert', 'account.bsdusers', data, {'prefix': 'bsdusr_'}) await self.__set_groups(pk, groups) except Exception: if pk is not None: await self.middleware.call('datastore.delete', 'account.bsdusers', pk) if new_homedir: # Be as atomic as possible when creating the user if # commands failed to execute cleanly. 
shutil.rmtree(data['home']) raise await self.middleware.call('service.reload', 'user') if data['smb']: await self.__set_smbpasswd(data['username']) if os.path.isdir(SKEL_PATH) and os.path.exists(data['home']): for f in os.listdir(SKEL_PATH): if f.startswith('dot'): dest_file = os.path.join(data['home'], f[3:]) else: dest_file = os.path.join(data['home'], f) if not os.path.exists(dest_file): shutil.copyfile(os.path.join(SKEL_PATH, f), dest_file) await self.middleware.call('filesystem.chown', { 'path': dest_file, 'uid': data['uid'], 'gid': group['gid'], }) data['sshpubkey'] = sshpubkey try: await self.update_sshpubkey(data['home'], data, group['group']) except PermissionError as e: self.logger.warn('Failed to update authorized keys', exc_info=True) raise CallError(f'Failed to update authorized keys: {e}') return pk
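
# Illustrative only -- a hypothetical `user.create` payload using keys this
# method reads; all values are invented, and 'password' is assumed to be
# consumed by __set_password rather than shown above.
#
#   payload = {
#       'username': 'bob',
#       'group_create': True,
#       'groups': [],
#       'home': '/mnt/tank/homes/bob',
#       'home_mode': '700',
#       'password': 'changeme',
#       'smb': True,
#   }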
def getacl(self, path, simplified):
    if not os.path.exists(path):
        raise CallError('Path not found.', errno.ENOENT)

    return self.getacl_posix1e(path, simplified)
def run(self, job, id, skip_disabled):
    """
    Job to run cronjob task of `id`.
    """
    def __cron_log(line):
        job.logs_fd.write(line)
        syslog.syslog(syslog.LOG_INFO, line.decode())

    cron_task = self.middleware.call_sync('cronjob.get_instance', id)
    if skip_disabled and not cron_task['enabled']:
        raise CallError('Cron job is disabled', errno.EINVAL)

    cron_cmd = ' '.join(
        self.middleware.call_sync(
            'cronjob.construct_cron_command', cron_task['schedule'],
            cron_task['user'], cron_task['command'], cron_task['stdout'],
            cron_task['stderr']
        )[7:]
    )

    job.set_progress(10, 'Executing Cron Task')

    syslog.openlog('cron', facility=syslog.LOG_CRON)
    syslog.syslog(syslog.LOG_INFO, f'({cron_task["user"]}) CMD ({cron_cmd})')

    cp = run_command_with_user_context(cron_cmd, cron_task['user'], __cron_log)

    syslog.closelog()

    job.set_progress(85, 'Executed Cron Task')

    if cp.stdout:
        email = (self.middleware.call_sync(
            'user.query', [['username', '=', cron_task['user']]], {'get': True}
        ))['email']
        stdout = cp.stdout.decode()
        if email:
            mail_job = self.middleware.call_sync('mail.send', {
                'subject': 'CronTask Run',
                'text': stdout,
                'to': [email]
            })

            job.set_progress(95, 'Sending mail for Cron Task output')

            mail_job.wait_sync()
            if mail_job.error:
                job.logs_fd.write(
                    f'Failed to send email for CronTask run: {mail_job.error}'.encode()
                )
        else:
            job.set_progress(
                95, 'Email for task user not configured. Skipping sending mail.'
            )

        job.logs_fd.write(f'Executed CronTask - {cron_cmd}: {stdout}'.encode())

    if cp.returncode:
        raise CallError(
            f'CronTask "{cron_cmd}" exited with {cp.returncode} (non-zero) exit status.'
        )

    job.set_progress(100, 'Execution of Cron Task complete.')
def setacl_nfs4(self, job, data):
    raise CallError('NFSv4 ACLs are not yet implemented.', errno.ENOTSUP)
def get_devices(self, name):
    try:
        with libzfs.ZFS() as zfs:
            return [i.replace('/dev/', '') for i in zfs.get(name).disks]
    except libzfs.ZFSException as e:
        raise CallError(str(e), errno.ENOENT)
def setperm(self, job, data):
    job.set_progress(0, 'Preparing to set permissions.')
    options = data['options']
    mode = data.get('mode', None)

    uid = -1 if data['uid'] is None else data['uid']
    gid = -1 if data['gid'] is None else data['gid']

    self._common_perm_path_validate(data['path'])

    acl_is_trivial = self.middleware.call_sync('filesystem.acl_is_trivial', data['path'])
    if not acl_is_trivial and not options['stripacl']:
        raise CallError(
            f'Non-trivial ACL present on [{data["path"]}]. Option "stripacl" required to change permission.',
            errno.EINVAL
        )

    if mode is not None:
        mode = int(mode, 8)

    stripacl = subprocess.run(['setfacl', '-b', data['path']],
                              check=False, capture_output=True)
    if stripacl.returncode != 0:
        raise CallError(f"Failed to remove POSIX1e ACL from [{data['path']}]: "
                        f"{stripacl.stderr.decode()}")

    if mode:
        os.chmod(data['path'], mode)

    os.chown(data['path'], uid, gid)

    if not options['recursive']:
        job.set_progress(100, 'Finished setting permissions.')
        return

    action = 'clone' if mode else 'strip'
    job.set_progress(10, f'Recursively setting permissions on {data["path"]}.')
    if action == 'strip':
        stripacl = subprocess.run(['setfacl', '-bR', data['path']],
                                  check=False, capture_output=True)
        if stripacl.returncode != 0:
            raise CallError(f"Failed to remove POSIX1e ACL from [{data['path']}]: "
                            f"{stripacl.stderr.decode()}")

    if uid != -1 or gid != -1:
        if gid == -1:
            chown = subprocess.run(['chown', '-R', str(uid), data['path']],
                                   check=False, capture_output=True)
        elif uid == -1:
            chown = subprocess.run(['chgrp', '-R', str(gid), data['path']],
                                   check=False, capture_output=True)
        else:
            chown = subprocess.run(['chown', '-R', f'{uid}:{gid}', data['path']],
                                   check=False, capture_output=True)

        if chown.returncode != 0:
            raise CallError(f"Failed to chown [{data['path']}]: "
                            f"{chown.stderr.decode()}")

    # Only chmod when a mode was actually supplied; otherwise the recursive
    # chmod would be invoked with the string 'None'.
    if mode:
        chmod = subprocess.run(['chmod', '-R', str(data.get('mode')), data['path']],
                               check=False, capture_output=True)
        if chmod.returncode != 0:
            raise CallError(f"Failed to chmod [{data['path']}]: "
                            f"{chmod.stderr.decode()}")

    job.set_progress(100, 'Finished setting permissions.')
def common_encryption_checks(self, ds):
    if not ds.encrypted:
        raise CallError(f'{ds.name} is not encrypted')
def format(self, disk, swapgb, sync=True):
    disk_details = self.middleware.call_sync('device.get_disk', disk)
    if not disk_details:
        raise CallError(f'Unable to retrieve disk details for {disk}')
    size = disk_details['size']
    if not size:
        self.logger.error(f'Unable to determine size of {disk}')
    else:
        # The GPT header takes about 34KB + alignment, round it to 100
        if size - 102400 <= swapgb * 1024 * 1024 * 1024:
            raise CallError(f'Disk size must be larger than {swapgb}GB')

    job = self.middleware.call_sync('disk.wipe', disk, 'QUICK', sync)
    job.wait_sync()
    if job.error:
        raise CallError(f'Failed to wipe disk {disk}: {job.error}')

    # Calculate swap size.
    swapsize = swapgb * 1024 * 1024 * 1024 / (disk_details["sectorsize"] or 512)
    # Round up to the nearest whole multiple of 128
    # so the next partition starts at a multiple of 128.
    swapsize = (int((swapsize + 127) / 128)) * 128

    commands = [] if osc.IS_LINUX else [('gpart', 'create', '-s', 'gpt', f'/dev/{disk}')]
    if swapsize > 0:
        if osc.IS_LINUX:
            commands.extend([
                ('sgdisk', f'-a{int(4096/disk_details["sectorsize"])}',
                 f'-n1:128:{swapsize}', '-t1:8200', f'/dev/{disk}'),
                ('sgdisk', '-n2:0:0', '-t2:BF01', f'/dev/{disk}'),
            ])
        else:
            commands.extend([
                ('gpart', 'add', '-a', '4k', '-b', '128', '-t', 'freebsd-swap',
                 '-s', str(swapsize), disk),
                ('gpart', 'add', '-a', '4k', '-t', 'freebsd-zfs', disk),
            ])
    else:
        if osc.IS_LINUX:
            commands.append(
                ('sgdisk', f'-a{int(4096/disk_details["sectorsize"])}',
                 '-n1:0:0', '-t1:BF01', f'/dev/{disk}'),
            )
        else:
            commands.append(('gpart', 'add', '-a', '4k', '-b', '128', '-t',
                             'freebsd-zfs', disk))

    # Install a dummy boot block so system gives meaningful message if booting
    # from the wrong disk.
    if osc.IS_FREEBSD:
        commands.append(('gpart', 'bootcode', '-b', '/boot/pmbr-datadisk', f'/dev/{disk}'))
    # TODO: Let's do the same for linux please ^^^

    for command in commands:
        cp = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            universal_newlines=True)
        if cp.returncode != 0:
            raise CallError(f'Unable to GPT format the disk "{disk}": {cp.stderr}')

    if osc.IS_LINUX:
        self.middleware.call_sync('device.settle_udev_events')

    for partition in self.middleware.call_sync('disk.list_partitions', disk):
        with contextlib.suppress(CallError):
            # It's okay to suppress this as some partitions might not have it
            self.middleware.call_sync('zfs.pool.clear_label', partition['path'])

    if sync:
        # We might need to sync with reality (e.g. devname -> uuid)
        self.middleware.call_sync('disk.sync', disk)
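
# Worked example (invented numbers) of the swap size arithmetic above, for
# swapgb=2 on a 512-byte-sector disk:
#
#   swapsize = 2 * 1024**3 / 512          -> 4194304.0 sectors
#   (int((4194304 + 127) / 128)) * 128    -> 4194304 (already 128-aligned)
#
# With 4096-byte sectors the division yields 524288.0 sectors; the rounding
# step only changes values that are not already multiples of 128.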
async def setup(self, mount, exclude_pool=None):
    # FIXME: corefile for LINUX
    if osc.IS_FREEBSD:
        # We default kern.corefile value
        await run('sysctl', "kern.corefile='/var/tmp/%N.core'")

    config = await self.config()
    dbconfig = await self.middleware.call(
        'datastore.config', self._config.datastore,
        {'prefix': self._config.datastore_prefix}
    )

    boot_pool = await self.middleware.call('boot.pool_name')
    if (
        not await self.middleware.call('system.is_freenas') and
        await self.middleware.call('failover.status') == 'BACKUP' and
        config.get('basename') and config['basename'] != f'{boot_pool}/.system'
    ):
        try:
            os.unlink(SYSDATASET_PATH)
        except OSError:
            pass
        return

    # If the system dataset is configured in a data pool we need to make
    # sure it exists. In case it does not we need to use another one.
    if config['pool'] != boot_pool and not await self.middleware.call(
        'pool.query', [('name', '=', config['pool'])]
    ):
        job = await self.middleware.call('systemdataset.update', {
            'pool': None, 'pool_exclude': exclude_pool,
        })
        await job.wait()
        if job.error:
            raise CallError(job.error)
        return

    # If we don't have a pool configured in the database, try to find the
    # first data pool to put it on.
    if not dbconfig['pool']:
        pool = None
        for p in await self.middleware.call('pool.query', [('encrypt', '!=', '2')],
                                            {'order_by': ['encrypt']}):
            if (exclude_pool and p['name'] == exclude_pool) or await self.middleware.call(
                'pool.dataset.query', [
                    ['name', '=', p['name']],
                    ['OR', [['key_format.value', '=', 'PASSPHRASE'], ['locked', '=', True]]]
                ]
            ):
                continue
            if p['is_decrypted']:
                pool = p
                break
        if pool:
            job = await self.middleware.call('systemdataset.update',
                                             {'pool': pool['name']})
            await job.wait()
            if job.error:
                raise CallError(job.error)
            return

    if not config['basename']:
        if os.path.exists(SYSDATASET_PATH):
            try:
                os.rmdir(SYSDATASET_PATH)
            except Exception:
                self.logger.debug('Failed to remove system dataset dir', exc_info=True)
        return config

    if not config['is_decrypted']:
        return

    if await self.__setup_datasets(config['pool'], config['uuid']):
        # There is no need to wait for this to finish.
        # Restarting rrdcached will ensure that we start/restart collectd as well.
        asyncio.ensure_future(self.middleware.call('service.restart', 'rrdcached'))

    if not os.path.isdir(SYSDATASET_PATH):
        if os.path.exists(SYSDATASET_PATH):
            os.unlink(SYSDATASET_PATH)
        os.makedirs(SYSDATASET_PATH)

    acltype = await self.middleware.call('zfs.dataset.query',
                                         [('id', '=', config['basename'])])
    if acltype and acltype[0]['properties']['acltype']['value'] == 'off':
        await self.middleware.call(
            'zfs.dataset.update', config['basename'],
            {'properties': {'acltype': {'value': 'off'}}},
        )

    if mount:
        await self.__mount(config['pool'], config['uuid'])

        corepath = f'{SYSDATASET_PATH}/cores'
        if os.path.exists(corepath):
            # FIXME: corefile for LINUX
            if osc.IS_FREEBSD:
                # FIXME: sysctl module not working
                await run('sysctl', f"kern.corefile='{corepath}/%N.core'")
            os.chmod(corepath, 0o775)

        await self.__nfsv4link(config)

        await self.middleware.call('smb.configure')
        await self.middleware.call('dscache.initialize')

    return config
async def new_ticket(self, job, data):
    """
    Creates a new ticket for support.
    This is done using the support proxy API.
    For FreeNAS it will be created on Redmine and for TrueNAS on SupportSuite.

    For FreeNAS the `criticality`, `environment`, `phone`, `name` and
    `email` attributes are not required.
    For TrueNAS the `username`, `password` and `type` attributes are not
    required.
    """
    await self.middleware.call('network.general.will_perform_activity', 'support')

    job.set_progress(1, 'Gathering data')

    sw_name = 'freenas' if not await self.middleware.call('system.is_enterprise') else 'truenas'

    if sw_name == 'freenas':
        required_attrs = ('type', 'username', 'password')
    else:
        required_attrs = ('phone', 'name', 'email', 'criticality', 'environment')
        data['serial'] = (
            await self.middleware.call('system.dmidecode_info')
        )['system-serial-number']
        license = (await self.middleware.call('system.info'))['license']
        if license:
            data['company'] = license['customer_name']
        else:
            data['company'] = 'Unknown'

    for i in required_attrs:
        if i not in data:
            raise CallError(f'{i} is required', errno.EINVAL)

    data['version'] = (await self.middleware.call('system.version')).split('-', 1)[-1]
    if 'username' in data:
        data['user'] = data.pop('username')
    debug = data.pop('attach_debug')

    type_ = data.get('type')
    if type_:
        data['type'] = type_.lower()

    job.set_progress(20, 'Submitting ticket')

    try:
        r = await self.middleware.run_in_thread(lambda: requests.post(
            f'https://{ADDRESS}/{sw_name}/api/v1.0/ticket',
            data=json.dumps(data),
            headers={'Content-Type': 'application/json'},
            timeout=INTERNET_TIMEOUT,
        ))
        result = r.json()
    except simplejson.JSONDecodeError:
        self.logger.debug('Failed to decode ticket attachment response: %s', r.text)
        raise CallError('Invalid proxy server response', errno.EBADMSG)
    except requests.ConnectionError as e:
        raise CallError(f'Connection error {e}', errno.EBADF)
    except requests.Timeout:
        raise CallError('Connection time out', errno.ETIMEDOUT)

    if r.status_code != 200:
        self.logger.debug('Support Ticket failed (%s): %s', r.status_code, r.text)
        raise CallError('Ticket creation failed, try again later.', errno.EINVAL)

    if result['error']:
        raise CallError(result['message'], errno.EINVAL)

    ticket = result.get('ticketnum')
    url = result.get('message')
    if not ticket:
        raise CallError('New ticket number was not returned', errno.EINVAL)

    job.set_progress(50, f'Ticket created: {ticket}', extra={'ticket': ticket})

    if debug:
        job.set_progress(60, 'Generating debug file')

        debug_job = await self.middleware.call(
            'system.debug', pipes=Pipes(output=self.middleware.pipe()),
        )

        if await self.middleware.call('failover.licensed'):
            debug_name = 'debug-{}.tar'.format(time.strftime('%Y%m%d%H%M%S'))
        else:
            debug_name = 'debug-{}-{}.txz'.format(
                socket.gethostname().split('.')[0],
                time.strftime('%Y%m%d%H%M%S'),
            )

        job.set_progress(80, 'Attaching debug file')

        t = {
            'ticket': ticket,
            'filename': debug_name,
        }
        if 'user' in data:
            t['username'] = data['user']
        if 'password' in data:
            t['password'] = data['password']
        tjob = await self.middleware.call(
            'support.attach_ticket', t, pipes=Pipes(input=self.middleware.pipe()),
        )

        def copy():
            try:
                rbytes = 0
                while True:
                    r = debug_job.pipes.output.r.read(1048576)
                    if r == b'':
                        break

                    rbytes += len(r)
                    if rbytes > DEBUG_MAX_SIZE * 1048576:
                        raise CallError('Debug too large to attach', errno.EFBIG)

                    tjob.pipes.input.w.write(r)
            finally:
                tjob.pipes.input.w.close()

        await self.middleware.run_in_thread(copy)
        await debug_job.wait()
        await tjob.wait()
    else:
        job.set_progress(100)

    return {
        'ticket': ticket,
        'url': url,
    }
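
# Illustrative only -- a hypothetical FreeNAS-side `data` payload. Only
# 'type', 'username', 'password' and 'attach_debug' are read directly above;
# any other field (e.g. a ticket title or body) would be an assumption about
# the proxy schema, so none are shown.
#
#   data = {
#       'type': 'BUG',
#       'username': 'redmine-user',
#       'password': 'redmine-password',
#       'attach_debug': True,
#   }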