def __rquery_to_start_end(self, query):
    # Translate a reporting query into rrdtool-style (start, end) time strings.
    #
    # Two mutually exclusive modes:
    #   * explicit 'start'/'end' keys in the query, or
    #   * a 'unit' + 'page' pair that is converted to relative
    #     'end-N<unit>' / 'now-N<unit>' expressions.
    unit = query.get('unit')
    if unit:
        verrors = ValidationErrors()
        # 'start'/'end' may not be combined with 'unit'.
        for i in ('start', 'end'):
            if i in query:
                verrors.add(
                    f'reporting_query.{i}',
                    f'{i!r} should only be used if "unit" attribute is not provided.',
                )
        verrors.check()
    else:
        if 'start' not in query:
            # Neither unit nor start given: default to hourly paging.
            unit = 'HOURLY'
        else:
            starttime = query['start']
            endtime = query.get('end') or 'now'
    if unit:
        # rrdtool wants the single-letter unit (h/d/w/m/y).
        unit = unit[0].lower()
        page = query['page']
        starttime = f'end-{page + 1}{unit}'
        if not page:
            endtime = 'now'
        else:
            endtime = f'now-{page}{unit}'
    return starttime, endtime
async def do_update(self, data):
    """Merge `data` into the NFS config, validate option combinations, and apply."""
    old = await self.config()
    new = {**old, **data}
    verrors = ValidationErrors()
    # NFSv3-ownership-for-NFSv4 only makes sense with NFSv4 enabled.
    if new["v4_v3owner"] and not new["v4"]:
        verrors.add("nfs_update.v4_v3owner", "This option requires enabling NFSv4")
    if new["v4_v3owner"] and new["userd_manage_gids"]:
        verrors.add(
            "nfs_update.userd_manage_gids",
            "This option is incompatible with NFSv3 ownership model for NFSv4")
    if verrors:
        raise verrors
    self.nfs_compress(new)
    await self._update_service(old, new)
    self.nfs_extend(new)
    return new
async def do_update(self, job, data):
    """
    Update the system dataset configuration.

    Validates the requested pool, persists the new settings, migrates the
    system dataset if the pool changed, and restarts dependent services.
    Returns the pre-update config (original behavior preserved).
    """
    config = await self.config()
    new = config.copy()
    new.update(data)
    verrors = ValidationErrors()
    # Only validate the pool when one was actually supplied; the original
    # indexed data['pool'] unconditionally, raising KeyError on partial
    # updates that omitted 'pool'.
    if 'pool' in data and not await self.middleware.call(
        'zfs.pool.query', [('name', '=', data['pool'])]
    ):
        verrors.add('sysdataset_update.pool', f'Pool "{data["pool"]}" not found', errno.ENOENT)
    if verrors:
        raise verrors
    new['syslog_usedataset'] = new['syslog']
    new['rrd_usedataset'] = new['rrd']
    await self.middleware.call('datastore.update', 'system.systemdataset', config['id'], new, {'prefix': 'sys_'})
    if 'pool' in data and config['pool'] and data['pool'] != config['pool']:
        await self.migrate(config['pool'], data['pool'])
    if config['rrd'] != new['rrd']:
        # Stop collectd to flush data
        await self.middleware.call('service.stop', 'collectd')
    await self.setup()
    if config['syslog'] != new['syslog']:
        await self.middleware.call('service.restart', 'syslogd')
    if config['rrd'] != new['rrd']:
        await self.rrd_toggle()
        await self.middleware.call('service.restart', 'collectd')
    return config
async def validate_data(self, data, schema):
    """Validate a scrub task's pool reference; returns (verrors, data)."""
    verrors = ValidationErrors()
    pool_pk = data.get('pool')
    if pool_pk:
        volumes = await self.middleware.call(
            'datastore.query', 'storage.volume', [('id', '=', pool_pk)]
        )
        if not volumes:
            verrors.add(
                f'{schema}.pool',
                'The specified volume does not exist'
            )
        else:
            # A duplicate check is needed when creating (no 'id') or when
            # an update moves the task to a different pool.
            creating = 'id' not in data.keys()
            pool_changed = (
                'id' in data.keys()
                and 'original_pool_id' in data.keys()
                and pool_pk != data['original_pool_id']
            )
            if creating or pool_changed:
                existing = await self.query(filters=[('pool', '=', pool_pk)])
                if existing:
                    verrors.add(
                        f'{schema}.pool',
                        'A scrub with this pool already exists'
                    )
    return verrors, data
async def common_validation(self, data, schema_name):
    """Shared validation for vCenter plugin operations; returns ValidationErrors."""
    verrors = ValidationErrors()
    ip = data.get('ip')
    if ip:
        await resolve_hostname(self.middleware, verrors, f'{schema_name}.ip', ip)
    management_ip = data.get('management_ip')
    if management_ip:
        valid_ips = await self.get_management_ip_choices()
        if management_ip not in valid_ips:
            verrors.add(
                f'{schema_name}.management_ip',
                'Please select a valid IP for your TrueNAS system'
            )
    action = data.get('action')
    if action and action != 'UNINSTALL':
        # The plugin must be served over HTTPS when the UI itself is HTTPS.
        plugin_https = (await self.middleware.call('vcenteraux.config'))['enable_https']
        ui_protocol = (await self.middleware.call('system.general.config'))['ui_protocol']
        if not plugin_https and ui_protocol.upper() == 'HTTPS':
            verrors.add(
                f'{schema_name}.action',
                'Please enable vCenter plugin over HTTPS'
            )
    return verrors
async def validate_data(self, data, schema):
    """Check that `user`, when given, has no spaces and exists; returns (verrors, data)."""
    verrors = ValidationErrors()
    user = data.get('user')
    if user:
        # Windows usernames may contain spaces, but they are rejected here:
        # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808
        if ' ' in user:
            verrors.add(
                f'{schema}.user',
                'Usernames cannot have spaces'
            )
        else:
            found = await self.middleware.call(
                'notifier.get_user_object', user
            )
            if not found:
                verrors.add(
                    f'{schema}.user',
                    'Specified user does not exist'
                )
    return verrors, data
async def do_update(self, data):
    """
    Update Mail Service Configuration.

    `fromemail` is used as a sending address which the mail server will use for
    sending emails.

    `outgoingserver` is the hostname or IP address of SMTP server used for
    sending an email.

    `security` is type of encryption desired.

    `smtp` is a boolean value which when set indicates that SMTP authentication
    has been enabled and `user`/`pass` are required attributes now.
    """
    config = await self.config()
    new = {**config, **data}
    new['security'] = new['security'].lower()  # Django Model compatibility
    verrors = ValidationErrors()
    if new['smtp'] and new['user'] == '':
        verrors.add(
            'mail_update.user',
            'This field is required when SMTP authentication is enabled',
        )
    self.__password_verify(new['pass'], 'mail_update.pass', verrors)
    if verrors:
        raise verrors
    await self.middleware.call('datastore.update', 'system.email', config['id'], new, {'prefix': 'em_'})
    return await self.config()
def do_create(self, data):
    """
    Creates a ZFS dataset.

    Raises ValidationErrors for a name without a pool prefix and CallError
    when libzfs rejects the creation.
    """
    verrors = ValidationErrors()
    if '/' not in data['name']:
        verrors.add('name', 'You need a full name, e.g. pool/newdataset')
    if verrors:
        raise verrors
    properties = data.get('properties') or {}
    sparse = properties.pop('sparse', False)
    # Copy what remains after popping 'sparse'. The original re-read
    # data['properties'], which raises KeyError when the key is absent even
    # though the data.get() above explicitly tolerates that.
    params = dict(properties)
    try:
        with libzfs.ZFS() as zfs:
            # The pool is the first path component of the dataset name.
            pool = zfs.get(data['name'].split('/')[0])
            pool.create(data['name'], params, fstype=getattr(libzfs.DatasetType, data['type']), sparse_vol=sparse)
    except libzfs.ZFSException as e:
        self.logger.error('Failed to create dataset', exc_info=True)
        raise CallError(f'Failed to create dataset: {e}')
async def do_update(self, id, data):
    """
    Update idmap to backend mapping by id.
    """
    old = await self._get_instance(id)
    new = old.copy()
    new.update(data)
    new = await self.middleware.call('idmap.common_backend_compress', new)
    verrors = ValidationErrors()
    # System domains (LDAP / default) only support the ldap and tdb backends.
    if new['domain'] in [dstype.DS_TYPE_LDAP.value, dstype.DS_TYPE_DEFAULT_DOMAIN.value]:
        if new['idmap_backend'] not in ['ldap', 'tdb']:
            # NOTE(review): schema key reads 'domaintobackend_create' inside
            # do_update — looks like a copy-paste from the create validator;
            # confirm whether it should be 'domaintobackend_update'.
            verrors.add(
                'domaintobackend_create.idmap_backend',
                f'idmap backend [{new["idmap_backend"]}] is not appropriate for the system domain type {dstype[new["domain"]]}'
            )
    if verrors:
        raise verrors
    await self.middleware.call(
        'datastore.update', self._config.datastore, id, new,
        {'prefix': self._config.datastore_prefix}
    )
    updated_entry = await self._get_instance(id)
    try:
        # Regenerate the idmap for the (possibly changed) domain; failures are
        # logged but intentionally do not fail the update.
        await self.middleware.call('idmap.get_or_create_idmap_by_domain', updated_entry['domain']['domain_name'])
    except Exception as e:
        self.logger.debug('Failed to generate new idmap backend: %s', e)
    return updated_entry
async def validate_data(self, data, schema):
    """
    Validate the disk list for a S.M.A.R.T. test of data['type'].

    Returns a ValidationErrors collecting unknown disk identifiers and disks
    that already have a test of this type.
    """
    verrors = ValidationErrors()
    smart_tests = await self.query(filters=[('type', '=', data['type'])])
    configured_disks = [d for test in smart_tests for d in test['disks']]
    disks_dict = {disk['identifier']: disk['name'] for disk in (await self.middleware.call('disk.query'))}
    disks = data.get('disks')
    used_disks = []
    invalid_disks = []
    for disk in disks:
        # Check validity first: the original looked up disks_dict[disk] for any
        # configured disk and raised KeyError when that disk was also unknown.
        if disk not in disks_dict:
            invalid_disks.append(disk)
        elif disk in configured_disks:
            used_disks.append(disks_dict[disk])
    if used_disks:
        verrors.add(
            f'{schema}.disks',
            f'The following disks already have tests for this type: {", ".join(used_disks)}'
        )
    if invalid_disks:
        verrors.add(
            f'{schema}.disks',
            f'The following disks are invalid: {", ".join(invalid_disks)}'
        )
    return verrors
async def _validate(self, data):
    """Reject a realm name that already exists (case-insensitive comparison)."""
    verrors = ValidationErrors()
    wanted = data['realm'].upper()
    for existing in await self.query():
        if existing['realm'].upper() == wanted:
            verrors.add('kerberos_realm', f'kerberos realm with name {existing["realm"]} already exists.')
    return verrors
async def do_create(self, data):
    """
    Creates a dataset/zvol.

    `volsize` is required for type=VOLUME and is supposed to be a multiple of
    the block size.
    """
    verrors = ValidationErrors()
    if '/' not in data['name']:
        verrors.add('pool_dataset_create.name', 'You need a full name, e.g. pool/newdataset')
    else:
        await self.__common_validation(verrors, 'pool_dataset_create', data, 'CREATE')
    if verrors:
        raise verrors
    # Map API field names to ZFS property names. Each tuple is
    # (api_field, zfs_property_or_None, value_transform_or_None);
    # None for the property name means the API field name is used directly.
    props = {}
    for i, real_name, transform in (
        ('atime', None, str.lower),
        ('casesensitivity', None, str.lower),
        ('comments', 'org.freenas:description', None),
        ('compression', None, str.lower),
        ('copies', None, lambda x: str(x)),
        ('deduplication', 'dedup', str.lower),
        ('exec', None, str.lower),
        ('quota', None, _none),
        ('readonly', None, str.lower),
        ('recordsize', None, None),
        ('refquota', None, _none),
        ('refreservation', None, _none),
        ('reservation', None, _none),
        ('snapdir', None, str.lower),
        ('sparse', None, None),
        ('sync', None, str.lower),
        ('volblocksize', None, None),
        ('volsize', None, lambda x: str(x)),
    ):
        if i not in data:
            continue
        name = real_name or i
        props[name] = data[i] if not transform else transform(data[i])
    await self.middleware.call('zfs.dataset.create', {
        'name': data['name'],
        'type': data['type'],
        'properties': props,
    })
    # The dataset name doubles as its id.
    data['id'] = data['name']
    await self.middleware.call('zfs.dataset.mount', data['name'])
    if data['type'] == 'FILESYSTEM':
        await self.middleware.call(
            'notifier.change_dataset_share_type', data['name'],
            data.get('share_type', 'UNIX').lower()
        )
    return await self._get_instance(data['id'])
async def add_admin_group(self, admin_group=None, check_deferred=False):
    """
    Add a local or directory service group to BUILTIN\\Administrators
    (S-1-5-32-544). Members of this group have elevated privileges to the Samba
    server (ability to take ownership of files, override ACLs, view and modify
    user quotas, and administer the server via the Computer Management MMC
    Snap-In. Unfortunately, group membership must be managed via
    "net groupmap listmem|addmem|delmem", which requires that winbind be
    running when the commands are executed. In this situation, net command will
    fail with WBC_ERR_WINBIND_NOT_AVAILABLE. If this error message is returned,
    then flag for a deferred command retry when service starts.

    @param-in (admin_group): This is the group to add to
        BUILTIN\\Administrators. If unset, then look up the value in the
        config db.
    @param-in (check_deferred): If this is True, then only perform the group
        mapping if this has been flagged as in need of deferred setup (i.e.
        Samba wasn't running when it was initially called). This is to avoid
        unnecessarily calling during service start.
    """
    verrors = ValidationErrors()
    if check_deferred:
        is_deferred = await self.middleware.call('cache.has_key', 'SMB_SET_ADMIN')
        if not is_deferred:
            self.logger.debug("No cache entry indicating delayed action to add admin_group was found.")
            return True
        else:
            await self.middleware.call('cache.pop', 'SMB_SET_ADMIN')
    if not admin_group:
        smb = await self.middleware.call('smb.config')
        admin_group = smb['admin_group']
    # We must use GIDs because wbinfo --name-to-sid expects a domain prefix "FREENAS\user"
    group = await self.middleware.call("notifier.get_group_object", admin_group)
    if not group:
        verrors.add('smb_update.admin_group', f"Failed to validate group: {admin_group}")
        raise verrors
    # group[2] is the numeric GID of the resolved group entry.
    sid = await self.wbinfo_gidtosid(group[2])
    if sid == "WBC_ERR_WINBIND_NOT_AVAILABLE":
        # winbind isn't up yet; flag a deferred retry (see docstring).
        self.logger.debug("Delaying admin group add until winbind starts")
        await self.middleware.call('cache.put', 'SMB_SET_ADMIN', True)
        return True
    must_add_sid = await self.validate_admin_groups(sid)
    if not must_add_sid:
        return True
    proc = await Popen(
        ['/usr/local/bin/net', 'groupmap', 'addmem', 'S-1-5-32-544', sid],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    output = await proc.communicate()
    if proc.returncode != 0:
        raise CallError(f'net groupmap addmem failed: {output[1].decode()}')
    self.logger.debug(f"Successfully added {admin_group} to BUILTIN\\Administrators")
    return True
async def do_update(self, id, data):
    """
    Updates a dataset/zvol `id`.
    """
    verrors = ValidationErrors()
    dataset = await self.middleware.call('pool.dataset.query', [('id', '=', id)])
    if not dataset:
        verrors.add('id', f'{id} does not exist', errno.ENOENT)
    else:
        # Fill in immutable attributes from the existing dataset so common
        # validation sees the full picture.
        data['type'] = dataset[0]['type']
        data['name'] = dataset[0]['name']
        if data['type'] == 'VOLUME':
            data['volblocksize'] = dataset[0]['volblocksize']['value']
        await self.__common_validation(verrors, 'pool_dataset_update', data, 'UPDATE')
    if verrors:
        raise verrors
    # Map API field names to ZFS property updates. Each tuple is
    # (api_field, zfs_property_or_None, value_transform_or_None, inheritable);
    # inheritable fields accept the special 'INHERIT' value.
    props = {}
    for i, real_name, transform, inheritable in (
        ('atime', None, str.lower, True),
        ('comments', 'org.freenas:description', None, False),
        ('sync', None, str.lower, True),
        ('compression', None, str.lower, True),
        ('deduplication', 'dedup', str.lower, True),
        ('exec', None, str.lower, True),
        ('quota', None, _none, False),
        ('refquota', None, _none, False),
        ('reservation', None, _none, False),
        ('refreservation', None, _none, False),
        ('copies', None, None, False),
        ('snapdir', None, str.lower, True),
        ('readonly', None, str.lower, True),
        ('recordsize', None, None, True),
        ('volsize', None, lambda x: str(x), False),
    ):
        if i not in data:
            continue
        name = real_name or i
        if inheritable and data[i] == 'INHERIT':
            props[name] = {'source': 'INHERIT'}
        else:
            props[name] = {'value': data[i] if not transform else transform(data[i])}
    rv = await self.middleware.call('zfs.dataset.update', id, {'properties': props})
    if data['type'] == 'FILESYSTEM' and 'share_type' in data:
        await self.middleware.call(
            'notifier.change_dataset_share_type', id, data['share_type'].lower()
        )
    elif data['type'] == 'VOLUME' and 'volsize' in data:
        # A resized zvol backing an iSCSI extent requires a target reload.
        if await self.middleware.call('iscsi.extent.query', [('path', '=', f'zvol/{id}')]):
            await self.middleware.call('service.reload', 'iscsitarget')
    return rv
async def _validate(self, data):
    """
    Validate that the submitted keytab is a properly base64-encoded string.

    NOTE(review): the previous docstring described DNS resolution checks for
    the kdc, admin_server and kpasswd_server hostnames, but this method does
    not perform any — it only verifies the base64 encoding of `data['file']`.
    """
    verrors = ValidationErrors()
    try:
        base64.b64decode(data['file'])
    except Exception as e:
        verrors.add("kerberos.keytab_create", f"Keytab is a not a properly base64-encoded string: [{e}]")
    return verrors
def _validate(self, schema_name, data):
    """Validate a cloud credential's provider and its attributes; raises on failure."""
    verrors = ValidationErrors()
    provider_name = data["provider"]
    if provider_name not in REMOTES:
        verrors.add(f"{schema_name}.provider", "Invalid provider")
    else:
        remote = REMOTES[provider_name]
        attr_errors = validate_attributes(remote.credentials_schema, data)
        verrors.add_child(f"{schema_name}.attributes", attr_errors)
    if verrors:
        raise verrors
async def validate_data(self, data, schema):
    """Validate UPS service settings and normalise a few fields in place."""
    verrors = ValidationErrors()
    driver = data.get('driver')
    if driver:
        known_drivers = await self.middleware.call('ups.driver_choices')
        if driver not in known_drivers.keys():
            verrors.add(
                f'{schema}.driver',
                'Driver selected does not match local machine\'s driver list'
            )
    identifier = data['identifier']
    if identifier and not re.search(r'^[a-z0-9\.\-_]+$', identifier, re.I):
        verrors.add(
            f'{schema}.identifier',
            'Use alphanumeric characters, ".", "-" and "_"'
        )
    for field in ('monpwd', 'monuser'):
        if data.get(field) and re.search(r'[ #]', data[field], re.I):
            verrors.add(
                f'{schema}.{field}',
                'Spaces or number signs are not allowed'
            )
    mode = data.get('mode')
    if mode:
        if mode == 'MASTER':
            if not data.get('port'):
                verrors.add(
                    f'{schema}.port',
                    'This field is required'
                )
        elif not data.get('remotehost'):
            verrors.add(
                f'{schema}.remotehost',
                'This field is required'
            )
    to_emails = data.get('toemail')
    data['toemail'] = ';'.join(to_emails) if to_emails else ''
    data['mode'] = data['mode'].lower()
    data['shutdown'] = data['shutdown'].lower()
    return verrors, data
async def do_update(self, data):
    """
    Update default Alert settings.

    Validates each configured alert class, its level and policy, then persists
    the merged configuration. Raises ValidationErrors on invalid input.
    """
    old = await self.config()
    new = old.copy()
    new.update(data)
    verrors = ValidationErrors()
    for k, v in new["classes"].items():
        if k not in AlertClass.class_by_name:
            verrors.add(f"alert_class_update.classes.{k}", "This alert class does not exist")
        if not isinstance(v, dict):
            verrors.add(f"alert_class_update.classes.{k}", "Not a dictionary")
            # Skip the membership tests below: `"level" in v` raises TypeError
            # for non-container values, which the original did not guard.
            continue
        if "level" in v:
            if v["level"] not in AlertLevel.__members__:
                verrors.add(f"alert_class_update.classes.{k}.level", "This alert level does not exist")
        if "policy" in v:
            if v["policy"] not in POLICIES:
                verrors.add(f"alert_class_update.classes.{k}.policy", "This alert policy does not exist")
    if verrors:
        raise verrors
    await self.middleware.call("datastore.update", self._config.datastore, old["id"], new)
    return new
async def do_update(self, id, data):
    """
    Configure IPMI LAN channel `id`: network settings, VLAN, access, auth
    levels and optionally the admin password. Returns the OR of the ipmitool
    exit codes.
    """
    if not await self.is_loaded():
        raise CallError('The ipmi device could not be found')
    verrors = ValidationErrors()
    if not data.get('dhcp'):
        # Static addressing requires the full address triple.
        for k in ['ipaddress', 'netmask', 'gateway']:
            if not data.get(k):
                verrors.add(
                    f'ipmi_update.{k}',
                    'This field is required when dhcp is not given'
                )
    if verrors:
        raise verrors
    args = ['ipmitool', 'lan', 'set', str(id)]
    rv = 0
    if data.get('dhcp'):
        rv |= (await run(*args, 'ipsrc', 'dhcp', check=False)).returncode
    else:
        rv |= (await run(*args, 'ipsrc', 'static', check=False)).returncode
        rv |= (await run(*args, 'ipaddr', data['ipaddress'], check=False)).returncode
        rv |= (await run(*args, 'netmask', data['netmask'], check=False)).returncode
        rv |= (await run(*args, 'defgw', 'ipaddr', data['gateway'], check=False)).returncode
    # check=False added here (and on the password/enable commands below): every
    # other command in this accumulation pattern tolerates failure and ORs the
    # exit code into rv; without it, run() raises and rv is never returned.
    rv |= (await run(
        *args, 'vlan', 'id', str(data['vlan']) if data.get('vlan') else 'off',
        check=False
    )).returncode
    rv |= (await run(*args, 'access', 'on', check=False)).returncode
    rv |= (await run(*args, 'auth', 'USER', 'MD2,MD5', check=False)).returncode
    rv |= (await run(*args, 'auth', 'OPERATOR', 'MD2,MD5', check=False)).returncode
    rv |= (await run(*args, 'auth', 'ADMIN', 'MD2,MD5', check=False)).returncode
    rv |= (await run(*args, 'auth', 'CALLBACK', 'MD2,MD5', check=False)).returncode
    # Setting arp have some issues in some hardwares
    # Do not fail if setting these couple settings do not work
    # See #15578
    await run(*args, 'arp', 'respond', 'on', check=False)
    await run(*args, 'arp', 'generate', 'on', check=False)
    if data.get('password'):
        rv |= (await run(
            'ipmitool', 'user', 'set', 'password', '2', data.get('password'),
            check=False,
        )).returncode
        rv |= (await run('ipmitool', 'user', 'enable', '2', check=False)).returncode
    # XXX: according to dwhite, this needs to be executed off the box via
    # the lanplus interface.
    # rv |= (await run('ipmitool', 'sol', 'set', 'enabled', 'true', '1')).returncode
    return rv
async def do_update(self, data):
    """
    Update SNMP service configuration.

    Validates community/v3 credential requirements and applies the merged
    config via the service layer.
    """
    old = await self.config()
    new = old.copy()
    new.update(data)
    verrors = ValidationErrors()
    if not data['v3'] and not data['community']:
        verrors.add('snmp_update.community', 'This field is required when SNMPv3 is disabled')
    if data['v3_authtype'] and not data['v3_password']:
        # message typo fixed: "requires" -> "required"
        verrors.add('snmp_update.v3_password', 'This field is required when SNMPv3 auth type is specified')
    if data['v3_password'] and len(data['v3_password']) < 8:
        verrors.add('snmp_update.v3_password', 'Password must contain at least 8 characters')
    if data['v3_privproto'] and not data['v3_privpassphrase']:
        # message typo fixed: "requires" -> "required"
        verrors.add('snmp_update.v3_privpassphrase', 'This field is required when SNMPv3 private protocol is specified')
    if verrors:
        raise verrors
    await self._update_service(old, new)
    return new
async def _validate(self, schema_name, data, id=None):
    """Validate cloud credentials: unique name, known provider, valid attributes."""
    verrors = ValidationErrors()
    await self._ensure_unique(verrors, schema_name, "name", data["name"], id)
    provider_name = data["provider"]
    if provider_name not in REMOTES:
        verrors.add(f"{schema_name}.provider", "Invalid provider")
    else:
        remote = REMOTES[provider_name]
        attr_errors = validate_attributes(remote.credentials_schema, data)
        verrors.add_child(f"{schema_name}.attributes", attr_errors)
    if verrors:
        raise verrors
async def _query_periodic_snapshot_tasks(self, ids):
    """Resolve snapshot-task ids to rows; one error per missing id, keyed by position."""
    verrors = ValidationErrors()
    rows = await self.middleware.call("pool.snapshottask.query", [["id", "in", ids]])
    by_id = {row["id"]: row for row in rows}
    snapshot_tasks = []
    for i, task_id in enumerate(ids):
        task = by_id.get(task_id)
        if task is None:
            verrors.add(str(i), "This snapshot task does not exist")
        else:
            snapshot_tasks.append(task)
    return verrors, snapshot_tasks
async def common_validation(self, data, schema_name):
    """Validate and normalise schedule fields in `data`; returns (data, verrors)."""
    verrors = ValidationErrors()
    if not data['dow']:
        verrors.add(
            f'{schema_name}.dow',
            'At least one day must be chosen'
        )
    data['ret_unit'] = data['ret_unit'].lower()
    # "HH:MM" strings become datetime.time objects.
    data['begin'] = time(*map(int, data['begin'].split(':')))
    data['end'] = time(*map(int, data['end'].split(':')))
    # Day-of-week list is stored as a comma-separated string.
    data['byweekday'] = ','.join(str(day) for day in data.pop('dow'))
    return data, verrors
async def _validate(self, service, schema_name):
    """Ensure the alert service type is known and its attributes validate."""
    verrors = ValidationErrors()
    factory = ALERT_SERVICES_FACTORIES.get(service["type"])
    if not factory:
        verrors.add(f"{schema_name}.type", "This field has invalid value")
        raise verrors
    try:
        factory.validate(service.get('attributes', {}))
    except ValidationErrors as attr_errors:
        verrors.add_child(f"{schema_name}.attributes", attr_errors)
    if verrors:
        raise verrors
async def wbinfo_gidtosid(self, gid):
    """
    Convert a GID to a Windows SID via wbinfo.

    Returns the sentinel string "WBC_ERR_WINBIND_NOT_AVAILABLE" when winbind is
    down (callers defer the operation); raises ValidationErrors otherwise.
    """
    verrors = ValidationErrors()
    proc = await Popen(
        ['/usr/local/bin/wbinfo', '--gid-to-sid', f"{gid}"],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    stdout, stderr = await proc.communicate()
    if proc.returncode != 0:
        err = stderr.decode()
        if "WBC_ERR_WINBIND_NOT_AVAILABLE" in err:
            return "WBC_ERR_WINBIND_NOT_AVAILABLE"
        verrors.add('smb_update.admin_group', f"Failed to identify Windows SID for group: {err}")
        raise verrors
    return stdout.decode().strip()
def validate_attributes(schema, data, additional_attrs=False, attr_key="attributes"):
    """
    Clean and validate `data[attr_key]` against `schema` fields.

    Generalized with `attr_key` (default "attributes", fully backward
    compatible) to match the newer variant of this helper, so callers can
    validate attributes stored under a different key.
    Returns a ValidationErrors (possibly empty).
    """
    verrors = ValidationErrors()
    attr_schema = Dict("attributes", *schema, additional_attrs=additional_attrs)
    try:
        data[attr_key] = attr_schema.clean(data[attr_key])
    except Error as e:
        verrors.add(e.attribute, e.errmsg, e.errno)
    try:
        attr_schema.validate(data[attr_key])
    except ValidationErrors as e:
        verrors.extend(e)
    return verrors
def __password_verify(self, password, schema, verrors=None):
    """Reject non-ASCII SMTP passwords; returns the ValidationErrors used (None input → None)."""
    if not password:
        return
    verrors = verrors if verrors is not None else ValidationErrors()
    # FIXME: smtplib does not support non-ascii password yet
    # https://github.com/python/cpython/pull/8938
    try:
        password.encode('ascii')
    except UnicodeEncodeError:
        verrors.add(
            schema,
            'Only plain text characters (7-bit ASCII) are allowed in passwords. '
            'UTF or composed characters are not allowed.'
        )
    return verrors
async def do_update(self, data):
    """Update email settings; returns the pre-update configuration."""
    config = await self.config()
    new = {**config, **data}
    # Django model stores the security type in lower case.
    new['security'] = new['security'].lower()
    verrors = ValidationErrors()
    if new['smtp'] and new['user'] == '':
        verrors.add('mail_update.user', 'This field is required when SMTP authentication is enabled')
    if verrors:
        raise verrors
    await self.middleware.call('datastore.update', 'system.email', config['id'], new, {'prefix': 'em_'})
    return config
async def create_unlocked(self, data):
    """Insert an iSCSI host row, attach its IQNs, and cache the new host."""
    iqns = data.pop("iqns")
    try:
        host_id = await self.middleware.call("datastore.insert", self._config.datastore, data)
    except IntegrityError:
        # Unique constraint on the IP column.
        verrors = ValidationErrors()
        verrors.add("iscsi_host_create.ip", "This IP address already exists", errno.EEXIST)
        raise verrors
    await self._set_datastore_iqns(host_id, iqns)
    host = await self.get_instance(host_id)
    self.hosts[host["ip"]] = host
    self._set_cache_iqns(host_id, iqns)
    return host
async def __validate_fields(self, schema, data):
    """Validate notify user and serial port selections; returns (verrors, data)."""
    verrors = ValidationErrors()
    user = data.get('periodic_notifyuser')
    if user and not (await self.middleware.call('notifier.get_user_object', user)):
        verrors.add(f'{schema}.periodic_notifyuser', 'Specified user does not exist')
    serial_choice = data.get('serialport')
    if serial_choice and serial_choice not in await self.serial_port_choices():
        verrors.add(
            f'{schema}.serialport',
            'Serial port specified has not been identified by the system'
        )
    return verrors, data
def validate_attributes(schema, data, additional_attrs=False, attr_key="attributes"):
    """Clean and validate data[attr_key] against the given schema fields; returns ValidationErrors."""
    from middlewared.schema import Dict, Error
    from middlewared.service import ValidationErrors
    verrors = ValidationErrors()
    attr_schema = Dict("attributes", *schema, additional_attrs=additional_attrs)
    try:
        data[attr_key] = attr_schema.clean(data[attr_key])
    except Error as e:
        verrors.add(e.attribute, e.errmsg, e.errno)
    try:
        attr_schema.validate(data[attr_key])
    except ValidationErrors as e:
        verrors.extend(e)
    return verrors
async def validate(self, data):
    """
    Validate a gluster local-event payload: the event must be allowed and the
    named volume must exist. Raises ValidationErrors via check().
    """
    verrors = ValidationErrors()
    allowed = AllowedEvents.EVENTS.value
    if data['event'] not in allowed:
        verrors.add(
            f'localevent_send.{data["event"]}',
            f'event: "{data["event"]}" is not allowed',
        )
    vols = await self.middleware.call('gluster.volume.list')
    if data['name'] not in vols:
        verrors.add(
            f'localevent_send.{data["name"]}',
            # f-prefix added: the original emitted the literal text
            # '{data["name"]}' instead of the volume name.
            f'gluster volume: "{data["name"]}" does not exist',
        )
    verrors.check()
async def _validate(self, service, schema_name):
    """Validate an alert service's type, its attributes and its settings."""
    verrors = ValidationErrors()
    service_type = service["type"]
    factory = ALERT_SERVICES_FACTORIES.get(service_type)
    if factory is None:
        verrors.add(f"{schema_name}.type", "This field has invalid value")
        raise verrors
    try:
        factory.validate(service["attributes"])
    except ValidationErrors as attr_errors:
        verrors.add_child(f"{schema_name}.attributes", attr_errors)
    validate_settings(verrors, f"{schema_name}.settings", service["settings"])
    if verrors:
        raise verrors
async def validate_aux_params(self, data, schema_name):
    """
    libsmbconf expects to be provided with key-value pairs.
    """
    verrors = ValidationErrors()
    for entry in data.splitlines():
        # Skip blank lines and smb.conf-style comments ('#' or ';').
        if entry == '' or entry.startswith(('#', ';')):
            continue
        # Split on the first '=' only; values may themselves contain '='.
        kv = entry.split('=', 1)
        if len(kv) != 2:
            verrors.add(
                f'{schema_name}.auxsmbconf',
                f'Auxiliary parameters must be in the format of "key = value": {entry}'
            )
            continue
    verrors.check()
def _validate_legacy(self, data):
    """Collect errors for options unsupported while a legacy replication task uses this snapshot task."""
    verrors = ValidationErrors()
    if data['exclude']:
        verrors.add(
            'exclude',
            'Excluding child datasets is not available because this snapshot task is being used in '
            'legacy replication task. Please upgrade your replication tasks to edit this field.',
        )
    if not data['allow_empty']:
        verrors.add(
            'allow_empty',
            'Disallowing empty snapshots is not available because this snapshot task is being used in '
            'legacy replication task. Please upgrade your replication tasks to edit this field.',
        )
    return verrors
async def pool_lock_pre_check(self, pool, passphrase):
    """Check whether `pool` may be locked: it must not host the system dataset and the passphrase must be valid."""
    verrors = ValidationErrors()
    # Make sure that this pool is not being used by system dataset service
    sysds = await self.middleware.call('systemdataset.config')
    if pool['name'] == sysds['pool']:
        verrors.add(
            'id',
            f'Pool {pool["name"]} contains the system dataset. The system dataset pool cannot be locked.'
        )
    elif not await self.middleware.call('disk.geli_testkey', pool, passphrase):
        verrors.add(
            'passphrase',
            'The entered passphrase was not valid. Please enter the correct passphrase to lock the pool.'
        )
    return verrors
async def validate_data(self, data, schema):
    """Validate UPS configuration and normalise several fields in place; returns (verrors, data)."""
    verrors = ValidationErrors()
    driver = data.get('driver')
    if driver:
        driver_choices = await self.middleware.call('ups.driver_choices')
        if driver not in driver_choices.keys():
            verrors.add(
                f'{schema}.driver',
                'Driver selected does not match local machine\'s driver list'
            )
    identifier = data['identifier']
    if identifier and not re.search(r'^[a-z0-9\.\-_]+$', identifier, re.I):
        verrors.add(f'{schema}.identifier', 'Use alphanumeric characters, ".", "-" and "_"')
    for field in ('monpwd', 'monuser'):
        if data.get(field) and re.search(r'[ #]', data[field], re.I):
            verrors.add(f'{schema}.{field}', 'Spaces or number signs are not allowed')
    mode = data.get('mode')
    if mode == 'MASTER':
        # MASTER mode needs a local port and driver.
        for field in ('port', 'driver'):
            if not data[field]:
                verrors.add(f'{schema}.{field}', 'This field is required')
    elif not data.get('remotehost'):
        verrors.add(f'{schema}.remotehost', 'This field is required')
    to_emails = data.get('toemail')
    data['toemail'] = ';'.join(to_emails) if to_emails else ''
    data['mode'] = data['mode'].lower()
    data['shutdown'] = data['shutdown'].lower()
    return verrors, data
async def do_update(self, id, data):
    """
    Updates a dataset/zvol `id`.
    """
    verrors = ValidationErrors()
    dataset = await self.middleware.call('pool.dataset.query', [('id', '=', id)])
    if not dataset:
        verrors.add('id', f'{id} does not exist', errno.ENOENT)
    else:
        # The type is immutable; carry it over for common validation.
        data['type'] = dataset[0]['type']
        await self.__common_validation(verrors, 'pool_dataset_update', data, 'UPDATE')
    if verrors:
        raise verrors
    # Map API field names to ZFS property updates. Each tuple is
    # (api_field, zfs_property_or_None, value_transform_or_None, inheritable);
    # inheritable fields accept the special 'INHERIT' value.
    props = {}
    for i, real_name, transform, inheritable in (
        ('atime', None, str.lower, True),
        ('comments', 'org.freenas:description', None, False),
        ('sync', None, str.lower, True),
        ('compression', None, str.lower, True),
        ('deduplication', 'dedup', str.lower, True),
        ('quota', None, _none, False),
        ('refquota', None, _none, False),
        ('reservation', None, _none, False),
        ('refreservation', None, _none, False),
        ('copies', None, None, False),
        ('snapdir', None, str.lower, True),
        ('readonly', None, str.lower, True),
        ('recordsize', None, None, True),
        ('volsize', None, lambda x: str(x), False),
    ):
        if i not in data:
            continue
        name = real_name or i
        if inheritable and data[i] == 'INHERIT':
            props[name] = {'source': 'INHERIT'}
        else:
            props[name] = {'value': data[i] if not transform else transform(data[i])}
    return await self.middleware.call('zfs.dataset.update', id, {'properties': props})
def do_create(self, data):
    """
    Creates a ZFS dataset.

    Raises ValidationErrors for a name without a pool prefix and CallError
    when libzfs rejects the creation; returns `data` on success.
    """
    verrors = ValidationErrors()
    if '/' not in data['name']:
        verrors.add('name', 'You need a full name, e.g. pool/newdataset')
    if verrors:
        raise verrors
    properties = data.get('properties') or {}
    sparse = properties.pop('sparse', False)
    # Copy what remains after popping 'sparse'. The original re-read
    # data['properties'], which raises KeyError when the key is absent even
    # though data.get() above explicitly tolerates that.
    params = dict(properties)
    # it's important that we set xattr=sa for various
    # performance reasons related to ea handling
    # pool.dataset.create already sets this by default
    # so mirror the behavior here
    if data['type'] == 'FILESYSTEM' and 'xattr' not in params:
        params['xattr'] = 'sa'
    try:
        with libzfs.ZFS() as zfs:
            pool = zfs.get(data['name'].split('/')[0])
            pool.create(
                data['name'], params,
                fstype=getattr(libzfs.DatasetType, data['type']),
                sparse_vol=sparse,
                create_ancestors=data['create_ancestors'],
            )
    except libzfs.ZFSException as e:
        self.logger.error('Failed to create dataset', exc_info=True)
        raise CallError(f'Failed to create dataset: {e}')
    else:
        return data
async def do_update(self, id, data): old = await self._get_instance(id) # signedby is changed back to integer from a dict old['signedby'] = old['signedby']['id'] if old.get( 'signedby') else None new = old.copy() new.update(data) verrors = ValidationErrors() # TODO: THIS WILL BE REMOVED IN 11.3 - WO DON'T WANT TO ALLOW UPDATES TO THE CERTIFICATE FIELD if new['type'] != CERT_TYPE_CSR and data.get('certificate'): verrors.add('certificate_update.certificate', 'Certificate field cannot be updated') elif data.get('certificate'): verrors = await self.validate_common_attributes( new, 'certificate_update') if not verrors: new['type'] = CERT_TYPE_EXISTING if new['name'] != old['name']: await validate_cert_name(self.middleware, data['name'], self._config.datastore, verrors, 'certificate_update.name') if verrors: raise verrors new['san'] = ' '.join(new.pop('san', []) or []) await self.middleware.call('datastore.update', self._config.datastore, id, new, {'prefix': self._config.datastore_prefix}) await self.middleware.call('service.start', 'ix-ssl', {'onetime': False}) return await self._get_instance(id)
async def do_update(self, data):
    """Update email configuration; returns the pre-update configuration."""
    config = await self.config()
    new = config.copy()
    new.update(data)
    # Django model stores the security type in lower case.
    new['security'] = new['security'].lower()
    verrors = ValidationErrors()
    if new['smtp'] and new['user'] == '':
        verrors.add(
            'mail_update.user',
            'This field is required when SMTP authentication is enabled')
    if verrors:
        raise verrors
    await self.middleware.call(
        'datastore.update', 'system.email', config['id'], new, {'prefix': 'em_'}
    )
    return config
async def permission(self, id, data):
    """Apply ownership/mode/ACL changes to the dataset's mountpoint."""
    path = (await self._get_instance(id))['mountpoint']
    user = data.get('user', None)
    group = data.get('group', None)
    mode = data.get('mode', None)
    recursive = data.get('recursive', False)
    acl = data['acl']
    verrors = ValidationErrors()
    # UNIX/MAC ACL styles require an explicit mode.
    if acl in ('UNIX', 'MAC') and mode is None:
        verrors.add('pool_dataset_permission.mode', 'This field is required')
    if verrors:
        raise verrors
    await self.middleware.call(
        'notifier.mp_change_permission',
        path, user, group, mode, recursive, acl.lower()
    )
    return data
def do_update(self, snap_id, data):
    """Update user properties on ZFS snapshot `snap_id`."""
    verrors = ValidationErrors()
    prop_updates = data['user_properties_update']
    for index, prop in enumerate(prop_updates):
        # 'remove' and an explicit 'value' are mutually exclusive.
        if prop.get('remove') and 'value' in prop:
            verrors.add(
                f'snapshot_update.user_properties_update.{index}.remove',
                'Must not be set when value is specified'
            )
    verrors.check()
    try:
        with libzfs.ZFS() as zfs:
            snap = zfs.get_snapshot(snap_id)
            user_props = self.middleware.call_sync('pool.dataset.get_create_update_user_props', prop_updates, True)
            self.middleware.call_sync('zfs.dataset.update_zfs_object_props', user_props, snap)
    except libzfs.ZFSException as e:
        raise CallError(str(e))
    return self.middleware.call_sync('zfs.snapshot.get_instance', snap_id)
async def resolve_hostnames(self, hostnames):
    """
    Takes a list of hostnames to be asynchronously resolved
    to their respective IP address. Duplicates are dropped
    before resolution; any resolution failure is reported via
    ValidationErrors. Returns the unique resolved IPs.
    """
    unique_hosts = list(set(hostnames))

    verrors = ValidationErrors()
    avail_ips = await self.middleware.call('gluster.peer.ips_available')
    results = await asyncio.gather(
        *(self._resolve_hostname(host, avail_ips) for host in unique_hosts)
    )

    resolved = []
    for host, outcome in zip(unique_hosts, results):
        if outcome['error']:
            verrors.add(f'resolve_hostname.{host}', outcome['error'])
        else:
            resolved.append(outcome['ip'])

    verrors.check()
    return list(set(resolved))
async def do_update(self, job, data):
    """
    Update the system dataset configuration.

    Validates the requested pool (falling back to the first eligible
    pool, then 'freenas-boot', when none is set), persists the new
    settings, migrates the dataset if the pool changed, and restarts
    the affected services (collectd / syslogd) as needed.
    """
    config = await self.config()
    new = {**config, **data}

    verrors = ValidationErrors()
    if new['pool'] and not await self.middleware.call(
        'zfs.pool.query', [('name', '=', new['pool'])]
    ):
        verrors.add('sysdataset_update.pool', f'Pool "{new["pool"]}" not found', errno.ENOENT)
    elif not new['pool']:
        # No pool chosen: pick the first pool not excluded by the caller,
        # defaulting to the boot pool when nothing qualifies.
        exclude = data.get('pool_exclude')
        candidates = (
            p['name']
            for p in await self.middleware.call('pool.query')
            if p['name'] != exclude
        )
        new['pool'] = next(candidates, 'freenas-boot')
    verrors.check()

    new['syslog_usedataset'] = new['syslog']
    new['rrd_usedataset'] = new['rrd']
    await self.middleware.call(
        'datastore.update', 'system.systemdataset', config['id'], new, {'prefix': 'sys_'}
    )

    if config['pool'] != new['pool']:
        await self.migrate(config['pool'], new['pool'])

    rrd_changed = config['rrd'] != new['rrd']
    if rrd_changed:
        # Stop collectd to flush data
        await self.middleware.call('service.stop', 'collectd')

    await self.setup()

    if config['syslog'] != new['syslog']:
        await self.middleware.call('service.restart', 'syslogd')

    if rrd_changed:
        await self.rrd_toggle()
        await self.middleware.call('service.restart', 'collectd')

    return await self.config()
async def validate_data(self, data, schema):
    """
    Validate scrub task data and normalize schedule fields in place.

    Checks the target pool exists and is not already scrubbed by
    another task; converts `month` and `dayweek` selections into their
    cron string form ('*' when every value is selected).

    Returns:
        (verrors, data) — accumulated validation errors and the
        (possibly mutated) data dict.
    """
    verrors = ValidationErrors()

    pool_pk = data.get('pool')
    if not pool_pk:
        verrors.add(f'{schema}.pool', 'This field is required')
    else:
        pool_obj = await self.middleware.call(
            'datastore.query', 'storage.volume', [('id', '=', pool_pk)]
        )
        if len(pool_obj) == 0:
            verrors.add(f'{schema}.pool', 'The specified volume does not exist')
        elif (
            'id' not in data
            or ('id' in data and 'original_pool_id' in data and pool_pk != data['original_pool_id'])
        ):
            # New task, or the pool was changed on an existing one: the
            # pool must not already have a scrub task.
            scrub_obj = await self.query(filters=[('volume_id', '=', pool_pk)])
            if len(scrub_obj) != 0:
                verrors.add(f'{schema}.pool', 'A scrub with this pool already exists')

    def normalize(field, full_count):
        # Collapse a full selection to '*', otherwise join as CSV.
        values = data.get(field)
        if not values:
            verrors.add(f'{schema}.{field}', 'This field is required')
        elif len(values) == full_count:
            data[field] = '*'
        else:
            data[field] = ','.join(values)

    normalize('month', 12)
    normalize('dayweek', 7)

    return verrors, data
async def validate_data(self, data, schema):
    """
    Validate the pool referenced by a scrub task.

    Ensures the volume exists and that no other scrub task already
    targets it (unless this is an update keeping the same pool).

    Returns:
        (verrors, data) — accumulated validation errors and the data.
    """
    verrors = ValidationErrors()

    pool_pk = data.get('pool')
    if not pool_pk:
        return verrors, data

    pool_obj = await self.middleware.call(
        'datastore.query', 'storage.volume', [('id', '=', pool_pk)]
    )
    if len(pool_obj) == 0:
        verrors.add(f'{schema}.pool', 'The specified volume does not exist')
    elif (
        'id' not in data
        or ('id' in data and 'original_pool_id' in data and pool_pk != data['original_pool_id'])
    ):
        # New task, or the pool changed on an existing task.
        scrub_obj = await self.query(filters=[('pool', '=', pool_pk)])
        if len(scrub_obj) != 0:
            verrors.add(f'{schema}.pool', 'A scrub with this pool already exists')

    return verrors, data
async def do_create(self, data):
    """
    Create a new filesystem ACL template.
    """
    verrors = ValidationErrors()

    # An empty ACL is never a valid template.
    if len(data['acl']) == 0:
        verrors.add(
            "filesystem_acltemplate_create.acl",
            "At least one ACL entry must be specified."
        )
    await self.validate_acl(data, "filesystem_acltemplate_create.acl", verrors)
    verrors.check()

    # User-created templates are never marked builtin.
    data['builtin'] = False
    data['id'] = await self.middleware.call(
        'datastore.insert', self._config.datastore, data,
        {'prefix': self._config.datastore_prefix}
    )
    return await self._get_instance(data['id'])
async def common_validation(self, data, schema_name):
    """
    Validate shared fields and normalize them in place.

    Verifies the path field plus that `user` and `group` resolve to
    real system entities, then flattens host lists to space-separated
    strings and lower-cases the mode. Returns the mutated data.

    Raises:
        ValidationErrors: if the path, user or group is invalid.
    """
    verrors = ValidationErrors()
    await self.validate_path_field(data, schema_name, verrors)

    for entity in ('user', 'group'):
        name = data.get(entity)
        try:
            await self.middleware.call(
                f'{entity}.get_{entity}_obj', {f'{entity}name': name}
            )
        except Exception:
            # Any lookup failure is treated as "no such user/group".
            verrors.add(f'{schema_name}.{entity}', f'Please specify a valid {entity}')

    verrors.check()

    data.update({
        'hostsallow': ' '.join(data['hostsallow']),
        'hostsdeny': ' '.join(data['hostsdeny']),
        'mode': data['mode'].lower(),
    })
    return data
async def validate_data(self, data, schema):
    """
    Validate cron job fields.

    Rejects usernames containing spaces, verifies the user exists in
    the directory-service cache, and requires a non-empty command.

    Returns:
        (verrors, data) — accumulated validation errors and the data.
    """
    verrors = ValidationErrors()

    user = data.get('user')
    if user:
        # Windows users can have spaces in their usernames
        # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808
        if ' ' in user:
            verrors.add(f'{schema}.user', 'Usernames cannot have spaces')
        else:
            found = None
            with contextlib.suppress(KeyError):
                found = await self.middleware.call('dscache.get_uncached_user', user)
            if not found:
                verrors.add(f'{schema}.user', 'Specified user does not exist')

    if not data.get('command'):
        verrors.add(f'{schema}.command', 'Please specify a command for cronjob task.')

    return verrors, data
async def validate_credentials(middleware, data):
    """
    Validate that the configured user can run the configured script.

    We would like to validate the following bits:
    1) script exists and is executable
    2) user exists
    3) User can access the script in question

    Raises:
        ValidationErrors: if any of the checks above fail.

    Returns:
        The unmodified `data` dict on success.
    """
    verrors = ValidationErrors()
    try:
        await middleware.call('user.get_user_obj', {'username': data['user']})
    except KeyError:
        verrors.add('user', f'Unable to locate {data["user"]!r} user')

    await check_path_resides_within_volume(verrors, middleware, 'script', data['script'])

    try:
        can_access = await middleware.call(
            'filesystem.can_access_as_user', data['user'], data['script'], {'execute': True}
        )
    except CallError as e:
        verrors.add('script', f'Unable to validate script: {e}')
    else:
        if not can_access:
            # Fixed grammar in the user-facing message ("does not has" -> "does not have").
            verrors.add(
                'user',
                f'{data["user"]!r} user does not have permission to execute the script'
            )

    verrors.check()
    return data
async def __validate_fields(self, schema, data):
    """
    Validate advanced-settings fields.

    Checks that the periodic notify user exists and, when the serial
    console is enabled, that a known serial port was selected. Numeric
    port values are normalized to their hex form in `data`.

    Returns:
        (verrors, data) — accumulated validation errors and the
        (possibly mutated) data dict.
    """
    verrors = ValidationErrors()

    notify_user = data.get('periodic_notifyuser')
    if notify_user and not await self.middleware.call(
        'notifier.get_user_object', notify_user
    ):
        verrors.add(
            f'{schema}.periodic_notifyuser',
            'Specified user does not exist'
        )

    if data.get('serialconsole'):
        port = data.get('serialport')
        if not port:
            verrors.add(
                f'{schema}.serialport',
                'Please specify a serial port when serial console option is checked'
            )
        else:
            # A purely numeric choice is stored as its hex representation.
            if port.isdigit():
                port = hex(int(port))
            data['serialport'] = port
            if port not in await self.serial_port_choices():
                verrors.add(
                    f'{schema}.serialport',
                    'Serial port specified has not been identified by the system'
                )

    return verrors, data
def validate_extensions(self, extensions_data, schema):
    """
    Validate x509 extension parameters for a cert/CA.

    Instantiates each enabled extension class with the supplied
    parameters to confirm they are acceptable, and enforces RFC 5280
    consistency rules between KeyUsage and BasicConstraints plus a
    non-empty usage list for ExtendedKeyUsage.

    Returns:
        ValidationErrors with any problems found.
    """
    # We do not need to validate some extensions like `AuthorityKeyIdentifier`.
    # They are generated from the cert/ca's public key contents. So we skip these.
    skip_extension = ['AuthorityKeyIdentifier']
    verrors = ValidationErrors()

    for name, params in extensions_data.items():
        if name in skip_extension or not params['enabled']:
            continue
        klass = getattr(x509.extensions, name)
        try:
            klass(*get_extension_params((name, params)))
        except Exception as e:
            verrors.add(
                f'{schema}.{name}',
                f'Please provide valid values for {name}: {e}'
            )

    key_usage = extensions_data['KeyUsage']
    basic_constraints = extensions_data['BasicConstraints']
    if key_usage['enabled'] and key_usage['key_cert_sign']:
        if not basic_constraints['enabled'] or not basic_constraints['ca']:
            verrors.add(
                f'{schema}.BasicConstraints',
                'Please enable ca when key_cert_sign is set in KeyUsage as per RFC 5280.'
            )

    ext_key_usage = extensions_data['ExtendedKeyUsage']
    if ext_key_usage['enabled'] and not ext_key_usage['usages']:
        verrors.add(
            f'{schema}.ExtendedKeyUsage.usages',
            'Please specify at least one USAGE for this extension.'
        )

    return verrors
async def validate_data(self, data, schema_name):
    """
    Validate VMware snapshot task settings.

    Resolves the hostname, checks the ZFS filesystem choice, and
    connects to the server to confirm the datastore exists.

    Raises:
        ValidationErrors: if any check fails (raised at the end so all
            problems are reported together).
    """
    verrors = ValidationErrors()

    await resolve_hostname(
        self.middleware, verrors, f'{schema_name}.hostname', data['hostname']
    )

    if data['filesystem'] not in (
        await self.middleware.call('pool.filesystem_choices')
    ):
        verrors.add(f'{schema_name}.filesystem', 'Invalid ZFS filesystem')

    datastore = data.get('datastore')
    try:
        credentials = {
            'hostname': data.get('hostname'),
            'username': data.get('username'),
            'password': data.get('password'),
        }
        available = await self.middleware.run_in_thread(self.get_datastores, credentials)
        if datastore not in available:
            verrors.add(
                f'{schema_name}.datastore',
                f'Datastore "{datastore}" not found on the server'
            )
    except Exception as e:
        # Broad catch: any connection/lookup failure becomes a field error.
        verrors.add(f'{schema_name}.datastore', 'Failed to connect: ' + str(e))

    if verrors:
        raise verrors
async def do_update(self, id, data):
    """
    update filesystem ACL template with `id`.
    """
    old = await self.get_instance(id)
    new = old.copy()
    new.update(data)

    verrors = ValidationErrors()
    # Builtin templates are immutable.
    if old['builtin']:
        verrors.add("filesystem_acltemplate_update.builtin",
                    "built-in ACL templates may not be changed")

    # A rename must not collide with an existing template.
    if new['name'] != old['name'] and await self.query([('name', '=', new['name'])]):
        verrors.add("filesystem_acltemplate_update.name",
                    f"{data['name']}: name is not unique")

    if len(new['acl']) == 0:
        verrors.add("filesystem_acltemplate_update.acl",
                    "At least one ACL entry must be specified.")
    await self.validate_acl(new, "filesystem_acltemplate_update.acl", verrors)
    verrors.check()

    await self.middleware.call(
        'datastore.update', self._config.datastore, id, new,
        {'prefix': self._config.datastore_prefix}
    )
    return await self.get_instance(id)
async def do_update(self, data):
    """
    Update default Alert settings.

    .. examples(rest)::

    Set ClassName's level to LEVEL and policy to POLICY. Reset settings for other alert classes.

    {
        "classes": {
            "ClassName": {
                "level": "LEVEL",
                "policy": "POLICY"
            }
        }
    }
    """
    old = await self.config()
    new = old.copy()
    new.update(data)

    verrors = ValidationErrors()
    for k, v in new["classes"].items():
        if k not in AlertClass.class_by_name:
            verrors.add(f"alert_class_update.classes.{k}", "This alert class does not exist")

        if not isinstance(v, dict):
            verrors.add(f"alert_class_update.classes.{k}", "Not a dictionary")
            # Fix: a non-mapping value cannot be inspected further —
            # evaluating `"level" in v` on e.g. an int would raise
            # TypeError instead of surfacing a validation error.
            continue

        if "level" in v and v["level"] not in AlertLevel.__members__:
            verrors.add(f"alert_class_update.classes.{k}.level", "This alert level does not exist")

        if "policy" in v and v["policy"] not in POLICIES:
            verrors.add(f"alert_class_update.classes.{k}.policy", "This alert policy does not exist")

    if verrors:
        raise verrors

    await self.middleware.call("datastore.update", self._config.datastore, old["id"], new)
    return await self.config()
async def _validate_libdefaults(self, libdefaults):
    """
    Validate user-supplied krb5.conf [libdefaults] auxiliary parameters.

    Each line must be a single `key = value` pair whose key is a known
    KRB_LibDefaults member and whose value passes the per-type check in
    `_validate_param_type`.

    Returns:
        ValidationErrors with one entry per invalid line.
    """
    verrors = ValidationErrors()
    for line in libdefaults.splitlines():
        param = line.split('=')
        if len(param) == 2:
            validated_param = list(
                filter(lambda x: param[0].strip() in (x.value)[0], KRB_LibDefaults)
            )
            if not validated_param:
                verrors.add(
                    'kerberos_libdefaults',
                    f'{param[0]} is an invalid libdefaults parameter.'
                )
                continue

            try:
                await self._validate_param_type({
                    'ptype': (validated_param[0]).value[1],
                    'value': param[1].strip()
                })
            except Exception as e:
                # Fix: only the project's CallError carries `errmsg`; any
                # other exception type would raise AttributeError here.
                # Fall back to str(e) for everything else.
                verrors.add(
                    'kerberos_libdefaults',
                    f'{param[0]} has invalid value: {getattr(e, "errmsg", str(e))}.'
                )
        else:
            verrors.add(
                'kerberos_libdefaults',
                f'{line} is an invalid libdefaults parameter.'
            )

    return verrors
def update_zfs_object_props(self, properties, zfs_object):
    """
    Validate and apply property updates to a ZFS object.

    For each property: an INHERIT source requires the property to
    already exist; otherwise a 'value' or 'parsed' entry must be given,
    and brand-new properties must be user properties (name contains a
    colon). After validation, applies the updates via libzfs.

    Raises:
        ValidationErrors: on any invalid property entry.
        CallError: if libzfs rejects the update.
    """
    verrors = ValidationErrors()
    for name, spec in properties.items():
        # If prop already exists we just update it,
        # otherwise create a user property
        existing = zfs_object.properties.get(name)
        if spec.get('source') == 'INHERIT':
            if not existing:
                verrors.add(
                    f'properties.{name}',
                    'Property does not exist and cannot be inherited'
                )
            continue

        if 'parsed' not in spec and 'value' not in spec:
            verrors.add(
                f'properties.{name}',
                '"value" or "parsed" must be specified when setting a property'
            )
        if not existing and ':' not in name:
            verrors.add(
                f'properties.{name}',
                'User property needs a colon (:) in its name'
            )
    verrors.check()

    try:
        zfs_object.update_properties(properties)
    except libzfs.ZFSException as e:
        raise CallError(f'Failed to update properties: {e!r}')