class AFPService(SystemServiceService):
    """Configuration service for the AFP (netatalk) file-sharing daemon."""

    class Config:
        service = "afp"
        datastore_prefix = "afp_srv_"

    @accepts(
        Dict(
            'afp_update',
            Bool('guest'),
            Str('guest_user'),
            List('bindip', items=[Str('ip', validators=[IpAddress()])]),
            Int('connections_limit', validators=[Range(min=1, max=65535)]),
            Bool('homedir_enable'),
            Dir('homedir'),
            Str('homename'),
            Bool('hometimemachine'),
            Dir('dbpath'),
            Str('global_aux'),
            Str('map_acls', enum=["rights", "mode", "none"]),
            Str('chmod_request', enum=["preserve", "simple", "ignore"]),
        ),
        Bool('dry_run'))
    async def update(self, data, dry_run=False):
        """
        Merge `data` over the stored AFP configuration, validate the result
        and, unless `dry_run` is set, persist it via the service framework.

        Raises ValidationErrors when validation fails; returns the merged
        configuration dict either way.
        """
        current = await self.config()
        updated = {**current, **data}

        verrors = ValidationErrors()
        # "Home directories" requires both flags to agree: an enabled home
        # share needs a directory, and a directory implies the feature is on.
        if updated["homedir_enable"] and not updated["homedir"]:
            verrors.add("afp_update.homedir", "This field is required for \"Home directories\".")
        elif updated["homedir"] and not updated["homedir_enable"]:
            verrors.add("afp_update.homedir_enable", "This field is required for \"Home directories\".")

        # Both configurable paths must live on a pool, not the boot device.
        for field in ("homedir", "dbpath"):
            if updated[field]:
                await check_path_resides_within_volume(
                    verrors, self.middleware, f"afp_update.{field}", updated[field])

        if verrors:
            raise verrors

        if not dry_run:
            await self._update_service(current, updated)

        return updated
class AFPService(SystemServiceService):
    """Configuration service for the AFP (netatalk) file-sharing daemon."""

    class Config:
        service = "afp"
        datastore_prefix = "afp_srv_"

    @accepts(Dict(
        'afp_update',
        Bool('guest'),
        Str('guest_user'),
        List('bindip', items=[Str('ip', validators=[IpAddress()])]),
        Int('connections_limit', validators=[Range(min=1, max=65535)]),
        Dir('dbpath'),
        Str('global_aux'),
        Str('map_acls', enum=["rights", "mode", "none"]),
        Str('chmod_request', enum=["preserve", "simple", "ignore"]),
        update=True
    ))
    async def do_update(self, data):
        """
        Merge `data` over the stored AFP configuration, validate it and
        persist the result. Returns the merged configuration dict.
        """
        current = await self.config()
        merged = {**current, **data}

        verrors = ValidationErrors()
        dbpath = merged["dbpath"]
        if dbpath:
            # The CNID database must live on a pool, not the boot device.
            await check_path_resides_within_volume(
                verrors, self.middleware, "afp_update.dbpath", dbpath)
        if verrors:
            raise verrors

        await self._update_service(current, merged)
        return merged
class TFTPService(SystemServiceService):
    """Configuration service for the TFTP daemon."""

    class Config:
        service = "tftp"
        datastore_prefix = "tftp_"

    @accepts(
        Dict(
            'tftp_update',
            Dir('directory'),
            Bool('newfiles'),
            Int('port', validators=[Range(min=1, max=65535)]),
            Str('username'),
            Str('umask', validators=[Match(r"^[0-7]{3}$")]),
            Str('options'),
        ))
    async def update(self, data):
        """
        Merge `data` over the stored TFTP configuration, validate the served
        directory and persist the result. Returns the merged configuration.
        """
        current = await self.config()
        updated = {**current, **data}

        verrors = ValidationErrors()
        directory = updated["directory"]
        if directory:
            # The served directory must live on a pool, not the boot device.
            await check_path_resides_within_volume(
                verrors, self.middleware, "tftp_update.directory", directory)
        if verrors:
            raise verrors

        await self._update_service(current, updated)
        return updated
def test__schema_dir_null():
    """A Dir field declared with null=True must pass None through unchanged."""
    @accepts(Dir('data', null=True))
    def accept_dir(self, data):
        return data

    fake_self = Mock()
    result = accept_dir(fake_self, None)
    assert result is None
class TFTPService(SystemServiceService):
    """Configuration service for the TFTP daemon."""

    class Config:
        service = "tftp"
        datastore_prefix = "tftp_"
        cli_namespace = "service.tftp"

    ENTRY = Dict(
        'tftp_entry',
        Bool('newfiles', required=True),
        Str('directory', required=True),
        Str('host', validators=[IpAddress()], required=True),
        Int('port', validators=[Port()], required=True),
        Str('options', required=True),
        Str('umask', required=True, validators=[Match(r'^[0-7]{3}$')]),
        Str('username', required=True),
        Int('id', required=True),
    )

    @accepts(
        Patch(
            'tftp_entry', 'tftp_update',
            ('rm', {
                'name': 'id'
            }),
            ('replace', Dir('directory')),
            ('attr', {
                'update': True
            }),
        ))
    async def do_update(self, data):
        """
        Update TFTP Service Configuration.

        `newfiles` when set enables network devices to send files to the system.

        `username` sets the user account which will be used to access `directory`. It should
        be ensured `username` has access to `directory`.
        """
        current = await self.config()
        updated = {**current, **data}

        verrors = ValidationErrors()
        directory = updated["directory"]
        if directory:
            # The served directory must live on a pool, not the boot device.
            await check_path_resides_within_volume(
                verrors, self.middleware, "tftp_update.directory", directory)
        if verrors:
            raise verrors

        await self._update_service(current, updated)
        return await self.config()
class S3Service(SystemServiceService):
    """Configuration service for the S3 (minio) object-storage daemon."""

    class Config:
        service = "s3"
        datastore_prefix = "s3_"
        datastore_extend = "s3.config_extend"

    @private
    async def config_extend(self, s3):
        """Rename the datastore `disks` field to the API-facing `storage_path`
        and drop the internal `mode` field."""
        s3['storage_path'] = s3.pop('disks', None)
        s3.pop('mode', None)
        return s3

    @accepts(Dict(
        's3_update',
        Str('bindip'),
        Int('bindport', validators=[Range(min=1, max=65535)]),
        Str('access_key'),
        Str('secret_key'),
        Bool('browser'),
        Dir('storage_path'),
        Int('certificate'),
        update=True,
    ))
    async def do_update(self, data):
        """
        Merge `data` over the stored S3 configuration, validate credentials and
        storage path, persist the result and (if needed) hand ownership of the
        storage path to the minio user.

        Raises ValidationErrors when validation fails; returns the updated
        configuration as reported by `config()`.
        """
        old = await self.config()
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        # minio imposes length limits on its credentials.
        for attr, minlen, maxlen in (
            ('access_key', 5, 20),
            ('secret_key', 8, 40),
        ):
            curlen = len(new.get(attr, ''))
            if not minlen <= curlen <= maxlen:
                verrors.add(
                    f's3_update.{attr}',
                    f'Attribute should be {minlen} to {maxlen} in length'
                )

        # BUGFIX: previously both a missing field and a non-existent path
        # produced the misleading message "Storage path is required".
        # Distinguish the two failure modes so the user sees the real problem.
        if not new['storage_path']:
            verrors.add('s3_update.storage_path', 'Storage path is required')
        elif not os.path.exists(new['storage_path']):
            verrors.add('s3_update.storage_path', 'Storage path does not exist')

        if verrors:
            raise verrors

        # The datastore stores the path under the legacy `disks` column.
        new['disks'] = new.pop('storage_path')

        await self._update_service(old, new)

        # minio refuses to serve data it does not own; reset the ACL only when
        # ownership is wrong to avoid rewriting permissions on every update.
        if await self.middleware.call('notifier.mp_get_owner', new['disks']) != 'minio':
            await self.middleware.call('notifier.winacl_reset', new['disks'], 'minio', 'minio')

        return await self.config()
class FTPService(SystemServiceService):
    """Configuration service for the FTP (proftpd) daemon."""

    class Config:
        service = "ftp"
        datastore_prefix = "ftp_"
        datastore_extend = "ftp.ftp_extend"
        cli_namespace = "service.ftp"

    @private
    async def ftp_extend(self, data):
        """Flatten the related certificate row to its bare id for API output."""
        if data['ssltls_certificate']:
            data['ssltls_certificate'] = data['ssltls_certificate']['id']
        return data

    @accepts(Dict(
        'ftp_update',
        Int('port', validators=[Range(min=1, max=65535)]),
        Int('clients', validators=[Range(min=1, max=10000)]),
        Int('ipconnections', validators=[Range(min=0, max=1000)]),
        Int('loginattempt', validators=[Range(min=0, max=1000)]),
        Int('timeout', validators=[Range(min=0, max=10000)]),
        Bool('rootlogin'),
        Bool('onlyanonymous'),
        Dir('anonpath', null=True),
        Bool('onlylocal'),
        Str('banner', max_length=None),
        Str('filemask', validators=[Match(r"^[0-7]{3}$")]),
        Str('dirmask', validators=[Match(r"^[0-7]{3}$")]),
        Bool('fxp'),
        Bool('resume'),
        Bool('defaultroot'),
        Bool('ident'),
        Bool('reversedns'),
        Str('masqaddress'),
        Int('passiveportsmin', validators=[Or(Exact(0), Range(min=1024, max=65535))]),
        Int('passiveportsmax', validators=[Or(Exact(0), Range(min=1024, max=65535))]),
        Int('localuserbw', validators=[Range(min=0)]),
        Int('localuserdlbw', validators=[Range(min=0)]),
        Int('anonuserbw', validators=[Range(min=0)]),
        Int('anonuserdlbw', validators=[Range(min=0)]),
        Bool('tls'),
        Str('tls_policy', enum=["on", "off", "data", "!data", "auth", "ctrl",
                                "ctrl+data", "ctrl+!data", "auth+data", "auth+!data"]),
        Bool('tls_opt_allow_client_renegotiations'),
        Bool('tls_opt_allow_dot_login'),
        Bool('tls_opt_allow_per_user'),
        Bool('tls_opt_common_name_required'),
        Bool('tls_opt_enable_diags'),
        Bool('tls_opt_export_cert_data'),
        Bool('tls_opt_no_cert_request'),
        Bool('tls_opt_no_empty_fragments'),
        Bool('tls_opt_no_session_reuse_required'),
        Bool('tls_opt_stdenvvars'),
        Bool('tls_opt_dns_name_required'),
        Bool('tls_opt_ip_address_required'),
        Int('ssltls_certificate', null=True),
        Str('options', max_length=None),
        update=True
    ))
    async def do_update(self, data):
        """
        Update ftp service configuration.

        `clients` is an integer value which sets the maximum number of simultaneous clients allowed.
        It defaults to 32.

        `ipconnections` is an integer value which shows the maximum number of connections per IP address.
        It defaults to 0 which equals to unlimited.

        `timeout` is the maximum client idle time in seconds before client is disconnected.

        `rootlogin` is a boolean value which when configured to true enables login as root. This is
        generally discouraged because of the security risks.

        `onlyanonymous` allows anonymous FTP logins with access to the directory specified by `anonpath`.

        `banner` is a message displayed to local login users after they successfully authenticate. It is
        not displayed to anonymous login users.

        `filemask` sets the default permissions for newly created files which by default are 077.

        `dirmask` sets the default permissions for newly created directories which by default are 077.

        `resume` if set allows FTP clients to resume interrupted transfers.

        `fxp` if set to true indicates that File eXchange Protocol is enabled. Generally it is discouraged
        as it makes the server vulnerable to FTP bounce attacks.

        `defaultroot` when set ensures that for local users, home directory access is only granted if the
        user is a member of group wheel.

        `ident` is a boolean value which when set to true indicates that IDENT authentication is required.
        If identd is not running on the client, this can result in timeouts.

        `masqaddress` is the public IP address or hostname which is set if FTP clients cannot connect
        through a NAT device.

        `localuserbw` is a positive integer value which indicates maximum upload bandwidth in KB/s for
        local user. Default of zero indicates unlimited upload bandwidth ( from the FTP server
        configuration ).

        `localuserdlbw` is a positive integer value which indicates maximum download bandwidth in KB/s
        for local user. Default of zero indicates unlimited download bandwidth ( from the FTP server
        configuration ).

        `anonuserbw` is a positive integer value which indicates maximum upload bandwidth in KB/s for
        anonymous user. Default of zero indicates unlimited upload bandwidth ( from the FTP server
        configuration ).

        `anonuserdlbw` is a positive integer value which indicates maximum download bandwidth in KB/s
        for anonymous user. Default of zero indicates unlimited download bandwidth ( from the FTP server
        configuration ).

        `tls` is a boolean value which when set indicates that encrypted connections are enabled. This
        requires a certificate to be configured first with the certificate service and the id of
        certificate is passed on in `ssltls_certificate`.

        `tls_policy` defines whether the control channel, data channel, both channels, or neither
        channel of an FTP session must occur over SSL/TLS.

        `tls_opt_enable_diags` is a boolean value when set, logs verbosely. This is helpful when
        troubleshooting a connection.

        `options` is a string used to add proftpd(8) parameters not covered by ftp service.
        """
        old = await self.config()
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        # Passive port bounds must be supplied together (0/0 means "unset").
        # BUGFIX: the schema path previously lacked the "ftp_update." prefix,
        # inconsistent with every other validation error in this method.
        if not ((new["passiveportsmin"] == 0) == (new["passiveportsmax"] == 0)):
            verrors.add("ftp_update.passiveportsmin",
                        "passiveportsmin and passiveportsmax should be both zero or non-zero")
        if not ((new["passiveportsmin"] == 0 and new["passiveportsmax"] == 0) or
                (new["passiveportsmax"] > new["passiveportsmin"])):
            verrors.add("ftp_update.passiveportsmax",
                        "When specified, should be greater than passiveportsmin")

        if new["onlyanonymous"]:
            if not new["anonpath"]:
                verrors.add("ftp_update.anonpath", "This field is required for anonymous login")
            else:
                await check_path_resides_within_volume(verrors, self.middleware, "ftp_update.anonpath",
                                                       new["anonpath"])

        if new["tls"]:
            if not new["ssltls_certificate"]:
                verrors.add(
                    "ftp_update.ssltls_certificate",
                    "Please provide a valid certificate id when TLS is enabled"
                )
            else:
                verrors.extend((await self.middleware.call(
                    "certificate.cert_services_validation", new["ssltls_certificate"],
                    "ftp_update.ssltls_certificate", False
                )))

        if new["masqaddress"]:
            await resolve_hostname(self.middleware, verrors, "ftp_update.masqaddress", new["masqaddress"])

        if verrors:
            raise verrors

        await self._update_service(old, new)

        # proftpd's TLS support needs the ssl service; start it the first time
        # TLS is switched on.
        if not old['tls'] and new['tls']:
            await self.middleware.call('service.start', 'ssl')

        return new
class SharingNFSService(CRUDService):
    """CRUD service for NFS shares, each backed by one or more exported paths."""

    class Config:
        namespace = "sharing.nfs"
        datastore = "sharing.nfs_share"
        datastore_prefix = "nfs_"
        datastore_extend = "sharing.nfs.extend"

    @accepts(
        Dict(
            "sharingnfs_create",
            List("paths", items=[Dir("path")]),
            Str("comment"),
            List("networks", items=[IPAddr("network", cidr=True)]),
            List("hosts", items=[Str("host")]),
            Bool("alldirs"),
            Bool("ro"),
            Bool("quiet"),
            Str("maproot_user", required=False, default=None),
            Str("maproot_group", required=False, default=None),
            Str("mapall_user", required=False, default=None),
            Str("mapall_group", required=False, default=None),
            List("security", items=[
                Str("provider", enum=["SYS", "KRB5", "KRB5I", "KRB5P"])
            ]),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create an NFS share.

        Validates `data`, inserts the share row, then inserts one
        `sharing.nfs_share_path` row per entry in `paths`, and reloads the
        nfs service. Returns the created share (re-extended).

        Raises:
            ValidationErrors: if `validate` reported any problem.
        """
        verrors = ValidationErrors()
        await self.validate(data, "sharingnfs_create", verrors)
        if verrors:
            raise verrors
        # compress() converts API-shaped fields into datastore shape in place.
        await self.compress(data)
        # Paths live in a separate table, so they are removed before insert.
        paths = data.pop("paths")
        data["id"] = await self.middleware.call(
            "datastore.insert", self._config.datastore, data,
            {"prefix": self._config.datastore_prefix},
        )
        for path in paths:
            await self.middleware.call(
                "datastore.insert", "sharing.nfs_share_path",
                {
                    "share_id": data["id"],
                    "path": path,
                },
            )
        # extend() re-reads the paths from the datastore and restores API shape.
        await self.extend(data)
        await self.middleware.call("service.reload", "nfs")
        return data

    @accepts(Int("id"), Patch("sharingnfs_create", "sharingnfs_update", ("attr", {
        "update": True
    })))
    async def do_update(self, id, data):
        """
        Update the NFS share `id` with the fields in `data`.

        Merges `data` over the stored row, validates, rewrites the share row
        and fully replaces its path rows, then reloads the nfs service.
        Returns the updated share.

        Raises:
            ValidationErrors: if `validate` reported any problem.
        """
        verrors = ValidationErrors()
        old = await self.middleware.call(
            "datastore.query", self._config.datastore, [("id", "=", id)],
            {
                "extend": self._config.datastore_extend,
                "prefix": self._config.datastore_prefix,
                "get": True
            },
        )
        new = old.copy()
        new.update(data)
        await self.validate(new, "sharingnfs_update", verrors, old=old)
        if verrors:
            raise verrors
        await self.compress(new)
        paths = new.pop("paths")
        await self.middleware.call("datastore.update", self._config.datastore, id, new,
                                   {"prefix": self._config.datastore_prefix})
        # Replace the path rows wholesale: delete all, then re-insert.
        await self.middleware.call("datastore.delete", "sharing.nfs_share_path",
                                   [["share_id", "=", id]])
        for path in paths:
            await self.middleware.call(
                "datastore.insert", "sharing.nfs_share_path",
                {
                    "share_id": id,
                    "path": path,
                },
            )
        await self.extend(new)
        new["paths"] = paths
        await self.middleware.call("service.reload", "nfs")
        return new

    @accepts(Int("id"))
    async def do_delete(self, id):
        """Delete NFS share `id` along with all of its path rows."""
        await self.middleware.call("datastore.delete", "sharing.nfs_share_path",
                                   [["share_id", "=", id]])
        await self.middleware.call("datastore.delete", self._config.datastore, id)

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        """
        Accumulate validation errors for a create/update payload into `verrors`.

        `old` is the pre-update row (if any); its id is excluded when querying
        other shares so a share does not conflict with itself.
        """
        if not data["paths"]:
            verrors.add(f"{schema_name}.paths", "At least one path is required")
        # validate_paths does blocking os.stat calls, hence run_in_thread.
        await self.middleware.run_in_thread(self.validate_paths, data, schema_name, verrors)
        filters = []
        if old:
            filters.append(["id", "!=", old["id"]])
        other_shares = await self.middleware.call("sharing.nfs.query", filters)
        # Resolve every hostname (ours and every other share's) once, up front.
        dns_cache = await self.resolve_hostnames(
            sum([share["hosts"] for share in other_shares], []) + data["hosts"])
        await self.middleware.run_in_thread(self.validate_hosts_and_networks, other_shares,
                                            data, schema_name, verrors, dns_cache)
        for k in ["maproot", "mapall"]:
            if not data[f"{k}_user"] and not data[f"{k}_group"]:
                # Neither set: nothing to validate for this mapping.
                pass
            elif not data[f"{k}_user"] and data[f"{k}_group"]:
                verrors.add(f"{schema_name}.{k}_user", "This field is required when map group is specified")
            else:
                user = await self.middleware.call("user.query", [("username", "=", data[f"{k}_user"])])
                if not user:
                    verrors.add(f"{schema_name}.{k}_user", "User not found")
                if data[f"{k}_group"]:
                    group = await self.middleware.call("group.query", [("group", "=", data[f"{k}_group"])])
                    if not group:
                        verrors.add(f"{schema_name}.{k}_group", "Group not found")
        if data["maproot_user"] and data["mapall_user"]:
            verrors.add(f"{schema_name}.mapall_user", "maproot_user disqualifies mapall_user")
        if data["security"]:
            nfs_config = await self.middleware.call("nfs.config")
            if not nfs_config["v4"]:
                verrors.add(f"{schema_name}.security", "This is not allowed when NFS v4 is disabled")

    @private
    def validate_paths(self, data, schema_name, verrors):
        """
        Check that all paths share one filesystem and that a mountpoint is not
        shared together with its own subdirectories. Blocking (os.stat).
        """
        dev = None           # st_dev of the first path; all others must match
        is_mountpoint = False
        for i, path in enumerate(data["paths"]):
            stat = os.stat(path)
            if dev is None:
                dev = stat.st_dev
            else:
                if dev != stat.st_dev:
                    verrors.add(
                        f"{schema_name}.paths.{i}",
                        "Paths for a NFS share must reside within the same filesystem"
                    )
            # If the parent directory is on a different device, `path` is a
            # mountpoint (dataset root).
            parent = os.path.abspath(os.path.join(path, ".."))
            if os.stat(parent).st_dev != dev:
                is_mountpoint = True
                if any(os.path.abspath(p).startswith(parent + "/")
                       for p in data["paths"] if p != path):
                    verrors.add(
                        f"{schema_name}.paths.{i}",
                        "You cannot share a mount point and subdirectories all at once"
                    )
        if not is_mountpoint and data["alldirs"]:
            verrors.add(f"{schema_name}.alldirs", "This option can only be used for datasets")

    @private
    async def resolve_hostnames(self, hostnames):
        """
        Resolve each unique hostname to its first address, concurrently
        (at most 8 in flight, 5 s timeout each). Unresolvable names map to
        None and are logged, not raised.

        Returns a dict: hostname -> address or None.
        """
        hostnames = list(set(hostnames))

        async def resolve(hostname):
            try:
                return (await asyncio.wait_for(
                    self.middleware.run_in_thread(socket.getaddrinfo, hostname, None), 5))[0][4][0]
            except Exception as e:
                self.logger.warning("Unable to resolve host %r: %r", hostname, e)
                return None

        resolved_hostnames = await asyncio_map(resolve, hostnames, 8)
        return dict(zip(hostnames, resolved_hostnames))

    @private
    def validate_hosts_and_networks(self, other_shares, data, schema_name, verrors, dns_cache):
        """
        Reject hosts/networks that overlap with another share exporting the
        same filesystem. Blocking (os.stat). `dns_cache` maps hostname ->
        resolved address or None (from resolve_hostnames).
        """
        explanation = (
            ". This is so because /etc/exports does not act like ACL and it is undefined which rule among "
            "all overlapping networks will be applied."
        )

        dev = os.stat(data["paths"][0]).st_dev

        used_networks = set()
        for share in other_shares:
            try:
                share_dev = os.stat(share["paths"][0]).st_dev
            except Exception:
                self.logger.warning("Failed to stat first path for %r", share, exc_info=True)
                continue

            # Only shares on the same filesystem can conflict.
            if share_dev == dev:
                for host in share["hosts"]:
                    host = dns_cache[host]
                    if host is None:
                        continue
                    try:
                        network = ipaddress.ip_network(host)
                    except Exception:
                        self.logger.warning("Got invalid host %r", host)
                        continue
                    else:
                        used_networks.add(network)

                for network in share["networks"]:
                    try:
                        network = ipaddress.ip_network(network, strict=False)
                    except Exception:
                        self.logger.warning("Got invalid network %r", network)
                        continue
                    else:
                        used_networks.add(network)

                # A share with neither hosts nor networks exports to everyone.
                if not share["hosts"] and not share["networks"]:
                    used_networks.add(ipaddress.ip_network("0.0.0.0/0"))
                    used_networks.add(ipaddress.ip_network("::/0"))

                if share["alldirs"] and data["alldirs"]:
                    verrors.add(f"{schema_name}.alldirs",
                                "This option is only available once per mountpoint")

        # `had_explanation` appends the long rationale only to the first error.
        had_explanation = False
        for i, host in enumerate(data["hosts"]):
            host = dns_cache[host]
            if host is None:
                continue

            network = ipaddress.ip_network(host)
            for another_network in used_networks:
                if network.overlaps(another_network):
                    verrors.add(f"{schema_name}.hosts.{i}", (
                        f"You can't share same filesystem with overlapping networks {network} and {another_network}" +
                        ("" if had_explanation else explanation)))
                    had_explanation = True

            used_networks.add(network)

        had_explanation = False
        for i, network in enumerate(data["networks"]):
            network = ipaddress.ip_network(network, strict=False)

            for another_network in used_networks:
                if network.overlaps(another_network):
                    verrors.add(f"{schema_name}.networks.{i}", (
                        f"You can't share same filesystem with overlapping networks {network} and {another_network}" +
                        ("" if had_explanation else explanation)))
                    had_explanation = True

            used_networks.add(network)

        # Empty hosts+networks means "export to all" and clashes with anything.
        if not data["hosts"] and not data["networks"]:
            if used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    (f"You can't share same filesystem with all hosts twice" +
                     ("" if had_explanation else explanation)))

    @private
    async def extend(self, data):
        """Convert a datastore row into API shape: load `paths` from the path
        table, split space-separated network/host strings, upper-case security."""
        data["paths"] = [
            path["path"]
            for path in await self.middleware.call("datastore.query", "sharing.nfs_share_path",
                                                   [["share_id", "=", data["id"]]])
        ]
        data["networks"] = data.pop("network").split()
        data["hosts"] = data["hosts"].split()
        data["security"] = [s.upper() for s in data["security"]]
        return data

    @private
    async def compress(self, data):
        """Inverse of extend(): join lists back into the datastore's
        space-separated strings and lower-case security providers."""
        data["network"] = " ".join(data.pop("networks"))
        data["hosts"] = " ".join(data["hosts"])
        data["security"] = [s.lower() for s in data["security"]]
        return data
class SharingNFSService(SharingService):
    """Sharing service for NFS shares; a share exports one or more paths."""

    path_field = 'paths'
    share_task_type = 'NFS'

    class Config:
        namespace = "sharing.nfs"
        datastore = "sharing.nfs_share"
        datastore_prefix = "nfs_"
        datastore_extend = "sharing.nfs.extend"
        cli_namespace = "sharing.nfs"

    async def human_identifier(self, share_task):
        """Return a human-readable label for a share: its paths, comma-joined."""
        return ', '.join(share_task[self.path_field])

    @private
    async def sharing_task_determine_locked(self, data, locked_datasets):
        """Return True if any of the share's paths lies in a locked dataset."""
        for path in data[self.path_field]:
            if await self.middleware.call(
                'pool.dataset.path_in_locked_datasets', path, locked_datasets
            ):
                return True
        else:
            # for/else: no path matched a locked dataset.
            return False

    @accepts(
        Dict(
            "sharingnfs_create",
            List("paths", items=[Dir("path")], empty=False),
            List("aliases", items=[Str("path", validators=[Match(r"^/.*")])]),
            Str("comment", default=""),
            List("networks", items=[IPAddr("network", network=True)]),
            List("hosts", items=[Str("host")]),
            Bool("alldirs", default=False),
            Bool("ro", default=False),
            Bool("quiet", default=False),
            Str("maproot_user", required=False, default=None, null=True),
            Str("maproot_group", required=False, default=None, null=True),
            Str("mapall_user", required=False, default=None, null=True),
            Str("mapall_group", required=False, default=None, null=True),
            List(
                "security",
                items=[
                    Str("provider", enum=["SYS", "KRB5", "KRB5I", "KRB5P"])
                ],
            ),
            Bool("enabled", default=True),
            register=True,
            strict=True,
        ))
    async def do_create(self, data):
        """
        Create a NFS Share.

        `paths` is a list of valid paths which are configured to be shared on this share.

        `aliases` is a list of aliases for each path (or an empty list if aliases are not used).

        `networks` is a list of authorized networks that are allowed to access the share having format
        "network/mask" CIDR notation. If empty, all networks are allowed.

        `hosts` is a list of IP's/hostnames which are allowed to access the share. If empty, all
        IP's/hostnames are allowed.

        `alldirs` is a boolean value which when set indicates that the client can mount any
        subdirectories of the selected pool or dataset.
        """
        verrors = ValidationErrors()

        await self.validate(data, "sharingnfs_create", verrors)

        if verrors:
            raise verrors

        # compress() converts API-shaped fields into datastore shape in place.
        await self.compress(data)
        data["id"] = await self.middleware.call(
            "datastore.insert", self._config.datastore, data,
            {"prefix": self._config.datastore_prefix},
        )
        await self.extend(data)

        await self._service_change("nfs", "reload")

        return await self.get_instance(data["id"])

    @accepts(Int("id"), Patch("sharingnfs_create", "sharingnfs_update", ("attr", {
        "update": True
    })))
    async def do_update(self, id, data):
        """
        Update NFS Share of `id`.
        """
        verrors = ValidationErrors()
        old = await self.get_instance(id)

        new = old.copy()
        new.update(data)

        await self.validate(new, "sharingnfs_update", verrors, old=old)

        if verrors:
            raise verrors

        await self.compress(new)
        await self.middleware.call("datastore.update", self._config.datastore, id, new,
                                   {"prefix": self._config.datastore_prefix})

        await self._service_change("nfs", "reload")

        return await self.get_instance(id)

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete NFS Share of `id`.
        """
        await self.middleware.call("datastore.delete", self._config.datastore, id)
        await self._service_change("nfs", "reload")

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        """
        Accumulate validation errors for a create/update payload into `verrors`.

        `old` is the pre-update row (if any); its id is excluded when querying
        other shares so a share does not conflict with itself.
        """
        if len(data["aliases"]):
            # Aliases are a Ganesha (SCALE/Linux) feature only.
            if not osc.IS_LINUX:
                verrors.add(
                    f"{schema_name}.aliases",
                    "This field is only supported on SCALE",
                )

            if len(data["aliases"]) != len(data["paths"]):
                verrors.add(
                    f"{schema_name}.aliases",
                    "This field should be either empty of have the same number of elements as paths",
                )

        if data["alldirs"] and len(data["paths"]) > 1:
            verrors.add(f"{schema_name}.alldirs",
                        "This option can only be used for shares that contain single path")

        # if any of the `paths` that were passed to us by user are within the gluster volume
        # mountpoint then we need to pass the `gluster_bypass` kwarg so that we don't raise a
        # validation error complaining about using a gluster path within the zpool mountpoint
        bypass = any('.glusterfs' in i for i in data["paths"] + data["aliases"])

        # need to make sure that the nfs share is within the zpool mountpoint
        for idx, i in enumerate(data["paths"]):
            await check_path_resides_within_volume(
                verrors, self.middleware, f'{schema_name}.paths.{idx}', i,
                gluster_bypass=bypass)

        # validate_paths does blocking os.stat calls, hence run_in_thread.
        await self.middleware.run_in_thread(self.validate_paths, data, schema_name, verrors)

        filters = []
        if old:
            filters.append(["id", "!=", old["id"]])
        other_shares = await self.middleware.call("sharing.nfs.query", filters)
        # Resolve every hostname (ours and every other share's) once, up front.
        dns_cache = await self.resolve_hostnames(
            sum([share["hosts"] for share in other_shares], []) + data["hosts"])
        await self.middleware.run_in_thread(self.validate_hosts_and_networks, other_shares,
                                            data, schema_name, verrors, dns_cache)

        for k in ["maproot", "mapall"]:
            if not data[f"{k}_user"] and not data[f"{k}_group"]:
                # Neither set: nothing to validate for this mapping.
                pass
            elif not data[f"{k}_user"] and data[f"{k}_group"]:
                verrors.add(f"{schema_name}.{k}_user",
                            "This field is required when map group is specified")
            else:
                user = group = None
                with contextlib.suppress(KeyError):
                    # KeyError means the user does not exist in the cache.
                    user = await self.middleware.call('dscache.get_uncached_user', data[f'{k}_user'])

                if not user:
                    verrors.add(f"{schema_name}.{k}_user", "User not found")

                if data[f'{k}_group']:
                    with contextlib.suppress(KeyError):
                        group = await self.middleware.call('dscache.get_uncached_group', data[f'{k}_group'])

                    if not group:
                        verrors.add(f"{schema_name}.{k}_group", "Group not found")

        if data["maproot_user"] and data["mapall_user"]:
            verrors.add(f"{schema_name}.mapall_user", "maproot_user disqualifies mapall_user")

        if data["security"]:
            nfs_config = await self.middleware.call("nfs.config")
            if not nfs_config["v4"]:
                verrors.add(f"{schema_name}.security", "This is not allowed when NFS v4 is disabled")

    @private
    def validate_paths(self, data, schema_name, verrors):
        """
        On FreeBSD, check that all paths share one filesystem. Blocking
        (os.stat); no-op on Linux.
        """
        if osc.IS_LINUX:
            # Ganesha does not have such a restriction, each path is a different share
            return

        dev = None  # st_dev of the first path; all others must match
        for i, path in enumerate(data["paths"]):
            stat = os.stat(path)
            if dev is None:
                dev = stat.st_dev
            else:
                if dev != stat.st_dev:
                    verrors.add(
                        f'{schema_name}.paths.{i}',
                        'Paths for a NFS share must reside within the same filesystem'
                    )

    @private
    async def resolve_hostnames(self, hostnames):
        """
        Resolve each unique hostname to its first address, concurrently
        (at most 8 in flight, 5 s timeout each). Unresolvable names map to
        None and are logged, not raised.

        Returns a dict: hostname -> address or None.
        """
        hostnames = list(set(hostnames))

        async def resolve(hostname):
            try:
                return (await asyncio.wait_for(
                    self.middleware.run_in_thread(socket.getaddrinfo, hostname, None), 5))[0][4][0]
            except Exception as e:
                self.logger.warning("Unable to resolve host %r: %r", hostname, e)
                return None

        resolved_hostnames = await asyncio_map(resolve, hostnames, 8)
        return dict(zip(hostnames, resolved_hostnames))

    @private
    def validate_hosts_and_networks(self, other_shares, data, schema_name, verrors, dns_cache):
        """
        Reject hosts/networks already exported by another share on the same
        filesystem. Blocking (os.stat). `dns_cache` maps hostname -> resolved
        address or None (from resolve_hostnames).
        """
        dev = os.stat(data["paths"][0]).st_dev

        used_networks = set()
        for share in other_shares:
            try:
                share_dev = os.stat(share["paths"][0]).st_dev
            except Exception:
                self.logger.warning("Failed to stat first path for %r", share, exc_info=True)
                continue

            # Only shares on the same filesystem can conflict.
            if share_dev == dev:
                for host in share["hosts"]:
                    host = dns_cache[host]
                    if host is None:
                        continue
                    try:
                        network = ipaddress.ip_network(host)
                    except Exception:
                        self.logger.warning("Got invalid host %r", host)
                        continue
                    else:
                        used_networks.add(network)

                for network in share["networks"]:
                    try:
                        network = ipaddress.ip_network(network, strict=False)
                    except Exception:
                        self.logger.warning("Got invalid network %r", network)
                        continue
                    else:
                        used_networks.add(network)

                # A share with neither hosts nor networks exports to everyone.
                if not share["hosts"] and not share["networks"]:
                    used_networks.add(ipaddress.ip_network("0.0.0.0/0"))
                    used_networks.add(ipaddress.ip_network("::/0"))

        for host in set(data["hosts"]):
            host = dns_cache[host]
            if host is None:
                continue

            network = ipaddress.ip_network(host)
            if network in used_networks:
                verrors.add(
                    f"{schema_name}.hosts",
                    f"Another NFS share already exports this dataset for {host}"
                )

            used_networks.add(network)

        for network in set(data["networks"]):
            network = ipaddress.ip_network(network, strict=False)

            if network in used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    f"Another NFS share already exports this dataset for {network}"
                )

            used_networks.add(network)

        # Empty hosts+networks means "export to all" and clashes with anything.
        if not data["hosts"] and not data["networks"]:
            if used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    "Another NFS share already exports this dataset for some network"
                )

    @private
    async def extend(self, data):
        """Convert a datastore row into API shape: split space-separated
        network/host strings, upper-case security providers."""
        data["networks"] = data.pop("network").split()
        data["hosts"] = data["hosts"].split()
        data["security"] = [s.upper() for s in data["security"]]
        return data

    @private
    async def compress(self, data):
        """Inverse of extend(): join lists back into the datastore's
        space-separated strings, lower-case security, drop the computed
        locked field before persisting."""
        data["network"] = " ".join(data.pop("networks"))
        data["hosts"] = " ".join(data["hosts"])
        data["security"] = [s.lower() for s in data["security"]]
        data.pop(self.locked_field, None)
        return data
class AFPService(SystemServiceService):
    """Configuration service for the AFP (netatalk) file-sharing daemon."""

    class Config:
        service = 'afp'
        datastore_extend = 'afp.extend'
        datastore_prefix = 'afp_srv_'

    @private
    async def extend(self, afp):
        """Present the enum-style fields upper-cased, as the API expects."""
        afp['map_acls'] = afp['map_acls'].upper()
        afp['chmod_request'] = afp['chmod_request'].upper()
        return afp

    @private
    async def compress(self, afp):
        """Inverse of extend(): lower-case the enum-style fields (when set)
        back into datastore shape."""
        for key in ('map_acls', 'chmod_request'):
            if afp.get(key):
                afp[key] = afp[key].lower()
        return afp

    @accepts(Dict(
        'afp_update',
        Bool('guest'),
        Str('guest_user'),
        List('bindip', items=[Str('ip', validators=[IpAddress()])]),
        Int('connections_limit', validators=[Range(min=1, max=65535)]),
        Dir('dbpath'),
        Str('global_aux', max_length=None),
        Str('map_acls', enum=['RIGHTS', 'MODE', 'NONE']),
        Str('chmod_request', enum=['PRESERVE', 'SIMPLE', 'IGNORE']),
        Str('loglevel', enum=[x.name for x in AFPLogLevel]),
        update=True
    ))
    async def do_update(self, data):
        """
        Update AFP service settings.

        `bindip` is a list of IPs to bind AFP to. Leave blank (empty list) to bind to all
        available IPs.

        `map_acls` defines how to map the effective permissions of authenticated users.
        RIGHTS - Unix-style permissions
        MODE - ACLs
        NONE - Do not map

        `chmod_request` defines advanced permission control that deals with ACLs.
        PRESERVE - Preserve ZFS ACEs for named users and groups or POSIX ACL group mask
        SIMPLE - Change permission as requested without any extra steps
        IGNORE - Permission change requests are ignored
        """
        previous = await self.config()
        settings = {**previous, **data}

        verrors = ValidationErrors()
        if settings['dbpath']:
            # The CNID database must live on a pool, not the boot device.
            await check_path_resides_within_volume(
                verrors, self.middleware, 'afp_update.dbpath', settings['dbpath'],
            )
        verrors.check()

        await self._update_service(previous, await self.compress(settings))
        return await self.config()

    @accepts()
    async def bindip_choices(self):
        """
        List of valid choices for IP addresses to which to bind the AFP service.
        """
        choices = {}
        for iface in await self.middleware.call('interface.ip_in_use'):
            choices[iface['address']] = iface['address']
        return choices
class FTPService(SystemServiceService):
    """Configuration service for the FTP (proftpd) daemon."""

    class Config:
        service = "ftp"
        datastore_prefix = "ftp_"
        datastore_extend = "ftp.ftp_extend"

    @private
    async def ftp_extend(self, data):
        # The datastore returns the related certificate as a full row; the API
        # exposes only its id.
        if data['ssltls_certificate']:
            data['ssltls_certificate'] = data['ssltls_certificate']['id']
        return data

    @accepts(
        Dict(
            'ftp_update',
            Int('port', validators=[Range(min=1, max=65535)]),
            Int('clients', validators=[Range(min=1, max=10000)]),
            Int('ipconnections', validators=[Range(min=0, max=1000)]),
            Int('loginattempt', validators=[Range(min=0, max=1000)]),
            Int('timeout', validators=[Range(min=0, max=10000)]),
            Bool('rootlogin'),
            Bool('onlyanonymous'),
            Dir('anonpath', null=True),
            Bool('onlylocal'),
            Str('banner'),
            Str('filemask', validators=[Match(r"^[0-7]{3}$")]),
            Str('dirmask', validators=[Match(r"^[0-7]{3}$")]),
            Bool('fxp'),
            Bool('resume'),
            Bool('defaultroot'),
            Bool('ident'),
            Bool('reversedns'),
            Str('masqaddress'),
            Int('passiveportsmin', validators=[Or(Exact(0), Range(min=1024, max=65535))]),
            Int('passiveportsmax', validators=[Or(Exact(0), Range(min=1024, max=65535))]),
            Int('localuserbw', validators=[Range(min=0)]),
            Int('localuserdlbw', validators=[Range(min=0)]),
            Int('anonuserbw', validators=[Range(min=0)]),
            Int('anonuserdlbw', validators=[Range(min=0)]),
            Bool('tls'),
            Str('tls_policy', enum=["on", "off", "data", "!data", "auth", "ctrl",
                                    "ctrl+data", "ctrl+!data", "auth+data", "auth+!data"]),
            Bool('tls_opt_allow_client_renegotiations'),
            Bool('tls_opt_allow_dot_login'),
            Bool('tls_opt_allow_per_user'),
            Bool('tls_opt_common_name_required'),
            Bool('tls_opt_enable_diags'),
            Bool('tls_opt_export_cert_data'),
            Bool('tls_opt_no_cert_request'),
            Bool('tls_opt_no_empty_fragments'),
            Bool('tls_opt_no_session_reuse_required'),
            Bool('tls_opt_stdenvvars'),
            Bool('tls_opt_dns_name_required'),
            Bool('tls_opt_ip_address_required'),
            Int('ssltls_certificate', null=True),
            Str('options'),
            update=True
        )
    )
    async def do_update(self, data):
        """
        Update FTP service settings.

        Validates the passive port range, anonymous-login path, TLS certificate
        and masquerade address before persisting, then restarts/reloads the
        service via `_update_service`.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        # Passive ports must be specified as a pair: either both 0 (disabled)
        # or both set, with max strictly above min.
        # FIX: error attribute was bare "passiveportsmin"; every other error in
        # this method is prefixed with the schema name so the UI can map it to
        # the right field.
        if not ((new["passiveportsmin"] == 0) == (new["passiveportsmax"] == 0)):
            verrors.add(
                "ftp_update.passiveportsmin",
                "passiveportsmin and passiveportsmax should be both zero or non-zero"
            )
        if not ((new["passiveportsmin"] == 0 and new["passiveportsmax"] == 0) or
                (new["passiveportsmax"] > new["passiveportsmin"])):
            verrors.add(
                "ftp_update.passiveportsmax",
                "When specified, should be greater than passiveportsmin",
            )

        if new["onlyanonymous"] and not new["anonpath"]:
            verrors.add("ftp_update.anonpath", "This field is required for anonymous login")

        if new["anonpath"]:
            # Anonymous root must live on a pool.
            await check_path_resides_within_volume(verrors, self.middleware, "ftp_update.anonpath", new["anonpath"])

        if new["tls"]:
            if not new["ssltls_certificate"]:
                verrors.add(
                    "ftp_update.ssltls_certificate",
                    "Please provide a valid certificate id when TLS is enabled"
                )
            else:
                verrors.extend((await self.middleware.call(
                    "certificate.cert_services_validation", new["ssltls_certificate"],
                    "ftp_update.ssltls_certificate", False
                )))

        if new["masqaddress"]:
            await resolve_hostname(self.middleware, verrors, "ftp_update.masqaddress", new["masqaddress"])

        if verrors:
            raise verrors

        await self._update_service(old, new)

        if not old['tls'] and new['tls']:
            # TLS was just switched on; the ssl service must be running for it.
            await self.middleware.call('service.start', 'ssl')

        return new
class SharingNFSService(CRUDService):
    """CRUD service for NFS shares (the `sharing.nfs.*` API namespace).

    Share rows live in the `sharing.nfs_share` datastore table; the exported
    paths are stored separately in `sharing.nfs_share_path` (one row per path)
    and are joined back in by `extend`.
    """

    class Config:
        namespace = "sharing.nfs"
        datastore = "sharing.nfs_share"
        datastore_prefix = "nfs_"
        datastore_extend = "sharing.nfs.extend"

    @accepts(Dict(
        "sharingnfs_create",
        List("paths", items=[Dir("path")], empty=False),
        Str("comment", default=""),
        List("networks", items=[IPAddr("network", network=True)], default=[]),
        List("hosts", items=[Str("host")], default=[]),
        Bool("alldirs", default=False),
        Bool("ro", default=False),
        Bool("quiet", default=False),
        Str("maproot_user", required=False, default=None, null=True),
        Str("maproot_group", required=False, default=None, null=True),
        Str("mapall_user", required=False, default=None, null=True),
        Str("mapall_group", required=False, default=None, null=True),
        List(
            "security",
            default=[],
            items=[Str("provider", enum=["SYS", "KRB5", "KRB5I", "KRB5P"])],
        ),
        Bool("enabled", default=True),
        register=True,
    ))
    async def do_create(self, data):
        """
        Create a NFS Share.

        `paths` is a list of valid paths which are configured to be shared on this share.

        `networks` is a list of authorized networks that are allowed to access the share having format
        "network/mask" CIDR notation. If empty, all networks are allowed.

        `hosts` is a list of IP's/hostnames which are allowed to access the share. If empty, all IP's/hostnames are
        allowed.

        `alldirs` is a boolean value which when set indicates that the client can mount any subdirectories of the
        selected pool or dataset.
        """
        verrors = ValidationErrors()

        await self.validate(data, "sharingnfs_create", verrors)

        if verrors:
            raise verrors

        await self.compress(data)
        # Paths live in their own table; strip them from the row before insert.
        paths = data.pop("paths")
        data["id"] = await self.middleware.call(
            "datastore.insert", self._config.datastore, data,
            {
                "prefix": self._config.datastore_prefix
            },
        )
        for path in paths:
            await self.middleware.call(
                "datastore.insert", "sharing.nfs_share_path",
                {
                    "share_id": data["id"],
                    "path": path,
                },
            )
        # Re-expand the row (adds paths back, splits networks, etc.).
        await self.extend(data)

        await self._service_change("nfs", "reload")

        return data

    @accepts(
        Int("id"),
        Patch(
            "sharingnfs_create",
            "sharingnfs_update",
            ("attr", {"update": True})
        )
    )
    async def do_update(self, id, data):
        """
        Update NFS Share of `id`.
        """
        verrors = ValidationErrors()
        old = await self._get_instance(id)

        new = old.copy()
        new.update(data)

        # Validate the fully-merged share, excluding itself from duplicate checks.
        await self.validate(new, "sharingnfs_update", verrors, old=old)

        if verrors:
            raise verrors

        await self.compress(new)
        paths = new.pop("paths")
        await self.middleware.call(
            "datastore.update", self._config.datastore, id, new,
            {
                "prefix": self._config.datastore_prefix
            }
        )
        # Path rows are replaced wholesale: delete all and re-insert.
        await self.middleware.call("datastore.delete", "sharing.nfs_share_path", [["share_id", "=", id]])
        for path in paths:
            await self.middleware.call(
                "datastore.insert", "sharing.nfs_share_path",
                {
                    "share_id": id,
                    "path": path,
                },
            )
        await self.extend(new)
        new["paths"] = paths

        await self._service_change("nfs", "reload")

        return new

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete NFS Share of `id`.
        """
        # NOTE(review): rows in sharing.nfs_share_path are not deleted here —
        # confirm the datastore cascades the delete for the share's paths.
        await self.middleware.call("datastore.delete", self._config.datastore, id)
        await self._service_change("nfs", "reload")

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        """Accumulate all validation errors for a (merged) share into `verrors`.

        `old` is the pre-update share when validating an update; it is used to
        exclude the share itself from the duplicate-export checks.
        """
        if data["alldirs"] and len(data["paths"]) > 1:
            verrors.add(f"{schema_name}.alldirs", "This option can only be used for shares that contain single path")

        # os.stat calls are blocking, so path checks run in a worker thread.
        await self.middleware.run_in_thread(self.validate_paths, data, schema_name, verrors)

        filters = []
        if old:
            filters.append(["id", "!=", old["id"]])
        other_shares = await self.middleware.call("sharing.nfs.query", filters)
        # Resolve every hostname referenced by any share once, up front.
        dns_cache = await self.resolve_hostnames(
            sum([share["hosts"] for share in other_shares], []) + data["hosts"]
        )
        await self.middleware.run_in_thread(
            self.validate_hosts_and_networks, other_shares, data, schema_name, verrors, dns_cache
        )

        for k in ["maproot", "mapall"]:
            if not data[f"{k}_user"] and not data[f"{k}_group"]:
                # Neither set: nothing to validate for this mapping.
                pass
            elif not data[f"{k}_user"] and data[f"{k}_group"]:
                verrors.add(f"{schema_name}.{k}_user", "This field is required when map group is specified")
            else:
                user = group = None
                # dscache raises KeyError for unknown names; treat as "not found".
                with contextlib.suppress(KeyError):
                    user = await self.middleware.call('dscache.get_uncached_user', data[f'{k}_user'])

                if not user:
                    verrors.add(f"{schema_name}.{k}_user", "User not found")

                if data[f'{k}_group']:
                    with contextlib.suppress(KeyError):
                        group = await self.middleware.call('dscache.get_uncached_group', data[f'{k}_group'])

                    if not group:
                        verrors.add(f"{schema_name}.{k}_group", "Group not found")

        if data["maproot_user"] and data["mapall_user"]:
            verrors.add(f"{schema_name}.mapall_user", "maproot_user disqualifies mapall_user")

        if data["security"]:
            nfs_config = await self.middleware.call("nfs.config")
            if not nfs_config["v4"]:
                verrors.add(f"{schema_name}.security", "This is not allowed when NFS v4 is disabled")

    @private
    def validate_paths(self, data, schema_name, verrors):
        """Ensure every path of the share resides on the same filesystem.

        Blocking (uses os.stat); called via run_in_thread.
        """
        dev = None
        for i, path in enumerate(data["paths"]):
            stat = os.stat(path)
            if dev is None:
                # First path establishes the reference device.
                dev = stat.st_dev
            else:
                if dev != stat.st_dev:
                    verrors.add(f"{schema_name}.paths.{i}", "Paths for a NFS share must reside within the same filesystem")

    @private
    async def resolve_hostnames(self, hostnames):
        """Resolve `hostnames` to addresses, returning {hostname: address-or-None}.

        Each lookup is capped at 5 seconds and at most 8 run concurrently;
        failures are logged and map to None.
        """
        hostnames = list(set(hostnames))

        async def resolve(hostname):
            try:
                return (
                    await asyncio.wait_for(self.middleware.run_in_thread(socket.getaddrinfo, hostname, None), 5)
                )[0][4][0]
            except Exception as e:
                self.logger.warning("Unable to resolve host %r: %r", hostname, e)
                return None

        resolved_hostnames = await asyncio_map(resolve, hostnames, 8)

        return dict(zip(hostnames, resolved_hostnames))

    @private
    def validate_hosts_and_networks(self, other_shares, data, schema_name, verrors, dns_cache):
        """Reject hosts/networks already exported by another share of the same filesystem.

        Blocking (uses os.stat); called via run_in_thread. `dns_cache` maps
        hostname -> resolved address or None (unresolvable hosts are skipped).
        """
        dev = os.stat(data["paths"][0]).st_dev

        # Networks already claimed by other shares on the same device.
        used_networks = set()
        for share in other_shares:
            try:
                share_dev = os.stat(share["paths"][0]).st_dev
            except Exception:
                # Best-effort: a share whose path cannot be stat'ed is ignored.
                self.logger.warning("Failed to stat first path for %r", share, exc_info=True)
                continue

            if share_dev == dev:
                for host in share["hosts"]:
                    host = dns_cache[host]
                    if host is None:
                        continue

                    try:
                        network = ipaddress.ip_network(host)
                    except Exception:
                        self.logger.warning("Got invalid host %r", host)
                        continue
                    else:
                        used_networks.add(network)

                for network in share["networks"]:
                    try:
                        network = ipaddress.ip_network(network, strict=False)
                    except Exception:
                        self.logger.warning("Got invalid network %r", network)
                        continue
                    else:
                        used_networks.add(network)

                if not share["hosts"] and not share["networks"]:
                    # A share with no restrictions exports to everyone (v4 + v6).
                    used_networks.add(ipaddress.ip_network("0.0.0.0/0"))
                    used_networks.add(ipaddress.ip_network("::/0"))

        for i, host in enumerate(data["hosts"]):
            host = dns_cache[host]
            if host is None:
                # Unresolvable hosts are silently skipped here.
                continue

            network = ipaddress.ip_network(host)
            if network in used_networks:
                verrors.add(
                    f"{schema_name}.hosts.{i}",
                    "Another NFS share already exports this dataset for this host"
                )

            used_networks.add(network)

        for i, network in enumerate(data["networks"]):
            network = ipaddress.ip_network(network, strict=False)

            if network in used_networks:
                verrors.add(
                    f"{schema_name}.networks.{i}",
                    "Another NFS share already exports this dataset for this network"
                )

            used_networks.add(network)

        if not data["hosts"] and not data["networks"]:
            # This share would export to everyone, so any existing export on
            # the same filesystem conflicts with it.
            if used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    "Another NFS share already exports this dataset for some network"
                )

    @private
    async def extend(self, data):
        """Convert a raw datastore row into its API representation."""
        # Join the per-path rows back onto the share.
        data["paths"] = [path["path"]
                         for path in await self.middleware.call("datastore.query", "sharing.nfs_share_path",
                                                                [["share_id", "=", data["id"]]])]
        # The datastore stores networks/hosts as space-separated strings in a
        # singular "network" column; the API uses lists.
        data["networks"] = data.pop("network").split()
        data["hosts"] = data["hosts"].split()
        data["security"] = [s.upper() for s in data["security"]]
        return data

    @private
    async def compress(self, data):
        """Convert an API-shaped share back into datastore column format (inverse of extend)."""
        data["network"] = " ".join(data.pop("networks"))
        data["hosts"] = " ".join(data["hosts"])
        data["security"] = [s.lower() for s in data["security"]]
        return data
class TFTPService(SystemServiceService):
    """Configuration service for the TFTP daemon."""

    class Config:
        service = "tftp"
        datastore_prefix = "tftp_"
        cli_namespace = "service.tftp"

    ENTRY = Dict(
        'tftp_entry',
        Bool('newfiles', required=True),
        Str('directory', required=True),
        Str('host', validators=[IpAddress()], required=True),
        Int('port', validators=[Port()], required=True),
        Str('options', required=True),
        Str('umask', required=True, validators=[Match(r'^[0-7]{3}$')]),
        Str('username', required=True),
        Int('id', required=True),
    )

    @accepts()
    @returns(Dict('tftp_host_choices', additional_attrs=True))
    async def host_choices(self):
        """
        Return host choices for TFTP service to use.
        """
        addresses = await self.middleware.call(
            'interface.ip_in_use', {'static': True, 'any': True}
        )
        return {entry['address']: entry['address'] for entry in addresses}

    @accepts(
        Patch(
            'tftp_entry', 'tftp_update',
            ('rm', {'name': 'id'}),
            ('replace', Dir('directory')),
            ('attr', {'update': True}),
        )
    )
    async def do_update(self, data):
        """
        Update TFTP Service Configuration.

        `newfiles` when set enables network devices to send files to the system.

        `username` sets the user account which will be used to access `directory`. It should be ensured
        `username` has access to `directory`.
        """
        old = await self.config()
        # Overlay the partial update onto the current configuration.
        new = {**old, **data}

        verrors = ValidationErrors()

        if new["directory"]:
            # The served directory must reside on a pool.
            await check_path_resides_within_volume(verrors, self.middleware, "tftp_update.directory", new["directory"])

        # Only addresses currently configured on an interface are acceptable.
        if new['host'] not in await self.host_choices():
            verrors.add('tftp_update.host', 'Please provide a valid ip address')

        if verrors:
            raise verrors

        await self._update_service(old, new)

        return await self.config()
class SharingNFSService(CRUDService):
    """CRUD service for NFS shares (the `sharing.nfs.*` API namespace).

    Share rows live in `sharing.nfs_share`; the exported paths are stored in
    the separate `sharing.nfs_share_path` table and joined back in by `extend`.
    """

    class Config:
        namespace = "sharing.nfs"
        datastore = "sharing.nfs_share"
        datastore_prefix = "nfs_"
        datastore_extend = "sharing.nfs.extend"

    @accepts(
        Dict(
            "sharingnfs_create",
            List("paths", items=[Dir("path")]),
            Str("comment"),
            List("networks", items=[IPAddr("network", cidr=True)]),
            List("hosts", items=[IPAddr("host")]),
            Bool("alldirs"),
            Bool("ro"),
            Bool("quiet"),
            Str("maproot_user", required=False),
            Str("maproot_group", required=False),
            Str("mapall_user", required=False),
            Str("mapall_group", required=False),
            List("security", items=[
                Str("provider", enum=["SYS", "KRB5", "KRB5I", "KRB5P"])
            ]),
            register=True,
        ))
    async def do_create(self, data):
        """Create a NFS share and reload the nfs service."""
        verrors = ValidationErrors()

        await self.validate(data, "sharingnfs_create", verrors)

        if verrors:
            raise verrors

        await self.compress(data)
        # Paths live in their own table; strip them from the row before insert.
        paths = data.pop("paths")
        data["id"] = await self.middleware.call(
            "datastore.insert", self._config.datastore, data,
            {"prefix": self._config.datastore_prefix},
        )
        for path in paths:
            await self.middleware.call(
                "datastore.insert", "sharing.nfs_share_path",
                {
                    "share_id": data["id"],
                    "path": path,
                },
            )
        await self.extend(data)

        await self.middleware.call("service.reload", "nfs")

        return data

    @accepts(Int("id"), Patch("sharingnfs_create", "sharingnfs_update", ("attr", {"update": True})))
    async def do_update(self, id, data):
        """Update NFS share of `id` and reload the nfs service."""
        verrors = ValidationErrors()
        old = await self.middleware.call(
            "datastore.query", self._config.datastore, [("id", "=", id)],
            {
                "extend": self._config.datastore_extend,
                "prefix": self._config.datastore_prefix,
                "get": True
            },
        )
        new = old.copy()
        new.update(data)

        await self.validate(new, "sharingnfs_update", verrors, old=old)

        if verrors:
            raise verrors

        await self.compress(new)
        paths = new.pop("paths")
        await self.middleware.call(
            "datastore.update", self._config.datastore, id, new,
            {"prefix": self._config.datastore_prefix}
        )
        # Path rows are replaced wholesale: delete all and re-insert.
        await self.middleware.call("datastore.delete", "sharing.nfs_share_path", [["share_id", "=", id]])
        for path in paths:
            await self.middleware.call(
                "datastore.insert", "sharing.nfs_share_path",
                {
                    "share_id": id,
                    "path": path,
                },
            )
        await self.extend(new)
        new["paths"] = paths

        await self.middleware.call("service.reload", "nfs")

        return new

    @accepts(Int("id"))
    async def do_delete(self, id):
        """Delete NFS share of `id`, including its path rows."""
        await self.middleware.call("datastore.delete", "sharing.nfs_share_path", [["share_id", "=", id]])
        await self.middleware.call("datastore.delete", self._config.datastore, id)

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        """Accumulate all validation errors for a (merged) share into `verrors`."""
        if not data["paths"]:
            verrors.add(f"{schema_name}.paths", "At least one path is required")

        # os.stat calls are blocking, so path checks run in an io thread.
        await self.middleware.run_in_io_thread(self.validate_paths, data, schema_name, verrors)

        if not data["networks"]:
            verrors.add(f"{schema_name}.networks", "At least one network is required")

        # Pairwise-compare the share's own networks for overlap.
        for i, network1 in enumerate(data["networks"]):
            network1 = ipaddress.ip_network(network1, strict=False)
            for j, network2 in enumerate(data["networks"]):
                if j > i:
                    network2 = ipaddress.ip_network(network2, strict=False)
                    if network1.overlaps(network2):
                        # FIX: attribute was "network.{j}" (schema field is
                        # "networks") and the message was missing its f-prefix,
                        # so users saw literal "{network1}"/"{network2}".
                        verrors.add(
                            f"{schema_name}.networks.{j}",
                            f"Networks {network1} and {network2} overlap"
                        )

        filters = []
        if old:
            filters.append(["id", "!=", old["id"]])
        other_shares = await self.middleware.call("sharing.nfs.query", filters)
        await self.middleware.run_in_io_thread(self.validate_user_networks, other_shares, data, schema_name, verrors)

        for k in ["maproot", "mapall"]:
            if not data[f"{k}_user"] and not data[f"{k}_group"]:
                # Neither set: nothing to validate for this mapping.
                pass
            elif not data[f"{k}_user"] and data[f"{k}_group"]:
                verrors.add(
                    f"{schema_name}.{k}_user",
                    "This field is required when map group is specified")
            elif data[f"{k}_user"] and not data[f"{k}_group"]:
                verrors.add(
                    f"{schema_name}.{k}_group",
                    "This field is required when map user is specified")
            else:
                user = await self.middleware.call(
                    "user.query", [("username", "=", data[f"{k}_user"])])
                if not user:
                    verrors.add(f"{schema_name}.{k}_user", "User not found")
                group = await self.middleware.call(
                    "group.query", [("group", "=", data[f"{k}_group"])])
                if not group:
                    verrors.add(f"{schema_name}.{k}_group", "Group not found")

        if data["maproot_user"] and data["mapall_user"]:
            verrors.add(f"{schema_name}.mapall_user", "maproot_user disqualifies mapall_user")

        if data["security"]:
            nfs_config = await self.middleware.call("nfs.config")
            if not nfs_config["v4"]:
                verrors.add(f"{schema_name}.security", "This is not allowed when NFS v4 is disabled")

    @private
    def validate_paths(self, data, schema_name, verrors):
        """Check paths share one filesystem and mount points are not mixed with subdirectories.

        Blocking (uses os.stat); called via run_in_io_thread.
        """
        dev = None
        is_mountpoint = False
        for i, path in enumerate(data["paths"]):
            stat = os.stat(path)
            if dev is None:
                # First path establishes the reference device.
                dev = stat.st_dev
            else:
                if dev != stat.st_dev:
                    verrors.add(
                        f"{schema_name}.paths.{i}",
                        "Paths for a NFS share must reside within the same filesystem"
                    )

            # A path whose parent is on a different device is a mount point.
            parent = os.path.abspath(os.path.join(path, ".."))
            if os.stat(parent).st_dev != dev:
                is_mountpoint = True
                if any(os.path.abspath(p).startswith(parent + "/")
                       for p in data["paths"] if p != path):
                    verrors.add(
                        f"{schema_name}.paths.{i}",
                        "You cannot share a mount point and subdirectories all at once"
                    )

        if not is_mountpoint and data["alldirs"]:
            verrors.add(f"{schema_name}.alldirs", "This option can only be used for datasets")

    @private
    def validate_user_networks(self, other_shares, data, schema_name, verrors):
        """Reject networks already exported by another share on the same filesystem.

        Blocking (uses os.stat); called via run_in_io_thread.
        """
        dev = os.stat(data["paths"][0]).st_dev

        # (network, device) pairs claimed by other shares.
        used_networks = []
        for share in other_shares:
            try:
                share_dev = os.stat(share["paths"][0]).st_dev
            except Exception:
                # Best-effort: a share whose path cannot be stat'ed is ignored.
                self.logger.warning("Failed to stat first path for %r", share, exc_info=True)
                continue

            used_networks.extend([(network, share_dev) for network in share["networks"]])

            if data["alldirs"] and share["alldirs"] and share_dev == dev:
                verrors.add(
                    f"{schema_name}.alldirs",
                    "This option is only available once per mountpoint")

        for i, network in enumerate(data["networks"]):
            network = ipaddress.ip_network(network, strict=False)

            for other_network, other_dev in used_networks:
                try:
                    other_network = ipaddress.ip_network(other_network, strict=False)
                except Exception:
                    self.logger.warning("Got invalid network %r", other_network)
                    continue

                if network.overlaps(other_network) and dev == other_dev:
                    verrors.add(
                        f"{schema_name}.networks.{i}",
                        f"The network {network} is already being shared and cannot be used twice "
                        "for the same filesystem")

    @private
    async def extend(self, data):
        """Convert a raw datastore row into its API representation."""
        # Join the per-path rows back onto the share.
        data["paths"] = [
            path["path"]
            for path in await self.middleware.call(
                "datastore.query", "sharing.nfs_share_path", [["share_id", "=", data["id"]]])
        ]
        # The datastore stores networks/hosts as space-separated strings in a
        # singular "network" column; the API uses lists.
        data["networks"] = data.pop("network").split()
        data["hosts"] = data["hosts"].split()
        data["security"] = [s.upper() for s in data["security"]]
        return data

    @private
    async def compress(self, data):
        """Convert an API-shaped share back into datastore column format (inverse of extend)."""
        data["network"] = " ".join(data.pop("networks"))
        data["hosts"] = " ".join(data["hosts"])
        data["security"] = [s.lower() for s in data["security"]]
        return data
class SharingNFSService(SharingService):
    """CRUD service for NFS shares (the `sharing.nfs.*` API namespace).

    This revision exports a single `path` per share (plus currently-disabled
    `aliases`) and stores the row in `sharing.nfs_share`.
    """

    # Used by the SharingService base for lock-related task handling
    # (base class not visible here).
    share_task_type = 'NFS'

    class Config:
        namespace = "sharing.nfs"
        datastore = "sharing.nfs_share"
        datastore_prefix = "nfs_"
        datastore_extend = "sharing.nfs.extend"
        cli_namespace = "sharing.nfs"

    ENTRY = Patch(
        'sharingnfs_create', 'sharing_nfs_entry',
        ('add', Int('id')),
        ('add', Bool('locked')),
        register=True,
    )

    @accepts(
        Dict(
            "sharingnfs_create",
            Dir("path", required=True),
            List("aliases", items=[Str("path", validators=[Match(r"^/.*")])]),
            Str("comment", default=""),
            List("networks", items=[IPAddr("network", network=True)]),
            List("hosts", items=[Str("host")]),
            Bool("ro", default=False),
            Bool("quiet", default=False),
            Str("maproot_user", required=False, default=None, null=True),
            Str("maproot_group", required=False, default=None, null=True),
            Str("mapall_user", required=False, default=None, null=True),
            Str("mapall_group", required=False, default=None, null=True),
            List(
                "security",
                items=[
                    Str("provider", enum=["SYS", "KRB5", "KRB5I", "KRB5P"])
                ],
            ),
            Bool("enabled", default=True),
            register=True,
            strict=True,
        ))
    async def do_create(self, data):
        """
        Create a NFS Share.

        `path` local path to be exported.

        `aliases` IGNORED, for now.

        `networks` is a list of authorized networks that are allowed to access the share having format
        "network/mask" CIDR notation. If empty, all networks are allowed.

        `hosts` is a list of IP's/hostnames which are allowed to access the share. If empty, all IP's/hostnames are
        allowed.
        """
        verrors = ValidationErrors()

        await self.validate(data, "sharingnfs_create", verrors)

        if verrors:
            raise verrors

        await self.compress(data)
        data["id"] = await self.middleware.call(
            "datastore.insert", self._config.datastore, data,
            {"prefix": self._config.datastore_prefix},
        )
        await self.extend(data)

        await self._service_change("nfs", "reload")

        return await self.get_instance(data["id"])

    @accepts(Int("id"), Patch("sharingnfs_create", "sharingnfs_update", ("attr", {"update": True})))
    async def do_update(self, id, data):
        """
        Update NFS Share of `id`.
        """
        verrors = ValidationErrors()
        old = await self.get_instance(id)

        new = old.copy()
        new.update(data)

        # Validate the fully-merged share, excluding itself from duplicate checks.
        await self.validate(new, "sharingnfs_update", verrors, old=old)

        if verrors:
            raise verrors

        await self.compress(new)
        await self.middleware.call(
            "datastore.update", self._config.datastore, id, new,
            {"prefix": self._config.datastore_prefix}
        )

        await self._service_change("nfs", "reload")

        return await self.get_instance(id)

    @returns()
    async def do_delete(self, id):
        """
        Delete NFS Share of `id`.
        """
        await self.middleware.call("datastore.delete", self._config.datastore, id)
        await self._service_change("nfs", "reload")

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        """Accumulate all validation errors for a (merged) share into `verrors`.

        `old` is the pre-update share when validating an update; it is used to
        exclude the share itself from the duplicate-export checks.
        """
        if len(data["aliases"]):
            # Aliases are force-cleared: the feature is currently disabled.
            data['aliases'] = []
            # This feature was originally intended to be provided by nfs-ganesha
            # since we no longer have ganesha, planning will need to be made about
            # how to implement for kernel NFS server. One candidate is using bind mounts,
            # but this will require careful design and testing. For now we will keep it disabled.
            """
            if len(data["aliases"]) != len(data["paths"]):
                verrors.add(
                    f"{schema_name}.aliases",
                    "This field should be either empty of have the same number of elements as paths",
                )
            """

        # need to make sure that the nfs share is within the zpool mountpoint
        await check_path_resides_within_volume(
            verrors, self.middleware, f'{schema_name}.path', data['path'],
        )

        filters = []
        if old:
            filters.append(["id", "!=", old["id"]])
        other_shares = await self.middleware.call("sharing.nfs.query", filters)
        # Resolve every hostname referenced by any share once, up front.
        dns_cache = await self.resolve_hostnames(
            sum([share["hosts"] for share in other_shares], []) + data["hosts"])
        # os.stat calls are blocking, so host/network checks run in a worker thread.
        await self.middleware.run_in_thread(
            self.validate_hosts_and_networks, other_shares, data, schema_name, verrors, dns_cache)

        for k in ["maproot", "mapall"]:
            if not data[f"{k}_user"] and not data[f"{k}_group"]:
                # Neither set: nothing to validate for this mapping.
                pass
            elif not data[f"{k}_user"] and data[f"{k}_group"]:
                verrors.add(
                    f"{schema_name}.{k}_user",
                    "This field is required when map group is specified")
            else:
                user = group = None
                # dscache raises KeyError for unknown names; treat as "not found".
                with contextlib.suppress(KeyError):
                    user = await self.middleware.call(
                        'dscache.get_uncached_user', data[f'{k}_user'])

                if not user:
                    verrors.add(f"{schema_name}.{k}_user", "User not found")

                if data[f'{k}_group']:
                    with contextlib.suppress(KeyError):
                        group = await self.middleware.call(
                            'dscache.get_uncached_group', data[f'{k}_group'])

                    if not group:
                        verrors.add(f"{schema_name}.{k}_group", "Group not found")

        if data["maproot_user"] and data["mapall_user"]:
            verrors.add(f"{schema_name}.mapall_user", "maproot_user disqualifies mapall_user")

        # Any non-SYS security flavor requires NFSv4 to be enabled.
        v4_sec = list(
            filter(lambda sec: sec != "SYS", data.get("security", [])))
        if v4_sec:
            nfs_config = await self.middleware.call("nfs.config")
            if not nfs_config["v4"]:
                verrors.add(
                    f"{schema_name}.security",
                    f"The following security flavor(s) require NFSv4 to be enabled: {','.join(v4_sec)}."
                )

    @private
    async def resolve_hostnames(self, hostnames):
        """Resolve `hostnames` to addresses, returning {hostname: address-or-None}.

        Each lookup is capped at 5 seconds and at most 8 run concurrently;
        failures are logged and map to None.
        """
        hostnames = list(set(hostnames))

        async def resolve(hostname):
            try:
                return (await asyncio.wait_for(
                    self.middleware.run_in_thread(socket.getaddrinfo, hostname, None), 5))[0][4][0]
            except Exception as e:
                self.logger.warning("Unable to resolve host %r: %r", hostname, e)
                return None

        resolved_hostnames = await asyncio_map(resolve, hostnames, 8)

        return dict(zip(hostnames, resolved_hostnames))

    @private
    def validate_hosts_and_networks(self, other_shares, data, schema_name, verrors, dns_cache):
        """Reject hosts/networks already exported by another share of the same filesystem.

        Blocking (uses os.stat); called via run_in_thread. `dns_cache` maps
        hostname -> resolved address or None (unresolvable hosts produce a
        validation error here).
        """
        dev = os.stat(data["path"]).st_dev

        # Networks already claimed by other shares on the same device.
        used_networks = set()
        for share in other_shares:
            try:
                share_dev = os.stat(share["path"]).st_dev
            except Exception:
                # Best-effort: a share whose path cannot be stat'ed is ignored.
                self.logger.warning("Failed to stat path for %r", share, exc_info=True)
                continue

            if share_dev == dev:
                for host in share["hosts"]:
                    host = dns_cache[host]
                    if host is None:
                        continue

                    try:
                        network = ipaddress.ip_network(host)
                    except Exception:
                        self.logger.warning("Got invalid host %r", host)
                        continue
                    else:
                        used_networks.add(network)

                for network in share["networks"]:
                    try:
                        network = ipaddress.ip_network(network, strict=False)
                    except Exception:
                        self.logger.warning("Got invalid network %r", network)
                        continue
                    else:
                        used_networks.add(network)

                if not share["hosts"] and not share["networks"]:
                    # A share with no restrictions exports to everyone (v4 + v6).
                    used_networks.add(ipaddress.ip_network("0.0.0.0/0"))
                    used_networks.add(ipaddress.ip_network("::/0"))

        for host in set(data["hosts"]):
            cached_host = dns_cache[host]
            if cached_host is None:
                verrors.add(f"{schema_name}.hosts", f"Unable to resolve host {host}")
                continue

            network = ipaddress.ip_network(cached_host)
            if network in used_networks:
                verrors.add(
                    f"{schema_name}.hosts",
                    f"Another NFS share already exports this dataset for {cached_host}"
                )

            used_networks.add(network)

        for network in set(data["networks"]):
            network = ipaddress.ip_network(network, strict=False)

            if network in used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    f"Another NFS share already exports this dataset for {network}"
                )

            used_networks.add(network)

        if not data["hosts"] and not data["networks"]:
            # This share would export to everyone, so any existing export on
            # the same filesystem conflicts with it.
            if used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    "Another NFS share already exports this dataset for some network"
                )

    @private
    async def extend(self, data):
        """Convert a raw datastore row into its API representation."""
        # The datastore stores networks/hosts as space-separated strings in a
        # singular "network" column; the API uses lists.
        data["networks"] = data.pop("network").split()
        data["hosts"] = data["hosts"].split()
        data["security"] = [s.upper() for s in data["security"]]
        return data

    @private
    async def compress(self, data):
        """Convert an API-shaped share back into datastore column format (inverse of extend)."""
        data["network"] = " ".join(data.pop("networks"))
        data["hosts"] = " ".join(data["hosts"])
        data["security"] = [s.lower() for s in data["security"]]
        # The computed lock state is not a datastore column; drop it.
        data.pop(self.locked_field, None)
        return data