コード例 #1
class ACLBase(ServicePartBase):
    """Filesystem ACL operations: get/set ACLs, chown, and setperm.

    NOTE(review): ServicePartBase suggests this class declares the accepted
    schemas and API docstrings while the implementation is provided by a
    platform-specific part — confirm against the subclassing plugin.
    """

    @accepts(
        Dict(
            'filesystem_acl',
            Str('path', required=True),
            Int('uid', null=True, default=None, validators=[Range(min=-1, max=2147483647)]),
            Int('gid', null=True, default=None, validators=[Range(min=-1, max=2147483647)]),
            # The ACL payload is either an NFSv4 or a POSIX1e entry list,
            # depending on the acltype of the underlying filesystem.
            OROperator(
                List(
                    'nfs4_acl',
                    items=[Dict(
                        'nfs4_ace',
                        Str('tag', enum=['owner@', 'group@', 'everyone@', 'USER', 'GROUP']),
                        Int('id', null=True, validators=[Range(min=-1, max=2147483647)]),
                        Str('type', enum=['ALLOW', 'DENY']),
                        Dict(
                            'perms',
                            Bool('READ_DATA'),
                            Bool('WRITE_DATA'),
                            Bool('APPEND_DATA'),
                            Bool('READ_NAMED_ATTRS'),
                            Bool('WRITE_NAMED_ATTRS'),
                            Bool('EXECUTE'),
                            Bool('DELETE_CHILD'),
                            Bool('READ_ATTRIBUTES'),
                            Bool('WRITE_ATTRIBUTES'),
                            Bool('DELETE'),
                            Bool('READ_ACL'),
                            Bool('WRITE_ACL'),
                            Bool('WRITE_OWNER'),
                            Bool('SYNCHRONIZE'),
                            Str('BASIC', enum=['FULL_CONTROL', 'MODIFY', 'READ', 'TRAVERSE']),
                        ),
                        Dict(
                            'flags',
                            Bool('FILE_INHERIT'),
                            Bool('DIRECTORY_INHERIT'),
                            Bool('NO_PROPAGATE_INHERIT'),
                            Bool('INHERIT_ONLY'),
                            Bool('INHERITED'),
                            Str('BASIC', enum=['INHERIT', 'NOINHERIT']),
                        ),
                        register=True
                    )],
                    register=True
                ),
                List(
                    'posix1e_acl',
                    items=[Dict(
                        'posix1e_ace',
                        Bool('default', default=False),
                        Str('tag', enum=['USER_OBJ', 'GROUP_OBJ', 'USER', 'GROUP', 'OTHER', 'MASK']),
                        Int('id', default=-1, validators=[Range(min=-1, max=2147483647)]),
                        Dict(
                            'perms',
                            Bool('READ', default=False),
                            Bool('WRITE', default=False),
                            Bool('EXECUTE', default=False),
                        ),
                        register=True
                    )],
                    register=True
                ),
                name='dacl',
            ),
            Dict(
                'nfs41_flags',
                Bool('autoinherit', default=False),
                Bool('protected', default=False),
            ),
            Str('acltype', enum=[x.name for x in ACLType], null=True),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
                Bool('canonicalize', default=True)
            )
        )
    )
    @returns()
    @job(lock="perm_change")
    def setacl(self, job, data):
        """
        Set ACL of a given path. Takes the following parameters:
        `path` full path to directory or file.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to list directories
        in the directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.

        `dacl` ACL entries. Formatting depends on the underlying `acltype`. NFS4ACL requires
        NFSv4 entries. POSIX1e requires POSIX1e entries.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `recursive` apply the ACL recursively

        `traverse` traverse filesystem boundaries (ZFS datasets)

        `stripacl` convert ACL to trivial. ACL is trivial if it can be expressed as a file mode without
        losing any access rules.

        `canonicalize` reorder ACL entries so that they are in canonical form as described
        in the Microsoft documentation MS-DTYP 2.4.5 (ACL). This only applies to NFSv4 ACLs.

        In the case of NFSv4 ACLs, USER_OBJ, GROUP_OBJ, and EVERYONE are replaced with owner@,
        group@, everyone@ for consistency with getfacl and setfacl. If one of the aforementioned
        special tags is used, 'id' must be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled.
        """

    @accepts(
        Str('path'),
        Bool('simplified', default=True),
        Bool('resolve_ids', default=False),
    )
    @returns(Dict(
        'truenas_acl',
        Str('path'),
        Bool('trivial'),
        Str('acltype', enum=[x.name for x in ACLType], null=True),
        OROperator(
            Ref('nfs4_acl'),
            Ref('posix1e_acl'),
            name='acl'
        )
    ))
    def getacl(self, path, simplified, resolve_ids):
        """
        Return ACL of a given path. This may return a POSIX1e ACL or a NFSv4 ACL. The acl type is indicated
        by the `acltype` key.

        `simplified` - effect of this depends on ACL type on underlying filesystem. In the case of
        NFSv4 ACLs simplified permissions and flags are returned for ACL entries where applicable.
        NFSv4 errata below. In the case of POSIX1E ACLs, this setting has no impact on returned ACL.

        `resolve_ids` - adds additional `who` key to each ACL entry, that converts the numeric id to
        a user name or group name. In the case of owner@ and group@ (NFSv4) or USER_OBJ and GROUP_OBJ
        (POSIX1E), st_uid or st_gid will be converted from stat() return for file. In the case of
        MASK (POSIX1E), OTHER (POSIX1E), everyone@ (NFSv4), key `who` will be included, but set to null.
        In case of failure to resolve the id to a name, `who` will be set to null. This option should
        only be used if resolving ids to names is required.

        Errata about ACLType NFSv4:

        `simplified` returns a shortened form of the ACL permset and flags where applicable. If permissions
        have been simplified, then the `perms` object will contain only a single `BASIC` key with a string
        describing the underlying permissions set.

        `TRAVERSE` sufficient rights to traverse a directory, but not read contents.

        `READ` sufficient rights to traverse a directory, and read file contents.

        `MODIFY` sufficient rights to traverse, read, write, and modify a file.

        `FULL_CONTROL` all permissions.

        If the permissions do not fit within one of the pre-defined simplified permissions types, then
        the full ACL entry will be returned.
        """

    @accepts(
        Dict(
            'filesystem_ownership',
            Str('path', required=True),
            Int('uid', null=True, default=None, validators=[Range(min=-1, max=2147483647)]),
            Int('gid', null=True, default=None, validators=[Range(min=-1, max=2147483647)]),
            Dict(
                'options',
                Bool('recursive', default=False),
                Bool('traverse', default=False)
            )
        )
    )
    @returns()
    @job(lock="perm_change")
    def chown(self, job, data):
        """
        Change owner or group of file at `path`.

        `uid` and `gid` specify new owner of the file. If either
        key is absent or None, then existing value on the file is not
        changed.

        `recursive` performs action recursively, but does
        not traverse filesystem mount points.

        If `traverse` and `recursive` are specified, then the chown
        operation will traverse filesystem mount points.

        (`recursive` and `traverse` are passed inside the `options` object.)
        """

    @accepts(
        Dict(
            'filesystem_permission',
            Str('path', required=True),
            UnixPerm('mode', null=True),
            Int('uid', null=True, default=None, validators=[Range(min=-1, max=2147483647)]),
            Int('gid', null=True, default=None, validators=[Range(min=-1, max=2147483647)]),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
            )
        )
    )
    @returns()
    @job(lock="perm_change")
    def setperm(self, job, data):
        """
        Set unix permissions on given `path`.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to list directories
        in the directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.

        If `mode` is specified then the mode will be applied to the
        path and files and subdirectories depending on which `options` are
        selected. Mode should be formatted as string representation of octal
        permissions bits.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `stripacl` setperm will fail if an extended ACL is present on `path`,
        unless `stripacl` is set to True.

        `recursive` apply the changes recursively, but do not traverse dataset
        boundaries.

        `traverse` apply the changes to child datasets as well.

        If no `mode` is set, and `stripacl` is True, then non-trivial ACLs
        will be converted to trivial ACLs. An ACL is trivial if it can be
        expressed as a file mode without losing any access rules.

        """

    @accepts(Str('path', required=False, default=''))
    @returns(List('acl_choices', items=[Str("choice")]))
    async def default_acl_choices(self, path):
        """
        `DEPRECATED`
        Returns a list of names of ACL templates. Wrapper around
        filesystem.acltemplate.query.
        """

    @accepts(
        Str('acl_type', default='POSIX_OPEN'),
        Str('share_type', default='NONE', enum=['NONE', 'SMB', 'NFS']),
    )
    @returns(OROperator(Ref('nfs4_acl'), Ref('posix1e_acl'), name='acl'))
    async def get_default_acl(self, acl_type, share_type):
        """
コード例 #2 — ファイル: kerberos.py(プロジェクト: wesleywwf/freenas)
class KerberosService(TDBWrapConfigService):
    """Kerberos settings service.

    Manages the auxiliary krb5.conf parameters and the lifecycle of the
    system kerberos ticket (kinit/klist/kdestroy/renew) used by the
    Active Directory and LDAP directory services.
    """

    # Defaults served before any settings have been persisted.
    tdb_defaults = {
        "id": 1,
        "appdefaults_aux": "",
        "libdefaults_aux": ""
    }

    class Config:
        # Middleware wiring: backing datastore table, column prefix for
        # persisted fields, and the CLI namespace exposing this service.
        service = "kerberos"
        datastore = 'directoryservice.kerberossettings'
        datastore_prefix = "ks_"
        cli_namespace = "directory_service.kerberos.settings"

    @accepts(Dict(
        'kerberos_settings_update',
        Str('appdefaults_aux', max_length=None),
        Str('libdefaults_aux', max_length=None),
        update=True
    ))
    async def do_update(self, data):
        """
        Update kerberos auxiliary configuration.

        `appdefaults_aux` add parameters to "appdefaults" section of the krb5.conf file.

        `libdefaults_aux` add parameters to "libdefaults" section of the krb5.conf file.
        """
        # Validate the incoming changes merged over the stored config so the
        # combined krb5.conf content is checked, not just the delta.
        merged = dict(await self.config())
        merged.update(data)

        verrors = ValidationErrors()
        verrors.add_child(
            'kerberos_settings_update',
            await self._validate_appdefaults(merged['appdefaults_aux'])
        )
        verrors.add_child(
            'kerberos_settings_update',
            await self._validate_libdefaults(merged['libdefaults_aux'])
        )
        verrors.check()

        await super().do_update(data)

        # Regenerate krb5.conf so the new parameters take effect immediately.
        await self.middleware.call('etc.generate', 'kerberos')
        return await self.config()

    @private
    @accepts(Dict(
        'kerberos-options',
        Str('ccache', enum=[x.name for x in krb5ccache], default=krb5ccache.SYSTEM.name),
        register=True
    ))
    async def _klist_test(self, data):
        """
        Check whether the selected credential cache holds a valid ticket.

        Returns False if there is not a TGT or if the TGT has expired.
        """
        # `klist -s` produces no output and reports TGT validity purely
        # through its exit status.
        ccache_path = krb5ccache[data['ccache']].value
        klist = await run(['klist', '-s', '-c', ccache_path], check=False)
        return klist.returncode == 0

    @private
    async def check_ticket(self):
        """Raise CallError(ENOKEY) unless a valid kerberos ticket is present."""
        if not await self._klist_test():
            raise CallError("Kerberos ticket is required.", errno.ENOKEY)

    @private
    async def _validate_param_type(self, data):
        """
        Validate a single krb5.conf parameter value against its declared type.

        `data['ptype']` names the type ('boolean', 'cctype', 'etypes',
        'keytab'); anything else passes through unvalidated. Raises CallError
        on an invalid value.
        """
        ptype = data['ptype']
        value = data['value']

        if ptype not in ('boolean', 'cctype', 'etypes', 'keytab'):
            # Unsupported parameter types are not validated here.
            return

        if ptype == 'boolean':
            if value.upper() not in ['YES', 'TRUE', 'NO', 'FALSE']:
                raise CallError(f'[{value}] is not boolean')
        elif ptype == 'etypes':
            # Space-separated list; every element must be a known etype.
            for entry in value.split(' '):
                try:
                    KRB_ETYPE(entry)
                except Exception:
                    raise CallError(f'[{entry}] is not a supported encryption type')
        elif ptype == 'cctype':
            available_types = ['FILE', 'MEMORY', 'DIR']
            if value not in available_types:
                raise CallError(f'[{value}] is an unsupported cctype. '
                                f'Available types are {", ".join(available_types)}. '
                                'This parameter is case-sensitive')
        elif ptype == 'keytab':
            try:
                keytab(value)
            except Exception:
                raise CallError(f'{value} is an unsupported keytab path')

    @private
    async def _validate_appdefaults(self, appdefaults):
        """
        Validate user-supplied "appdefaults" lines for krb5.conf.

        Returns a ValidationErrors collection (possibly empty); it is the
        caller's job to attach it and check().
        """
        verrors = ValidationErrors()
        for line in appdefaults.splitlines():
            param = line.split('=')
            # Only simple `key = value` pairs are validated; lines opening a
            # `{...}` subsection (or without exactly one '=') are skipped.
            if len(param) != 2 or (param[1].strip())[0] == '{':
                continue

            matches = [x for x in KRB_AppDefaults if param[0].strip() in (x.value)[0]]
            if not matches:
                verrors.add(
                    'kerberos_appdefaults',
                    f'{param[0]} is an invalid appdefaults parameter.'
                )
                continue

            try:
                await self._validate_param_type({
                    'ptype': (matches[0]).value[1],
                    'value': param[1].strip()
                })
            except Exception as e:
                verrors.add(
                    'kerberos_appdefaults',
                    f'{param[0]} has invalid value: {e.errmsg}.'
                )

        return verrors

    @private
    async def _validate_libdefaults(self, libdefaults):
        """
        Validate user-supplied "libdefaults" lines for krb5.conf.

        Unlike appdefaults, every line must be a simple `key = value` pair;
        anything else is flagged as invalid. Returns a ValidationErrors
        collection (possibly empty).
        """
        verrors = ValidationErrors()
        for line in libdefaults.splitlines():
            param = line.split('=')
            if len(param) != 2:
                verrors.add('kerberos_libdefaults', f'{line} is an invalid libdefaults parameter.')
                continue

            matches = [x for x in KRB_LibDefaults if param[0].strip() in (x.value)[0]]
            if not matches:
                verrors.add(
                    'kerberos_libdefaults',
                    f'{param[0]} is an invalid libdefaults parameter.'
                )
                continue

            try:
                await self._validate_param_type({
                    'ptype': (matches[0]).value[1],
                    'value': param[1].strip()
                })
            except Exception as e:
                verrors.add(
                    'kerberos_libdefaults',
                    f'{param[0]} has invalid value: {e.errmsg}.'
                )

        return verrors

    @private
    @accepts(Dict(
        "get-kerberos-creds",
        Str("dstype", required=True, enum=[x.name for x in DSType]),
        OROperator(
            Dict(
                'ad_parameters',
                Str('bindname'),
                Str('bindpw'),
                Str('domainname'),
                Str('kerberos_principal')
            ),
            Dict(
                'ldap_parameters',
                Str('binddn'),
                Str('bindpw'),
                Int('kerberos_realm'),
            ),
            name='conf',
            required=True
        )
    ))
    async def get_cred(self, data):
        '''
        Get kerberos cred from directory services config to use for `do_kinit`.

        Returns either `{'kerberos_principal': ...}` (keytab-based kinit) or
        `{'username': ..., 'password': ...}` (password-based kinit).
        '''
        conf = data['conf']
        # A configured kerberos principal always wins. Use .get() here: the
        # `ldap_parameters` variant of `conf` declares no `kerberos_principal`
        # key at all, so plain indexing would raise KeyError for LDAP creds.
        if conf.get('kerberos_principal'):
            return {'kerberos_principal': conf['kerberos_principal']}

        dstype = DSType[data['dstype']]
        if dstype is DSType.DS_TYPE_ACTIVEDIRECTORY:
            return {
                'username': f'{conf["bindname"]}@{conf["domainname"].upper()}',
                'password': conf['bindpw']
            }

        # LDAP: derive the principal from the bind DN's leading RDN value and
        # the configured kerberos realm.
        krb_realm = await self.middleware.call(
            'kerberos.realm.query',
            [('id', '=', conf['kerberos_realm'])],
            {'get': True}
        )
        # NOTE(review): assumes binddn's first RDN is `cn=<name>`; a DN whose
        # first component lacks '=' would raise IndexError — confirm upstream
        # validation guarantees the format.
        bind_cn = (conf['binddn'].split(','))[0].split("=")
        return {
            'username': f'{bind_cn[1]}@{krb_realm["realm"]}',
            'password': conf['bindpw']
        }

    @private
    @accepts(Dict(
        'do_kinit',
        OROperator(
            Dict(
                'kerberos_username_password',
                Str('username', required=True),
                Str('password', required=True, private=True),
                register=True
            ),
            Dict(
                'kerberos_keytab',
                Str('kerberos_principal', required=True),
            ),
            name='krb5_cred',
            required=True,
        ),
        Patch(
            'kerberos-options',
            'kinit-options',
            ('add', {'name': 'renewal_period', 'type': 'int', 'default': 7}),
        )
    ))
    async def do_kinit(self, data):
        """
        Obtain a kerberos TGT via `kinit`.

        `krb5_cred` is either a username/password pair or a kerberos
        principal (keytab-based). `kinit-options` selects the credential
        cache and renewal period.

        Raises CallError if kinit fails in either path.

        NOTE(review): the keytab path returns None on success while the
        password path returns True — callers should not rely on the return
        value.
        """
        ccache = krb5ccache[data['kinit-options']['ccache']]
        cmd = ['kinit', '-r', str(data['kinit-options']['renewal_period']), '-c', ccache.value]
        creds = data['krb5_cred']
        has_principal = 'kerberos_principal' in creds

        if has_principal:
            # Keytab-based kinit (-k): no password interaction required.
            cmd.extend(['-k', creds['kerberos_principal']])
            kinit = await run(cmd, check=False)
            if kinit.returncode != 0:
                raise CallError(f"kinit with principal [{creds['kerberos_principal']}] "
                                f"failed: {kinit.stderr.decode()}")
            return

        # Password-based kinit: supply the password on stdin.
        cmd.append(creds['username'])
        kinit = await Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE
        )

        output = await kinit.communicate(input=creds['password'].encode())
        if kinit.returncode != 0:
            raise CallError(f"kinit with password failed: {output[1].decode()}")

        return True

    @private
    async def _kinit(self):
        """
        Regenerate krb5.conf and kinit using the enabled directory service's
        stored credentials.

        For now we only check for kerberos realms explicitly configured in AD and LDAP.
        """
        ad = await self.middleware.call('activedirectory.config')
        ldap = await self.middleware.call('ldap.config')
        await self.middleware.call('etc.generate', 'kerberos')
        payload = {}

        if ad['enable']:
            payload = {
                'dstype': DSType.DS_TYPE_ACTIVEDIRECTORY.name,
                'conf': {
                    'bindname': ad['bindname'],
                    'bindpw': ad['bindpw'],
                    'domainname': ad['domainname'],
                    'kerberos_principal': ad['kerberos_principal'],
                }
            }

        # NOTE(review): if AD and LDAP-with-realm are both enabled, the LDAP
        # payload silently overwrites the AD one — confirm this precedence is
        # intentional.
        if ldap['enable'] and ldap['kerberos_realm']:
            payload = {
                'dstype': DSType.DS_TYPE_LDAP.name,
                'conf': {
                    'binddn': ldap['binddn'],
                    'bindpw': ldap['bindpw'],
                    'kerberos_realm': ldap['kerberos_realm'],
                }
            }

        if not payload:
            # Neither directory service requires kerberos; nothing to do.
            return

        cred = await self.get_cred(payload)
        await self.do_kinit({'krb5_cred': cred})

    @private
    async def parse_klist(self, data):
        """
        Parse raw `klist -ef` output into structured ticket entries.

        `data` keys: `klistin` (raw klist output), `ad` and `ldap` (the
        respective directory service configs, consulted only for their
        `enable` flags).

        Returns {"ad_TGT": [...], "ldap_TGT": [...]} with the parsed entries
        filed under whichever service is enabled (AD takes precedence when
        both are enabled).
        """
        ad = data.get("ad")
        ldap = data.get("ldap")
        klistin = data.get("klistin")
        tickets = klistin.splitlines()
        default_principal = None
        tlen = len(tickets)

        if ad['enable']:
            dstype = DSType.DS_TYPE_ACTIVEDIRECTORY
        elif ldap['enable']:
            dstype = DSType.DS_TYPE_LDAP
        else:
            return {"ad_TGT": [], "ldap_TGT": []}

        parsed_klist = []
        for idx, e in enumerate(tickets):
            if e.startswith('Default'):
                # "Default principal: <user@REALM>" header line.
                default_principal = (e.split(':')[1]).strip()
            if e and e[0].isdigit():
                # Ticket lines begin with the issue timestamp; fields are
                # separated by two spaces: issued, expires, server principal.
                d = e.split("  ")
                issued = time.strptime(d[0], "%m/%d/%y %H:%M:%S")
                expires = time.strptime(d[1], "%m/%d/%y %H:%M:%S")
                client = default_principal
                server = d[2]
                flags = None
                etype = None
                # Flags/Etype details may continue on up to two following
                # indented lines; stop early at the next ticket line.
                next_two = [idx + 1, idx + 2]
                for i in next_two:
                    if i >= tlen:
                        break
                    if tickets[i][0].isdigit():
                        break
                    if tickets[i].startswith("\tEtype"):
                        etype = tickets[i].strip()
                        break
                    if tickets[i].startswith("\trenew"):
                        flags = tickets[i].split("Flags: ")[1]
                        continue

                    # Combined "<flags>, <etype>" continuation line.
                    extra = tickets[i].split(", ", 1)
                    flags = extra[0].strip()
                    etype = extra[1].strip()

                parsed_klist.append({
                    'issued': issued,      # time.struct_time
                    'expires': expires,    # time.struct_time
                    'client': client,
                    'server': server,
                    'etype': etype,
                    'flags': flags,
                })

        return {
            "ad_TGT": parsed_klist if dstype == DSType.DS_TYPE_ACTIVEDIRECTORY else [],
            "ldap_TGT": parsed_klist if dstype == DSType.DS_TYPE_LDAP else [],
        }

    @private
    async def _get_cached_klist(self):
        """
        Try to retrieve cached kerberos TGT info. If it hasn't been cached,
        perform klist, parse it, put it in cache, then return it.

        Raises CallError if klist fails or times out (after stopping the
        kerberos service state).
        """
        if await self.middleware.call('cache.has_key', 'KRB_TGT_INFO'):
            return (await self.middleware.call('cache.get', 'KRB_TGT_INFO'))

        ad = await self.middleware.call('activedirectory.config')
        ldap = await self.middleware.call('ldap.config')
        # No directory service is using kerberos: return empty ticket lists.
        if not ad['enable'] and (not ldap['enable'] or not ldap['kerberos_realm']):
            return {'ad_TGT': [], 'ldap_TGT': []}

        if not await self.status():
            await self.start()
        try:
            klist = await asyncio.wait_for(
                run(['klist', '-ef'], check=False, stdout=subprocess.PIPE),
                timeout=10.0
            )
        except Exception as e:
            await self.stop()
            # BUG FIX: the message previously carried a dangling "%s" and
            # passed the exception as CallError's second positional argument
            # (the errno slot); format the message properly instead.
            raise CallError(f"Attempt to list kerberos tickets failed with error: {e}")

        if klist.returncode != 0:
            await self.stop()
            raise CallError(f'klist failed with error: {klist.stderr.decode()}')

        parsed_klist = await self.parse_klist({
            "klistin": klist.stdout.decode(),
            "ad": ad,
            "ldap": ldap,
        })

        # Cache only when tickets were actually found, so a transient failure
        # is not pinned in the cache.
        if parsed_klist['ad_TGT'] or parsed_klist['ldap_TGT']:
            await self.middleware.call('cache.put', 'KRB_TGT_INFO', parsed_klist)

        return parsed_klist

    @private
    async def renew(self):
        """
        Compare timestamp of cached TGT info with current timestamp. If we're within 5 minutes
        of expire time, renew the TGT via 'kinit -R'. If a ticket has already
        expired, or no ticket info is cached at all, re-initialize via start().

        Returns True unless re-initialization was needed, in which case the
        result of start() is returned.
        """
        tgt_info = await self._get_cached_klist()
        ret = True

        permitted_buffer = datetime.timedelta(minutes=5)
        current_time = datetime.datetime.now()

        def scan(entries):
            # Classify the first ticket found expired ('reinit') or inside the
            # renewal window ('renew'); None when every ticket is healthy.
            for entry in entries:
                expiry = datetime.datetime.fromtimestamp(time.mktime(entry['expires']))
                delta = expiry - current_time
                if delta < datetime.timedelta(minutes=0):
                    return 'reinit'
                if delta < permitted_buffer:
                    return 'renew'
            return None

        # The AD and LDAP ticket lists previously went through two identical
        # copies of this expiry loop; run both through the shared helper.
        must_renew = False
        must_reinit = not tgt_info['ad_TGT'] and not tgt_info['ldap_TGT']
        for entries in (tgt_info['ad_TGT'], tgt_info['ldap_TGT']):
            state = scan(entries)
            if state == 'reinit':
                must_reinit = True
            elif state == 'renew':
                must_renew = True

        if must_renew and not must_reinit:
            try:
                kinit = await asyncio.wait_for(run(['kinit', '-R'], check=False), timeout=15)
                if kinit.returncode != 0:
                    raise CallError(f'kinit -R failed with error: {kinit.stderr.decode()}')
                self.logger.debug('Successfully renewed kerberos TGT')
                await self.middleware.call('cache.pop', 'KRB_TGT_INFO')
            except asyncio.TimeoutError:
                self.logger.debug('Attempt to renew kerberos TGT failed after 15 seconds.')

        if must_reinit:
            ret = await self.start()
            await self.middleware.call('cache.pop', 'KRB_TGT_INFO')

        return ret

    @private
    async def status(self):
        """
        Report whether a valid kerberos TGT is currently held.

        Experience in production environments has indicated that klist can
        hang indefinitely, so the check is capped at 10 seconds and a timeout
        is treated as "no valid ticket" (which should force a kdestroy and a
        fresh kinit, depending on why status was checked).
        """
        try:
            return await asyncio.wait_for(self._klist_test(), timeout=10.0)
        except asyncio.TimeoutError:
            self.logger.debug('kerberos ticket status check timed out after 10 seconds.')
            return False

    @private
    @accepts(Ref('kerberos-options'))
    async def kdestroy(self, data):
        """Destroy the credential cache selected by `ccache`; raise CallError on failure."""
        result = await run(['kdestroy', '-c', krb5ccache[data['ccache']].value], check=False)
        if result.returncode != 0:
            raise CallError(f'kdestroy failed with error: {result.stderr.decode()}')

    @private
    async def stop(self):
        """Drop cached TGT info and destroy the credential cache. Returns True."""
        # Invalidate the cached klist output first so stale ticket info cannot
        # be served after the ccache is gone.
        await self.middleware.call('cache.pop', 'KRB_TGT_INFO')
        # Called without arguments — presumably @accepts fills in the default
        # SYSTEM ccache; confirm against the middleware schema layer.
        await self.kdestroy()
        return True

    @private
    async def start(self, realm=None, kinit_timeout=30):
        """
        Regenerate krb5.conf and obtain a TGT for the enabled directory service.

        kinit can hang because it depends on DNS. If it has not returned within
        30 seconds, it is safe to say that it has failed.

        NOTE(review): `realm` is accepted but unused in the visible code —
        confirm whether any caller still passes it.
        """
        await self.middleware.call('etc.generate', 'kerberos')
        try:
            await asyncio.wait_for(self._kinit(), timeout=kinit_timeout)
        except asyncio.TimeoutError:
            raise CallError(f'Timed out hung kinit after [{kinit_timeout}] seconds')
コード例 #3
class IdmapDomainService(TDBWrapCRUDService):
    """
    CRUD service for winbind idmap domain configuration, built on
    TDBWrapCRUDService over the 'directoryservice.idmap_domain' datastore.
    """

    # Built-in system domain entries. Ids 1, 2 and 5 correspond to the
    # DS_TYPE_* domains; do_delete refuses to remove ids <= 5.
    tdb_defaults = [
        {
            "id": 1,
            "name": "DS_TYPE_ACTIVEDIRECTORY",
            "dns_domain_name": None,
            "range_low": 90000001,
            "range_high": 200000001,
            "idmap_backend": "AUTORID",
            "options": {
                "rangesize": 10000000
            },
            "certificate": None
        },
        {
            "id": 2,
            "name": "DS_TYPE_LDAP",
            "dns_domain_name": None,
            "range_low": 10000,
            "range_high": 90000000,
            "idmap_backend": "LDAP",
            "options": {
                "ldap_base_dn": "",
                "ldap_user_dn": "",
                "ldap_url": "",
                "ssl": "OFF"
            },
            "certificate": None
        },
        {
            "id": 5,
            "name": "DS_TYPE_DEFAULT_DOMAIN",
            "dns_domain_name": None,
            "range_low": 90000001,
            "range_high": 100000000,
            "idmap_backend": "TDB",
            "options": {},
            "certificate": None
        }
    ]

    # API entry schema: the create schema plus the database-assigned id.
    ENTRY = Patch(
        'idmap_domain_create', 'idmap_domain_entry',
        ('add', Int('id')),
    )

    class Config:
        # Datastore table and column prefix consumed by the base CRUD service.
        datastore = 'directoryservice.idmap_domain'
        datastore_prefix = 'idmap_domain_'
        namespace = 'idmap'
        # Rows are normalized via idmap.idmap_extend on read.
        datastore_extend = 'idmap.idmap_extend'
        cli_namespace = 'directory_service.idmap'

    @private
    async def idmap_extend(self, data):
        """Normalize a datastore row for API consumption (uppercase enum values)."""
        if data.get('idmap_backend'):
            data['idmap_backend'] = data['idmap_backend'].upper()

        options = data.get('options')
        if options:
            for key in ('ssl', 'linked_service'):
                if options.get(key):
                    options[key] = options[key].upper()

        return data

    @private
    async def idmap_compress(self, data):
        """Inverse of idmap_extend: lowercase enum values before a datastore write."""
        options = data.get('options')
        if options:
            for key in ('ssl', 'linked_service'):
                if options.get(key):
                    options[key] = options[key].lower()

        data['idmap_backend'] = data['idmap_backend'].lower()

        return data

    @private
    async def get_next_idmap_range(self):
        """
        Increment next high range by 100,000,000 ids. This number has
        to accommodate the highest available rid value for a domain.
        Configured idmap ranges _must_ not overlap.
        """
        domains = await self.query()
        # The next free range starts right above the highest configured high mark.
        highest = max(domain['range_high'] for domain in domains)
        return (highest + 1, highest + 100000000)

    @private
    async def snapshot_samba4_dataset(self):
        """Snapshot the samba4 system dataset, named wbc-<unix-timestamp>."""
        basename = (await self.middleware.call('systemdataset.config'))['basename']
        # First 10 chars of the UTC epoch timestamp (whole seconds).
        stamp = str(datetime.datetime.now(datetime.timezone.utc).timestamp())[:10]
        await self.middleware.call(
            'zfs.snapshot.create',
            {'dataset': f'{basename}/samba4', 'name': f'wbc-{stamp}'}
        )

    @private
    async def remove_winbind_idmap_tdb(self):
        """Snapshot the samba4 dataset, then best-effort delete winbindd_idmap.tdb."""
        await self.snapshot_samba4_dataset()

        try:
            os.remove('/var/db/system/samba4/winbindd_idmap.tdb')
        except FileNotFoundError:
            # Nothing to remove; not an error.
            self.logger.trace("winbindd_idmap.tdb does not exist. Skipping removal.")
        except Exception:
            # Deliberately best-effort: log and continue.
            self.logger.debug("Failed to remove winbindd_idmap.tdb.", exc_info=True)

    @private
    async def domain_info(self, domain):
        """
        Return `wbinfo -D <domain>` output parsed into a dict with lowercased
        keys. 'Yes'/'No' values are converted to booleans.

        Raises CallError with errno.ENOENT when winbind does not know the
        domain, errno.EFAULT for any other wbinfo failure.
        """
        ret = {}

        # The builtin AD domain entry maps to the configured SMB workgroup.
        if domain == 'DS_TYPE_ACTIVEDIRECTORY':
            domain = (await self.middleware.call('smb.config'))['workgroup']

        wbinfo = await run(['wbinfo', '-D', domain], check=False)
        if wbinfo.returncode != 0:
            if 'WBC_ERR_DOMAIN_NOT_FOUND' in wbinfo.stderr.decode():
                err = errno.ENOENT
            else:
                err = errno.EFAULT

            raise CallError(f'Failed to get domain info for {domain}: '
                            f'{wbinfo.stderr.decode().strip()}', err)

        for entry in wbinfo.stdout.splitlines():
            # Split on the first colon only so values containing ':' stay intact.
            kv = entry.decode().split(':', 1)
            val = kv[1].strip()
            # BUG FIX: the previous `bool(val)` converted both 'Yes' and 'No'
            # to True (any non-empty string is truthy); compare explicitly.
            ret.update({kv[0].strip().lower(): val if val not in ('Yes', 'No') else val == 'Yes'})

        return ret

    @private
    async def get_sssd_low_range(self, domain, sssd_config=None, seed=0xdeadbeef):
        """
        This is best effort attempt for SSSD compatibility. It will allocate low
        range for then initial slice in the SSSD environment. The SSSD allocation algorithm
        is non-deterministic. Domain SID string is converted to a 32-bit hashed value
        using murmurhash3 algorithm.

        The modulus of this value with the total number of available slices is used to
        pick the slice. This slice number is then used to calculate the low range for
        RID 0. With the default settings in SSSD this will be deterministic as long as
        the domain has less than 200,000 RIDs.
        """
        sid = (await self.domain_info(domain))['sid']
        sssd_config = {} if not sssd_config else sssd_config
        # Defaults mirror SSSD's stock idmap configuration (200k ids per slice).
        range_size = sssd_config.get('range_size', 200000)
        range_low = sssd_config.get('range_low', 10001)
        range_max = sssd_config.get('range_max', 2000200000)
        max_slices = int((range_max - range_low) / range_size)

        # --- MurmurHash3 (x86, 32-bit) over the SID string, seeded above. ---
        data = bytearray(sid.encode())
        datalen = len(data)
        hash = seed  # NOTE: intentionally shadows the builtin `hash` in this scope.
        data_bytes = data

        # MurmurHash3 mixing constants.
        c1 = 0xcc9e2d51
        c2 = 0x1b873593
        r1 = 15
        r2 = 13
        n = 0xe6546b64

        # Body: consume the input in 4-byte little-endian chunks.
        while datalen >= 4:
            k = int.from_bytes(data_bytes[:4], byteorder='little') & 0xFFFFFFFF
            data_bytes = data_bytes[4:]
            datalen = datalen - 4
            k = (k * c1) & 0xFFFFFFFF
            k = (k << r1 | k >> 32 - r1) & 0xFFFFFFFF
            k = (k * c2) & 0xFFFFFFFF
            hash ^= k
            hash = (hash << r2 | hash >> 32 - r2) & 0xFFFFFFFF
            hash = (hash * 5 + n) & 0xFFFFFFFF

        # Tail: fold in the remaining 1-3 bytes (standard murmur3 fallthrough).
        if datalen > 0:
            k = 0
            if datalen >= 3:
                k = k | data_bytes[2] << 16
            if datalen >= 2:
                k = k | data_bytes[1] << 8
            if datalen >= 1:
                k = k | data_bytes[0]
                k = (k * c1) & 0xFFFFFFFF
                k = (k << r1 | k >> 32 - r1) & 0xFFFFFFFF
                k = (k * c2) & 0xFFFFFFFF
                hash ^= k

        # Finalization: xor in the total length, then fmix32 avalanche steps.
        hash = (hash ^ len(data)) & 0xFFFFFFFF
        hash ^= hash >> 16
        hash = (hash * 0x85ebca6b) & 0xFFFFFFFF
        hash ^= hash >> 13
        hash = (hash * 0xc2b2ae35) & 0xFFFFFFFF
        hash ^= hash >> 16

        # Slice index = hash % max_slices; low range is offset in units of
        # range_size. NOTE(review): SSSD offsets slices from range_low, while
        # this adds range_size — confirm intended against SSSD's algorithm.
        return (hash % max_slices) * range_size + range_size

    @accepts()
    @job(lock='clear_idmap_cache')
    async def clear_idmap_cache(self, job):
        """
        Stop samba, remove the winbindd_cache.tdb file, start samba, flush samba's cache.
        This should be performed after finalizing idmap changes.
        """
        # Clustered deployments share idmap state; refuse to clear it here.
        if await self.middleware.call('smb.get_smb_ha_mode') == 'CLUSTERED':
            self.logger.warning("clear_idmap_cache is unsafe on clustered smb servers.")
            return

        # Remember whether SMB was running so we can restore it afterwards.
        smb_started = await self.middleware.call('service.started', 'cifs')
        await self.middleware.call('service.stop', 'idmap')

        try:
            os.remove('/var/db/system/samba4/winbindd_cache.tdb')
        except FileNotFoundError:
            self.logger.debug("Failed to remove winbindd_cache.tdb. File not found.")
        except Exception:
            self.logger.debug("Failed to remove winbindd_cache.tdb.", exc_info=True)

        flush = await run(['net', 'cache', 'flush'], check=False)
        if flush.returncode != 0:
            raise CallError(f'Attempt to flush gencache failed with error: {flush.stderr.decode().strip()}')

        await self.middleware.call('service.start', 'idmap')
        if smb_started:
            await self.middleware.call('service.start', 'cifs')

    @private
    async def autodiscover_trusted_domains(self):
        """
        Create RID-backend idmap entries for trusted domains reported by
        `wbinfo -m --verbose`. Skipped entirely when the AD idmap backend is
        AUTORID, which allocates ranges for trusted domains on its own.
        """
        smb = await self.middleware.call('smb.config')

        ad_idmap_backend = (await self.query([('name', '=', 'DS_TYPE_ACTIVEDIRECTORY')], {'get': True}))['idmap_backend']
        if ad_idmap_backend == IdmapBackend.AUTORID.name:
            self.logger.trace('Skipping auto-generation of trusted domains due to AutoRID being enabled.')
            return

        wbinfo = await run(['wbinfo', '-m', '--verbose'], check=False)
        if wbinfo.returncode != 0:
            raise CallError(f'wbinfo -m failed with error: {wbinfo.stderr.decode().strip()}')

        for entry in wbinfo.stdout.decode().splitlines():
            c = entry.split()
            # Only 6-field lines describe trusted domains; skip our own workgroup.
            if len(c) != 6 or c[0] == smb['workgroup']:
                continue

            # PERF FIX: query for the next free range only when it will actually
            # be used; previously this awaited a datastore query for every
            # output line, including headers and skipped entries.
            range_low, range_high = await self.get_next_idmap_range()
            await self.middleware.call('idmap.create', {
                'name': c[0],
                'dns_domain_name': c[1],
                'range_low': range_low,
                'range_high': range_high,
                'idmap_backend': 'RID'
            })

    @accepts()
    async def backend_options(self):
        """
        This returns full information about idmap backend options. Not all
        `options` are valid for every backend.
        """
        return {backend.name: backend.value for backend in IdmapBackend}

    @accepts(
        Str('idmap_backend', enum=[x.name for x in IdmapBackend]),
    )
    async def options_choices(self, backend):
        """
        Returns a list of supported keys for the specified idmap backend.
        """
        return IdmapBackend[backend].supported_keys()

    @accepts()
    async def backend_choices(self):
        """
        Returns array of valid idmap backend choices per directory service.
        """
        return IdmapBackend.ds_choices()

    @private
    async def validate(self, schema_name, data, verrors):
        """
        Validate name/backend combination, certificate reference, and idmap
        range overlap for a domain payload, appending problems to `verrors`.
        """
        if data['name'] == DSType.DS_TYPE_LDAP.name:
            if data['idmap_backend'] not in (await self.backend_choices())['LDAP']:
                verrors.add(f'{schema_name}.idmap_backend',
                            f'idmap backend [{data["idmap_backend"]}] is not appropriate '
                            f'for the system domain type {data["name"]}')

        elif data['name'] == DSType.DS_TYPE_DEFAULT_DOMAIN.name:
            if data['idmap_backend'] != 'TDB':
                verrors.add(f'{schema_name}.idmap_backend',
                            'TDB is the only supported idmap backend for DS_TYPE_DEFAULT_DOMAIN.')

        # BUG FIX: reject range_high == range_low too. Previously an equal pair
        # slipped past the `<` guard and the empty range() built below crashed
        # with IndexError on `new_range[-1]`. Exit here so later checks cannot
        # operate on a degenerate range.
        if data['range_high'] <= data['range_low']:
            verrors.add(f'{schema_name}.range_low', 'Idmap high range must be greater than idmap low range')
            return

        if data.get('certificate') and not await self.middleware.call(
            'certificate.query', [['id', '=', data['certificate']]]
        ):
            verrors.add(f'{schema_name}.certificate', 'Please specify a valid certificate.')

        configured_domains = await self.query()
        ds_state = await self.middleware.call("directoryservices.get_state")
        ldap_enabled = ds_state['ldap'] in ['HEALTHY', 'JOINING']
        ad_enabled = ds_state['activedirectory'] in ['HEALTHY', 'JOINING']
        idmap_backend = data.get('idmap_backend')
        for i in configured_domains:
            # Do not generate validation error comparing to oneself.
            if i['name'] == data['name']:
                continue

            # Do not generate validation errors for overlapping with a disabled DS.
            if not ldap_enabled and i['name'] == 'DS_TYPE_LDAP':
                continue

            if not ad_enabled and i['name'] == 'DS_TYPE_ACTIVEDIRECTORY':
                continue

            # Idmap settings under Services->SMB are ignored when autorid is enabled.
            if idmap_backend == IdmapBackend.AUTORID.name and i['name'] == 'DS_TYPE_DEFAULT_DOMAIN':
                continue

            # Overlap between ranges defined for 'ad' backend are permitted.
            if idmap_backend == IdmapBackend.AD.name and i['idmap_backend'] == IdmapBackend.AD.name:
                continue

            # Integer equivalent of the old range()-truthiness trick: the two
            # ranges conflict exactly when max(lows) < min(highs).
            if max(i['range_low'], data['range_low']) < min(i['range_high'], data['range_high']):
                verrors.add(f'{schema_name}.range_low',
                            f'new idmap range [{data["range_low"]}-{data["range_high"]}] '
                            'conflicts with existing range for domain '
                            f'[{i["name"]}], range: [{i["range_low"]}-{i["range_high"]}].')

    @private
    async def validate_options(self, schema_name, data, verrors, check=('MISSING', 'EXTRA')):
        """
        Verify that `data['options']` contains every required key and no
        unsupported keys for the selected idmap backend, appending errors to
        `verrors`. `check` selects which of the two validations to run.
        """
        # IDIOM FIX: the default was a mutable list; use an immutable tuple.
        # Membership tests below work identically for callers passing lists.
        backend = IdmapBackend[data['idmap_backend']]
        supported_keys = set(backend.supported_keys())
        required_keys = set(backend.required_keys())
        provided_keys = {str(key) for key in data['options'].keys()}

        if 'MISSING' in check:
            for k in (required_keys - provided_keys):
                verrors.add(f'{schema_name}.options.{k}',
                            f'[{k}] is a required parameter for the [{data["idmap_backend"]}] idmap backend.')

        if 'EXTRA' in check:
            for k in (provided_keys - supported_keys):
                verrors.add(f'{schema_name}.options.{k}',
                            f'[{k}] is not a valid parameter for the [{data["idmap_backend"]}] idmap backend.')

    @private
    async def prune_keys(self, data):
        """Remove, in place, any option keys the selected backend does not support."""
        supported = set(IdmapBackend[data['idmap_backend']].supported_keys())
        # Iterate over a snapshot since we mutate the dict while pruning.
        for key in [k for k in data['options'] if str(k) not in supported]:
            data['options'].pop(key)

    @private
    async def idmap_conf_to_client_config(self, data):
        """
        Translate LDAP/RFC2307 idmap backend options into the payload shape
        consumed by ldapclient.validate_credentials. Raises CallError for any
        other backend.
        """
        backend = data['idmap_backend']
        if backend not in ['LDAP', 'RFC2307']:
            raise CallError(f'{backend}: invalid idmap backend')

        options = data['options'].copy()
        if backend == 'LDAP':
            uri = options["ldap_url"]
            basedn = options["ldap_base_dn"]
        else:
            # RFC2307 against AD uses the domain name as the server target.
            if data['options']['ldap_server'] == 'AD':
                uri = options["ldap_domain"]
            else:
                uri = options["ldap_url"]

            basedn = options["bind_path_user"]

        security = {
            "ssl": options["ssl"],
            "sasl": "SEAL",
            "validate_certificates": options["validate_certificates"],
        }
        scheme = "ldaps://" if security["ssl"] == "ON" else "ldap://"

        return {
            "uri_list": [f'{scheme}{uri}'],
            "basedn": basedn,
            "bind_type": "PLAIN",
            "credentials": {
                "binddn": options["ldap_user_dn"],
                "bindpw": options["ldap_user_dn_password"],
            },
            "security": security,
        }

    @filterable
    async def query(self, filters, options):
        """
        Extend the base query; when extra.additional_information contains
        'DOMAIN_INFO', attach live wbinfo domain details to every entry.
        """
        results = await super().query(filters, options)

        additional = options.get("extra", {}).get("additional_information", [])
        if 'DOMAIN_INFO' not in additional:
            return results

        for entry in results:
            try:
                domain_info = await self.middleware.call('idmap.domain_info', entry['name'])
            except CallError as e:
                # An unknown domain (ENOENT) is expected for stale entries;
                # anything else is worth a debug line.
                if e.errno != errno.ENOENT:
                    self.logger.debug("Failed to retrieve domain info: %s", e)
                domain_info = None

            entry.update({'domain_info': domain_info})

        return results

    @accepts(Dict(
        'idmap_domain_create',
        Str('name', required=True),
        Str('dns_domain_name'),
        Int('range_low', required=True, validators=[Range(min=1000, max=2147483647)]),
        Int('range_high', required=True, validators=[Range(min=1000, max=2147483647)]),
        Str('idmap_backend', required=True, enum=[x.name for x in IdmapBackend]),
        Int('certificate', null=True),
        OROperator(
            Dict(
                'idmap_ad_options',
                Ref('nss_info_ad', 'schema_mode'),
                Bool('unix_primary_group', default=False),
                Bool('unix_nss_info', default=False),
            ),
            Dict(
                'idmap_autorid_options',
                Int('rangesize', default=100000, validators=[Range(min=10000, max=1000000000)]),
                Bool('readonly', default=False),
                Bool('ignore_builtin', default=False),
            ),
            Dict(
                'idmap_ldap_options',
                LDAP_DN('ldap_base_dn'),
                LDAP_DN('ldap_user_dn'),
                Str('ldap_user_dn_password', private=True),
                Str('ldap_url'),
                Bool('readonly', default=False),
                Ref('ldap_ssl_choice', 'ssl'),
                Bool('validate_certificates', default=True),
            ),
            Dict(
                'idmap_nss_options',
                Str('linked_service', default='LOCAL_ACCOUNT', enum=['LOCAL_ACCOUNT', 'LDAP']),
            ),
            Dict(
                'idmap_rfc2307_options',
                Str('ldap_server', required=True, enum=['AD', 'STANDALONE']),
                Bool('ldap_realm', default=False),
                LDAP_DN('bind_path_user'),
                LDAP_DN('bind_path_group'),
                Bool('user_cn', default=False),
                Str('cn_realm'),
                Str('ldap_domain'),
                Str('ldap_url'),
                LDAP_DN('ldap_user_dn'),
                Str('ldap_user_dn_password', private=True),
                Ref('ldap_ssl_choice', 'ssl'),
                Bool('validate_certificates', default=True),
            ),
            Dict(
                'idmap_rid_options',
                Bool('sssd_compat', default=False),
            ),
            Dict(
                'idmap_tdb_options',
            ),
            default={},
            name='options',
            title='idmap_options',
        ),
        register=True
    ))
    async def do_create(self, data):
        """
        Create a new IDMAP domain. These domains must be unique. This table
        will be automatically populated after joining an Active Directory domain
        if "allow trusted domains" is set to True in the AD service configuration.
        There are three default system domains: DS_TYPE_ACTIVEDIRECTORY, DS_TYPE_LDAP, DS_TYPE_DEFAULT_DOMAIN.
        The system domains correspond with the idmap settings under Active Directory, LDAP, and SMB
        respectively.

        `name` the pre-windows 2000 domain name.

        `DNS_domain_name` DNS name of the domain.

        `idmap_backend` provides a plugin interface for Winbind to use varying
        backends to store SID/uid/gid mapping tables. The correct setting
        depends on the environment in which the NAS is deployed.

        `range_low` and `range_high` specify the UID and GID range for which this backend is authoritative.

        `certificate` references the certificate ID of the SSL certificate to use for certificate-based
        authentication to a remote LDAP server. This parameter is not supported for all idmap backends as some
        backends will generate SID to ID mappings algorithmically without causing network traffic.

        `options` are additional parameters that are backend-dependent:

        `AD` idmap backend options:
        `unix_primary_group` If True, the primary group membership is fetched from the LDAP attributes (gidNumber).
        If False, the primary group membership is calculated via the "primaryGroupID" LDAP attribute.

        `unix_nss_info` if True winbind will retrieve the login shell and home directory from the LDAP attributes.
        If False or if the AD LDAP entry lacks the SFU attributes the smb4.conf parameters `template shell` and `template homedir` are used.

        `schema_mode` Defines the schema that idmap_ad should use when querying Active Directory regarding user and group information.
        This can be either the RFC2307 schema support included in Windows 2003 R2 or the Service for Unix (SFU) schema.
        For SFU 3.0 or 3.5 please choose "SFU", for SFU 2.0 please choose "SFU20". The behavior of primary group membership is
        controlled by the unix_primary_group option.

        `AUTORID` idmap backend options:
        `readonly` sets the module to read-only mode. No new ranges will be allocated and new mappings
        will not be created in the idmap pool.

        `ignore_builtin` ignores mapping requests for the BUILTIN domain.

        `LDAP` idmap backend options:
        `ldap_base_dn` defines the directory base suffix to use for SID/uid/gid mapping entries.

        `ldap_user_dn` defines the user DN to be used for authentication.

        `ldap_url` specifies the LDAP server to use for SID/uid/gid map entries.

        `ssl` specifies whether to encrypt the LDAP transport for the idmap backend.

        `NSS` idmap backend options:
        `linked_service` specifies the auxiliary directory service ID provider.

        `RFC2307` idmap backend options:
        `domain` specifies the domain for which the idmap backend is being created. Numeric id, short-form
        domain name, or long-form DNS domain name of the domain may be specified. Entry must be entered as
        it appears in `idmap.domain`.

        `range_low` and `range_high` specify the UID and GID range for which this backend is authoritative.

        `ldap_server` defines the type of LDAP server to use. This can either be an LDAP server provided
        by the Active Directory Domain (ad) or a stand-alone LDAP server.

        `bind_path_user` specifies the search base where user objects can be found in the LDAP server.

        `bind_path_group` specifies the search base where group objects can be found in the LDAP server.

        `user_cn` query cn attribute instead of uid attribute for the user name in LDAP.

        `realm` append @realm to cn for groups (and users if user_cn is set) in LDAP queries.

        `ldmap_domain` when using the LDAP server in the Active Directory server, this allows one to
        specify the domain where to access the Active Directory server. This allows using trust relationships
        while keeping all RFC 2307 records in one place. This parameter is optional, the default is to access
        the AD server in the current domain to query LDAP records.

        `ldap_url` when using a stand-alone LDAP server, this parameter specifies the LDAP URL for accessing the LDAP server.

        `ldap_user_dn` defines the user DN to be used for authentication.

        `ldap_user_dn_password` is the password to be used for LDAP authentication.

        `realm` defines the realm to use in the user and group names. This is only required when using cn_realm together with
         a stand-alone ldap server.

        `RID` backend options:
        `sssd_compat` generate idmap low range based on same algorithm that SSSD uses by default.
        """
        verrors = ValidationErrors()

        if 'options' not in data:
            data['options'] = {}

        old = await self.query()
        if data['name'] in [x['name'] for x in old]:
            verrors.add('idmap_domain_create.name', 'Domain names must be unique.')

        if data['options'].get('sssd_compat'):
            if await self.middleware.call('activedirectory.get_state') != 'HEALTHY':
                verrors.add('idmap_domain_create.options',
                            'AD service must be enabled and started to '
                            'generate an SSSD-compatible id range')
                verrors.check()

            data['range_low'] = await self.get_sssd_low_range(data['name'])
            data['range_high'] = data['range_low'] + 100000000

        await self.validate('idmap_domain_create', data, verrors)
        await self.validate_options('idmap_domain_create', data, verrors)
        # BUG FIX: the accepted schema field is 'certificate' (there is no
        # 'certificate_id' key in the payload), so the previous
        # data.get('certificate_id') check was dead code and never fired.
        if data.get('certificate') and not data['options'].get('ssl'):
            verrors.add('idmap_domain_create.certificate',
                        f'The {data["idmap_backend"]} idmap backend does not '
                        'generate LDAP traffic. Certificates do not apply.')
        verrors.check()

        if data['options'].get('ldap_user_dn_password'):
            # Builtin DS_TYPE_* domains store the secret under the SMB
            # workgroup name; user-defined domains use the domain name itself.
            try:
                DSType[data["name"]]
                domain = (await self.middleware.call("smb.config"))['workgroup']
            except KeyError:
                domain = data["name"]

            # Verify the bind credentials before persisting anything.
            client_conf = await self.idmap_conf_to_client_config(data)
            await self.middleware.call(
                'ldapclient.validate_credentials',
                client_conf
            )

            # The password is stored in the secrets backend, not the datastore.
            secret = data['options'].pop('ldap_user_dn_password')

            await self.middleware.call("directoryservices.set_ldap_secret",
                                       domain, secret)
            await self.middleware.call("directoryservices.backup_secrets")

        # Merge user-supplied options over the backend's defaults.
        final_options = IdmapBackend[data['idmap_backend']].defaults()
        final_options.update(data['options'])
        data['options'] = final_options

        new_id = await super().do_create(data)
        out = await self.query([('id', '=', new_id)], {'get': True})
        await self.synchronize()
        return out

    async def do_update(self, id, data):
        """
        Update a domain by id.
        """
        old = await self.query([('id', '=', id)], {'get': True})
        new = old.copy()
        new.update(data)
        if data.get('idmap_backend') and data['idmap_backend'] != old['idmap_backend']:
            # Remove options from previous backend because they are almost
            # certainly not valid for the new backend.
            new['options'] = data.get('options', {})
        else:
            new['options'] = old['options'].copy() | data.get('options', {})

        tmp = data.copy()
        verrors = ValidationErrors()
        if old['name'] in [x.name for x in DSType] and old['name'] != new['name']:
            verrors.add('idmap_domain_update.name',
                        f'Changing name of default domain {old["name"]} is not permitted')

        if new['options'].get('sssd_compat') and not old['options'].get('sssd_compat'):
            if await self.middleware.call('activedirectory.get_state') != 'HEALTHY':
                verrors.add('idmap_domain_update.options',
                            'AD service must be enabled and started to '
                            'generate an SSSD-compatible id range')
                verrors.check()

            new['range_low'] = await self.get_sssd_low_range(new['name'])
            new['range_high'] = new['range_low'] + 100000000

        if new['idmap_backend'] == 'AUTORID' and new['name'] != 'DS_TYPE_ACTIVEDIRECTORY':
            verrors.add("idmap_domain_update.idmap_backend",
                        "AUTORID is only permitted for the default idmap backend for "
                        "the active directory directory service (DS_TYPE_ACTIVEDIRECTORY).")

        await self.validate('idmap_domain_update', new, verrors)
        await self.validate_options('idmap_domain_update', new, verrors, ['MISSING'])
        tmp['idmap_backend'] = new['idmap_backend']
        if data.get('options'):
            await self.validate_options('idmap_domain_update', tmp, verrors, ['EXTRA'])

        # BUG FIX: the payload field is 'certificate', not 'certificate_id', so
        # this check was previously dead code. Test the merged options in `new`
        # because a partial update may omit 'options' entirely.
        if data.get('certificate') and not new['options'].get('ssl'):
            verrors.add('idmap_domain_update.certificate',
                        f'The {new["idmap_backend"]} idmap backend does not '
                        'generate LDAP traffic. Certificates do not apply.')
        verrors.check()
        await self.prune_keys(new)
        # Merge surviving options over the backend defaults.
        final_options = IdmapBackend[new['idmap_backend']].defaults() | new['options'].copy()
        new['options'] = final_options

        if new['options'].get('ldap_user_dn_password'):
            # Builtin DS_TYPE_* domains store the secret under the SMB
            # workgroup name; user-defined domains use the domain name itself.
            try:
                DSType[new["name"]]
                domain = (await self.middleware.call("smb.config"))['workgroup']
            except KeyError:
                domain = new["name"]

            client_conf = await self.idmap_conf_to_client_config(new)
            await self.middleware.call(
                'ldapclient.validate_credentials',
                client_conf
            )

            # The password lives in the secrets backend, not the datastore.
            secret = new['options'].pop('ldap_user_dn_password')
            await self.middleware.call("directoryservices.set_ldap_secret",
                                       domain, secret)
            await self.middleware.call("directoryservices.backup_secrets")

        await super().do_update(id, new)

        out = await self.query([('id', '=', id)], {'get': True})
        await self.synchronize()
        cache_job = await self.middleware.call('idmap.clear_idmap_cache')
        await cache_job.wait()
        return out

    async def do_delete(self, id):
        """
        Delete a domain by id. Deletion of default system domains is not permitted.
        In case of registry config for clustered server, this will remove all smb4.conf
        entries for the domain associated with the id.
        """
        # Ids 1-5 are reserved for the built-in system domains.
        if id <= 5:
            entry = await self.get_instance(id)
            raise CallError(f'Deleting system idmap domain [{entry["name"]}] is not permitted.', errno.EPERM)

        deleted = await self.direct_delete(id)
        await self.synchronize()
        return deleted

    @private
    async def name_to_sid(self, name):
        """
        Resolve a name to a SID via winbind. Failures are logged; whatever
        wbinfo wrote to stdout (possibly empty) is returned either way.
        """
        wb = await run([SMBCmd.WBINFO.value, '--name-to-sid', name], check=False)
        if wb.returncode != 0:
            self.logger.debug("wbinfo failed with error: %s", wb.stderr.decode().strip())

        return wb.stdout.decode().strip()

    @private
    async def sid_to_name(self, sid):
        """
        Last two characters of name string encode the account type.
        """
        wb = await run([SMBCmd.WBINFO.value, '--sid-to-name', sid], check=False)
        if wb.returncode != 0:
            raise CallError(f'wbinfo failed with error: {wb.stderr.decode().strip()}')

        out = wb.stdout.decode().strip()
        # NOTE(review): assumes the trailing type code is always exactly two
        # characters wide in wbinfo output — confirm for single-digit types.
        return {"name": out[:-2], "type": int(out[-2:])}

    @private
    async def sid_to_unixid(self, sid_str):
        """
        Convert a SID string into {'id_type': 'USER'|'GROUP'|'BOTH', 'id': int},
        or None when winbind cannot map it. Samba's local user/group SIDs
        (SID_LOCAL_USER_PREFIX / SID_LOCAL_GROUP_PREFIX) are decoded without
        consulting winbind.
        """
        rv = None
        gid = None
        uid = None

        if sid_str.startswith(SID_LOCAL_USER_PREFIX):
            # BUG FIX: str.strip(prefix) removes any characters from that *set*
            # at both ends and could eat trailing digits of the numeric id
            # (e.g. '...-1001' -> '100'); slice the prefix off instead.
            return {"id_type": "USER", "id": int(sid_str[len(SID_LOCAL_USER_PREFIX):])}

        elif sid_str.startswith(SID_LOCAL_GROUP_PREFIX):
            return {"id_type": "GROUP", "id": int(sid_str[len(SID_LOCAL_GROUP_PREFIX):])}

        wb = await run([SMBCmd.WBINFO.value, '--sid-to-gid', sid_str], check=False)
        if wb.returncode == 0:
            gid = int(wb.stdout.decode().strip())

        wb = await run([SMBCmd.WBINFO.value, '--sid-to-uid', sid_str], check=False)
        if wb.returncode == 0:
            uid = int(wb.stdout.decode().strip())

        # BUG FIX: compare against None so a valid id of 0 is not discarded
        # by truthiness.
        if gid is not None and gid == uid:
            rv = {"id_type": "BOTH", "id": gid}
        elif gid is not None:
            rv = {"id_type": "GROUP", "id": gid}
        elif uid is not None:
            rv = {"id_type": "USER", "id": uid}

        return rv

    @private
    async def id_to_name(self, id, id_type):
        """
        Resolve a unix uid/gid to its account name via the user/group plugins.
        Returns None when the lookup times out or the id cannot be resolved.
        """
        idtype = IDType[id_type]
        idmap_timeout = 5.0

        if idtype in (IDType.GROUP, IDType.BOTH):
            method, to_check, key = "group.get_group_obj", {"gid": id}, 'gr_name'
        elif idtype == IDType.USER:
            method, to_check, key = "user.get_user_obj", {"uid": id}, 'pw_name'
        else:
            raise CallError(f"Unsupported id_type: [{idtype.name}]")

        try:
            # The lookup may hit a remote directory service; bound the wait.
            result = await asyncio.wait_for(
                self.middleware.call(method, to_check),
                timeout=idmap_timeout
            )
            return result[key]
        except asyncio.TimeoutError:
            self.logger.debug(
                "timeout encountered while trying to convert %s id %s "
                "to name. This may indicate significant networking issue.",
                id_type.lower(), id
            )
        except KeyError:
            pass

        return None

    @private
    async def unixid_to_sid(self, data):
        """
        Samba generates SIDs for local accounts that lack explicit mapping in
        passdb.tdb or group_mapping.tdb with a prefix of S-1-22-1 (users) and
        S-1-22-2 (groups). This is not returned by wbinfo, but for consistency
        with what appears when viewed over SMB protocol we'll do the same here.

        `data` carries an integer `id` and an `id_type` of "USER" or "GROUP"
        (defaulting to "GROUP"). Returns the SID string, or None on failure.
        """
        unixid = data.get("id")
        id = IDType[data.get("id_type", "GROUP")]

        if id == IDType.USER:
            wb = await run([SMBCmd.WBINFO.value, '--uid-to-sid', str(unixid)], check=False)
        else:
            wb = await run([SMBCmd.WBINFO.value, '--gid-to-sid', str(unixid)], check=False)

        if wb.returncode != 0:
            self.logger.warning("Could not convert [%d] to SID: %s",
                                unixid, wb.stderr.decode().strip())
            # winbindd does not know about purely local accounts; if the id
            # exists in our user/group database, synthesize the S-1-22-* SID
            # described in the docstring above.
            if WBCErr.DOMAIN_NOT_FOUND.err() in wb.stderr.decode():
                is_local = await self.middleware.call(
                    f'{"user" if id == IDType.USER else "group"}.query',
                    [("uid" if id == IDType.USER else "gid", '=', unixid)],
                    {"count": True}
                )
                if is_local:
                    return f'S-1-22-{1 if id == IDType.USER else 2}-{unixid}'

            return None

        return wb.stdout.decode().strip()

    @private
    async def get_idmap_info(self, ds, id):
        """
        Locate the idmap domain whose configured range contains `id` for the
        given directory service name (`activedirectory` or `ldap`).

        Returns a (range_low, id_type_both) tuple; (None, False) when no
        configured domain covers the id.
        """
        low_range = None
        id_type_both = False

        for dom in await self.query():
            # Each directory service only consults its own idmap domains.
            if ds == 'activedirectory' and dom['name'] == 'DS_TYPE_LDAP':
                continue

            if ds == 'ldap' and dom['name'] != 'DS_TYPE_LDAP':
                continue

            if dom['range_low'] <= id < dom['range_high']:
                low_range = dom['range_low']
                # AUTORID and RID backends assign "BOTH" type ids.
                id_type_both = dom['idmap_backend'] in ('AUTORID', 'RID')
                break

        return (low_range, id_type_both)

    @private
    async def synthetic_user(self, ds, passwd):
        """
        Build a user.query-compatible entry for a directory services user
        from a passwd-style struct.
        """
        uid = passwd['pw_uid']
        low_range, id_type_both = await self.get_idmap_info(ds, uid)
        sid = await self.unixid_to_sid({"id": uid, "id_type": "USER"})
        # Synthetic database ids: 100000 offset + domain low range + SID RID.
        rid = int(sid.rsplit('-', 1)[1])
        return {
            'id': 100000 + low_range + rid,
            'uid': uid,
            'username': passwd['pw_name'],
            'unixhash': None,
            'smbhash': None,
            'group': {},
            'home': '',
            'shell': '',
            'full_name': passwd['pw_gecos'],
            'builtin': False,
            'email': '',
            'password_disabled': False,
            'locked': False,
            'sudo': False,
            'sudo_nopasswd': False,
            'sudo_commands': [],
            'microsoft_account': False,
            'attributes': {},
            'groups': [],
            'sshpubkey': None,
            'local': False,
            'id_type_both': id_type_both,
        }

    @private
    async def synthetic_group(self, ds, grp):
        """
        Build a group.query-compatible entry for a directory services group
        from a grp-style struct.
        """
        gid = grp['gr_gid']
        low_range, id_type_both = await self.get_idmap_info(ds, gid)
        sid = await self.unixid_to_sid({"id": gid, "id_type": "GROUP"})
        # Synthetic database ids: 100000 offset + domain low range + SID RID.
        rid = int(sid.rsplit('-', 1)[1])
        return {
            'id': 100000 + low_range + rid,
            'gid': gid,
            'name': grp['gr_name'],
            'group': grp['gr_name'],
            'builtin': False,
            'sudo': False,
            'sudo_nopasswd': False,
            'sudo_commands': [],
            'users': [],
            'local': False,
            'id_type_both': id_type_both,
        }

    @private
    async def idmap_to_smbconf(self, data=None):
        """
        Convert idmap domain configuration into a dict of smb.conf
        parameters of the form {"<param>": {"raw"|"parsed": <value>}},
        suitable for applying to the SMB registry configuration.

        `data` may be a pre-fetched idmap query result; when None the
        current configuration is queried.
        """
        rv = {}
        if data is None:
            idmap = await self.query()
        else:
            idmap = data

        ds_state = await self.middleware.call('directoryservices.get_state')
        workgroup = await self.middleware.call('smb.getparm', 'workgroup', 'global')
        ad_enabled = ds_state['activedirectory'] in ['HEALTHY', 'JOINING', 'FAULTED']
        ldap_enabled = ds_state['ldap'] in ['HEALTHY', 'JOINING', 'FAULTED']
        ad_idmap = filter_list(idmap, [('name', '=', DSType.DS_TYPE_ACTIVEDIRECTORY.name)], {'get': True}) if ad_enabled else None
        disable_ldap_starttls = False

        for i in idmap:
            if i['name'] == DSType.DS_TYPE_DEFAULT_DOMAIN.name:
                # An AUTORID AD backend claims the default ("*") domain itself.
                if ad_idmap and ad_idmap['idmap_backend'] == 'AUTORID':
                    continue
                domain = "*"
            elif i['name'] == DSType.DS_TYPE_ACTIVEDIRECTORY.name:
                if not ad_enabled:
                    continue
                if i['idmap_backend'] == 'AUTORID':
                    domain = "*"
                else:
                    domain = workgroup
            elif i['name'] == DSType.DS_TYPE_LDAP.name:
                if not ldap_enabled:
                    continue
                domain = workgroup
                # This will need to be re-implemented once LDAP directory service is clustered
                if i['idmap_backend'] == 'LDAP':
                    """
                    In case of default LDAP backend, populate values from ldap form.
                    """
                    idmap_prefix = f"idmap config {domain} :"
                    ldap = await self.middleware.call('ldap.config')
                    rv.update({
                        f"{idmap_prefix} backend": {"raw": i['idmap_backend'].lower()},
                        f"{idmap_prefix} range": {"raw": f"{i['range_low']} - {i['range_high']}"},
                        f"{idmap_prefix} ldap_base_dn": {"raw": ldap['basedn']},
                        f"{idmap_prefix} ldap_url": {"raw": ' '.join(ldap['uri_list'])},
                    })
                    continue
            else:
                domain = i['name']

            idmap_prefix = f"idmap config {domain} :"
            rv.update({
                f"{idmap_prefix} backend": {"raw": i['idmap_backend'].lower()},
                f"{idmap_prefix} range": {"raw": f"{i['range_low']} - {i['range_high']}"}
            })
            for k, v in i['options'].items():
                # "cn_realm" is stored under a different name than the
                # smb.conf backend parameter ("realm").
                backend_parameter = "realm" if k == "cn_realm" else k
                if k == 'ldap_server':
                    v = 'ad' if v == 'AD' else 'stand-alone'
                elif k == 'ldap_url':
                    v = f'{"ldaps://" if i["options"]["ssl"]  == "ON" else "ldap://"}{v}'
                elif k == 'ssl':
                    # The ssl option is not emitted directly; STARTTLS use is
                    # folded into the global "ldap ssl" parameter below.
                    if v != 'STARTTLS':
                        disable_ldap_starttls = True

                    continue

                rv.update({
                    f"{idmap_prefix} {backend_parameter}": {"parsed": v},
                })

        if ad_enabled:
            rv['ldap ssl'] = {'parsed': 'off' if disable_ldap_starttls else 'start tls'}

        return rv

    @private
    async def diff_conf_and_registry(self, data, idmaps):
        """
        Compare generated idmap smb.conf parameters (`data`) against the
        registry's current values (`idmaps`) and report the delta.

        Returns a dict with 'added' (only in data), 'removed' (only in
        registry) and 'modified' (present in both, value differs) keys.
        """
        reg = idmaps
        conf_keys = set(data)
        reg_keys = set(reg)
        common = conf_keys & reg_keys
        return {
            'added': {k: data[k] for k in conf_keys - reg_keys},
            'removed': {k: reg[k] for k in reg_keys - conf_keys},
            'modified': {k: data[k] for k in common if data[k] != reg[k]},
        }

    @private
    async def synchronize(self):
        """
        Regenerate the idmap smb.conf parameters, apply the delta to the SMB
        registry, and restart the idmap service to pick up the changes.
        """
        config_idmap = await self.query()
        desired = await self.idmap_to_smbconf(config_idmap)
        current = (await self.middleware.call('smb.reg_globals'))['idmap']
        diff = await self.diff_conf_and_registry(desired, current)
        await self.middleware.call('sharing.smb.apply_conf_diff', 'GLOBAL', diff)
        await self.middleware.call('service.restart', 'idmap')
コード例 #4
0
class DirectoryServices(Service):
    """
    Umbrella service for Active Directory / LDAP state tracking and
    machine-account (secrets.tdb) backup, restore and inspection.
    """

    class Config:
        service = "directoryservices"
        cli_namespace = "directory_service"

    @accepts()
    @returns(
        Dict('directory_services_states',
             Ref('directoryservice_state', 'activedirectory'),
             Ref('directoryservice_state', 'ldap')))
    async def get_state(self):
        """
        `DISABLED` Directory Service is disabled.

        `FAULTED` Directory Service is enabled, but not HEALTHY. Review logs and generated alert
        messages to debug the issue causing the service to be in a FAULTED state.

        `LEAVING` Directory Service is in process of stopping.

        `JOINING` Directory Service is in process of starting.

        `HEALTHY` Directory Service is enabled, and last status check has passed.
        """
        ha_mode = await self.middleware.call('smb.get_smb_ha_mode')
        svc = 'clustercache' if ha_mode == 'CLUSTERED' else 'cache'

        try:
            return await self.middleware.call(f'{svc}.get', 'DS_STATE')
        except KeyError:
            # Cache miss: probe each directory service and prime the cache.
            ds_state = {}
            for srv in DSType:
                try:
                    res = await self.middleware.call(f'{srv.value}.started')
                    ds_state[srv.value] = DSStatus.HEALTHY.name if res else DSStatus.DISABLED.name
                except Exception:
                    ds_state[srv.value] = DSStatus.FAULTED.name

            await self.middleware.call(f'{svc}.put', 'DS_STATE', ds_state, 60)
            return ds_state

        except CallError as e:
            # BUG FIX: compare errno values with `!=` rather than `is not`;
            # identity comparison of ints relies on CPython interning.
            if e.errno != errno.ENXIO:
                raise

            # ENXIO from clustercache indicates the cluster is unhealthy.
            self.logger.debug(
                "Unable to determine directory services state while cluster is unhealthy"
            )
            return {"activedirectory": "DISABLED", "ldap": "DISABLED"}

    @private
    async def set_state(self, new):
        """
        Merge `new` into the cached DS_STATE, emit a change event, and
        persist the merged state back to the cache.
        """
        ds_state = {
            'activedirectory': DSStatus.DISABLED.name,
            'ldap': DSStatus.DISABLED.name,
        }

        ha_mode = await self.middleware.call('smb.get_smb_ha_mode')
        svc = 'clustercache' if ha_mode == 'CLUSTERED' else 'cache'

        try:
            old_state = await self.middleware.call(f'{svc}.get', 'DS_STATE')
            ds_state.update(old_state)
        except KeyError:
            self.logger.trace(
                "No previous DS_STATE exists. Lazy initializing for %s", new)

        ds_state.update(new)
        self.middleware.send_event('directoryservices.status',
                                   'CHANGED',
                                   fields=ds_state)
        return await self.middleware.call(f'{svc}.put', 'DS_STATE', ds_state)

    @accepts()
    @job()
    async def cache_refresh(self, job):
        """
        This method refreshes the directory services cache for users and groups that is
        used as a backing for `user.query` and `group.query` methods. The first cache fill in
        an Active Directory domain may take a significant amount of time to complete and
        so it is performed within a job. The most likely situation in which a user may
        desire to refresh the directory services cache is after new users or groups are added
        to a remote directory server, with the intention to have said users or groups appear
        in the results of the aforementioned account-related methods.

        A cache refresh is not required in order to use newly-added users and groups in
        permissions and ACL related methods. Likewise, a cache refresh will not resolve issues
        with users being unable to authenticate to shares.
        """
        return await job.wrap(await self.middleware.call('dscache.refresh'))

    @private
    @returns(
        List('ldap_ssl_choices',
             items=[
                 Str('ldap_ssl_choice',
                     enum=[x.value for x in list(SSL)],
                     default=SSL.USESSL.value,
                     register=True)
             ]))
    async def ssl_choices(self, dstype):
        """Return the SSL modes supported for the given directory service type."""
        return [x.value for x in list(SSL)]

    @private
    @returns(
        List('sasl_wrapping_choices',
             items=[
                 Str('sasl_wrapping_choice',
                     enum=[x.value for x in list(SASL_Wrapping)],
                     register=True)
             ]))
    async def sasl_wrapping_choices(self, dstype):
        """Return the supported SASL wrapping modes for the directory service type."""
        return [x.value for x in list(SASL_Wrapping)]

    @private
    @returns(
        OROperator(List('ad_nss_choices',
                        items=[
                            Str('nss_info_ad',
                                enum=[
                                    x.value[0] for x in NSS_Info
                                    if DSType.AD in x.value[1]
                                ],
                                default=NSS_Info.SFU.value[0],
                                register=True)
                        ]),
                   List('ldap_nss_choices',
                        items=[
                            Str('nss_info_ldap',
                                enum=[
                                    x.value[0] for x in NSS_Info
                                    if DSType.LDAP in x.value[1]
                                ],
                                default=NSS_Info.RFC2307.value[0],
                                register=True)
                        ]),
                   name='nss_info_choices'))
    async def nss_info_choices(self, dstype):
        """Return nss_info schemas valid for the given directory service type."""
        ds = DSType(dstype.lower())
        ret = []

        # NSS_Info values are (schema_name, set_of_supported_ds_types).
        for x in list(NSS_Info):
            if ds in x.value[1]:
                ret.append(x.value[0])

        return ret

    @private
    def get_db_secrets(self):
        """
        Return the secrets backup stored in the CIFS service row, keyed by
        machine account name, plus the datastore row under key "id".
        """
        rv = {}
        db = self.middleware.call_sync('datastore.query', 'services.cifs', [],
                                       {
                                           'prefix': 'cifs_srv_',
                                           'get': True
                                       })

        rv.update({"id": db['id']})
        if db['secrets'] is None:
            return rv

        try:
            rv.update(json.loads(db['secrets']))
        except json.decoder.JSONDecodeError:
            # Corrupt backup is non-fatal; caller just gets the row id.
            self.logger.warning("Stored secrets are not valid JSON. "
                                "A new backup of secrets should be generated.")
        return rv

    @private
    def backup_secrets(self):
        """
        Writes the current secrets database to the freenas config file.
        """
        ha_mode = self.middleware.call_sync('smb.get_smb_ha_mode')
        if ha_mode == "CLUSTERED":
            return

        if ha_mode == "UNIFIED":
            if self.middleware.call_sync("failover.status") != "MASTER":
                self.logger.debug(
                    "Skipping secrets backup on standby controller.")
                return

            ngc = self.middleware.call_sync("network.configuration.config")
            netbios_name = ngc["hostname_virtual"]
        else:
            netbios_name = self.middleware.call_sync(
                'smb.config')['netbiosname_local']

        db_secrets = self.get_db_secrets()
        config_id = db_secrets.pop('id')

        with DirectorySecrets(logger=self.logger, ha_mode=ha_mode) as s:
            secrets = s.dump()

        if not secrets:
            self.logger.warning("Unable to parse secrets")
            return

        # Entries are keyed by machine account name (netbios name + "$").
        db_secrets.update({f"{netbios_name.upper()}$": secrets})
        self.middleware.call_sync('datastore.update', 'services.cifs', config_id,
                                  {'secrets': json.dumps(db_secrets)},
                                  {'prefix': 'cifs_srv_'})

    @private
    def restore_secrets(self, netbios_name=None):
        """
        Restores secrets from a backup copy in the TrueNAS config file. This should
        be used with caution because winbindd will automatically update machine account
        passwords at configurable intervals. There is a periodic TrueNAS check that
        automates this backup, but care should be taken before manually invoking restores.
        """
        ha_mode = self.middleware.call_sync('smb.get_smb_ha_mode')

        if ha_mode == "CLUSTERED":
            return True

        if ha_mode == "UNIFIED":
            if self.middleware.call_sync("failover.status") != "MASTER":
                self.logger.debug(
                    "Skipping secrets restore on standby controller.")
                return

            if netbios_name is None:
                # BUG FIX: service name was misspelled "network.configuratoin",
                # which made restore fail on UNIFIED HA systems.
                ngc = self.middleware.call_sync("network.configuration.config")
                netbios_name = ngc["hostname_virtual"]

        elif netbios_name is None:
            netbios_name = self.middleware.call_sync(
                'smb.config')['netbiosname_local']

        db_secrets = self.get_db_secrets()

        server_secrets = db_secrets.get(f"{netbios_name.upper()}$")
        if server_secrets is None:
            self.logger.warning(
                "Unable to find stored secrets for [%s]. "
                "Directory service functionality may be impacted.",
                netbios_name)
            return False

        with DirectorySecrets(logger=self.logger, ha_mode=ha_mode) as s:
            try:
                s.restore(server_secrets)
            except Exception:
                self.logger.warning(
                    "Failed to restore secrets for [%s]. "
                    "Directory service functionality may be impacted.",
                    netbios_name,
                    exc_info=True)
                return False

        return True

    @private
    def secrets_has_domain(self, domain):
        """
        Simple check to see whether a particular domain is in the
        secrets file. Traversing a tdb file can set a tdb chainlock
        on it. It's better to just do a quick lookup of the
        single value.
        """
        ha_mode = self.middleware.call_sync('smb.get_smb_ha_mode')
        if ha_mode == 'CLUSTERED':
            # with clustered server we don't want misbehaving node to
            # potentially muck around with clustered secrets.
            return True

        with DirectorySecrets(logger=self.logger, ha_mode=ha_mode) as s:
            rv = s.has_domain(domain)

        return rv

    @private
    def set_ldap_secret(self, domain, secret):
        """Store the LDAP bind secret for `domain` in secrets.tdb."""
        ha_mode = self.middleware.call_sync('smb.get_smb_ha_mode')
        with DirectorySecrets(logger=self.logger, ha_mode=ha_mode) as s:
            rv = s.set_ldap_secret(domain, secret)

        return rv

    @private
    def get_last_password_change(self, domain=None):
        """
        Returns unix timestamp of last password change according to
        the secrets.tdb (our current running configuration), and what
        we have in our database.
        """
        ha_mode = self.middleware.call_sync('smb.get_smb_ha_mode')
        if ha_mode == 'CLUSTERED':
            return

        smb_config = self.middleware.call_sync('smb.config')
        if domain is None:
            domain = smb_config['workgroup']

        with DirectorySecrets(logger=self.logger, ha_mode=ha_mode) as s:
            passwd_ts = s.last_password_change(domain)

        db_secrets = self.get_db_secrets()
        server_secrets = db_secrets.get(
            f"{smb_config['netbiosname_local'].upper()}$")
        if server_secrets is None:
            return {"dbconfig": None, "secrets": passwd_ts}

        # Stored value is a base64-encoded little-endian uint32 timestamp.
        stored_ts_bytes = server_secrets[
            f'SECRETS/MACHINE_LAST_CHANGE_TIME/{domain.upper()}']
        stored_ts = struct.unpack("<L", b64decode(stored_ts_bytes))[0]

        return {"dbconfig": stored_ts, "secrets": passwd_ts}

    @private
    def available_secrets(self):
        """
        Entries in the secrets backup are keyed according to machine account name,
        which in this case is the netbios name of server followed by a dollar sign ($).
        These are possible values to add as an argument to 'restore_secrets' so that
        the secrets.tdb can be restored to what it was prior to a netbios name change.
        This functionality is intended more as a support tool than for general-purpose
        use in case user has become somewhat inventive with troubleshooting steps
        and changing server names.
        """
        db_secrets = self.get_db_secrets()
        db_secrets.pop('id')
        return list(db_secrets.keys())

    @private
    async def initialize(self, data=None):
        """
        Ensure that secrets.tdb at a minimum exists. If it doesn't exist, try to restore
        from a backup stored in our config file. If this fails, try to use what
        auth info we have to recover the information. If we are in an LDAP
        environment with a samba schema in use, we just need to write the password into
        secrets.tdb.
        """
        if data is None:
            ldap_conf = await self.middleware.call("ldap.config")
            ldap_enabled = ldap_conf['enable']
            ad_enabled = (
                await self.middleware.call("activedirectory.config"))['enable']
        else:
            ldap_enabled = data['ldap']
            ad_enabled = data['activedirectory']
            if ldap_enabled:
                ldap_conf = await self.middleware.call("ldap.config")

        workgroup = (await self.middleware.call("smb.config"))["workgroup"]
        is_kerberized = ad_enabled

        if not ldap_enabled and not ad_enabled:
            return

        health_check = 'activedirectory.started' if ad_enabled else 'ldap.started'

        has_secrets = await self.middleware.call(
            "directoryservices.secrets_has_domain", workgroup)

        if ad_enabled and not has_secrets:
            kerberos_method = await self.middleware.call(
                "smb.getparm", "kerberos method", "GLOBAL")
            self.logger.warning("Domain secrets database does not exist. "
                                "Attempting to restore.")
            ok = await self.middleware.call("directoryservices.restore_secrets")
            if not ok:
                self.logger.warning(
                    "Failed to restore domain secrets database. "
                    "Re-joining AD domain may be required.")

                if kerberos_method != "secrets and keytab":
                    self.logger.warning(
                        "Restoration of secrets database failed. "
                        "Attempting to automatically re-join AD domain.")
                    try:
                        await self.middleware.call("activedirectory.start")
                    except Exception:
                        self.logger.warning(
                            "Failed to re-join active directory domain.",
                            exc_info=True)

        elif ldap_enabled and not has_secrets and ldap_conf["has_samba_schema"]:
            self.logger.warning(
                "LDAP SMB secrets database does not exist. "
                "attempting to restore secrets from configuration file.")
            await self.middleware.call("smb.store_ldap_admin_password")

        if ldap_enabled and ldap_conf['kerberos_realm']:
            is_kerberized = True

        # Stale gencache entries can mask the freshly restored secrets.
        gencache_flush = await run([SMBCmd.NET.value, 'cache', 'flush'],
                                   check=False)
        if gencache_flush.returncode != 0:
            self.logger.warning(
                "Failed to clear the SMB gencache after re-initializing "
                "directory services: [%s]", gencache_flush.stderr.decode())

        if is_kerberized:
            try:
                await self.middleware.call('kerberos.start')
            except CallError:
                # BUG FIX: adjacent string literals previously rendered as
                # "kerberos maynot work" (missing space).
                self.logger.warning(
                    "Failed to start kerberos after directory service "
                    "initialization. Services dependent on kerberos may "
                    "not work correctly.",
                    exc_info=True)

        await self.middleware.call(health_check)
        refresh = await self.middleware.call('dscache.refresh')
        await refresh.wait()
コード例 #5
0
class ACLTemplateService(CRUDService):
    """CRUD service for reusable filesystem ACL templates."""

    class Config:
        cli_namespace = 'filesystem.acltemplate'
        datastore = 'filesystem.acltemplate'
        datastore_prefix = 'acltemplate_'
        namespace = 'filesystem.acltemplate'

    ENTRY = Patch(
        'acltemplate_create', 'acltemplate_entry',
        ('add', Int('id')),
        ('add', Bool('builtin')),
    )

    @private
    async def validate_acl(self, data, schema, verrors):
        """
        Validate `data['acl']` against its ACL type, accumulating any
        errors in `verrors` under the given `schema` prefix.
        """
        acltype = ACLType[data['acltype']]
        aclcheck = acltype.validate({'dacl': data['acl']})
        if not aclcheck['is_valid']:
            for err in aclcheck['errors']:
                # err is (entry index, message, field); field may be falsy.
                if err[2]:
                    v = f'{schema}.{err[0]}.{err[2]}'
                else:
                    v = f'{schema}.{err[0]}'

                verrors.add(v, err[1])

        if acltype is ACLType.POSIX1E:
            # gen_aclstring_posix1e may mutate its input; validate on a copy.
            await self.middleware.call(
                "filesystem.gen_aclstring_posix1e",
                copy.deepcopy(data["acl"]), False, verrors
            )

        for idx, ace in enumerate(data['acl']):
            if ace['id'] is None:
                verrors.add(f'{schema}.{idx}.id', 'null id is not permitted.')

    @accepts(Dict(
        "acltemplate_create",
        Str("name", required=True),
        Str("acltype", required=True, enum=["NFS4", "POSIX1E"]),
        OROperator(Ref('nfs4_acl'), Ref('posix1e_acl'), name='acl', required=True),
        register=True
    ))
    async def do_create(self, data):
        """
        Create a new filesystem ACL template.
        """
        verrors = ValidationErrors()
        if len(data['acl']) == 0:
            verrors.add(
                "filesystem_acltemplate_create.acl",
                "At least one ACL entry must be specified."
            )
        await self.validate_acl(data, "filesystem_acltemplate_create.acl", verrors)
        verrors.check()
        data['builtin'] = False

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )
        # Use the public get_instance for consistency with do_update/do_delete.
        return await self.get_instance(data['id'])

    @accepts(
        Int('id'),
        Patch(
            'acltemplate_create',
            'acltemplate_update',
            ('attr', {'update': True})
        )
    )
    async def do_update(self, id, data):
        """
        update filesystem ACL template with `id`.
        """
        old = await self.get_instance(id)
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        if old['builtin']:
            verrors.add("filesystem_acltemplate_update.builtin",
                        "built-in ACL templates may not be changed")

        if new['name'] != old['name']:
            name_exists = bool(await self.query([('name', '=', new['name'])]))
            if name_exists:
                verrors.add("filesystem_acltemplate_update.name",
                            f"{data['name']}: name is not unique")

        if len(new['acl']) == 0:
            verrors.add(
                "filesystem_acltemplate_update.acl",
                "At least one ACL entry must be specified."
            )
        await self.validate_acl(new, "filesystem_acltemplate_update.acl", verrors)
        verrors.check()

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )
        return await self.get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """Delete the non-builtin ACL template with `id`."""
        entry = await self.get_instance(id)
        if entry['builtin']:
            raise CallError("Deletion of builtin templates is not permitted",
                            errno.EPERM)

        return await self.middleware.call(
            'datastore.delete', self._config.datastore, id
        )

    @private
    async def append_builtins(self, data):
        """
        This method ensures that ACL grants some minimum level of permissions
        to our builtin users or builtin admins accounts.
        """
        bu_id = int(SMBBuiltin.USERS.value[1][9:])
        # BUG FIX: ba_id previously copied the builtin_users RID, so
        # builtin_administrators never received an entry.
        ba_id = int(SMBBuiltin.ADMINISTRATORS.value[1][9:])
        has_builtins = any(filter(lambda x: x["id"] in [bu_id, ba_id], data['acl']))
        if has_builtins:
            return

        if data['acltype'] == ACLType.NFS4.name:
            data['acl'].extend([
                {"tag": "GROUP", "id": bu_id, "perms": {"BASIC": "MODIFY"}, "flags": {"BASIC": "INHERIT"}, "type": "ALLOW"},
                {"tag": "GROUP", "id": ba_id, "perms": {"BASIC": "FULL_CONTROL"}, "flags": {"BASIC": "INHERIT"}, "type": "ALLOW"},
            ])
            return

        # POSIX1E: a MASK entry is required once named group entries exist.
        # BUG FIX: the access-mask check previously duplicated the
        # default-mask predicate (missing `not`), and the fallback appends
        # had their `default` flags inverted relative to the checks.
        has_default_mask = any(filter(lambda x: x["tag"] == "MASK" and x["default"], data['acl']))
        has_access_mask = any(filter(lambda x: x["tag"] == "MASK" and not x["default"], data['acl']))
        all_perms = {"READ": True, "WRITE": True, "EXECUTE": True}
        data['acl'].extend([
            {"tag": "GROUP", "id": bu_id, "perms": all_perms, "default": False},
            {"tag": "GROUP", "id": bu_id, "perms": all_perms, "default": True},
            {"tag": "GROUP", "id": ba_id, "perms": all_perms, "default": False},
            {"tag": "GROUP", "id": ba_id, "perms": all_perms, "default": True},
        ])

        if not has_default_mask:
            data['acl'].append({"tag": "MASK", "id": -1, "perms": all_perms, "default": True})

        if not has_access_mask:
            data['acl'].append({"tag": "MASK", "id": -1, "perms": all_perms, "default": False})

        return

    @private
    async def resolve_names(self, uid, gid, data):
        """
        Populate a `who` name for each ACL entry, using the entry's explicit
        id when set, otherwise the provided file owner uid/gid for the
        special owner/group tags.
        """
        for ace in data['acl']:
            if ace['id'] != -1:
                ace['who'] = await self.middleware.call(
                    'idmap.id_to_name', ace['id'], ace['tag']
                )
            elif ace['tag'] in ('group@', 'GROUP_OBJ'):
                ace['who'] = await self.middleware.call(
                    'idmap.id_to_name', gid, 'GROUP'
                )
            elif ace['tag'] in ('owner@', 'USER_OBJ'):
                ace['who'] = await self.middleware.call(
                    'idmap.id_to_name', uid, 'USER'
                )
            else:
                ace['who'] = None

        return

    @accepts(Dict(
        "acltemplate_by_path",
        Str("path", default=""),
        Ref('query-filters'),
        Ref('query-options'),
        Dict(
            "format-options",
            Bool("canonicalize", default=False),
            Bool("ensure_builtins", default=False),
            Bool("resolve_names", default=False),
        )
    ))
    @returns(List(
        'templates',
        items=[Ref('acltemplate_entry')]
    ))
    async def by_path(self, data):
        """
        Retrieve list of available ACL templates for a given `path`.

        Supports `query-filters` and `query-options`.
        `format-options` gives additional options to alter the results of
        the template query:

        `canonicalize` - place ACL entries for NFSv4 ACLs in Microsoft canonical order.
        `ensure_builtins` - ensure all results contain entries for `builtin_users` and `builtin_administrators`
        groups.
        `resolve_names` - convert ids in ACL entries into names.
        """
        verrors = ValidationErrors()
        filters = data.get('query-filters')
        if data['path']:
            path = await self.middleware.call(
                "filesystem.resolve_cluster_path", data['path']
            )
            acltype = await self.middleware.call(
                'filesystem.path_get_acltype', path
            )
            if acltype == ACLType.DISABLED.name:
                return []

            if acltype == ACLType.POSIX1E.name and data['format-options']['canonicalize']:
                verrors.add(
                    "filesystem.acltemplate_by_path.format-options.canonicalize",
                    "POSIX1E ACLs may not be sorted into Windows canonical order."
                )
            filters.append(("acltype", "=", acltype))

        if not data['path'] and data['format-options']['resolve_names']:
            verrors.add(
                "filesystem.acltemplate_by_path.format-options.canonicalize",
                "ACL entry ids may not be resolved into names unless path is provided."
            )

        verrors.check()

        templates = await self.query(filters, data['query-options'])
        for t in templates:
            if data['format-options']['ensure_builtins']:
                await self.append_builtins(t)

            if data['format-options']['resolve_names']:
                # BUG FIX: os.stat was previously *called* on the event loop
                # and its result passed to run_in_thread; pass the callable
                # and argument separately so the stat happens off-loop.
                # `path` is guaranteed bound here by the verrors check above.
                st = await self.middleware.run_in_thread(os.stat, path)
                await self.resolve_names(st.st_uid, st.st_gid, t)

            if data['format-options']['canonicalize'] and t['acltype'] == ACLType.NFS4.name:
                canonicalized = ACLType[t['acltype']].canonicalize(t['acl'])
                t['acl'] = canonicalized

        return templates
コード例 #6
0
class DNSClient(Service):
    class Config:
        private = True

    @private
    async def get_resolver(self, options):
        """Construct a DNS resolver configured from `options`.

        When `options['nameservers']` is non-empty, an in-memory
        resolv.conf listing those servers is fed to the resolver so the
        system configuration is bypassed; otherwise the default system
        resolver configuration applies. The per-query `timeout` from
        `options` is applied in both cases.
        """
        nameservers = options['nameservers']
        if nameservers:
            # Synthesize a resolv.conf in memory rather than touching disk.
            conf_text = ''.join(f"nameserver {n}\n" for n in nameservers)
            resolver = Resolver(StringIO(conf_text))
        else:
            resolver = Resolver()

        resolver.timeout = options['timeout']
        return resolver

    @private
    async def resolve_name(self, name, rdtype, options):
        """Resolve `name` as record type `rdtype` using `options`.

        PTR queries treat `name` as an IP address and go through
        resolve_address(); every other record type is a forward lookup.
        Returns the resolver's answer object.
        """
        resolver = await self.get_resolver(options)
        lifetime = options['lifetime']

        if rdtype == 'PTR':
            return await resolver.resolve_address(name, lifetime=lifetime)

        return await resolver.resolve(name, rdtype, lifetime=lifetime)

    @accepts(
        Dict(
            'lookup_data',
            List('names', items=[Str('name')], required=True),
            Str('record_type', default='A', enum=['A', 'AAAA', 'SRV']),
            Dict('dns_client_options',
                 List('nameservers', items=[IPAddr("ip")], default=[]),
                 Int('lifetime', default=12),
                 Int('timeout', default=4),
                 register=True),
            Ref('query-filters'),
            Ref('query-options'),
        ))
    @returns(
        OROperator(
            List(
                'rdata_list_srv',
                items=[
                    Dict(
                        Str('name'),
                        Int('priority'),
                        Int('weight'),
                        Int('port'),
                        Str('class'),
                        Str('type'),
                        Int('ttl'),
                        Str('target'),
                    )
                ],
            ),
            List(
                'rdata_list',
                items=[
                    Dict(
                        Str('name'),
                        Str('class'),
                        Str('type'),
                        Int('ttl'),
                        IPAddr('address'),
                    )
                ],
            ),
            name='record_list',
        ))
    async def forward_lookup(self, data):
        """Resolve every entry in `names` concurrently and return the
        flattened rdata as a list of dicts, filtered through the standard
        query-filters / query-options machinery.

        SRV answers carry priority/weight/port/target fields; A and AAAA
        answers carry an `address` field instead.
        """
        options = data['dns_client_options']
        rtype = data['record_type']

        # Fan out all lookups at once; any single failure propagates.
        answers = await asyncio.gather(
            *[self.resolve_name(h, rtype, options) for h in data['names']])

        output = []
        for ans in answers:
            # Fields common to every record in this answer's first rrset.
            rrset = ans.response.answer[0]
            ttl = rrset.ttl
            name = rrset.name.to_text()

            for item in rrset.items:
                if rtype == 'SRV':
                    output.append({
                        "name": name,
                        "priority": item.priority,
                        "weight": item.weight,
                        "port": item.port,
                        "class": item.rdclass.name,
                        "type": item.rdtype.name,
                        "ttl": ttl,
                        "target": item.target.to_text()
                    })
                else:
                    output.append({
                        "name": name,
                        "class": item.rdclass.name,
                        "type": item.rdtype.name,
                        "ttl": ttl,
                        "address": item.address,
                    })

        return filter_list(output, data['query-filters'],
                           data['query-options'])

    @accepts(
        Dict(
            'lookup_data',
            List("addresses", items=[IPAddr("address")], required=True),
            Ref('dns_client_options'),
            Ref('query-filters'),
            Ref('query-options'),
        ))
    @returns(
        List('rdata_list',
             items=[
                 Dict(
                     Str('name'),
                     Str('class'),
                     Str('type'),
                     Int('ttl'),
                     Str('target'),
                 )
             ]))
    async def reverse_lookup(self, data):
        """Perform concurrent PTR lookups for every entry in `addresses`
        and return the flattened rdata as a list of dicts, filtered through
        the standard query-filters / query-options machinery.
        """
        options = data['dns_client_options']

        answers = await asyncio.gather(
            *[self.resolve_name(i, 'PTR', options) for i in data['addresses']])

        output = []
        for ans in answers:
            rrset = ans.response.answer[0]
            ttl = rrset.ttl
            name = rrset.name.to_text()

            for item in rrset.items:
                output.append({
                    "name": name,
                    "class": item.rdclass.name,
                    "type": item.rdtype.name,
                    "ttl": ttl,
                    "target": item.target.to_text(),
                })

        return filter_list(output, data['query-filters'],
                           data['query-options'])