Example #1
File: kerberos.py Project: tejp/freenas
class KerberosRealmService(CRUDService):
    class Config:
        datastore = 'directoryservice.kerberosrealm'
        datastore_prefix = 'krb_'
        datastore_extend = 'kerberos.realm.kerberos_extend'
        namespace = 'kerberos.realm'

    @private
    async def kerberos_extend(self, data):
        for param in ['kdc', 'admin_server', 'kpasswd_server']:
            data[param] = data[param].split(' ') if data[param] else []

        return data

    @private
    async def kerberos_compress(self, data):
        for param in ['kdc', 'admin_server', 'kpasswd_server']:
            data[param] = ' '.join(data[param])

        return data

    @accepts(
        Dict('kerberos_realm_create',
             Str('realm', required=True),
             List('kdc', default=[]),
             List('admin_server', default=[]),
             List('kpasswd_server', default=[]),
             register=True))
    async def do_create(self, data):
        """
        Create a new kerberos realm. This will be automatically populated during the
        domain join process in an Active Directory environment. Kerberos realm names
        are case-sensitive, but convention is to only use upper-case.

        Entries for kdc, admin_server, and kpasswd_server are not required.
        If they are unpopulated, then kerberos will use DNS SRV records to
        discover the correct servers. The option to hard-code them is provided
        because of AD site discovery: Kerberos has no concept of Active Directory
        sites, so the middleware performs the site discovery and sets the
        kerberos configuration based on the AD site.
        """
        verrors = ValidationErrors()

        verrors.add_child('kerberos_realm_create', await self._validate(data))

        if verrors:
            raise verrors

        data = await self.kerberos_compress(data)
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        await self.middleware.call('etc.generate', 'kerberos')
        await self.middleware.call('service.restart', 'cron')
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("kerberos_realm_create", "kerberos_realm_update",
                   ("attr", {
                       "update": True
                   })))
    async def do_update(self, id, data):
        """
        Update a kerberos realm by id. This will be automatically populated during the
        domain join process in an Active Directory environment. Kerberos realm names
        are case-sensitive, but convention is to only use upper-case.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        verrors.add_child('kerberos_realm_update', await self._validate(new))

        if verrors:
            raise verrors

        new = await self.kerberos_compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self.middleware.call('etc.generate', 'kerberos')
        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete a kerberos realm by ID.
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
        await self.middleware.call('etc.generate', 'kerberos')

    @private
    async def _validate(self, data):
        verrors = ValidationErrors()
        realms = await self.query()
        for realm in realms:
            if realm['realm'].upper() == data['realm'].upper():
                verrors.add(
                    'kerberos_realm',
                    f'kerberos realm with name {realm["realm"]} already exists.'
                )
        return verrors
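
The extend/compress pair above converts between the datastore's space-delimited string fields and the list form exposed by the API. A minimal standalone sketch of that round trip, using made-up realm data rather than a real datastore row:

def kerberos_extend(data):
    # The datastore stores kdc/admin_server/kpasswd_server as space-delimited strings.
    for param in ['kdc', 'admin_server', 'kpasswd_server']:
        data[param] = data[param].split(' ') if data[param] else []
    return data

def kerberos_compress(data):
    # Join the lists back into the string form expected by the datastore.
    for param in ['kdc', 'admin_server', 'kpasswd_server']:
        data[param] = ' '.join(data[param])
    return data

row = {'realm': 'EXAMPLE.COM', 'kdc': 'kdc1.example.com kdc2.example.com',
       'admin_server': '', 'kpasswd_server': ''}
extended = kerberos_extend(dict(row))
assert extended['kdc'] == ['kdc1.example.com', 'kdc2.example.com']
assert kerberos_compress(extended)['kdc'] == row['kdc']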
Example #2
class RsyncModService(CRUDService):

    class Config:
        datastore = 'services.rsyncmod'
        datastore_prefix = 'rsyncmod_'
        datastore_extend = 'rsyncmod.rsync_mod_extend'

    @private
    async def rsync_mod_extend(self, data):
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()
        data['mode'] = data['mode'].upper()
        return data

    @private
    async def common_validation(self, data, schema_name):
        verrors = ValidationErrors()

        await check_path_resides_within_volume(verrors, self.middleware, f'{schema_name}.path', data.get('path'))

        for entity in ('user', 'group'):
            value = data.get(entity)
            if value not in map(
                lambda e: e[entity if entity == 'group' else 'username'],
                await self.middleware.call(f'{entity}.query')
            ):
                verrors.add(
                    f'{schema_name}.{entity}',
                    f'Please specify a valid {entity}'
                )

        verrors.check()

        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])
        data['mode'] = data['mode'].lower()

        return data

    @accepts(Dict(
        'rsyncmod_create',
        Str('name', validators=[Match(r'[^/\]]')]),
        Str('comment'),
        Str('path', required=True, max_length=RSYNC_PATH_LIMIT),
        Str('mode', enum=['RO', 'RW', 'WO']),
        Int('maxconn'),
        Str('user', default='nobody'),
        Str('group', default='nobody'),
        List('hostsallow', items=[Str('hostsallow')], default=[]),
        List('hostsdeny', items=[Str('hostdeny')], default=[]),
        Str('auxiliary', max_length=None),
        register=True,
    ))
    async def do_create(self, data):
        """
        Create a Rsyncmod module.

        `path` represents the path to a dataset. Path length is limited to a maximum of 1023 characters, as
        enforced by FreeBSD. Because rsync recurses into subdirectories, paths encountered during a transfer can
        exceed this limit even when `path` itself does not; the user must ensure that no recursed path grows
        longer than the limit, or shorten the paths involved.

        `maxconn` is an integer value representing the maximum number of simultaneous connections. Zero represents
        unlimited.

        `hostsallow` is a list of patterns to match hostname/ip address of a connecting client. If list is empty,
        all hosts are allowed.

        `hostsdeny` is a list of patterns to match hostname/ip address of a connecting client. If the pattern is
        matched, access is denied to the client. If no client should be denied, this should be left empty.

        `auxiliary` attribute can be used to pass on any additional parameters from rsyncd.conf(5).
        """

        data = await self.common_validation(data, 'rsyncmod_create')

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        await self._service_change('rsync', 'reload')

        return await self._get_instance(data['id'])

    @accepts(Int('id'), Patch('rsyncmod_create', 'rsyncmod_update', ('attr', {'update': True})))
    async def do_update(self, id, data):
        """
        Update Rsyncmod module of `id`.
        """
        module = await self._get_instance(id)
        module.update(data)

        module = await self.common_validation(module, 'rsyncmod_update')

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            module,
            {'prefix': self._config.datastore_prefix}
        )

        await self._service_change('rsync', 'reload')

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete Rsyncmod module of `id`.
        """
        return await self.middleware.call('datastore.delete', self._config.datastore, id)
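
The user/group check in common_validation above only accepts names that appear in the corresponding query results, keyed by 'username' for users and 'group' for groups. A standalone sketch of that lookup against assumed sample query output:

def entity_exists(entity, value, query_results):
    # Users are keyed by 'username', groups by 'group', mirroring the query results.
    key = entity if entity == 'group' else 'username'
    return value in (row[key] for row in query_results)

users = [{'username': 'nobody'}, {'username': 'rsyncd'}]
groups = [{'group': 'nobody'}, {'group': 'wheel'}]
assert entity_exists('user', 'nobody', users)
assert not entity_exists('group', 'staff', groups)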
Example #3
class GroupService(CRUDService):
    class Config:
        datastore = 'account.bsdgroups'
        datastore_prefix = 'bsdgrp_'
        datastore_extend = 'group.group_extend'

    @private
    async def group_extend(self, group):
        # Get group membership
        group['users'] = [
            gm['user']['id'] for gm in await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership', [(
                    'group', '=', group['id'])], {'prefix': 'bsdgrpmember_'})
        ]
        group['users'] += [
            gmu['id'] for gmu in await self.middleware.call(
                'datastore.query', 'account.bsdusers', [('bsdusr_group_id',
                                                         '=', group['id'])])
        ]
        return group

    @accepts(
        Dict(
            'group_create',
            Int('gid'),
            Str('name', required=True),
            Bool('sudo', default=False),
            Bool('allow_duplicate_gid', default=False),
            List('users', items=[Int('id')], required=False),
            register=True,
        ))
    async def do_create(self, data):

        verrors = ValidationErrors()
        await self.__common_validation(verrors, data)
        if verrors:
            raise verrors

        if not data.get('gid'):
            data['gid'] = await self.get_next_gid()

        group = data.copy()
        group['group'] = group.pop('name')

        users = group.pop('users', [])

        pk = await self.middleware.call('datastore.insert',
                                        'account.bsdgroups', group,
                                        {'prefix': 'bsdgrp_'})

        for user in users:
            await self.middleware.call('datastore.insert',
                                       'account.bsdgroupmembership', {
                                           'bsdgrpmember_group': pk,
                                           'bsdgrpmember_user': user
                                       })

        await self.middleware.call('notifier.groupmap_add', data['name'],
                                   data['name'])

        await self.middleware.call('service.reload', 'user')

        return pk

    @accepts(
        Int('id'),
        Patch(
            'group_create',
            'group_update',
            ('attr', {
                'update': True
            }),
        ),
    )
    async def do_update(self, pk, data):

        group = await self._get_instance(pk)

        verrors = ValidationErrors()
        await self.__common_validation(verrors, data, pk=pk)
        if verrors:
            raise verrors

        group.update(data)
        delete_groupmap = False
        group.pop('users', None)

        if 'name' in data and data['name'] != group['group']:
            delete_groupmap = group['group']
            group['group'] = group.pop('name')

        await self.middleware.call('datastore.update', 'account.bsdgroups', pk,
                                   group, {'prefix': 'bsdgrp_'})

        if 'users' in data:
            existing = {
                i['bsdgrpmember_user']['id']: i
                for i in await self.middleware.call(
                    'datastore.query', 'account.bsdgroupmembership', [(
                        'bsdgrpmember_group', '=', pk)])
            }
            to_remove = set(existing.keys()) - set(data['users'])
            for i in to_remove:
                await self.middleware.call('datastore.delete',
                                           'account.bsdgroupmembership',
                                           existing[i]['id'])

            to_add = set(data['users']) - set(existing.keys())
            for i in to_add:
                await self.middleware.call('datastore.insert',
                                           'account.bsdgroupmembership', {
                                               'bsdgrpmember_group': pk,
                                               'bsdgrpmember_user': i
                                           })

        if delete_groupmap:
            await self.middleware.call('notifier.groupmap_delete',
                                       delete_groupmap)

        await self.middleware.call('notifier.groupmap_add', group['group'],
                                   group['group'])

        await self.middleware.call('service.reload', 'user')

        return pk

    @accepts(Int('id'), Dict('options', Bool('delete_users', default=False)))
    async def do_delete(self, pk, options=None):

        group = await self._get_instance(pk)

        if group['builtin']:
            raise CallError('A built-in group cannot be deleted.',
                            errno.EACCES)

        if options['delete_users']:
            for i in await self.middleware.call('datastore.query',
                                                'account.bsdusers',
                                                [('group', '=', group['id'])],
                                                {'prefix': 'bsdusr_'}):
                await self.middleware.call('datastore.delete',
                                           'account.bsdusers', i['id'])

        if await self.middleware.call('notifier.common', 'system',
                                      'domaincontroller_enabled'):
            await self.middleware.call('notifier.samba4', 'group_delete',
                                       [group['group']])

        await self.middleware.call('datastore.delete', 'account.bsdgroups', pk)

        await self.middleware.call('service.reload', 'user')

        return pk

    async def get_next_gid(self):
        """
        Get the next available/free gid.
        """
        last_gid = 999
        for i in await self.middleware.call('datastore.query',
                                            'account.bsdgroups',
                                            [('builtin', '=', False)], {
                                                'order_by': ['gid'],
                                                'prefix': 'bsdgrp_'
                                            }):
            # If the difference between the last gid and the current one is
            # bigger than 1, it means we have a gap and can use it.
            if i['gid'] - last_gid > 1:
                return last_gid + 1
            last_gid = i['gid']
        return last_gid + 1

    async def __common_validation(self, verrors, data, pk=None):

        exclude_filter = [('id', '!=', pk)] if pk else []

        if 'name' in data:
            existing = await self.middleware.call(
                'datastore.query', 'account.bsdgroups',
                [('group', '=', data['name'])] + exclude_filter,
                {'prefix': 'bsdgrp_'})
            if existing:
                verrors.add(
                    'name',
                    f'A Group with the name "{data["name"]}" already exists.',
                    errno.EEXIST)

            pw_checkname(verrors, 'name', data['name'])

        allow_duplicate_gid = data.pop('allow_duplicate_gid', False)
        if data.get('gid') and not allow_duplicate_gid:
            existing = await self.middleware.call(
                'datastore.query', 'account.bsdgroups',
                [('gid', '=', data['gid'])] + exclude_filter,
                {'prefix': 'bsdgrp_'})
            if existing:
                verrors.add('gid',
                            f'The Group ID "{data["gid"]}" already exists.',
                            errno.EEXIST)

        if 'users' in data:
            existing = set([
                i['id'] for i in await self.middleware.call(
                    'datastore.query', 'account.bsdusers', [('id', 'in',
                                                             data['users'])])
            ])
            notfound = set(data['users']) - existing
            if notfound:
                verrors.add(
                    'users',
                    f'The following users do not exist: {", ".join(map(str, notfound))}'
                )
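
get_next_gid above walks the non-builtin groups in gid order and reuses the first gap larger than one, starting the search above 999. A pure-Python sketch of that logic against assumed sample rows:

def next_gid(rows, start=999):
    last_gid = start
    # Rows are assumed unsorted here, so sort by gid first (the service
    # relies on the datastore's order_by instead).
    for row in sorted(rows, key=lambda r: r['gid']):
        if row['gid'] - last_gid > 1:  # found a gap, reuse it
            return last_gid + 1
        last_gid = row['gid']
    return last_gid + 1

assert next_gid([{'gid': 1000}, {'gid': 1001}, {'gid': 1005}]) == 1002
assert next_gid([]) == 1000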
Example #4
class CredentialsService(CRUDService):
    class Config:
        namespace = "cloudsync.credentials"

        datastore = "system.cloudcredentials"

    @accepts(
        Dict(
            "cloud_sync_credentials_verify",
            Str("provider", required=True),
            Dict("attributes", additional_attrs=True, required=True),
        ))
    async def verify(self, data):
        """
        Verify if `attributes` provided for `provider` are authorized by the `provider`.
        """
        data = dict(data, name="")
        await self._validate("cloud_sync_credentials_create", data)

        async with RcloneConfig({"credentials": data}) as config:
            proc = await run([
                "rclone", "--config", config.config_path, "lsjson", "remote:"
            ],
                             check=False,
                             encoding="utf8")
            if proc.returncode == 0:
                return {"valid": True}
            else:
                return {"valid": False, "error": proc.stderr}

    @accepts(
        Dict(
            "cloud_sync_credentials_create",
            Str("name", required=True),
            Str("provider", required=True),
            Dict("attributes", additional_attrs=True, required=True),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create Cloud Sync Credentials.

        `attributes` is a dictionary of valid values which will be used to authorize with the `provider`.
        """
        await self._validate("cloud_sync_credentials_create", data)

        data["id"] = await self.middleware.call(
            "datastore.insert",
            "system.cloudcredentials",
            data,
        )
        return data

    @accepts(Int("id"),
             Patch("cloud_sync_credentials_create",
                   "cloud_sync_credentials_update", ("attr", {
                       "update": True
                   })))
    async def do_update(self, id, data):
        """
        Update Cloud Sync Credentials of `id`.
        """
        old = await self._get_instance(id)

        new = old.copy()
        new.update(data)

        await self._validate("cloud_sync_credentials_update", new, id)

        await self.middleware.call(
            "datastore.update",
            "system.cloudcredentials",
            id,
            new,
        )

        data["id"] = id

        return data

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete Cloud Sync Credentials of `id`.
        """
        await self.middleware.call(
            "datastore.delete",
            "system.cloudcredentials",
            id,
        )

    async def _validate(self, schema_name, data, id=None):
        verrors = ValidationErrors()

        await self._ensure_unique(verrors, schema_name, "name", data["name"],
                                  id)

        if data["provider"] not in REMOTES:
            verrors.add(f"{schema_name}.provider", "Invalid provider")
        else:
            provider = REMOTES[data["provider"]]

            attributes_verrors = validate_attributes(
                provider.credentials_schema, data)
            verrors.add_child(f"{schema_name}.attributes", attributes_verrors)

        if verrors:
            raise verrors
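
verify() above treats a zero exit code from `rclone lsjson remote:` as proof that the supplied credentials work. A standalone sketch of that check, assuming rclone is installed and that the config path (a hypothetical example) defines a [remote] section:

import subprocess

def verify_rclone_remote(config_path):
    # A zero exit code means rclone could list the remote with these credentials.
    proc = subprocess.run(
        ['rclone', '--config', config_path, 'lsjson', 'remote:'],
        capture_output=True, text=True, check=False)
    if proc.returncode == 0:
        return {'valid': True}
    return {'valid': False, 'error': proc.stderr}

# Example usage (hypothetical config file):
# print(verify_rclone_remote('/tmp/rclone-test.conf'))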
Example #5
class TunableService(CRUDService):
    class Config:
        datastore = 'system.tunable'
        datastore_prefix = 'tun_'
        datastore_extend = 'tunable.upper'

    def __init__(self, *args, **kwargs):
        super(TunableService, self).__init__(*args, **kwargs)
        self.__default_sysctl = {}

    @private
    async def get_default_value(self, oid):
        return self.__default_sysctl[oid]

    @private
    async def set_default_value(self, oid, value):
        if oid not in self.__default_sysctl:
            self.__default_sysctl[oid] = value

    @accepts(Dict(
        'tunable_create',
        Str('var', validators=[Match(r'^[\w\.]+$')], required=True),
        Str('value', required=True),
        Str('type', enum=['LOADER', 'RC', 'SYSCTL'], required=True),
        Str('comment'),
        Bool('enabled', default=True),
        register=True
    ))
    async def do_create(self, data):
        """
        Create a Tunable.

        `var` represents name of the sysctl/loader/rc variable.

        `type` should be one of the following:
        1) LOADER     -     Configure `var` for loader(8)
        2) RC         -     Configure `var` for rc(8)
        3) SYSCTL     -     Configure `var` for sysctl(8)
        """
        await self.clean(data, 'tunable_create')
        await self.validate(data, 'tunable_create')
        await self.lower(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call('service.reload', data['type'])

        return await self._get_instance(data['id'])

    @accepts(
        Int('id'),
        Patch(
            'tunable_create',
            'tunable_update',
            ('attr', {'update': True})
        )
    )
    async def do_update(self, id, data):
        """
        Update Tunable of `id`.
        """
        old = await self._get_instance(id)

        new = old.copy()
        new.update(data)

        await self.clean(new, 'tunable_update', old=old)
        await self.validate(new, 'tunable_update')

        await self.lower(new)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call('service.reload', new['type'])

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete Tunable of `id`.
        """
        tunable = await self._get_instance(id)
        await self.lower(tunable)
        if tunable['type'] == 'sysctl':
            # Restore the default value, if it is possible.
            value_default = None
            try:
                value_default = await self.get_default_value(tunable["var"])
            except KeyError:
                pass
            if value_default is not None:
                ret = subprocess.run(
                    ['sysctl', f'{tunable["var"]}="{value_default}"'],
                    capture_output=True
                )
                if ret.returncode:
                    self.middleware.logger.debug(
                        'Failed to set sysctl %s -> %s: %s',
                        tunable['var'], tunable['value'], ret.stderr.decode(),
                    )

        response = await self.middleware.call(
            'datastore.delete',
            self._config.datastore,
            id
        )

        await self.middleware.call('service.reload', tunable['type'].lower())

        return response

    @private
    async def lower(self, data):
        data['type'] = data['type'].lower()

        return data

    @private
    async def upper(self, data):
        data['type'] = data['type'].upper()

        return data

    @private
    async def clean(self, tunable, schema_name, old=None):
        verrors = ValidationErrors()
        skip_dupe = False
        tun_comment = tunable.get('comment')
        tun_value = tunable['value']
        tun_var = tunable['var']

        if tun_comment is not None:
            tunable['comment'] = tun_comment.strip()

        if '"' in tun_value or "'" in tun_value:
            verrors.add(f"{schema_name}.value",
                        'Quotes in value are not allowed')

        if schema_name == 'tunable_update' and old:
            old_tun_var = old['var']

            if old_tun_var == tun_var:
                # They aren't trying to change to a new name, just updating
                skip_dupe = True

        if not skip_dupe:
            tun_vars = await self.middleware.call(
                'datastore.query', self._config.datastore, [('tun_var', '=',
                                                             tun_var)])

            if tun_vars:
                verrors.add(f"{schema_name}.value",
                            'This variable already exists')

        if verrors:
            raise verrors

        return tunable

    @private
    async def validate(self, tunable, schema_name):
        sysctl_re = \
            re.compile(r'[a-z][a-z0-9_]+\.([a-z0-9_]+\.)*[a-z0-9_]+', re.I)

        loader_re = \
            re.compile(r'[a-z][a-z0-9_]+\.*([a-z0-9_]+\.)*[a-z0-9_]+', re.I)

        verrors = ValidationErrors()
        tun_var = tunable['var'].lower()
        tun_type = tunable['type'].lower()

        if tun_type == 'loader' or tun_type == 'rc':
            err_msg = "Value can start with a letter and end with an alphanumeric. Aphanumeric and underscore" \
                      " characters are allowed"
        else:
            err_msg = 'Value can start with a letter and end with an alphanumeric. A period (.) once is a must.' \
                      ' Alphanumeric and underscore characters are allowed'

        if (
            tun_type in ('loader', 'rc') and
            not loader_re.match(tun_var)
        ) or (
            tun_type == 'sysctl' and
            not sysctl_re.match(tun_var)
        ):
            verrors.add(f"{schema_name}.var", err_msg)

        if verrors:
            raise verrors
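
validate() above accepts sysctl names only if they contain at least one period, while loader/rc names may omit it. A quick standalone check of those patterns with assumed example variable names:

import re

sysctl_re = re.compile(r'[a-z][a-z0-9_]+\.([a-z0-9_]+\.)*[a-z0-9_]+', re.I)
loader_re = re.compile(r'[a-z][a-z0-9_]+\.*([a-z0-9_]+\.)*[a-z0-9_]+', re.I)

assert sysctl_re.match('kern.ipc.somaxconn')      # dotted sysctl name passes
assert not sysctl_re.match('kernipcsomaxconn')    # sysctl names need a period
assert loader_re.match('hw.usb.no_boot_wait')     # loader/rc names also pass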
Example #6
class SharingSMBService(CRUDService):
    class Config:
        namespace = 'sharing.smb'
        datastore = 'sharing.cifs_share'
        datastore_prefix = 'cifs_'
        datastore_extend = 'sharing.smb.extend'

    @accepts(
        Dict('sharingsmb_create',
             Str('purpose',
                 enum=[x.name for x in SMBSharePreset],
                 default=SMBSharePreset.DEFAULT_SHARE.name),
             Str('path', required=True),
             Str('path_suffix', default=''),
             Bool('home', default=False),
             Str('name', max_length=80),
             Str('comment', default=''),
             Bool('ro', default=False),
             Bool('browsable', default=True),
             Bool('timemachine', default=False),
             Bool('recyclebin', default=False),
             Bool('guestok', default=False),
             Bool('abe', default=False),
             List('hostsallow', default=[]),
             List('hostsdeny', default=[]),
             Bool('aapl_name_mangling', default=False),
             Bool('acl', default=True),
             Bool('durablehandle', default=True),
             Bool('shadowcopy', default=True),
             Bool('streams', default=True),
             Bool('fsrvp', default=False),
             Str('auxsmbconf', max_length=None, default=''),
             Bool('enabled', default=True),
             register=True))
    async def do_create(self, data):
        """
        Create a SMB Share.

        `purpose` applies common configuration presets depending on intended purpose.

        `timemachine` when set, enables Time Machine backups for this share.

        `ro` when enabled, prohibits write access to the share.

        `guestok` when enabled, allows access to this share without a password.

        `hostsallow` is a list of hostnames / IP addresses which have access to this share.

        `hostsdeny` is a list of hostnames / IP addresses which are not allowed access to this share. To restrict
        access to only a specific set of hosts, set `hostsdeny` to "ALL"; this denies access to every host except
        those listed in `hostsallow`.

        `acl` enables support for storing the SMB Security Descriptor as a Filesystem ACL.

        `streams` enables support for storing alternate datastreams as filesystem extended attributes.

        `fsrvp` enables support for the filesystem remote VSS protocol. This allows clients to create
        ZFS snapshots through RPC.

        `shadowcopy` enables support for the volume shadow copy service.

        `auxsmbconf` is a string of additional smb4.conf parameters not covered by the system's API.
        """
        verrors = ValidationErrors()
        path = data['path']

        await self.clean(data, 'sharingsmb_create', verrors)
        await self.validate(data, 'sharingsmb_create', verrors)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.apply_presets(data)
        await self.compress(data)
        vuid = await self.generate_vuid(data['timemachine'])
        data.update({'vuid': vuid})
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.middleware.call('sharing.smb.reg_addshare', data)
        await self.extend(data)  # We should do this in the insert call ?

        enable_aapl = await self.check_aapl(data)

        if enable_aapl:
            await self._service_change('cifs', 'restart')
        else:
            await self._service_change('cifs', 'reload')

        return data

    @accepts(Int('id'),
             Patch('sharingsmb_create', 'sharingsmb_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update SMB Share of `id`.
        """
        verrors = ValidationErrors()
        path = data.get('path')

        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })

        new = old.copy()
        new.update(data)

        oldname = 'homes' if old['home'] else old['name']
        newname = 'homes' if new['home'] else new['name']

        new['vuid'] = await self.generate_vuid(new['timemachine'], new['vuid'])
        await self.clean(new, 'sharingsmb_update', verrors, id=id)
        await self.validate(new, 'sharingsmb_update', verrors, old=old)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        if old['purpose'] != new['purpose']:
            await self.apply_presets(new)

        await self.compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        enable_aapl = await self.check_aapl(new)
        if newname != oldname:
            # This is a disruptive change: the share is actually being removed and
            # replaced, which forcibly closes any existing SMB sessions.
            await self.close_share(oldname)
            try:
                await self.middleware.call('sharing.smb.reg_delshare', oldname)
            except Exception:
                self.logger.warning('Failed to remove stale share [%s]',
                                    old['name'],
                                    exc_info=True)
            await self.middleware.call('sharing.smb.reg_addshare', new)
        else:
            diff = await self.middleware.call(
                'sharing.smb.diff_middleware_and_registry', new['name'], new)
            share_name = new['name'] if not new['home'] else 'homes'
            await self.middleware.call('sharing.smb.apply_conf_diff',
                                       'REGISTRY', share_name, diff)

        await self.extend(new)  # same here ?

        if enable_aapl:
            await self._service_change('cifs', 'restart')
        else:
            await self._service_change('cifs', 'reload')

        return new

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete SMB Share of `id`. This will forcibly disconnect SMB clients
        that are accessing the share.
        """
        share = await self._get_instance(id)
        result = await self.middleware.call('datastore.delete',
                                            self._config.datastore, id)
        await self.close_share(share['name'])
        try:
            await self.middleware.call(
                'smb.sharesec._delete',
                share['name'] if not share['home'] else 'homes')
        except Exception:
            self.logger.debug('Failed to delete share ACL for [%s].',
                              share['name'],
                              exc_info=True)

        try:
            await self.middleware.call(
                'sharing.smb.reg_delshare',
                share['name'] if not share['home'] else 'homes')
        except Exception:
            self.logger.warn('Failed to remove registry entry for [%s].',
                             share['name'],
                             exc_info=True)

        if share['timemachine']:
            await self.middleware.call('service.restart', 'mdns')

        return result

    @private
    async def check_aapl(self, data):
        """
        Returns whether we changed the global aapl support settings.
        """
        aapl_extensions = (
            await self.middleware.call('smb.config'))['aapl_extensions']

        if not aapl_extensions and data['timemachine']:
            await self.middleware.call('datastore.update', 'services_cifs', 1,
                                       {'cifs_srv_aapl_extensions': True})
            return True

        return False

    @private
    async def close_share(self, share_name):
        c = await run(
            [SMBCmd.SMBCONTROL.value, 'smbd', 'close-share', share_name],
            check=False)
        if c.returncode != 0:
            self.logger.warn('Failed to close smb share [%s]: [%s]',
                             share_name,
                             c.stderr.decode().strip())

    @private
    async def clean(self, data, schema_name, verrors, id=None):
        data['name'] = await self.name_exists(data, schema_name, verrors, id)

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        home_result = await self.home_exists(data['home'], schema_name,
                                             verrors, old)

        if home_result:
            verrors.add(f'{schema_name}.home',
                        'Only one share is allowed to be a home share.')
        elif not home_result and not data['path']:
            verrors.add(f'{schema_name}.path', 'This field is required.')

        if data['path']:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   f"{schema_name}.path",
                                                   data['path'])

        if not data['acl'] and not await self.middleware.call(
                'filesystem.acl_is_trivial', data['path']):
            verrors.add(
                f'{schema_name}.acl',
                f'ACL detected on {data["path"]}. ACLs must be stripped prior to creation '
                'of SMB share.')

        if data.get('name') and data['name'].lower() in [
                'global', 'homes', 'printers'
        ]:
            verrors.add(
                f'{schema_name}.name',
                f'{data["name"]} is a reserved section name, please select another one'
            )
        if data.get('path_suffix') and len(data['path_suffix'].split('/')) > 2:
            verrors.add(
                f'{schema_name}.path_suffix',
                'Path suffix may not contain more than two components.')

    @private
    async def home_exists(self, home, schema_name, verrors, old=None):
        home_filters = [('home', '=', True)]
        home_result = None

        if home:
            if old and old['id'] is not None:
                id = old['id']

                if not old['home']:
                    # Only check for another home share if this share was not
                    # already the home share.
                    home_filters.append(('id', '!=', id))
                    home_result = await self.middleware.call(
                        'datastore.query', self._config.datastore,
                        home_filters,
                        {'prefix': self._config.datastore_prefix})

        return home_result

    @private
    async def auxsmbconf_dict(self, aux, direction="TO"):
        ret = None
        if direction == 'TO':
            ret = {}
            for entry in aux.splitlines():
                try:
                    kv = entry.split('=', 1)
                    ret[kv[0].strip()] = kv[1].strip()
                except Exception:
                    self.logger.debug(
                        "Share contains invalid auxiliary parameter: [%s]",
                        entry)
            return ret

        if direction == 'FROM':
            return '\n'.join([f'{k}={v}' for k, v in aux.items()])

    @private
    async def name_exists(self, data, schema_name, verrors, id=None):
        name = data['name']
        path = data['path']

        if path and not name:
            name = path.rsplit('/', 1)[-1]

        name_filters = [('name', '=', name)]

        if id is not None:
            name_filters.append(('id', '!=', id))

        name_result = await self.middleware.call(
            'datastore.query', self._config.datastore, name_filters,
            {'prefix': self._config.datastore_prefix})

        if name_result:
            verrors.add(f'{schema_name}.name',
                        'A share with this name already exists.')

        return name

    @private
    async def extend(self, data):
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()
        if data['fsrvp']:
            data['shadowcopy'] = True

        if 'share_acl' in data:
            data.pop('share_acl')

        return data

    @private
    async def compress(self, data):
        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])

        return data

    @private
    async def generate_vuid(self, timemachine, vuid=""):
        try:
            if timemachine and vuid:
                uuid.UUID(vuid, version=4)
        except ValueError:
            self.logger.debug(
                f"Time machine VUID string ({vuid}) is invalid. Regenerating.")
            vuid = ""

        if timemachine and not vuid:
            vuid = str(uuid.uuid4())

        return vuid

    @private
    async def apply_presets(self, data):
        """
        Apply settings from the selected preset. Auxiliary parameters from the
        preset are only merged in when user-defined aux parameters already
        exist; in that case, user-defined values take precedence for duplicate keys.
        """
        params = (SMBSharePreset[data["purpose"]].value)["params"].copy()
        aux = params.pop("auxsmbconf")
        data.update(params)
        if data["auxsmbconf"]:
            preset_aux = await self.auxsmbconf_dict(aux, direction="TO")
            data_aux = await self.auxsmbconf_dict(data["auxsmbconf"],
                                                  direction="TO")
            preset_aux.update(data_aux)
            data["auxsmbconf"] = await self.auxsmbconf_dict(preset_aux,
                                                            direction="FROM")

        return data

    @accepts()
    async def presets(self):
        """
        Retrieve pre-defined configuration sets for specific use-cases. These parameter
        combinations are often non-obvious, but beneficial in these scenarios.
        """
        return {x.name: x.value for x in SMBSharePreset}
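
apply_presets above only merges the preset's auxiliary parameters when user-defined aux parameters already exist, with the user's values winning on duplicate keys. A standalone sketch of that merge, with illustrative smb.conf-style parameter names:

def aux_to_dict(aux):
    # Parse "key = value" lines, ignoring anything without an '='.
    result = {}
    for entry in aux.splitlines():
        if '=' in entry:
            key, value = entry.split('=', 1)
            result[key.strip()] = value.strip()
    return result

preset_aux = aux_to_dict('veto files = /.DS_Store/\nea support = yes')
user_aux = aux_to_dict('ea support = no')
preset_aux.update(user_aux)          # user-defined values take precedence
merged = '\n'.join(f'{k}={v}' for k, v in preset_aux.items())
assert 'ea support=no' in merged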
Example #7
class TunableService(CRUDService):
    class Config:
        datastore = 'system.tunable'
        datastore_prefix = 'tun_'
        cli_namespace = 'system.tunable'

    SYSTEM_TUNABLES = deque()

    ENTRY = Patch(
        'tunable_create',
        'tunable_entry',
        ('add', Int('id')),
    )

    @private
    def get_system_tunables(self):
        if not TunableService.SYSTEM_TUNABLES:
            tunables = subprocess.run(['sysctl', '-aN'],
                                      stdout=subprocess.PIPE)
            for tunable in filter(lambda x: x,
                                  tunables.stdout.decode().split('\n')):
                TunableService.SYSTEM_TUNABLES.append(tunable)
        return TunableService.SYSTEM_TUNABLES

    @private
    def get_or_set(self, var, value=None):
        with open(f'/proc/sys/{var.replace(".", "/")}',
                  'r' if not value else 'w') as f:
            return f.read().strip() if not value else f.write(value)

    @accepts()
    @returns(
        Dict('tunable_type_choices',
             *[Str(k, enum=[k]) for k in TUNABLE_TYPES]))
    async def tunable_type_choices(self):
        """
        Retrieve the supported tunable types that can be changed.
        """
        return {k: k for k in TUNABLE_TYPES}

    @accepts(
        Dict('tunable_create',
             Str('var', required=True),
             Str('value', required=True),
             Str('type', enum=TUNABLE_TYPES, default='SYSCTL', required=True),
             Str('comment'),
             Bool('enabled', default=True),
             register=True))
    async def do_create(self, data):
        """
        Create a Tunable.
        """
        verrors = ValidationErrors()
        if await self.middleware.call('tunable.query',
                                      [('var', '=', data['var'])]):
            verrors.add(
                'tunable.create',
                f'Tunable {data["var"]!r} already exists in database.',
                errno.EEXIST)

        if data['var'] not in await self.middleware.call(
                'tunable.get_system_tunables'):
            verrors.add('tunable.create',
                        f'Tunable {data["var"]!r} does not exist in kernel.',
                        errno.ENOENT)

        verrors.check()

        data['orig_value'] = await self.middleware.call(
            'tunable.get_or_set', data['var'])

        if (comment := data.get('comment', '').strip()):
            data['comment'] = comment

        _id = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'sysctl')
        return await self.get_instance(_id)
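
get_or_set above maps a dotted sysctl name onto a path under /proc/sys and then reads or writes that file. A standalone sketch of the same mapping (the read helper requires Linux):

def sysctl_path(var):
    # 'net.ipv4.ip_forward' -> '/proc/sys/net/ipv4/ip_forward'
    return f'/proc/sys/{var.replace(".", "/")}'

assert sysctl_path('net.ipv4.ip_forward') == '/proc/sys/net/ipv4/ip_forward'

def read_sysctl(var):
    with open(sysctl_path(var)) as f:
        return f.read().strip()

# Example usage (Linux only):
# print(read_sysctl('kernel.hostname'))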
Example #8
File: cron.py Project: vaibhav-rbs/freenas
class CronJobService(CRUDService):
    class Config:
        datastore = 'tasks.cronjob'
        datastore_prefix = 'cron_'
        datastore_extend = 'cronjob.cron_extend'
        namespace = 'cronjob'

    @private
    def cron_extend(self, data):
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        user = data.get('user')
        if user:
            # Windows users can have spaces in their usernames
            # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808
            if ' ' in user:
                verrors.add(f'{schema}.user', 'Usernames cannot have spaces')

            elif not (await self.middleware.call('notifier.get_user_object',
                                                 user)):
                verrors.add(f'{schema}.user', 'Specified user does not exist')

        return verrors, data

    @accepts(
        Dict('cron_job_create',
             Bool('enabled'),
             Bool('stderr'),
             Bool('stdout'),
             Cron('schedule'),
             Str('command', required=True),
             Str('description'),
             Str('user', required=True),
             register=True))
    async def do_create(self, data):
        """
        Create a new cron job.

        `stderr` and `stdout` are boolean values; when `true`, the standard error or standard output of the
        command is suppressed, respectively.

        .. examples(websocket)::

          Create a cron job which executes `touch /tmp/testfile` every 5 minutes.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "cronjob.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "command": "touch /tmp/testfile",
                    "description": "Test command",
                    "user": "******",
                    "stderr": true,
                    "stdout": true
                }]
            }
        """
        verrors, data = await self.validate_data(data, 'cron_job_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.middleware.call('service.restart', 'cron')

        return data

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('cron_job_create', 'cron_job_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update cronjob of `id`.
        """
        task_data = await self.query(filters=[('id', '=', id)],
                                     options={'get': True})
        original_data = task_data.copy()
        task_data.update(data)
        verrors, task_data = await self.validate_data(task_data,
                                                      'cron_job_update')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(task_data)
        Cron.convert_schedule_to_db_format(original_data)

        if len(set(task_data.items()) ^ set(original_data.items())) > 0:

            await self.middleware.call(
                'datastore.update', self._config.datastore, id, task_data,
                {'prefix': self._config.datastore_prefix})

            await self.middleware.call('service.restart', 'cron')

        return await self.query(filters=[('id', '=', id)],
                                options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete cronjob of `id`.
        """
        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        await self.middleware.call('service.restart', 'cron')

        return response
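
do_update above only rewrites the row and restarts cron when the submitted data actually differs from the stored task; the check uses the symmetric difference of the two item sets. A standalone sketch of that comparison with flat, hashable values (as they are after conversion to the DB format):

def has_changes(old, new):
    # A non-empty symmetric difference means at least one field changed.
    return len(set(new.items()) ^ set(old.items())) > 0

assert not has_changes({'command': 'true', 'enabled': True},
                       {'command': 'true', 'enabled': True})
assert has_changes({'command': 'true', 'enabled': True},
                   {'command': 'true', 'enabled': False})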
Example #9
File: alert.py Project: Mu-L/freenas
class AlertServiceService(CRUDService):
    class Config:
        datastore = "system.alertservice"
        datastore_extend = "alertservice._extend"
        datastore_order_by = ["name"]
        cli_namespace = "system.alert.service"

    @accepts()
    async def list_types(self):
        """
        List all types of supported Alert services which can be configured with the system.
        """
        return [{
            "name": name,
            "title": factory.title,
        } for name, factory in sorted(ALERT_SERVICES_FACTORIES.items(),
                                      key=lambda i: i[1].title.lower())]

    @private
    async def _extend(self, service):
        try:
            service["type__title"] = ALERT_SERVICES_FACTORIES[
                service["type"]].title
        except KeyError:
            service["type__title"] = "<Unknown>"

        return service

    @private
    async def _compress(self, service):
        service.pop("type__title")

        return service

    @private
    async def _validate(self, service, schema_name):
        verrors = ValidationErrors()

        factory = ALERT_SERVICES_FACTORIES.get(service["type"])
        if factory is None:
            verrors.add(f"{schema_name}.type", "This field has invalid value")
            raise verrors

        verrors.add_child(
            f"{schema_name}.attributes",
            validate_attributes(list(factory.schema.attrs.values()), service))

        if verrors:
            raise verrors

    @accepts(
        Dict(
            "alert_service_create",
            Str("name"),
            Str("type", required=True),
            Dict("attributes", additional_attrs=True),
            Str("level", enum=list(AlertLevel.__members__)),
            Bool("enabled"),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create an Alert Service of specified `type`.

        If `enabled`, it sends alerts to the configured `type` of Alert Service.

        .. examples(websocket)::

          Create an Alert Service of Mail `type`

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "alertservice.create",
                "params": [{
                    "name": "Test Email Alert",
                    "enabled": true,
                    "type": "Mail",
                    "attributes": {
                        "email": "*****@*****.**"
                    },
                    "settings": {
                        "VolumeVersion": "HOURLY"
                    }
                }]
            }
        """
        await self._validate(data, "alert_service_create")

        data["id"] = await self.middleware.call("datastore.insert",
                                                self._config.datastore, data)

        await self._extend(data)

        return data

    @accepts(Int("id"),
             Patch(
                 "alert_service_create",
                 "alert_service_update",
                 ("attr", {
                     "update": True
                 }),
             ))
    async def do_update(self, id, data):
        """
        Update Alert Service of `id`.
        """
        old = await self.middleware.call(
            "datastore.query", self._config.datastore, [("id", "=", id)], {
                "extend": self._config.datastore_extend,
                "get": True
            })

        new = old.copy()
        new.update(data)

        await self._validate(new, "alert_service_update")

        await self._compress(new)

        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, new)

        await self._extend(new)

        return new

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete Alert Service of `id`.
        """
        return await self.middleware.call("datastore.delete",
                                          self._config.datastore, id)

    @accepts(Ref('alert_service_create'))
    async def test(self, data):
        """
        Send a test alert using `type` of Alert Service.

        .. examples(websocket)::

          Send a test alert using Alert Service of Mail `type`.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "alertservice.test",
                "params": [{
                    "name": "Test Email Alert",
                    "enabled": true,
                    "type": "Mail",
                    "attributes": {
                        "email": "*****@*****.**"
                    },
                    "settings": {}
                }]
            }
        """
        await self._validate(data, "alert_service_test")

        factory = ALERT_SERVICES_FACTORIES.get(data["type"])
        if factory is None:
            self.logger.error("Alert service %r does not exist", data["type"])
            return False

        try:
            alert_service = factory(self.middleware, data["attributes"])
        except Exception:
            self.logger.error(
                "Error creating alert service %r with parameters=%r",
                data["type"],
                data["attributes"],
                exc_info=True)
            return False

        master_node = "A"
        if await self.middleware.call("failover.licensed"):
            master_node = await self.middleware.call("failover.node")

        test_alert = Alert(
            TestAlertClass,
            node=master_node,
            datetime=datetime.utcnow(),
            last_occurrence=datetime.utcnow(),
            _uuid="test",
        )

        try:
            await alert_service.send([test_alert], [], [test_alert])
        except Exception:
            self.logger.error("Error in alert service %r",
                              data["type"],
                              exc_info=True)
            return False

        return True
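
list_types above returns the registered alert service factories sorted by their human-readable title. A standalone sketch of that ordering using stand-in factory objects (the names and titles are illustrative, not the real registry):

from types import SimpleNamespace

FACTORIES = {
    'Mail': SimpleNamespace(title='Email'),
    'SNMPTrap': SimpleNamespace(title='SNMP Trap'),
    'AWSSNS': SimpleNamespace(title='AWS SNS'),
}

listed = [{'name': name, 'title': factory.title}
          for name, factory in sorted(FACTORIES.items(),
                                      key=lambda i: i[1].title.lower())]
assert [entry['title'] for entry in listed] == ['AWS SNS', 'Email', 'SNMP Trap']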
Example #10
class SharingAFPService(CRUDService):
    class Config:
        namespace = 'sharing.afp'
        datastore = 'sharing.afp_share'
        datastore_prefix = 'afp_'
        datastore_extend = 'sharing.afp.extend'

    @accepts(Dict(
        'sharingafp_create',
        Str('path', required=True),
        Bool('home', default=False),
        Str('name'),
        Str('comment'),
        List('allow', default=[]),
        List('deny', default=[]),
        List('ro', default=[]),
        List('rw', default=[]),
        Bool('timemachine', default=False),
        Int('timemachine_quota', default=0),
        Bool('nodev', default=False),
        Bool('nostat', default=False),
        Bool('upriv', default=True),
        UnixPerm('fperm', default='644'),
        UnixPerm('dperm', default='755'),
        UnixPerm('umask', default='000'),
        List('hostsallow', items=[], default=[]),
        List('hostsdeny', items=[], default=[]),
        Str('vuid', null=True, default=''),
        Str('auxparams', max_length=None),
        Bool('enabled', default=True),
        register=True
    ))
    async def do_create(self, data):
        """
        Create AFP share.

        `allow`, `deny`, `ro`, and `rw` are lists of users and groups. Groups are designated by
        an @ prefix.

        `hostsallow` and `hostsdeny` are lists of hosts and/or networks.
        """
        verrors = ValidationErrors()
        path = data['path']

        await self.clean(data, 'sharingafp_create', verrors)
        await self.validate(data, 'sharingafp_create', verrors)

        await check_path_resides_within_volume(
            verrors, self.middleware, 'sharingafp_create.path', path)

        verrors.check()

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(data)
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.extend(data)

        await self._service_change('afp', 'reload')

        return data

    @accepts(
        Int('id'),
        Patch(
            'sharingafp_create',
            'sharingafp_update',
            ('attr', {'update': True})
        )
    )
    async def do_update(self, id, data):
        """
        Update AFP share `id`.
        """
        verrors = ValidationErrors()
        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)],
            {'extend': self._config.datastore_extend,
             'prefix': self._config.datastore_prefix,
             'get': True})
        path = data.get('path')

        new = old.copy()
        new.update(data)

        await self.clean(new, 'sharingafp_update', verrors, id=id)
        await self.validate(new, 'sharingafp_update', verrors, old=old)

        if path:
            await check_path_resides_within_volume(
                verrors, self.middleware, 'sharingafp_update.path', path)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.middleware.call(
            'datastore.update', self._config.datastore, id, new,
            {'prefix': self._config.datastore_prefix})
        await self.extend(new)

        await self._service_change('afp', 'reload')

        return new

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete AFP share `id`.
        """
        result = await self.middleware.call('datastore.delete', self._config.datastore, id)
        await self._service_change('afp', 'reload')
        return result

    @private
    async def clean(self, data, schema_name, verrors, id=None):
        data['name'] = await self.name_exists(data, schema_name, verrors, id)

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        await self.home_exists(data['home'], schema_name, verrors, old)
        if data['vuid']:
            try:
                uuid.UUID(data['vuid'], version=4)
            except ValueError:
                verrors.add(f'{schema_name}.vuid', 'vuid must be a valid UUID.')

    @private
    async def home_exists(self, home, schema_name, verrors, old=None):
        home_filters = [('home', '=', True)]
        home_result = None

        if home:
            if old and old['id'] is not None:
                id = old['id']

                if not old['home']:
                    # The share did not previously have home set; check whether another share does
                    home_filters.append(('id', '!=', id))
                    home_result = await self.middleware.call(
                        'datastore.query', self._config.datastore,
                        home_filters, {'prefix': self._config.datastore_prefix})
            else:
                # On create there is no existing row to exclude; any existing home share is a conflict
                home_result = await self.middleware.call(
                    'datastore.query', self._config.datastore,
                    home_filters, {'prefix': self._config.datastore_prefix})

        if home_result:
            verrors.add(f'{schema_name}.home',
                        'Only one share is allowed to be a home share.')

    @private
    async def name_exists(self, data, schema_name, verrors, id=None):
        name = data['name']
        path = data['path']
        home = data['home']
        name_filters = [('name', '=', name)]
        path_filters = [('path', '=', path)]

        if not name:
            if home:
                name = 'Homes'
            else:
                name = path.rsplit('/', 1)[-1]

        if id is not None:
            name_filters.append(('id', '!=', id))
            path_filters.append(('id', '!=', id))

        name_result = await self.middleware.call(
            'datastore.query', self._config.datastore,
            name_filters,
            {'prefix': self._config.datastore_prefix})
        path_result = await self.middleware.call(
            'datastore.query', self._config.datastore,
            path_filters,
            {'prefix': self._config.datastore_prefix})

        if name_result:
            verrors.add(f'{schema_name}.name',
                        'A share with this name already exists.')

        if path_result:
            verrors.add(f'{schema_name}.path',
                        'A share with this path already exists.')

        return name

    @private
    async def extend(self, data):
        data['allow'] = data['allow'].split()
        data['deny'] = data['deny'].split()
        data['ro'] = data['ro'].split()
        data['rw'] = data['rw'].split()
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()

        return data

    @private
    async def compress(self, data):
        data['allow'] = ' '.join(data['allow'])
        data['deny'] = ' '.join(data['deny'])
        data['ro'] = ' '.join(data['ro'])
        data['rw'] = ' '.join(data['rw'])
        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])
        if not data['vuid'] and data['timemachine']:
            data['vuid'] = str(uuid.uuid4())
        return data
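A minimal usage sketch (not part of the service above), assuming the middlewared websocket client that ships with FreeNAS/TrueNAS is available on the system itself; the pool path, share name, and group below are illustrative only.

from middlewared.client import Client

with Client() as c:
    share = c.call('sharing.afp.create', {
        'path': '/mnt/tank/media',     # illustrative path; must reside within a pool
        'name': 'media',
        'timemachine': True,           # compress() generates a vuid when timemachine is set
        'allow': ['@media_users'],     # '@' prefix designates a group
    })
    print(share['id'], share['vuid'])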
Example #11
File: pool.py Project: jpaetzel/freenas
class PoolDatasetService(CRUDService):
    class Config:
        namespace = 'pool.dataset'

    @filterable
    def query(self, filters, options):
        # Optimization for cases in which filters can be applied directly at zfs.dataset.query
        zfsfilters = []
        for f in filters:
            if len(f) == 3:
                if f[0] in ('id', 'name', 'pool', 'type'):
                    zfsfilters.append(f)
        datasets = self.middleware.call_sync('zfs.dataset.query', zfsfilters,
                                             None)
        return filter_list(self.__transform(datasets), filters, options)

    def __transform(self, datasets):
        """
        We need to transform the data zfs gives us to make it consistent/user-friendly,
        making it match whatever pool.dataset.{create,update} uses as input.
        """
        def transform(dataset):
            for orig_name, new_name, method in (
                ('org.freenas:description', 'comments', None),
                ('dedup', 'deduplication', str.upper),
                ('atime', None, str.upper),
                ('casesensitivity', None, str.upper),
                ('exec', None, str.upper),
                ('sync', None, str.upper),
                ('compression', None, str.upper),
                ('origin', None, None),
                ('quota', None, _null),
                ('refquota', None, _null),
                ('reservation', None, _null),
                ('refreservation', None, _null),
                ('copies', None, None),
                ('snapdir', None, str.upper),
                ('readonly', None, str.upper),
                ('recordsize', None, None),
                ('sparse', None, None),
                ('volsize', None, None),
                ('volblocksize', None, None),
            ):
                if orig_name not in dataset['properties']:
                    continue
                i = new_name or orig_name
                dataset[i] = dataset['properties'][orig_name]
                if method:
                    dataset[i]['value'] = method(dataset[i]['value'])
            del dataset['properties']

            rv = []
            for child in dataset['children']:
                rv.append(transform(child))
            dataset['children'] = rv
            return dataset

        rv = []
        for dataset in datasets:
            rv.append(transform(dataset))
        return rv

    @accepts(
        Dict(
            'pool_dataset_create',
            Str('name', required=True),
            Str('type', enum=['FILESYSTEM', 'VOLUME'], default='FILESYSTEM'),
            Int('volsize'),
            Str('volblocksize',
                enum=[
                    '512',
                    '1K',
                    '2K',
                    '4K',
                    '8K',
                    '16K',
                    '32K',
                    '64K',
                    '128K',
                ]),
            Bool('sparse'),
            Str('comments'),
            Str('sync', enum=[
                'STANDARD',
                'ALWAYS',
                'DISABLED',
            ]),
            Str('compression',
                enum=[
                    'OFF',
                    'LZ4',
                    'GZIP-1',
                    'GZIP-6',
                    'GZIP-9',
                    'ZLE',
                    'LZJB',
                ]),
            Str('atime', enum=['ON', 'OFF']),
            Str('exec', enum=['ON', 'OFF']),
            Int('quota'),
            Int('refquota'),
            Int('reservation'),
            Int('refreservation'),
            Int('copies'),
            Str('snapdir', enum=['VISIBLE', 'HIDDEN']),
            Str('deduplication', enum=['ON', 'VERIFY', 'OFF']),
            Str('readonly', enum=['ON', 'OFF']),
            Str('recordsize',
                enum=[
                    '512',
                    '1K',
                    '2K',
                    '4K',
                    '8K',
                    '16K',
                    '32K',
                    '64K',
                    '128K',
                    '256K',
                    '512K',
                    '1024K',
                ]),
            Str('casesensitivity', enum=['SENSITIVE', 'INSENSITIVE', 'MIXED']),
            register=True,
        ))
    async def do_create(self, data):
        """
        Creates a dataset/zvol.

        `volsize` is required for type=VOLUME and is supposed to be a multiple of the block size.
        """

        verrors = ValidationErrors()
        await self.__common_validation(verrors, 'pool_dataset_create', data,
                                       'CREATE')
        if verrors:
            raise verrors

        props = {}
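        # Map accepted field names onto ZFS property names (e.g. 'comments' -> 'org.freenas:description',
        # 'deduplication' -> 'dedup') and normalize values before passing them to zfs.dataset.create below.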
        for i, real_name, transform in (
            ('atime', None, str.lower),
            ('casesensitivity', None, str.lower),
            ('comments', 'org.freenas:description', None),
            ('compression', None, str.lower),
            ('copies', None, lambda x: str(x)),
            ('deduplication', 'dedup', str.lower),
            ('exec', None, str.lower),
            ('quota', None, _none),
            ('readonly', None, str.lower),
            ('recordsize', None, None),
            ('refquota', None, _none),
            ('refreservation', None, _none),
            ('reservation', None, _none),
            ('snapdir', None, str.lower),
            ('sparse', None, None),
            ('sync', None, str.lower),
            ('volblocksize', None, None),
            ('volsize', None, lambda x: str(x)),
        ):
            if i not in data:
                continue
            name = real_name or i
            props[name] = data[i] if not transform else transform(data[i])

        await self.middleware.call('zfs.dataset.create', {
            'name': data['name'],
            'type': data['type'],
            'properties': props,
        })

        await self.middleware.call('zfs.dataset.mount', data['name'])

    def _add_inherit(name):
        def add(attr):
            attr.enum.append('INHERIT')

        return {'name': name, 'method': add}

    @accepts(
        Str('id'),
        Patch(
            'pool_dataset_create',
            'pool_dataset_update',
            ('rm', {
                'name': 'name'
            }),
            ('rm', {
                'name': 'type'
            }),
            ('rm', {
                'name': 'casesensitivity'
            }),  # Its a readonly attribute
            ('rm', {
                'name': 'sparse'
            }),  # Create time only attribute
            ('rm', {
                'name': 'volblocksize'
            }),  # Create time only attribute
            ('edit', _add_inherit('atime')),
            ('edit', _add_inherit('exec')),
            ('edit', _add_inherit('sync')),
            ('edit', _add_inherit('compression')),
            ('edit', _add_inherit('deduplication')),
            ('edit', _add_inherit('readonly')),
            ('edit', _add_inherit('recordsize')),
            ('edit', _add_inherit('snapdir')),
        ))
    async def do_update(self, id, data):
        """
        Updates a dataset/zvol `id`.
        """

        verrors = ValidationErrors()

        dataset = await self.middleware.call('pool.dataset.query',
                                             [('id', '=', id)])
        if not dataset:
            verrors.add('id', f'{id} does not exist', errno.ENOENT)
        else:
            data['type'] = dataset[0]['type']
            await self.__common_validation(verrors, 'pool_dataset_update',
                                           data, 'UPDATE')
        if verrors:
            raise verrors

        props = {}
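        # Same field-to-property mapping as in do_create, except that properties flagged as inheritable
        # may be set to 'INHERIT', which is passed through as {'source': 'INHERIT'} instead of a value.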
        for i, real_name, transform, inheritable in (
            ('atime', None, str.lower, True),
            ('comments', 'org.freenas:description', None, False),
            ('sync', None, str.lower, True),
            ('compression', None, str.lower, True),
            ('deduplication', 'dedup', str.lower, True),
            ('exec', None, str.lower, True),
            ('quota', None, _none, False),
            ('refquota', None, _none, False),
            ('reservation', None, _none, False),
            ('refreservation', None, _none, False),
            ('copies', None, None, False),
            ('snapdir', None, str.lower, True),
            ('readonly', None, str.lower, True),
            ('recordsize', None, None, True),
            ('volsize', None, lambda x: str(x), False),
        ):
            if i not in data:
                continue
            name = real_name or i
            if inheritable and data[i] == 'INHERIT':
                props[name] = {'source': 'INHERIT'}
            else:
                props[name] = {
                    'value': data[i] if not transform else transform(data[i])
                }

        return await self.middleware.call('zfs.dataset.update', id,
                                          {'properties': props})

    async def __common_validation(self, verrors, schema, data, mode):
        assert mode in ('CREATE', 'UPDATE')

        if data['type'] == 'FILESYSTEM':
            for i in ('sparse', 'volsize', 'volblocksize'):
                if i in data:
                    verrors.add(f'{schema}.{i}',
                                'This field is not valid for FILESYSTEM')
        elif data['type'] == 'VOLUME':
            if mode == 'CREATE' and 'volsize' not in data:
                verrors.add(f'{schema}.volsize',
                            'This field is required for VOLUME')

            for i in (
                    'atime',
                    'casesensitivity',
                    'quota',
                    'refquota',
                    'recordsize',
            ):
                if i in data:
                    verrors.add(f'{schema}.{i}',
                                'This field is not valid for VOLUME')

    @accepts(Str('id'))
    async def do_delete(self, id):
        return await self.middleware.call('zfs.dataset.delete', id)

    @item_method
    @accepts(Str('id'))
    async def promote(self, id):
        """
        Promote the cloned dataset `id`
        """
        dataset = await self.middleware.call('zfs.dataset.query',
                                             [('id', '=', id)])
        if not dataset:
            raise CallError(f'Dataset "{id}" does not exist.', errno.ENOENT)
        if not dataset[0]['properties']['origin']['value']:
            raise CallError('Only cloned datasets can be promoted.',
                            errno.EBADMSG)
        return await self.middleware.call('zfs.dataset.promote', id)
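A minimal usage sketch, assuming the middlewared client; the pool and dataset names are illustrative. It mirrors the __common_validation rules above: volume-only fields are rejected for filesystems, and volsize is mandatory for volumes.

from middlewared.client import Client

with Client() as c:
    # FILESYSTEM dataset: sparse/volsize/volblocksize would be rejected by __common_validation.
    c.call('pool.dataset.create', {
        'name': 'tank/projects',
        'type': 'FILESYSTEM',
        'compression': 'LZ4',
        'atime': 'OFF',
    })

    # VOLUME (zvol): volsize is required and should be a multiple of volblocksize.
    c.call('pool.dataset.create', {
        'name': 'tank/vm-disk0',
        'type': 'VOLUME',
        'volsize': 10 * 1024 ** 3,   # 10 GiB
        'volblocksize': '16K',
        'sparse': True,
    })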
Example #12
File: nfs.py Project: sbignell/freenas
class SharingNFSService(SharingService):

    path_field = 'paths'
    share_task_type = 'NFS'

    class Config:
        namespace = "sharing.nfs"
        datastore = "sharing.nfs_share"
        datastore_prefix = "nfs_"
        datastore_extend = "sharing.nfs.extend"

    async def human_identifier(self, share_task):
        return ', '.join(share_task[self.path_field])

    @private
    async def sharing_task_determine_locked(self, data, locked_datasets):
        for path in data[self.path_field]:
            if await self.middleware.call(
                    'pool.dataset.path_in_locked_datasets', path,
                    locked_datasets):
                return True
        else:
            return False

    @accepts(
        Dict(
            "sharingnfs_create",
            List("paths", items=[Dir("path")], required=True, empty=False),
            List("aliases",
                 items=[Str("path", validators=[Match(r"^/.*")])],
                 default=[]),
            Str("comment", default=""),
            List("networks",
                 items=[IPAddr("network", network=True)],
                 default=[]),
            List("hosts", items=[Str("host")], default=[]),
            Bool("alldirs", default=False),
            Bool("ro", default=False),
            Bool("quiet", default=False),
            Str("maproot_user", required=False, default=None, null=True),
            Str("maproot_group", required=False, default=None, null=True),
            Str("mapall_user", required=False, default=None, null=True),
            Str("mapall_group", required=False, default=None, null=True),
            List(
                "security",
                default=[],
                items=[
                    Str("provider", enum=["SYS", "KRB5", "KRB5I", "KRB5P"])
                ],
            ),
            Bool("enabled", default=True),
            register=True,
            strict=True,
        ))
    async def do_create(self, data):
        """
        Create an NFS Share.

        `paths` is a list of valid filesystem paths that are exported by this share.

        `aliases` is a list of aliases for each path (or an empty list if aliases are not used).

        `networks` is a list of authorized networks that are allowed to access the share, in
        "network/mask" CIDR notation. If empty, all networks are allowed.

        `hosts` is a list of IP addresses/hostnames which are allowed to access the share. If empty,
        all IP addresses/hostnames are allowed.

        `alldirs` is a boolean value which, when set, indicates that the client can mount any
        subdirectory of the selected pool or dataset.
        """
        verrors = ValidationErrors()

        await self.validate(data, "sharingnfs_create", verrors)

        if verrors:
            raise verrors

        await self.compress(data)
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        await self.extend(data)

        await self._service_change("nfs", "reload")

        return await self.get_instance(data["id"])

    @accepts(Int("id"),
             Patch("sharingnfs_create", "sharingnfs_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Update NFS Share of `id`.
        """
        verrors = ValidationErrors()
        old = await self.get_instance(id)

        new = old.copy()
        new.update(data)

        await self.validate(new, "sharingnfs_update", verrors, old=old)

        if verrors:
            raise verrors

        await self.compress(new)
        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, new,
                                   {"prefix": self._config.datastore_prefix})

        await self._service_change("nfs", "reload")

        return await self.get_instance(id)

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete NFS Share of `id`.
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
        await self._service_change("nfs", "reload")

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        if len(data["aliases"]):
            if not osc.IS_LINUX:
                verrors.add(
                    f"{schema_name}.aliases",
                    "This field is only supported on SCALE",
                )

            if len(data["aliases"]) != len(data["paths"]):
                verrors.add(
                    f"{schema_name}.aliases",
                    "This field should be either empty of have the same number of elements as paths",
                )

        if data["alldirs"] and len(data["paths"]) > 1:
            verrors.add(
                f"{schema_name}.alldirs",
                "This option can only be used for shares that contain single path"
            )

        # need to make sure that the nfs share is within the zpool mountpoint
        for idx, i in enumerate(data["paths"]):
            await check_path_resides_within_volume(
                verrors, self.middleware, f'{schema_name}.paths.{idx}', i)

        await self.middleware.run_in_thread(self.validate_paths, data,
                                            schema_name, verrors)

        filters = []
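        # When updating, exclude the share being edited so it does not conflict with itself.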
        if old:
            filters.append(["id", "!=", old["id"]])
        other_shares = await self.middleware.call("sharing.nfs.query", filters)
        dns_cache = await self.resolve_hostnames(
            sum([share["hosts"]
                 for share in other_shares], []) + data["hosts"])
        await self.middleware.run_in_thread(self.validate_hosts_and_networks,
                                            other_shares, data, schema_name,
                                            verrors, dns_cache)

        for k in ["maproot", "mapall"]:
            if not data[f"{k}_user"] and not data[f"{k}_group"]:
                pass
            elif not data[f"{k}_user"] and data[f"{k}_group"]:
                verrors.add(
                    f"{schema_name}.{k}_user",
                    "This field is required when map group is specified")
            else:
                user = group = None
                with contextlib.suppress(KeyError):
                    user = await self.middleware.call(
                        'dscache.get_uncached_user', data[f'{k}_user'])

                if not user:
                    verrors.add(f"{schema_name}.{k}_user", "User not found")

                if data[f'{k}_group']:
                    with contextlib.suppress(KeyError):
                        group = await self.middleware.call(
                            'dscache.get_uncached_group', data[f'{k}_group'])

                    if not group:
                        verrors.add(f"{schema_name}.{k}_group",
                                    "Group not found")

        if data["maproot_user"] and data["mapall_user"]:
            verrors.add(f"{schema_name}.mapall_user",
                        "maproot_user disqualifies mapall_user")

        if data["security"]:
            nfs_config = await self.middleware.call("nfs.config")
            if not nfs_config["v4"]:
                verrors.add(f"{schema_name}.security",
                            "This is not allowed when NFS v4 is disabled")

    @private
    def validate_paths(self, data, schema_name, verrors):
        if osc.IS_LINUX:
            # Ganesha does not have such a restriction, each path is a different share
            return

        dev = None
        for i, path in enumerate(data["paths"]):
            stat = os.stat(path)
            if dev is None:
                dev = stat.st_dev
            else:
                if dev != stat.st_dev:
                    verrors.add(
                        f'{schema_name}.paths.{i}',
                        'Paths for an NFS share must reside within the same filesystem'
                    )

    @private
    async def resolve_hostnames(self, hostnames):
        hostnames = list(set(hostnames))

        async def resolve(hostname):
            try:
                return (await asyncio.wait_for(
                    self.middleware.run_in_thread(socket.getaddrinfo, hostname,
                                                  None), 5))[0][4][0]
            except Exception as e:
                self.logger.warning("Unable to resolve host %r: %r", hostname,
                                    e)
                return None

        resolved_hostnames = await asyncio_map(resolve, hostnames, 8)
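        # asyncio_map runs at most 8 lookups concurrently; hosts that fail to resolve map to None.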

        return dict(zip(hostnames, resolved_hostnames))

    @private
    def validate_hosts_and_networks(self, other_shares, data, schema_name,
                                    verrors, dns_cache):
        dev = os.stat(data["paths"][0]).st_dev

        used_networks = set()
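        # Collect every network already exported by other shares that reside on the same filesystem (device).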
        for share in other_shares:
            try:
                share_dev = os.stat(share["paths"][0]).st_dev
            except Exception:
                self.logger.warning("Failed to stat first path for %r",
                                    share,
                                    exc_info=True)
                continue

            if share_dev == dev:
                for host in share["hosts"]:
                    host = dns_cache[host]
                    if host is None:
                        continue

                    try:
                        network = ipaddress.ip_network(host)
                    except Exception:
                        self.logger.warning("Got invalid host %r", host)
                        continue
                    else:
                        used_networks.add(network)

                for network in share["networks"]:
                    try:
                        network = ipaddress.ip_network(network, strict=False)
                    except Exception:
                        self.logger.warning("Got invalid network %r", network)
                        continue
                    else:
                        used_networks.add(network)

                if not share["hosts"] and not share["networks"]:
                    used_networks.add(ipaddress.ip_network("0.0.0.0/0"))
                    used_networks.add(ipaddress.ip_network("::/0"))

        for host in set(data["hosts"]):
            host = dns_cache[host]
            if host is None:
                continue

            network = ipaddress.ip_network(host)
            if network in used_networks:
                verrors.add(
                    f"{schema_name}.hosts",
                    f"Another NFS share already exports this dataset for {host}"
                )

            used_networks.add(network)

        for network in set(data["networks"]):
            network = ipaddress.ip_network(network, strict=False)

            if network in used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    f"Another NFS share already exports this dataset for {network}"
                )

            used_networks.add(network)

        if not data["hosts"] and not data["networks"]:
            if used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    "Another NFS share already exports this dataset for some network"
                )

    @private
    async def extend(self, data):
        data["networks"] = data.pop("network").split()
        data["hosts"] = data["hosts"].split()
        data["security"] = [s.upper() for s in data["security"]]
        return data

    @private
    async def compress(self, data):
        data["network"] = " ".join(data.pop("networks"))
        data["hosts"] = " ".join(data["hosts"])
        data["security"] = [s.lower() for s in data["security"]]
        data.pop(self.locked_field, None)
        return data
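A minimal usage sketch, assuming the middlewared client; the path, network, and accounts are illustrative. Note that `security` entries only pass validate() when NFSv4 is enabled.

from middlewared.client import Client

with Client() as c:
    c.call('sharing.nfs.create', {
        'paths': ['/mnt/tank/exports/home'],   # every path must reside within a pool
        'comment': 'Home directories',
        'networks': ['192.168.0.0/24'],        # CIDR notation; empty means all networks
        'maproot_user': 'root',
        'maproot_group': 'wheel',
        'security': ['SYS'],                   # rejected by validate() if NFSv4 is disabled
    })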
Example #13
class ACLTemplateService(CRUDService):

    class Config:
        cli_namespace = 'filesystem.acltemplate'
        datastore = 'filesystem.acltemplate'
        datastore_prefix = 'acltemplate_'
        namespace = 'filesystem.acltemplate'

    ENTRY = Patch(
        'acltemplate_create', 'acltemplate_entry',
        ('add', Int('id')),
        ('add', Bool('builtin')),
    )

    @private
    async def validate_acl(self, data, schema, verrors):
        acltype = ACLType[data['acltype']]
        aclcheck = acltype.validate({'dacl': data['acl']})
        if not aclcheck['is_valid']:
            for err in aclcheck['errors']:
                if err[2]:
                    v = f'{schema}.{err[0]}.{err[2]}'
                else:
                    v = f'{schema}.{err[0]}'

                verrors.add(v, err[1])

        if acltype is ACLType.POSIX1E:
            await self.middleware.call(
                "filesystem.gen_aclstring_posix1e",
                copy.deepcopy(data["acl"]), False, verrors
            )

        for idx, ace in enumerate(data['acl']):
            if ace['id'] is None:
                verrors.add(f'{schema}.{idx}.id', 'null id is not permitted.')

    @accepts(Dict(
        "acltemplate_create",
        Str("name", required=True),
        Str("acltype", required=True, enum=["NFS4", "POSIX1E"]),
        OROperator(Ref('nfs4_acl'), Ref('posix1e_acl'), name='acl', required=True),
        register=True
    ))
    async def do_create(self, data):
        """
        Create a new filesystem ACL template.
        """
        verrors = ValidationErrors()
        if len(data['acl']) == 0:
            verrors.add(
                "filesystem_acltemplate_create.acl",
                "At least one ACL entry must be specified."
            )
        await self.validate_acl(data, "filesystem_acltemplate_create.acl", verrors)
        verrors.check()
        data['builtin'] = False

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )
        return await self._get_instance(data['id'])

    @accepts(
        Int('id'),
        Patch(
            'acltemplate_create',
            'acltemplate_update',
            ('attr', {'update': True})
        )
    )
    async def do_update(self, id, data):
        """
        Update filesystem ACL template `id`.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        if old['builtin']:
            verrors.add("filesystem_acltemplate_update.builtin",
                        "built-in ACL templates may not be changed")

        if new['name'] != old['name']:
            name_exists = bool(await self.query([('name', '=', new['name'])]))
            if name_exists:
                verrors.add("filesystem_acltemplate_update.name",
                            f"{data['name']}: name is not unique")

        if len(new['acl']) == 0:
            verrors.add(
                "filesystem_acltemplate_update.acl",
                "At least one ACL entry must be specified."
            )
        await self.validate_acl(new, "filesystem_acltemplate_update.acl", verrors)
        verrors.check()

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )
        return await self.get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        entry = await self.get_instance(id)
        if entry['builtin']:
            raise CallError("Deletion of builtin templates is not permitted",
                            errno.EPERM)

        return await self.middleware.call(
            'datastore.delete', self._config.datastore, id
        )

    @private
    async def append_builtins(self, data):
        """
        This method ensures that the ACL grants some minimum level of permissions
        to our builtin_users and builtin_administrators accounts.
        """
        bu_id = int(SMBBuiltin.USERS.value[1][9:])
        ba_id = int(SMBBuiltin.ADMINISTRATORS.value[1][9:])
        has_builtins = any(filter(lambda x: x["id"] in [bu_id, ba_id], data['acl']))
        if has_builtins:
            return

        if data['acltype'] == ACLType.NFS4.name:
            data['acl'].extend([
                {"tag": "GROUP", "id": bu_id, "perms": {"BASIC": "MODIFY"}, "flags": {"BASIC": "INHERIT"}, "type": "ALLOW"},
                {"tag": "GROUP", "id": ba_id, "perms": {"BASIC": "FULL_CONTROL"}, "flags": {"BASIC": "INHERIT"}, "type": "ALLOW"},
            ])
            return

        has_default_mask = any(filter(lambda x: x["tag"] == "MASK" and x["default"], data['acl']))
        has_access_mask = any(filter(lambda x: x["tag"] == "MASK" and not x["default"], data['acl']))
        all_perms = {"READ": True, "WRITE": True, "EXECUTE": True}
        data['acl'].extend([
            {"tag": "GROUP", "id": bu_id, "perms": all_perms, "default": False},
            {"tag": "GROUP", "id": bu_id, "perms": all_perms, "default": True},
            {"tag": "GROUP", "id": ba_id, "perms": all_perms, "default": False},
            {"tag": "GROUP", "id": ba_id, "perms": all_perms, "default": True},
        ])

        if not has_default_mask:
            data['acl'].append({"tag": "MASK", "id": -1, "perms": all_perms, "default": True})

        if not has_access_mask:
            data['acl'].append({"tag": "MASK", "id": -1, "perms": all_perms, "default": False})

        return

    @private
    async def resolve_names(self, uid, gid, data):
        for ace in data['acl']:
            if ace['id'] != -1:
                ace['who'] = await self.middleware.call(
                    'idmap.id_to_name', ace['id'], ace['tag']
                )
            elif ace['tag'] in ('group@', 'GROUP_OBJ'):
                ace['who'] = await self.middleware.call(
                    'idmap.id_to_name', gid, 'GROUP'
                )
            elif ace['tag'] in ('owner@', 'USER_OBJ'):
                ace['who'] = await self.middleware.call(
                    'idmap.id_to_name', uid, 'USER'
                )
            else:
                ace['who'] = None

        return

    @accepts(Dict(
        "acltemplate_by_path",
        Str("path", default=""),
        Ref('query-filters'),
        Ref('query-options'),
        Dict(
            "format-options",
            Bool("canonicalize", default=False),
            Bool("ensure_builtins", default=False),
            Bool("resolve_names", default=False),
        )
    ))
    @returns(List(
        'templates',
        items=[Ref('acltemplate_entry')]
    ))
    async def by_path(self, data):
        """
        Retrieve list of available ACL templates for a given `path`.

        Supports `query-filters` and `query-options`.
        `format-options` gives additional options to alter the results of
        the template query:

        `canonicalize` - place ACL entries for NFSv4 ACLs in Microsoft canonical order.
        `ensure_builtins` - ensure all results contain entries for `builtin_users` and `builtin_administrators`
        groups.
        `resolve_names` - convert ids in ACL entries into names.
        """
        verrors = ValidationErrors()
        filters = data.get('query-filters')
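        # When a path is given, results are narrowed to templates matching that path's ACL type.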
        if data['path']:
            path = await self.middleware.call(
                "filesystem.resolve_cluster_path", data['path']
            )
            acltype = await self.middleware.call(
                'filesystem.path_get_acltype', path
            )
            if acltype == ACLType.DISABLED.name:
                return []

            if acltype == ACLType.POSIX1E.name and data['format-options']['canonicalize']:
                verrors.add(
                    "filesystem.acltemplate_by_path.format-options.canonicalize",
                    "POSIX1E ACLs may not be sorted into Windows canonical order."
                )
            filters.append(("acltype", "=", acltype))

        if not data['path'] and data['format-options']['resolve_names']:
            verrors.add(
                "filesystem.acltemplate_by_path.format-options.canonicalize",
                "ACL entry ids may not be resolved into names unless path is provided."
            )

        verrors.check()

        templates = await self.query(filters, data['query-options'])
        for t in templates:
            if data['format-options']['ensure_builtins']:
                await self.append_builtins(t)

            if data['format-options']['resolve_names']:
                st = await self.middleware.run_in_thread(os.stat, path)
                await self.resolve_names(st.st_uid, st.st_gid, t)

            if data['format-options']['canonicalize'] and t['acltype'] == ACLType.NFS4.name:
                canonicalized = ACLType[t['acltype']].canonicalize(t['acl'])
                t['acl'] = canonicalized

        return templates
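A minimal usage sketch, assuming the middlewared client; the path is illustrative. It exercises the format-options described in the docstring above.

from middlewared.client import Client

with Client() as c:
    templates = c.call('filesystem.acltemplate.by_path', {
        'path': '/mnt/tank/share',
        'format-options': {
            'canonicalize': True,      # only valid for NFSv4 paths (POSIX1E is rejected)
            'ensure_builtins': True,   # appends builtin_users/builtin_administrators entries
            'resolve_names': True,     # requires a path so ids can be resolved against it
        },
    })
    for tpl in templates:
        print(tpl['name'], tpl['acltype'], len(tpl['acl']))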
Example #14
class ReplicationService(CRUDService):

    class Config:
        datastore = "storage.replication"
        datastore_prefix = "repl_"
        datastore_extend = "replication.extend"
        datastore_extend_context = "replication.extend_context"

    @private
    async def extend_context(self, extra):
        return {
            "state": await self.middleware.call("zettarepl.get_state"),
        }

    @private
    async def extend(self, data, context):
        data["periodic_snapshot_tasks"] = [
            {k.replace("task_", ""): v for k, v in task.items()}
            for task in data["periodic_snapshot_tasks"]
        ]
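        # The comprehension above strips the 'task_' column prefix from the joined periodic snapshot task rows.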

        for task in data["periodic_snapshot_tasks"]:
            Cron.convert_db_format_to_schedule(task, begin_end=True)

        if data["direction"] == "PUSH":
            data["also_include_naming_schema"] = data["naming_schema"]
            data["naming_schema"] = []
        if data["direction"] == "PULL":
            data["also_include_naming_schema"] = []

        Cron.convert_db_format_to_schedule(data, "schedule", key_prefix="schedule_", begin_end=True)
        Cron.convert_db_format_to_schedule(data, "restrict_schedule", key_prefix="restrict_schedule_", begin_end=True)

        if "error" in context["state"]:
            data["state"] = context["state"]["error"]
        else:
            data["state"] = context["state"]["tasks"].get(f"replication_task_{data['id']}", {
                "state": "PENDING",
            })

        data["job"] = data["state"].pop("job", None)

        return data

    @private
    async def compress(self, data):
        if data["direction"] == "PUSH":
            data["naming_schema"] = data["also_include_naming_schema"]
        del data["also_include_naming_schema"]

        Cron.convert_schedule_to_db_format(data, "schedule", key_prefix="schedule_", begin_end=True)
        Cron.convert_schedule_to_db_format(data, "restrict_schedule", key_prefix="restrict_schedule_", begin_end=True)

        del data["periodic_snapshot_tasks"]

        return data

    @accepts(
        Dict(
            "replication_create",
            Str("name", required=True),
            Str("direction", enum=["PUSH", "PULL"], required=True),
            Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL"], required=True),
            Int("ssh_credentials", null=True, default=None),
            Str("netcat_active_side", enum=["LOCAL", "REMOTE"], null=True, default=None),
            Str("netcat_active_side_listen_address", null=True, default=None),
            Int("netcat_active_side_port_min", null=True, default=None, validators=[Port()]),
            Int("netcat_active_side_port_max", null=True, default=None, validators=[Port()]),
            Str("netcat_passive_side_connect_address", null=True, default=None),
            List("source_datasets", items=[Path("dataset", empty=False)], required=True, empty=False),
            Path("target_dataset", required=True, empty=False),
            Bool("recursive", required=True),
            List("exclude", items=[Path("dataset", empty=False)], default=[]),
            Bool("properties", default=True),
            Bool("replicate", default=False),
            List("periodic_snapshot_tasks", items=[Int("periodic_snapshot_task")], default=[],
                 validators=[Unique()]),
            List("naming_schema", items=[
                Str("naming_schema", validators=[ReplicationSnapshotNamingSchema()])], default=[]),
            List("also_include_naming_schema", items=[
                Str("naming_schema", validators=[ReplicationSnapshotNamingSchema()])], default=[]),
            Bool("auto", required=True),
            Cron(
                "schedule",
                defaults={"minute": "00"},
                begin_end=True,
                null=True,
                default=None
            ),
            Cron(
                "restrict_schedule",
                defaults={"minute": "00"},
                begin_end=True,
                null=True,
                default=None
            ),
            Bool("only_matching_schedule", default=False),
            Bool("allow_from_scratch", default=False),
            Bool("hold_pending_snapshots", default=False),
            Str("retention_policy", enum=["SOURCE", "CUSTOM", "NONE"], required=True),
            Int("lifetime_value", null=True, default=None, validators=[Range(min=1)]),
            Str("lifetime_unit", null=True, default=None, enum=["HOUR", "DAY", "WEEK", "MONTH", "YEAR"]),
            Str("compression", enum=["LZ4", "PIGZ", "PLZIP"], null=True, default=None),
            Int("speed_limit", null=True, default=None, validators=[Range(min=1)]),
            Bool("dedup", default=False),
            Bool("large_block", default=True),
            Bool("embed", default=False),
            Bool("compressed", default=True),
            Int("retries", default=5, validators=[Range(min=1)]),
            Str("logging_level", enum=["DEBUG", "INFO", "WARNING", "ERROR"], null=True, default=None),
            Bool("enabled", default=True),
            register=True,
            strict=True,
        )
    )
    async def do_create(self, data):
        """
        Create a Replication Task

        Create a Replication Task that will push or pull ZFS snapshots to or from a remote host.

        * `name` specifies a name for replication task
        * `direction` specifies whether task will `PUSH` or `PULL` snapshots
        * `transport` is a method of snapshots transfer:
          * `SSH` transfers snapshots via SSH connection. This method is supported everywhere but does not achieve
            great performance
            `ssh_credentials` is a required field for this transport (Keychain Credential ID of type `SSH_CREDENTIALS`)
          * `SSH+NETCAT` uses an unencrypted connection for data transfer. It can only be used on trusted networks
            and requires a port (specified by range from `netcat_active_side_port_min` to `netcat_active_side_port_max`)
            to be open on `netcat_active_side`
            `ssh_credentials` is also required for control connection
          * `LOCAL` replicates to or from localhost
        * `source_datasets` is a non-empty list of datasets to replicate snapshots from
        * `target_dataset` is a dataset to put snapshots into. It must exist on target side
        * `recursive` and `exclude` have the same meaning as for Periodic Snapshot Task
        * `properties` controls whether we should send dataset properties along with snapshots
        * `periodic_snapshot_tasks` is a list of periodic snapshot task IDs that are sources of snapshots for this
          replication task. Only push replication tasks can be bound to periodic snapshot tasks.
        * `naming_schema` is a list of naming schemas for pull replication
        * `also_include_naming_schema` is a list of naming schemas for push replication
        * `auto` allows replication to run automatically on schedule or after bound periodic snapshot task
        * `schedule` is a schedule to run replication task. Only `auto` replication tasks without bound periodic
          snapshot tasks can have a schedule
        * `restrict_schedule` restricts when replication task with bound periodic snapshot tasks runs. For example,
          you can have periodic snapshot tasks that run every 15 minutes, but only run replication task every hour.
        * Enabling `only_matching_schedule` will only replicate snapshots that match `schedule` or
          `restrict_schedule`
        * `allow_from_scratch` will destroy all snapshots on target side and replicate everything from scratch if none
          of the snapshots on target side matches source snapshots
        * `hold_pending_snapshots` will prevent source snapshots from being deleted by retention if replication fails
          for some reason
        * `retention_policy` specifies how to delete old snapshots on target side:
          * `SOURCE` deletes snapshots that are absent on source side
          * `CUSTOM` deletes snapshots that are older than `lifetime_value` and `lifetime_unit`
          * `NONE` does not delete any snapshots
        * `compression` compresses SSH stream. Available only for SSH transport
        * `speed_limit` limits speed of SSH stream. Available only for SSH transport
        * `dedup`, `large_block`, `embed` and `compressed` are various ZFS stream flags documented in `man zfs send`
        * `retries` specifies number of retries before considering replication failed

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.create",
                "params": [{
                    "name": "Work Backup",
                    "direction": "PUSH",
                    "transport": "SSH",
                    "ssh_credentials": [12],
                    "source_datasets", ["data/work"],
                    "target_dataset": "repl/work",
                    "recursive": true,
                    "periodic_snapshot_tasks": [5],
                    "auto": true,
                    "restrict_schedule": {
                        "minute": "0",
                        "hour": "*/2",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    },
                    "only_matching_schedule": true,
                    "retention_policy": "CUSTOM",
                    "lifetime_value": 1,
                    "lifetime_unit": "WEEK",
                }]
            }
        """

        verrors = ValidationErrors()
        verrors.add_child("replication_create", await self._validate(data))

        if verrors:
            raise verrors

        periodic_snapshot_tasks = data["periodic_snapshot_tasks"]
        await self.compress(data)
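        # compress() removes periodic_snapshot_tasks from the payload; the ids saved above are
        # linked separately via _set_periodic_snapshot_tasks below.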

        id = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix}
        )

        await self._set_periodic_snapshot_tasks(id, periodic_snapshot_tasks)

        await self.middleware.call("service.restart", "cron")
        await self.middleware.call("zettarepl.update_tasks")

        return await self._get_instance(id)

    @accepts(Int("id"), Patch(
        "replication_create",
        "replication_update",
        ("attr", {"update": True}),
    ))
    async def do_update(self, id, data):
        """
        Update a Replication Task with specific `id`

        See the documentation for the `create` method for information on payload contents

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.update",
                "params": [
                    7,
                    {
                        "name": "Work Backup",
                        "direction": "PUSH",
                        "transport": "SSH",
                        "ssh_credentials": [12],
                        "source_datasets", ["data/work"],
                        "target_dataset": "repl/work",
                        "recursive": true,
                        "periodic_snapshot_tasks": [5],
                        "auto": true,
                        "restrict_schedule": {
                            "minute": "0",
                            "hour": "*/2",
                            "dom": "*",
                            "month": "*",
                            "dow": "1,2,3,4,5",
                            "begin": "09:00",
                            "end": "18:00"
                        },
                        "only_matching_schedule": true,
                        "retention_policy": "CUSTOM",
                        "lifetime_value": 1,
                        "lifetime_unit": "WEEK",
                    }
                ]
            }
        """

        old = await self._get_instance(id)

        new = old.copy()
        if new["ssh_credentials"]:
            new["ssh_credentials"] = new["ssh_credentials"]["id"]
        new["periodic_snapshot_tasks"] = [task["id"] for task in new["periodic_snapshot_tasks"]]
        new.update(data)

        verrors = ValidationErrors()
        verrors.add_child("replication_update", await self._validate(new, id))

        if verrors:
            raise verrors

        periodic_snapshot_tasks = new["periodic_snapshot_tasks"]
        await self.compress(new)

        new.pop("state", None)
        new.pop("job", None)

        await self.middleware.call(
            "datastore.update",
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )

        await self._set_periodic_snapshot_tasks(id, periodic_snapshot_tasks)

        await self.middleware.call("service.restart", "cron")
        await self.middleware.call("zettarepl.update_tasks")

        return await self._get_instance(id)

    @accepts(
        Int("id")
    )
    async def do_delete(self, id):
        """
        Delete a Replication Task with specific `id`

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.delete",
                "params": [
                    1
                ]
            }
        """

        response = await self.middleware.call(
            "datastore.delete",
            self._config.datastore,
            id
        )

        await self.middleware.call("service.restart", "cron")
        await self.middleware.call("zettarepl.update_tasks")

        return response

    @item_method
    @accepts(Int("id"), Bool("really_run", default=True, hidden=True))
    @job(logs=True)
    async def run(self, job, id, really_run):
        """
        Run Replication Task of `id`.
        """
        if really_run:
            task = await self._get_instance(id)

            if not task["enabled"]:
                raise CallError("Task is not enabled")

            if task["state"]["state"] == "RUNNING":
                raise CallError("Task is already running")

            if task["state"]["state"] == "HOLD":
                raise CallError("Task is on hold")

        await self.middleware.call("zettarepl.run_replication_task", id, really_run, job)

    async def _validate(self, data, id=None):
        verrors = ValidationErrors()

        await self._ensure_unique(verrors, "", "name", data["name"], id)

        # Direction

        snapshot_tasks = []

        if data["direction"] == "PUSH":
            e, snapshot_tasks = await self._query_periodic_snapshot_tasks(data["periodic_snapshot_tasks"])
            verrors.add_child("periodic_snapshot_tasks", e)

            if data["naming_schema"]:
                verrors.add("naming_schema", "This field has no sense for push replication")

            if not snapshot_tasks and not data["also_include_naming_schema"]:
                verrors.add(
                    "periodic_snapshot_tasks", "You must either bind a periodic snapshot task or provide "
                                               "\"Also Include Naming Schema\" for a push replication task"
                )

            if data["schedule"]:
                if data["periodic_snapshot_tasks"]:
                    verrors.add("schedule", "Push replication can't be bound to periodic snapshot task and have "
                                            "schedule at the same time")
            else:
                if data["auto"] and not data["periodic_snapshot_tasks"]:
                    verrors.add("auto", "Push replication that runs automatically must be either "
                                        "bound to periodic snapshot task or have schedule")

        if data["direction"] == "PULL":
            if data["schedule"]:
                pass
            else:
                if data["auto"]:
                    verrors.add("auto", "Pull replication that runs automatically must have schedule")

            if data["periodic_snapshot_tasks"]:
                verrors.add("periodic_snapshot_tasks", "Pull replication can't be bound to periodic snapshot task")

            if not data["naming_schema"]:
                verrors.add("naming_schema", "Naming schema is required for pull replication")

            if data["also_include_naming_schema"]:
                verrors.add("also_include_naming_schema", "This field has no sense for pull replication")

            if data["hold_pending_snapshots"]:
                verrors.add("hold_pending_snapshots", "Pull replication tasks can't hold pending snapshots because "
                                                      "they don't do source retention")

        # Transport

        if data["transport"] == "SSH+NETCAT":
            if data["netcat_active_side"] is None:
                verrors.add("netcat_active_side", "You must choose active side for SSH+netcat replication")

            if data["netcat_active_side_port_min"] is not None and data["netcat_active_side_port_max"] is not None:
                if data["netcat_active_side_port_min"] > data["netcat_active_side_port_max"]:
                    verrors.add("netcat_active_side_port_max",
                                "Please specify value greater or equal than netcat_active_side_port_min")

            if data["compression"] is not None:
                verrors.add("compression", "Compression is not supported for SSH+netcat replication")

            if data["speed_limit"] is not None:
                verrors.add("speed_limit", "Speed limit is not supported for SSH+netcat replication")
        else:
            if data["netcat_active_side"] is not None:
                verrors.add("netcat_active_side", "This field only applies to SSH+netcat replication")

            for k in ["netcat_active_side_listen_address", "netcat_active_side_port_min", "netcat_active_side_port_max",
                      "netcat_passive_side_connect_address"]:
                if data[k] is not None:
                    verrors.add(k, "This field only applies to SSH+netcat replication")

        if data["transport"] == "LOCAL":
            if data["ssh_credentials"] is not None:
                verrors.add("ssh_credentials", "Remote credentials have no sense for local replication")

            if data["compression"] is not None:
                verrors.add("compression", "Compression has no sense for local replication")

            if data["speed_limit"] is not None:
                verrors.add("speed_limit", "Speed limit has no sense for local replication")
        else:
            if data["ssh_credentials"] is None:
                verrors.add("ssh_credentials", "SSH Credentials are required for non-local replication")
            else:
                try:
                    await self.middleware.call("keychaincredential.get_of_type", data["ssh_credentials"],
                                               "SSH_CREDENTIALS")
                except CallError as e:
                    verrors.add("ssh_credentials", str(e))

        # Common for all directions and transports

        for i, source_dataset in enumerate(data["source_datasets"]):
            for snapshot_task in snapshot_tasks:
                if is_child(source_dataset, snapshot_task["dataset"]):
                    if data["recursive"]:
                        for exclude in snapshot_task["exclude"]:
                            if exclude not in data["exclude"]:
                                verrors.add("exclude", f"You should exclude {exclude!r} as bound periodic snapshot "
                                                       f"task dataset {snapshot_task['dataset']!r} does")
                    else:
                        if source_dataset in snapshot_task["exclude"]:
                            verrors.add(f"source_datasets.{i}", f"Dataset {source_dataset!r} is excluded by bound "
                                                                f"periodic snapshot task for dataset "
                                                                f"{snapshot_task['dataset']!r}")

        if not data["recursive"] and data["exclude"]:
            verrors.add("exclude", "Excluding child datasets is only supported for recursive replication")

        for i, v in enumerate(data["exclude"]):
            if not any(v.startswith(ds + "/") for ds in data["source_datasets"]):
                verrors.add(f"exclude.{i}", "This dataset is not a child of any of source datasets")

        if data["replicate"]:
            if not data["recursive"]:
                verrors.add("recursive", "This option is required for full filesystem replication")

            if data["exclude"]:
                verrors.add("exclude", "This option is not supported for full filesystem replication")

            if not data["properties"]:
                verrors.add("properties", "This option is required for full filesystem replication")

        if data["schedule"]:
            if not data["auto"]:
                verrors.add("schedule", "You can't have schedule for replication that does not run automatically")
        else:
            if data["only_matching_schedule"]:
                verrors.add("only_matching_schedule", "You can't have only-matching-schedule without schedule")

        if data["retention_policy"] == "CUSTOM":
            if data["lifetime_value"] is None:
                verrors.add("lifetime_value", "This field is required for custom retention policy")
            if data["lifetime_unit"] is None:
                verrors.add("lifetime_value", "This field is required for custom retention policy")
        else:
            if data["lifetime_value"] is not None:
                verrors.add("lifetime_value", "This field has no sense for specified retention policy")
            if data["lifetime_unit"] is not None:
                verrors.add("lifetime_unit", "This field has no sense for specified retention policy")

        if data["enabled"]:
            for i, snapshot_task in enumerate(snapshot_tasks):
                if not snapshot_task["enabled"]:
                    verrors.add(
                        f"periodic_snapshot_tasks.{i}",
                        "You can't bind disabled periodic snapshot task to enabled replication task"
                    )

        return verrors

    async def _set_periodic_snapshot_tasks(self, replication_task_id, periodic_snapshot_tasks_ids):
        await self.middleware.call("datastore.delete", "storage.replication_repl_periodic_snapshot_tasks",
                                   [["replication_id", "=", replication_task_id]])
        for periodic_snapshot_task_id in periodic_snapshot_tasks_ids:
            await self.middleware.call(
                "datastore.insert", "storage.replication_repl_periodic_snapshot_tasks",
                {
                    "replication_id": replication_task_id,
                    "task_id": periodic_snapshot_task_id,
                },
            )

    async def _query_periodic_snapshot_tasks(self, ids):
        verrors = ValidationErrors()

        query_result = await self.middleware.call("pool.snapshottask.query", [["id", "in", ids]])

        snapshot_tasks = []
        for i, task_id in enumerate(ids):
            for task in query_result:
                if task["id"] == task_id:
                    snapshot_tasks.append(task)
                    break
            else:
                verrors.add(str(i), "This snapshot task does not exist")

        return verrors, snapshot_tasks

    @accepts(Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL"], required=True),
             Int("ssh_credentials", null=True, default=None))
    async def list_datasets(self, transport, ssh_credentials=None):
        """
        List datasets on the remote side.

        Accepts `transport` and SSH credentials ID (for non-local transport)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.list_datasets",
                "params": [
                    "SSH",
                    7
                ]
            }
        """

        return await self.middleware.call("zettarepl.list_datasets", transport, ssh_credentials)

    @accepts(Str("dataset", required=True),
             Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL"], required=True),
             Int("ssh_credentials", null=True, default=None))
    async def create_dataset(self, dataset, transport, ssh_credentials=None):
        """
        Create a dataset on the remote side.

        Accepts `dataset` name, `transport` and SSH credentials ID (for non-local transport)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.create_dataset",
                "params": [
                    "repl/work",
                    "SSH",
                    7
                ]
            }
        """

        return await self.middleware.call("zettarepl.create_dataset", dataset, transport, ssh_credentials)

    @accepts()
    async def list_naming_schemas(self):
        """
        List all naming schemas used in periodic snapshot and replication tasks.
        """
        naming_schemas = []
        for snapshottask in await self.middleware.call("pool.snapshottask.query"):
            naming_schemas.append(snapshottask["naming_schema"])
        for replication in await self.middleware.call("replication.query"):
            naming_schemas.extend(replication["naming_schema"])
            naming_schemas.extend(replication["also_include_naming_schema"])
        return sorted(set(naming_schemas))

    @accepts(
        List("datasets", empty=False, items=[
            Path("dataset", empty=False),
        ]),
        List("naming_schema", empty=False, items=[
            Str("naming_schema", validators=[ReplicationSnapshotNamingSchema()])
        ]),
        Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL"], required=True),
        Int("ssh_credentials", null=True, default=None),
    )
    async def count_eligible_manual_snapshots(self, datasets, naming_schema, transport, ssh_credentials):
        """
        Count how many existing snapshots of `datasets` match `naming_schema`.

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.count_eligible_manual_snapshots",
                "params": [
                    "repl/work",
                    ["auto-%Y-%m-%d_%H-%M"],
                    "SSH",
                    4
                ]
            }
        """
        return await self.middleware.call("zettarepl.count_eligible_manual_snapshots", datasets, naming_schema,
                                          transport, ssh_credentials)

    @accepts(
        Str("direction", enum=["PUSH", "PULL"], required=True),
        List("source_datasets", items=[Path("dataset", empty=False)], required=True, empty=False),
        Path("target_dataset", required=True, empty=False),
        Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL", "LEGACY"], required=True),
        Int("ssh_credentials", null=True, default=None),
    )
    async def target_unmatched_snapshots(self, direction, source_datasets, target_dataset, transport, ssh_credentials):
        """
        Check if target has any snapshots that do not exist on source.

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.target_unmatched_snapshots",
                "params": [
                    "PUSH",
                    ["repl/work", "repl/games"],
                    "backup",
                    "SSH",
                    4
                ]
            }

        Returns

            {
                "backup/work": ["auto-2019-10-15_13-00", "auto-2019-10-15_09-00"],
                "backup/games": ["auto-2019-10-15_13-00"],
            }
        """
        return await self.middleware.call("zettarepl.target_unmatched_snapshots", direction, source_datasets,
                                          target_dataset, transport, ssh_credentials)

    @private
    def new_snapshot_name(self, naming_schema):
        return datetime.now().strftime(naming_schema)

    # Legacy pair support
    @private
    @accepts(Dict(
        "replication-pair-data",
        Str("hostname", required=True),
        Str("public-key", required=True),
        Str("user", null=True),
    ))
    async def pair(self, data):
        result = await self.middleware.call("keychaincredential.ssh_pair", {
            "remote_hostname": data["hostname"],
            "username": data["user"] or "root",
            "public_key": data["public-key"],
        })
        return {
            "ssh_port": result["port"],
            "ssh_hostkey": result["host_key"],
        }
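
A minimal client-side sketch of calling the `replication.list_datasets` and `replication.count_eligible_manual_snapshots` methods above from Python rather than over the websocket API. It assumes the standard `middlewared.client.Client` is available; the credential id 7 and the dataset/naming-schema values are placeholders.

from middlewared.client import Client  # assumed import path for the middleware client

with Client() as c:
    # List datasets reachable through an existing SSH credential (id 7 is a placeholder).
    remote_datasets = c.call('replication.list_datasets', 'SSH', 7)

    # Count existing snapshots of a placeholder dataset matching one naming schema.
    eligible = c.call('replication.count_eligible_manual_snapshots',
                      ['repl/work'], ['auto-%Y-%m-%d_%H-%M'], 'SSH', 7)
    print(remote_datasets, eligible)
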
Example #15
class GroupService(CRUDService):
    class Config:
        datastore = 'account.bsdgroups'
        datastore_prefix = 'bsdgrp_'
        datastore_extend = 'group.group_extend'

    @private
    async def group_extend(self, group):
        # Get group membership
        group['users'] = [
            gm['user']['id'] for gm in await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership', [(
                    'group', '=', group['id'])], {'prefix': 'bsdgrpmember_'})
        ]
        group['users'] += [
            gmu['id'] for gmu in await self.middleware.call(
                'datastore.query', 'account.bsdusers', [('bsdusr_group_id',
                                                         '=', group['id'])])
        ]
        return group

    @private
    async def group_compress(self, group):
        if 'local' in group:
            group.pop('local')
        return group

    @filterable
    async def query(self, filters=None, options=None):
        """
        Query groups with `query-filters` and `query-options`. As a performance optimization, only local groups
        will be queried by default.

        Groups from directory services such as NIS, LDAP, or Active Directory will be included in query results
        if the option `{'extra': {'search_dscache': True}}` is specified.
        """
        if not filters:
            filters = []
        filters += self._config.datastore_filters or []

        options = options or {}
        options['extend'] = self._config.datastore_extend
        options['extend_context'] = self._config.datastore_extend_context
        options['prefix'] = self._config.datastore_prefix

        datastore_options = options.copy()
        datastore_options.pop('count', None)
        datastore_options.pop('get', None)

        extra = options.get('extra', {})
        dssearch = extra.pop('search_dscache', False)

        if dssearch:
            return await self.middleware.call('dscache.query', 'GROUPS',
                                              filters, options)

        result = await self.middleware.call('datastore.query',
                                            self._config.datastore, [],
                                            datastore_options)
        for entry in result:
            entry.update({'local': True})
        return await self.middleware.run_in_thread(filter_list, result,
                                                   filters, options)

    @accepts(
        Dict(
            'group_create',
            Int('gid'),
            Str('name', required=True),
            Bool('sudo', default=False),
            Bool('allow_duplicate_gid', default=False),
            List('users', items=[Int('id')], required=False),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a new group.

        If `gid` is not provided it is automatically filled with the next one available.

        `allow_duplicate_gid` allows distinct group names to share the same gid.

        `users` is a list of user ids (`id` attribute from `user.query`).
        """

        verrors = ValidationErrors()
        await self.__common_validation(verrors, data, 'group_create')
        verrors.check()

        if not data.get('gid'):
            data['gid'] = await self.get_next_gid()

        group = data.copy()
        group['group'] = group.pop('name')

        users = group.pop('users', [])

        group = await self.group_compress(group)
        pk = await self.middleware.call('datastore.insert',
                                        'account.bsdgroups', group,
                                        {'prefix': 'bsdgrp_'})

        for user in users:
            await self.middleware.call('datastore.insert',
                                       'account.bsdgroupmembership', {
                                           'bsdgrpmember_group': pk,
                                           'bsdgrpmember_user': user
                                       })

        await self.middleware.call('service.reload', 'user')

        await self.middleware.call('smb.groupmap_add', data['name'])

        return pk

    @accepts(
        Int('id'),
        Patch(
            'group_create',
            'group_update',
            ('attr', {
                'update': True
            }),
        ),
    )
    async def do_update(self, pk, data):
        """
        Update attributes of an existing group.
        """

        group = await self._get_instance(pk)

        verrors = ValidationErrors()
        await self.__common_validation(verrors, data, 'group_update', pk=pk)
        verrors.check()

        group.update(data)
        delete_groupmap = False
        group.pop('users', None)

        if 'name' in data and data['name'] != group['group']:
            delete_groupmap = group['group']
            group['group'] = group.pop('name')
        else:
            group.pop('name', None)

        group = await self.group_compress(group)
        await self.middleware.call('datastore.update', 'account.bsdgroups', pk,
                                   group, {'prefix': 'bsdgrp_'})

        if 'users' in data:
            existing = {
                i['bsdgrpmember_user']['id']: i
                for i in await self.middleware.call(
                    'datastore.query', 'account.bsdgroupmembership', [(
                        'bsdgrpmember_group', '=', pk)])
            }
            to_remove = set(existing.keys()) - set(data['users'])
            for i in to_remove:
                await self.middleware.call('datastore.delete',
                                           'account.bsdgroupmembership',
                                           existing[i]['id'])

            to_add = set(data['users']) - set(existing.keys())
            for i in to_add:
                await self.middleware.call('datastore.insert',
                                           'account.bsdgroupmembership', {
                                               'bsdgrpmember_group': pk,
                                               'bsdgrpmember_user': i
                                           })

        if delete_groupmap:
            await self.middleware.call('notifier.groupmap_delete',
                                       delete_groupmap)

        await self.middleware.call('service.reload', 'user')

        await self.middleware.call('smb.groupmap_add', group['group'])

        return pk

    @accepts(Int('id'), Dict('options', Bool('delete_users', default=False)))
    async def do_delete(self, pk, options=None):
        """
        Delete group `id`.

        The `delete_users` option deletes all users that have this group as their primary group.
        """

        group = await self._get_instance(pk)
        await self.middleware.call('notifier.groupmap_delete', group['group'])

        if group['builtin']:
            raise CallError('A built-in group cannot be deleted.',
                            errno.EACCES)

        if options['delete_users']:
            for i in await self.middleware.call('datastore.query',
                                                'account.bsdusers',
                                                [('group', '=', group['id'])],
                                                {'prefix': 'bsdusr_'}):
                await self.middleware.call('datastore.delete',
                                           'account.bsdusers', i['id'])

        await self.middleware.call('datastore.delete', 'account.bsdgroups', pk)

        await self.middleware.call('service.reload', 'user')

        return pk

    async def get_next_gid(self):
        """
        Get the next available/free gid.
        """
        last_gid = 999
        for i in await self.middleware.call('datastore.query',
                                            'account.bsdgroups',
                                            [('builtin', '=', False)], {
                                                'order_by': ['gid'],
                                                'prefix': 'bsdgrp_'
                                            }):
            # If the difference between the last gid and the current one is
            # bigger than 1, it means we have a gap and can use it.
            if i['gid'] - last_gid > 1:
                return last_gid + 1
            last_gid = i['gid']
        return last_gid + 1

    @accepts(
        Dict('get_group_obj', Str('groupname', default=None),
             Int('gid', default=None)))
    async def get_group_obj(self, data):
        """
        Return a dictionary containing information from struct grp for the group specified by either
        `groupname` or `gid`. Bypasses the group cache.
        """
        return await self.middleware.call('dscache.get_uncached_group',
                                          data['groupname'], data['gid'])

    async def __common_validation(self, verrors, data, schema, pk=None):

        exclude_filter = [('id', '!=', pk)] if pk else []

        if 'name' in data:
            existing = await self.middleware.call(
                'datastore.query', 'account.bsdgroups',
                [('group', '=', data['name'])] + exclude_filter,
                {'prefix': 'bsdgrp_'})
            if existing:
                verrors.add(
                    f'{schema}.name',
                    f'A Group with the name "{data["name"]}" already exists.',
                    errno.EEXIST,
                )

            pw_checkname(verrors, f'{schema}.name', data['name'])

        allow_duplicate_gid = data.pop('allow_duplicate_gid', False)
        if data.get('gid') and not allow_duplicate_gid:
            existing = await self.middleware.call(
                'datastore.query', 'account.bsdgroups',
                [('gid', '=', data['gid'])] + exclude_filter,
                {'prefix': 'bsdgrp_'})
            if existing:
                verrors.add(
                    f'{schema}.gid',
                    f'The Group ID "{data["gid"]}" already exists.',
                    errno.EEXIST,
                )

        if 'users' in data:
            existing = set([
                i['id'] for i in await self.middleware.call(
                    'datastore.query', 'account.bsdusers', [('id', 'in',
                                                             data['users'])])
            ])
            notfound = set(data['users']) - existing
            if notfound:
                verrors.add(
                    f'{schema}.users',
                    f'The following users do not exist: {", ".join(map(str, notfound))}',
                )
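
A short usage sketch for the `group.create` and `group.update` methods above, assuming the standard `middlewared.client.Client` is available; the group name and user ids are placeholders.

from middlewared.client import Client  # assumed import path for the middleware client

with Client() as c:
    # Create a local group; gid is omitted, so the next free gid is picked by get_next_gid().
    group_id = c.call('group.create', {
        'name': 'media-editors',  # placeholder group name
        'sudo': False,
        'users': [],              # ids from user.query; empty here
    })
    # Later, replace the membership with placeholder user ids 33 and 34.
    c.call('group.update', group_id, {'users': [33, 34]})
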
Example #16
class IdmapDomainService(CRUDService):
    class Config:
        datastore = 'directoryservice.idmap_domain'
        datastore_prefix = 'idmap_domain_'
        namespace = 'idmap.domain'

    @accepts(
        Dict('idmap_domain_create',
             Str('name', required=True),
             Str('DNS_domain_name'),
             register=True))
    async def do_create(self, data):
        """
        Create a new IDMAP domain. These domains must be unique. This table
        will be automatically populated after joining an Active Directory domain
        if "allow trusted domains" is set to True in the AD service configuration.
        There are three default system domains: DS_TYPE_ACTIVEDIRECTORY, DS_TYPE_LDAP, DS_TYPE_DEFAULT_DOMAIN.
        The system domains correspond with the idmap settings under Active Directory, LDAP, and SMB
        respectively.
        `name` is the pre-Windows 2000 domain name.
        `DNS_domain_name` is the DNS name of the domain.
        """
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("idmap_domain_create", "idmap_domain_update",
                   ("attr", {
                       "update": True
                   })))
    async def do_update(self, id, data):
        """
        Update a domain by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        verrors.add_child('idmap_domain_update', await self._validate(new))

        if verrors:
            raise verrors

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete a domain by id. Deletion of default system domains is not permitted.
        """
        if id <= 5:
            entry = await self._get_instance(id)
            raise CallError(
                f'Deleting system idmap domain [{entry["name"]}] is not permitted.',
                errno.EPERM)
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)

    @private
    async def _validate(self, data):
        verrors = ValidationErrors()
        if data['id'] <= dstype['DS_TYPE_DEFAULT_DOMAIN'].value:
            verrors.add(
                'id',
                f'Modifying system idmap domain [{data["name"]}] is not permitted.'
            )
        return verrors
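
A sketch of creating a non-system idmap domain through `idmap.domain.create`, assuming the standard `middlewared.client.Client`; both names below are placeholders.

from middlewared.client import Client  # assumed import path for the middleware client

with Client() as c:
    domain = c.call('idmap.domain.create', {
        'name': 'EXAMPLE',                      # placeholder pre-Windows 2000 domain name
        'DNS_domain_name': 'example.internal',  # placeholder DNS domain name
    })
    print(domain['id'])
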
Example #17
class KMIPService(ConfigService):
    class Config:
        datastore = 'system_kmip'
        datastore_extend = 'kmip.kmip_extend'
        cli_namespace = 'system.kmip'

    ENTRY = Dict(
        'kmip_entry',
        Int('id', required=True),
        Bool('enabled', required=True),
        Bool('manage_sed_disks', required=True),
        Bool('manage_zfs_keys', required=True),
        Int('certificate', null=True, required=True),
        Int('certificate_authority', null=True, required=True),
        Int('port', validators=[Port()], required=True),
        Str('server', required=True, null=True),
    )

    @private
    async def kmip_extend(self, data):
        for k in filter(lambda v: data[v],
                        ('certificate', 'certificate_authority')):
            data[k] = data[k]['id']
        return data

    @accepts(
        Patch(
            'kmip_entry',
            'kmip_update',
            ('rm', {
                'name': 'id'
            }),
            ('add', Bool('enabled')),
            ('add', Bool('force_clear')),
            ('add', Bool('change_server')),
            ('add', Bool('validate')),
            ('attr', {
                'update': True
            }),
        ))
    @job(lock='kmip_update')
    async def do_update(self, job, data):
        """
        Update KMIP Server Configuration.

        System currently authenticates connection with remote KMIP Server with a TLS handshake. `certificate` and
        `certificate_authority` determine the certs which will be used to initiate the TLS handshake with `server`.

        `validate` is enabled by default. When enabled, system will test connection to `server` making sure
        it's reachable.

        `manage_zfs_keys`/`manage_sed_disks` when enabled will sync keys from local database to remote KMIP server.
        When disabled, if there are any keys left to be retrieved from the KMIP server,
        it will sync them back to local database.

        `enabled` if true, cannot be set to disabled if there are existing keys pending to be synced. However users
        can still perform this action by enabling `force_clear`.

        `change_server` is a boolean field which allows users to migrate data between two KMIP servers. System
        will first migrate keys from old KMIP server to local database and then migrate the keys from local database
        to new KMIP server. If it is unable to retrieve all the keys from old server, this will fail. Users can bypass
        this by enabling `force_clear`.

        `force_clear` is a boolean option which when enabled will in this case remove all
        pending keys to be synced from database. It should be used with extreme caution as users may end up with
        not having ZFS dataset or SED disks keys leaving them locked forever. It is disabled by default.
        """
        old = await self.config()
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()

        if not new['server'] and new['enabled']:
            verrors.add('kmip_update.server',
                        'Please specify a valid hostname or an IPv4 address')

        if new['enabled']:
            verrors.extend(
                (await
                 self.middleware.call('certificate.cert_services_validation',
                                      new['certificate'],
                                      'kmip_update.certificate', False)))

        ca = await self.middleware.call(
            'certificateauthority.query',
            [['id', '=', new['certificate_authority']]])
        if ca and not verrors:
            ca = ca[0]
            if not await self.middleware.call(
                    'cryptokey.validate_cert_with_chain',
                (await self.middleware.call(
                    'certificate.get_instance',
                    new['certificate']))['certificate'], [ca['certificate']]):
                verrors.add(
                    'kmip_update.certificate_authority',
                    'Certificate chain could not be verified with specified certificate authority.'
                )
        elif not ca and new['enabled']:
            verrors.add('kmip_update.certificate_authority',
                        'Please specify a valid id.')

        if new.pop('validate', True) and new['enabled'] and not verrors:
            if not await self.middleware.call('kmip.test_connection', new):
                verrors.add(
                    'kmip_update.server',
                    f'Unable to connect to {new["server"]}:{new["port"]} KMIP server.'
                )

        change_server = new.pop('change_server', False)
        if change_server and new['server'] == old['server']:
            verrors.add(
                'kmip_update.change_server',
                'Please update server field to reflect the new server.')
        if change_server and not new['enabled']:
            verrors.add('kmip_update.enabled',
                        'Must be enabled when change server is enabled.')

        force_clear = new.pop('force_clear', False)
        clear_keys = force_clear if change_server else False
        sync_error = 'KMIP sync is pending, please make sure database and KMIP server ' \
                     'are in sync before proceeding with this operation.'
        if old['enabled'] != new['enabled'] and await self.middleware.call(
                'kmip.kmip_sync_pending'):
            if force_clear:
                clear_keys = True
            else:
                verrors.add('kmip_update.enabled', sync_error)

        verrors.check()

        job.set_progress(30, 'Initial Validation complete')

        if clear_keys:
            await self.middleware.call('kmip.clear_sync_pending_keys')
            job.set_progress(50, 'Cleared keys pending sync')

        if change_server:
            # We will first migrate all the keys to the local database; once that is done,
            # we will push them to the new server:
            # old server -> db
            # db -> new server
            # The first step can be skipped if the old server is not reachable and we want to clear keys
            job.set_progress(
                55, 'Starting migration from existing server to new server')
            await self.middleware.call('datastore.update',
                                       self._config.datastore, old['id'], {
                                           'manage_zfs_keys': False,
                                           'manage_sed_disks': False
                                       })
            job.set_progress(
                60, 'Syncing keys from existing server to local database')
            sync_jobs = [(await self.middleware.call(f'kmip.{i}'))
                         for i in ('sync_zfs_keys', 'sync_sed_keys')]
            errors = []
            for sync_job in sync_jobs:
                await sync_job.wait()
                if sync_job.error:
                    errors.append(sync_job.error)
                elif sync_job.result:
                    errors.append(
                        f'Failed to sync {",".join(sync_job.result)}')

            if errors:
                await self.middleware.call('datastore.update',
                                           self._config.datastore, old['id'],
                                           old)
                # We do this because it's possible a few datasets/disks got synced to db and few didn't - this is
                # to push all the data of interest back to the KMIP server from db
                await self.middleware.call('kmip.sync_keys')
                errors = '\n'.join(errors)
                raise CallError(
                    f'Failed to sync keys from {old["server"]} to host: {errors}'
                )

            if await self.middleware.call('kmip.kmip_sync_pending'):
                raise CallError(sync_error)

            job.set_progress(
                80,
                'Successfully synced keys from existing server to local database'
            )

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            old['id'],
            new,
        )

        await self.middleware.call('service.start', 'kmip')
        if new['enabled'] and old['enabled'] != new['enabled']:
            await self.middleware.call('kmip.initialize_keys')
        if any(old[k] != new[k]
               for k in ('enabled', 'manage_zfs_keys',
                         'manage_sed_disks')) or change_server:
            job.set_progress(
                90,
                'Starting sync between local database and configured KMIP server'
            )
            await self.middleware.call('kmip.sync_keys')

        return await self.config()
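
A sketch of driving the `kmip.update` job above from a client, assuming the standard `middlewared.client.Client` and that passing `job=True` waits for the job to finish; the server name and certificate ids are placeholders.

from middlewared.client import Client  # assumed import path for the middleware client

with Client() as c:
    # kmip.update runs as a job; job=True is assumed to block until it completes.
    config = c.call('kmip.update', {
        'enabled': True,
        'server': 'kmip.example.internal',  # placeholder KMIP server
        'port': 5696,
        'certificate': 1,                   # placeholder certificate id
        'certificate_authority': 1,         # placeholder CA id
        'manage_zfs_keys': True,
        'manage_sed_disks': False,
        'validate': True,
    }, job=True)
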
Example #18
class IdmapDomainBackendService(CRUDService):
    class Config:
        datastore = 'directoryservice.idmap_domaintobackend'
        datastore_prefix = 'idmap_dtb_'
        namespace = 'idmap.domaintobackend'

    @accepts(
        Dict('idmap_domaintobackend_create',
             Dict(
                 'domain',
                 Int('id'),
                 Str('idmap_domain_name'),
                 Str('idmap_domain_dns_domain_name'),
             ),
             Str('idmap_backend',
                 enum=[
                     'AD', 'AUTORID', 'FRUIT', 'LDAP', 'NSS', 'RFC2307', 'RID',
                     'SCRIPT', 'TDB'
                 ]),
             register=True))
    async def do_create(self, data):
        """
        Set an idmap backend for a domain.
        `domain` is a dictionary containing domain information. It has a one-to-one relationship with idmap_domain entries.
        `idmap_backend` is the type of idmap backend to use for the domain.

        Create entry for domain in the respective idmap backend table if one does not exist.
        """
        data = await self.middleware.call('idmap.common_backend_compress',
                                          data)
        verrors = ValidationErrors()
        if data['domain'] in [
                dstype.DS_TYPE_LDAP.value, dstype.DS_TYPE_DEFAULT_DOMAIN.value
        ]:
            if data['idmap_backend'] not in ['ldap', 'tdb']:
                verrors.add(
                    'idmap_domaintobackend_create.idmap_backend',
                    f'idmap backend [{data["idmap_backend"]}] is not appropriate for the system domain type {dstype[data["domain"]]}'
                )
        if verrors:
            raise verrors

        backend_entry_is_present = False
        idmap_data = await self.middleware.call(
            f'idmap.{data["idmap_backend"]}.query')
        for i in idmap_data:
            if not i['domain']:
                continue
            if i['domain']['idmap_domain_name'] == data['domain'][
                    'idmap_domain_name']:
                backend_entry_is_present = True
                break

        if not backend_entry_is_present:
            next_idmap_range = await self.middleware.call(
                'idmap.get_next_idmap_range')
            await self.middleware.call(
                f'idmap.{data["idmap_backend"]}.create', {
                    'domain': {
                        'id': data['domain']['id']
                    },
                    'range_low': next_idmap_range[0],
                    'range_high': next_idmap_range[1]
                })
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("idmap_domaintobackend_create",
                   "idmap_domaintobackend_update", ("attr", {
                       "update": True
                   })))
    async def do_update(self, id, data):
        """
        Update idmap to backend mapping by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)

        new = await self.middleware.call('idmap.common_backend_compress', new)
        verrors = ValidationErrors()
        if new['domain'] in [
                dstype.DS_TYPE_LDAP.value, dstype.DS_TYPE_DEFAULT_DOMAIN.value
        ]:
            if new['idmap_backend'] not in ['ldap', 'tdb']:
                verrors.add(
                    'idmap_domaintobackend_update.idmap_backend',
                    f'idmap backend [{new["idmap_backend"]}] is not appropriate for the system domain type {dstype[new["domain"]]}'
                )
        if verrors:
            raise verrors

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        updated_entry = await self._get_instance(id)
        try:
            await self.middleware.call('idmap.get_or_create_idmap_by_domain',
                                       updated_entry['domain']['domain_name'])
        except Exception as e:
            self.logger.debug('Failed to generate new idmap backend: %s', e)

        return updated_entry

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete idmap to backend mapping by id
        """
        entry = await self._get_instance(id)
        if entry['domain']['id'] <= dstype['DS_TYPE_DEFAULT_DOMAIN'].value:
            raise CallError(
                f'Deleting mapping for [{entry["domain"]["idmap_domain_name"]}] is not permitted.',
                errno.EPERM)
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
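
A sketch of mapping an existing idmap domain to a backend via `idmap.domaintobackend.create`, assuming the standard `middlewared.client.Client`; the domain id 6 is a placeholder for a non-system domain.

from middlewared.client import Client  # assumed import path for the middleware client

with Client() as c:
    mapping = c.call('idmap.domaintobackend.create', {
        'domain': {'id': 6},     # placeholder id of a non-system idmap domain
        'idmap_backend': 'RID',
    })
    print(mapping['id'])
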
Example #19
            data['comment'] = comment

        _id = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'sysctl')
        return await self.get_instance(_id)

    @accepts(Int('id', required=True),
             Patch(
                 'tunable_create',
                 'tunable_update',
                 ('rm', {
                     'name': 'var'
                 }),
                 ('rm', {
                     'name': 'type'
                 }),
                 ('attr', {
                     'update': True
                 }),
             ))
    async def do_update(self, _id, data):
        """
        Update Tunable of `id`.
        """
        old = await self.get_instance(_id)

        new = old.copy()
        new.update(data)
        if old == new:
Example #20
class IdmapADService(CRUDService):
    class Config:
        datastore = 'directoryservice.idmap_ad'
        datastore_prefix = 'idmap_ad_'
        datastore_extend = 'idmap.common_backend_extend'
        namespace = 'idmap.ad'

    @accepts(
        Dict('idmap_ad_create',
             Dict(
                 'domain',
                 Int('id'),
                 Str('idmap_domain_name'),
                 Str('idmap_domain_dns_domain_name'),
             ),
             Int('range_low',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Int('range_high',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Str('schema_mode',
                 default='RFC2307',
                 enum=['RFC2307', 'SFU', 'SFU20']),
             Bool('unix_primary_group', default=False),
             Bool('unix_nss_info', default=False),
             register=True))
    async def do_create(self, data):
        """
        Create an entry in the idmap backend table.
        `unix_primary_group` If True, the primary group membership is fetched from the LDAP attributes (gidNumber).
        If False, the primary group membership is calculated via the "primaryGroupID" LDAP attribute.

        `unix_nss_info` if True, winbind will retrieve the login shell and home directory from the LDAP attributes.
        If False, or if the AD LDAP entry lacks the SFU attributes, the smb4.conf parameters `template shell` and `template homedir` are used.

        `schema_mode` Defines the schema that idmap_ad should use when querying Active Directory regarding user and group information.
        This can be either the RFC2307 schema support included in Windows 2003 R2 or the Service for Unix (SFU) schema.
        For SFU 3.0 or 3.5 please choose "SFU", for SFU 2.0 please choose "SFU20". The behavior of primary group membership is
        controlled by the unix_primary_group option.
        """
        verrors = ValidationErrors()
        data = await self.middleware.call('idmap.common_backend_compress',
                                          data)
        verrors.add_child(
            'idmap_ad_create', await
            self.middleware.call('idmap._common_validate', 'ad', data))
        if verrors:
            raise verrors

        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("idmap_ad_create", "idmap_ad_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Update an entry in the idmap backend table by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        verrors.add_child(
            'idmap_ad_update', await
            self.middleware.call('idmap._common_validate', 'ad', new))
        if verrors:
            raise verrors

        new = await self.middleware.call('idmap.common_backend_compress', new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete idmap to backend mapping by id
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
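
A sketch of creating an AD idmap backend entry with `idmap.ad.create`, assuming the standard `middlewared.client.Client`; the domain id and id ranges are placeholders within the validated 1000-2147483647 window.

from middlewared.client import Client  # assumed import path for the middleware client

with Client() as c:
    entry = c.call('idmap.ad.create', {
        'domain': {'id': 6},   # placeholder idmap domain id
        'range_low': 10000,
        'range_high': 200000,
        'schema_mode': 'RFC2307',
        'unix_primary_group': False,
        'unix_nss_info': False,
    })
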
Example #21
class SharingNFSService(CRUDService):
    class Config:
        namespace = "sharing.nfs"
        datastore = "sharing.nfs_share"
        datastore_prefix = "nfs_"
        datastore_extend = "sharing.nfs.extend"

    @accepts(
        Dict(
            "sharingnfs_create",
            List("paths", items=[Dir("path")], empty=False),
            Str("comment"),
            List("networks",
                 items=[IPAddr("network", network=True)],
                 default=[]),
            List("hosts", items=[Str("host")], default=[]),
            Bool("alldirs"),
            Bool("ro"),
            Bool("quiet"),
            Str("maproot_user", required=False, default=None, null=True),
            Str("maproot_group", required=False, default=None, null=True),
            Str("mapall_user", required=False, default=None, null=True),
            Str("mapall_group", required=False, default=None, null=True),
            List(
                "security",
                default=[],
                items=[
                    Str("provider", enum=["SYS", "KRB5", "KRB5I", "KRB5P"])
                ],
            ),
            Bool("enabled", default=True),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create an NFS Share.

        `paths` is a list of valid paths which are configured to be shared on this share.

        `networks` is a list of authorized networks that are allowed to access the share, in
        "network/mask" CIDR notation. If empty, all networks are allowed.

        `hosts` is a list of IPs/hostnames which are allowed to access the share. If empty, all IPs/hostnames are
        allowed.

        `alldirs` is a boolean value which, when set, indicates that the client can mount any subdirectory of the
        selected pool or dataset.
        """
        verrors = ValidationErrors()

        await self.validate(data, "sharingnfs_create", verrors)

        if verrors:
            raise verrors

        await self.compress(data)
        paths = data.pop("paths")
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        for path in paths:
            await self.middleware.call(
                "datastore.insert",
                "sharing.nfs_share_path",
                {
                    "share_id": data["id"],
                    "path": path,
                },
            )
        await self.extend(data)

        await self._service_change("nfs", "reload")

        return data

    @accepts(Int("id"),
             Patch("sharingnfs_create", "sharingnfs_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Update NFS Share of `id`.
        """
        verrors = ValidationErrors()
        old = await self._get_instance(id)

        new = old.copy()
        new.update(data)

        await self.validate(new, "sharingnfs_update", verrors, old=old)

        if verrors:
            raise verrors

        await self.compress(new)
        paths = new.pop("paths")
        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, new,
                                   {"prefix": self._config.datastore_prefix})
        await self.middleware.call("datastore.delete",
                                   "sharing.nfs_share_path",
                                   [["share_id", "=", id]])
        for path in paths:
            await self.middleware.call(
                "datastore.insert",
                "sharing.nfs_share_path",
                {
                    "share_id": id,
                    "path": path,
                },
            )

        await self.extend(new)
        new["paths"] = paths

        await self._service_change("nfs", "reload")

        return new

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete NFS Share of `id`.
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
        await self._service_change("nfs", "reload")

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        if not data["paths"]:
            verrors.add(f"{schema_name}.paths",
                        "At least one path is required")

        if verrors:
            raise verrors

        await self.middleware.run_in_thread(self.validate_paths, data,
                                            schema_name, verrors)

        filters = []
        if old:
            filters.append(["id", "!=", old["id"]])
        other_shares = await self.middleware.call("sharing.nfs.query", filters)
        dns_cache = await self.resolve_hostnames(
            sum([share["hosts"]
                 for share in other_shares], []) + data["hosts"])
        await self.middleware.run_in_thread(self.validate_hosts_and_networks,
                                            other_shares, data, schema_name,
                                            verrors, dns_cache)

        for k in ["maproot", "mapall"]:
            if not data[f"{k}_user"] and not data[f"{k}_group"]:
                pass
            elif not data[f"{k}_user"] and data[f"{k}_group"]:
                verrors.add(
                    f"{schema_name}.{k}_user",
                    "This field is required when map group is specified")
            else:
                user = await self.middleware.call("notifier.get_user_object",
                                                  data[f"{k}_user"])
                if not user:
                    verrors.add(f"{schema_name}.{k}_user", "User not found")

                if data[f"{k}_group"]:
                    group = await self.middleware.call(
                        "notifier.get_group_object", data[f"{k}_group"])
                    if not group:
                        verrors.add(f"{schema_name}.{k}_group",
                                    "Group not found")

        if data["maproot_user"] and data["mapall_user"]:
            verrors.add(f"{schema_name}.mapall_user",
                        "maproot_user disqualifies mapall_user")

        if data["security"]:
            nfs_config = await self.middleware.call("nfs.config")
            if not nfs_config["v4"]:
                verrors.add(f"{schema_name}.security",
                            "This is not allowed when NFS v4 is disabled")

    @private
    def validate_paths(self, data, schema_name, verrors):
        dev = None
        is_mountpoint = False
        for i, path in enumerate(data["paths"]):
            stat = os.stat(path)
            if dev is None:
                dev = stat.st_dev
            else:
                if dev != stat.st_dev:
                    verrors.add(
                        f"{schema_name}.paths.{i}",
                        "Paths for a NFS share must reside within the same filesystem"
                    )

            parent = os.path.abspath(os.path.join(path, ".."))
            if os.stat(parent).st_dev != dev:
                is_mountpoint = True
                if any(
                        os.path.abspath(p).startswith(parent + "/")
                        for p in data["paths"] if p != path):
                    verrors.add(
                        f"{schema_name}.paths.{i}",
                        "You cannot share a mount point and subdirectories all at once"
                    )

        if not is_mountpoint and data["alldirs"]:
            verrors.add(f"{schema_name}.alldirs",
                        "This option can only be used for datasets")

    @private
    async def resolve_hostnames(self, hostnames):
        hostnames = list(set(hostnames))

        async def resolve(hostname):
            try:
                return (await asyncio.wait_for(
                    self.middleware.run_in_thread(socket.getaddrinfo, hostname,
                                                  None), 5))[0][4][0]
            except Exception as e:
                self.logger.warning("Unable to resolve host %r: %r", hostname,
                                    e)
                return None

        resolved_hostnames = await asyncio_map(resolve, hostnames, 8)

        return dict(zip(hostnames, resolved_hostnames))

    @private
    def validate_hosts_and_networks(self, other_shares, data, schema_name,
                                    verrors, dns_cache):
        explanation = (
            ". This is so because /etc/exports does not act like ACL and it is undefined which rule among "
            "all overlapping networks will be applied.")

        dev = os.stat(data["paths"][0]).st_dev

        used_networks = set()
        for share in other_shares:
            try:
                share_dev = os.stat(share["paths"][0]).st_dev
            except Exception:
                self.logger.warning("Failed to stat first path for %r",
                                    share,
                                    exc_info=True)
                continue

            if share_dev == dev:
                for host in share["hosts"]:
                    host = dns_cache[host]
                    if host is None:
                        continue

                    try:
                        network = ipaddress.ip_network(host)
                    except Exception:
                        self.logger.warning("Got invalid host %r", host)
                        continue
                    else:
                        used_networks.add(network)

                for network in share["networks"]:
                    try:
                        network = ipaddress.ip_network(network, strict=False)
                    except Exception:
                        self.logger.warning("Got invalid network %r", network)
                        continue
                    else:
                        used_networks.add(network)

                if not share["hosts"] and not share["networks"]:
                    used_networks.add(ipaddress.ip_network("0.0.0.0/0"))
                    used_networks.add(ipaddress.ip_network("::/0"))

                if share["alldirs"] and data["alldirs"]:
                    verrors.add(
                        f"{schema_name}.alldirs",
                        "This option is only available once per mountpoint")

        had_explanation = False
        for i, host in enumerate(data["hosts"]):
            host = dns_cache[host]
            if host is None:
                continue

            network = ipaddress.ip_network(host)
            for another_network in used_networks:
                if network.overlaps(another_network):
                    verrors.add(f"{schema_name}.hosts.{i}", (
                        f"You can't share same filesystem with overlapping networks {network} and {another_network}"
                        + ("" if had_explanation else explanation)))
                    had_explanation = True

            used_networks.add(network)

        had_explanation = False
        for i, network in enumerate(data["networks"]):
            network = ipaddress.ip_network(network, strict=False)

            for another_network in used_networks:
                if network.overlaps(another_network):
                    verrors.add(f"{schema_name}.networks.{i}", (
                        f"You can't share same filesystem with overlapping networks {network} and {another_network}"
                        + ("" if had_explanation else explanation)))
                    had_explanation = True

            used_networks.add(network)

        if not data["hosts"] and not data["networks"]:
            if used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    (f"You can't share same filesystem with all hosts twice" +
                     ("" if had_explanation else explanation)))

    @private
    async def extend(self, data):
        data["paths"] = [
            path["path"] for path in await self.middleware.call(
                "datastore.query", "sharing.nfs_share_path",
                [["share_id", "=", data["id"]]])
        ]
        data["networks"] = data.pop("network").split()
        data["hosts"] = data["hosts"].split()
        data["security"] = [s.upper() for s in data["security"]]
        return data

    @private
    async def compress(self, data):
        data["network"] = " ".join(data.pop("networks"))
        data["hosts"] = " ".join(data["hosts"])
        data["security"] = [s.lower() for s in data["security"]]
        return data
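
A sketch of creating an NFS share with `sharing.nfs.create`, assuming the standard `middlewared.client.Client`; the path, network, and mapping accounts are placeholders.

from middlewared.client import Client  # assumed import path for the middleware client

with Client() as c:
    share = c.call('sharing.nfs.create', {
        'paths': ['/mnt/tank/shared'],   # placeholder dataset path
        'networks': ['192.168.0.0/24'],  # CIDR networks allowed to mount the share
        'hosts': [],
        'security': ['SYS'],
        'maproot_user': 'root',          # placeholder maproot mapping
        'maproot_group': 'wheel',
        'enabled': True,
    })
    print(share['id'])
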
Example #22
class IdmapRFC2307Service(CRUDService):
    """
    In the rfc2307 backend, the range acts as a filter. Anything falling outside of it is ignored.
    If no user_dn is specified, then an anonymous bind is performed.
    ldap_url is only required when using a standalone server.
    """
    class Config:
        datastore = 'directoryservice.idmap_rfc2307'
        datastore_prefix = 'idmap_rfc2307_'
        datastore_extend = 'idmap.common_backend_extend'
        namespace = 'idmap.rfc2307'

    @accepts(
        Dict('idmap_rfc2307_create',
             Dict(
                 'domain',
                 Int('id'),
                 Str('idmap_domain_name'),
                 Str('idmap_domain_dns_domain_name'),
             ),
             Int('range_low',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Int('range_high',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Str('ldap_server', default='AD', enum=['AD', 'STAND-ALONE']),
             Str('bind_path_user'),
             Str('bind_path_group'),
             Bool('user_cn', default=False),
             Bool('cn_realm', default=False),
             Str('ldap_domain'),
             Str('ldap_url'),
             Str('ldap_user_dn'),
             Str('ldap_user_dn_password'),
             Str('ldap_realm'),
             Str('ssl', default='OFF', enum=['OFF', 'ON', 'START_TLS']),
             Int('certificate'),
             register=True))
    async def do_create(self, data):
        """
        Create an entry in the idmap_rfc2307 backend table.

        `ldap_server` defines the type of LDAP server to use. This can either be an LDAP server provided
        by the Active Directory Domain (ad) or a stand-alone LDAP server.

        `bind_path_user` specifies the search base where user objects can be found in the LDAP server.

        `bind_path_group` specifies the search base where group objects can be found in the LDAP server.

        `user_cn` queries the cn attribute instead of the uid attribute for the user name in LDAP.

        `cn_realm` appends @realm to cn for groups (and users if `user_cn` is set) in LDAP queries.

        `ldap_domain` when using the LDAP server in the Active Directory server, this allows specifying
        the domain used to access the Active Directory server. This allows using trust relationships
        while keeping all RFC 2307 records in one place. This parameter is optional; the default is to access
        the AD server in the current domain to query LDAP records.

        `ldap_url` when using a stand-alone LDAP server, this parameter specifies the LDAP URL for accessing the LDAP server.

        `ldap_user_dn` defines the user DN to be used for authentication.

        `ldap_realm` defines the realm to use in the user and group names. This is only required when using `cn_realm` together with
        a stand-alone LDAP server.
        """
        verrors = ValidationErrors()
        verrors.add_child(
            'idmap_rfc2307_create', await
            self.middleware.call('idmap._common_validate', 'rfc2307', data))
        if verrors:
            raise verrors

        data = await self.middleware.call('idmap.common_backend_compress',
                                          data)
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("idmap_nss_create", "idmap_nss_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Update an entry in the idmap backend table by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        verrors.add_child(
            'idmap_rfc2307_update', await
            self.middleware.call('idmap._common_validate', 'rfc2307', new))

        if verrors:
            raise verrors

        new = await self.middleware.call('idmap.common_backend_compress', new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete an idmap backend mapping by id.
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
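
A minimal usage sketch, added for illustration and not part of the original service code: assuming a `middleware` client object like the one the service methods themselves use, the public `idmap.rfc2307.create` method (the CRUD name mapped to do_create above) could be called as follows. All values, including the DNs, are hypothetical, and idmap._common_validate may require additional fields such as the `domain` sub-dict.

async def create_ad_rfc2307_idmap(middleware):
    # Hypothetical values; range_low/range_high must lie within 1000..2147483647 per the schema above.
    return await middleware.call('idmap.rfc2307.create', {
        'range_low': 100000,
        'range_high': 200000,
        'ldap_server': 'AD',
        'bind_path_user': 'CN=Users,DC=example,DC=com',     # hypothetical user search base
        'bind_path_group': 'CN=Groups,DC=example,DC=com',   # hypothetical group search base
    })
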
Example #23
class CloudSyncService(CRUDService):

    local_fs_lock_manager = FsLockManager()
    remote_fs_lock_manager = FsLockManager()

    class Config:
        datastore = "tasks.cloudsync"
        datastore_extend = "cloudsync._extend"

    @filterable
    async def query(self, filters=None, options=None):
        """
        Query all Cloud Sync Tasks with `query-filters` and `query-options`.
        """
        tasks_or_task = await super().query(filters, options)

        jobs = {}
        for j in await self.middleware.call(
                "core.get_jobs", [("method", "=", "cloudsync.sync")],
            {"order_by": ["id"]}):
            try:
                task_id = int(j["arguments"][0])
            except (IndexError, ValueError):
                continue

            if task_id in jobs and jobs[task_id]["state"] == "RUNNING":
                continue

            jobs[task_id] = j

        if isinstance(tasks_or_task, list):
            for task in tasks_or_task:
                task["job"] = jobs.get(task["id"])
        else:
            tasks_or_task["job"] = jobs.get(tasks_or_task["id"])

        return tasks_or_task

    @private
    async def _extend(self, cloud_sync):
        cloud_sync["credentials"] = cloud_sync.pop("credential")

        cloud_sync["encryption_password"] = await self.middleware.call(
            "pwenc.decrypt", cloud_sync["encryption_password"])
        cloud_sync["encryption_salt"] = await self.middleware.call(
            "pwenc.decrypt", cloud_sync["encryption_salt"])

        Cron.convert_db_format_to_schedule(cloud_sync)

        return cloud_sync

    @private
    async def _compress(self, cloud_sync):
        cloud_sync["credential"] = cloud_sync.pop("credentials")

        cloud_sync["encryption_password"] = await self.middleware.call(
            "pwenc.encrypt", cloud_sync["encryption_password"])
        cloud_sync["encryption_salt"] = await self.middleware.call(
            "pwenc.encrypt", cloud_sync["encryption_salt"])

        Cron.convert_schedule_to_db_format(cloud_sync)

        cloud_sync.pop('job', None)

        return cloud_sync

    @private
    async def _get_credentials(self, credentials_id):
        try:
            return await self.middleware.call("datastore.query",
                                              "system.cloudcredentials",
                                              [("id", "=", credentials_id)],
                                              {"get": True})
        except IndexError:
            return None

    @private
    async def _basic_validate(self, verrors, name, data):
        if data["encryption"]:
            if not data["encryption_password"]:
                verrors.add(
                    f"{name}.encryption_password",
                    "This field is required when encryption is enabled")

        credentials = await self._get_credentials(data["credentials"])
        if not credentials:
            verrors.add(f"{name}.credentials", "Invalid credentials")

        try:
            shlex.split(data["args"])
        except ValueError as e:
            verrors.add(f"{name}.args", f"Parse error: {e.args[0]}")

        if verrors:
            raise verrors

        provider = REMOTES[credentials["provider"]]

        schema = []

        if provider.buckets:
            schema.append(Str("bucket", required=True, empty=False))

        schema.append(Str("folder", required=True))

        schema.extend(provider.task_schema)

        schema.extend(self.common_task_schema(provider))

        attributes_verrors = validate_attributes(schema,
                                                 data,
                                                 additional_attrs=True)

        if not attributes_verrors:
            await provider.pre_save_task(data, credentials, verrors)

        verrors.add_child(f"{name}.attributes", attributes_verrors)

    @private
    async def _validate(self, verrors, name, data):
        await self._basic_validate(verrors, name, data)

        for i, (limit1,
                limit2) in enumerate(zip(data["bwlimit"],
                                         data["bwlimit"][1:])):
            if limit1["time"] >= limit2["time"]:
                verrors.add(
                    f"{name}.bwlimit.{i + 1}.time",
                    f"Invalid time order: {limit1['time']}, {limit2['time']}")

        if data["snapshot"]:
            if data["direction"] != "PUSH":
                verrors.add(f"{name}.snapshot",
                            "This option can only be enabled for PUSH tasks")

    @private
    async def _validate_folder(self, verrors, name, data):
        if data["direction"] == "PULL":
            folder = data["attributes"]["folder"].rstrip("/")
            if folder:
                folder_parent = os.path.normpath(os.path.join(folder, ".."))
                if folder_parent == ".":
                    folder_parent = ""
                folder_basename = os.path.basename(folder)
                ls = await self.list_directory(
                    dict(
                        credentials=data["credentials"],
                        encryption=data["encryption"],
                        filename_encryption=data["filename_encryption"],
                        encryption_password=data["encryption_password"],
                        encryption_salt=data["encryption_salt"],
                        attributes=dict(data["attributes"],
                                        folder=folder_parent),
                        args=data["args"],
                    ))
                for item in ls:
                    if item["Name"] == folder_basename:
                        if not item["IsDir"]:
                            verrors.add(f"{name}.attributes.folder",
                                        "This is not a directory")
                        break
                else:
                    verrors.add(f"{name}.attributes.folder",
                                "Directory does not exist")

        if data["direction"] == "PUSH":
            credentials = await self._get_credentials(data["credentials"])

            provider = REMOTES[credentials["provider"]]

            if provider.readonly:
                verrors.add(f"{name}.direction", "This remote is read-only")

    @accepts(
        Dict(
            "cloud_sync_create",
            Str("description", default=""),
            Str("direction", enum=["PUSH", "PULL"], required=True),
            Str("transfer_mode", enum=["SYNC", "COPY", "MOVE"], required=True),
            Str("path", required=True),
            Int("credentials", required=True),
            Bool("encryption", default=False),
            Bool("filename_encryption", default=False),
            Str("encryption_password", default=""),
            Str("encryption_salt", default=""),
            Cron("schedule", defaults={"minute": "00"}, required=True),
            Bool("follow_symlinks", default=False),
            Int("transfers",
                null=True,
                default=None,
                validators=[Range(min=1)]),
            List("bwlimit",
                 default=[],
                 items=[
                     Dict(
                         "cloud_sync_bwlimit", Str("time",
                                                   validators=[Time()]),
                         Int("bandwidth", validators=[Range(min=1)],
                             null=True))
                 ]),
            List("exclude", default=[], items=[Str("path", empty=False)]),
            Dict("attributes", additional_attrs=True, required=True),
            Bool("snapshot", default=False),
            Str("pre_script", default=""),
            Str("post_script", default=""),
            Str("args", default=""),
            Bool("enabled", default=True),
            register=True,
        ))
    async def do_create(self, cloud_sync):
        """
        Creates a new cloud_sync entry.

        .. examples(websocket)::

          Create a new cloud_sync task using Amazon S3 attributes, scheduled to run every hour.

            :::javascript
            {
              "id": "6841f242-840a-11e6-a437-00e04d680384",
              "msg": "method",
              "method": "cloudsync.create",
              "params": [{
                "description": "s3 sync",
                "path": "/mnt/tank",
                "credentials": 1,
                "minute": "00",
                "hour": "*",
                "daymonth": "*",
                "month": "*",
                "attributes": {
                  "bucket": "mybucket",
                  "folder": ""
                },
                "enabled": true
              }]
            }
        """

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        cloud_sync["id"] = await self.middleware.call("datastore.insert",
                                                      "tasks.cloudsync",
                                                      cloud_sync)
        await self.middleware.call("service.restart", "cron")

        cloud_sync = await self._extend(cloud_sync)
        return cloud_sync

    @accepts(Int("id"),
             Patch("cloud_sync_create", "cloud_sync_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Updates the cloud_sync entry `id` with `data`.
        """
        cloud_sync = await self._get_instance(id)

        # credentials is a foreign key for now
        if cloud_sync["credentials"]:
            cloud_sync["credentials"] = cloud_sync["credentials"]["id"]

        cloud_sync.update(data)

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        await self.middleware.call("datastore.update", "tasks.cloudsync", id,
                                   cloud_sync)
        await self.middleware.call("service.restart", "cron")

        cloud_sync = await self._extend(cloud_sync)
        return cloud_sync

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Deletes cloud_sync entry `id`.
        """
        await self.middleware.call("datastore.delete", "tasks.cloudsync", id)
        await self.middleware.call("alert.oneshot_delete",
                                   "CloudSyncTaskFailed", id)
        await self.middleware.call("service.restart", "cron")

    @accepts(Int("credentials_id"))
    async def list_buckets(self, credentials_id):
        credentials = await self._get_credentials(credentials_id)
        if not credentials:
            raise CallError("Invalid credentials")

        provider = REMOTES[credentials["provider"]]

        if not provider.buckets:
            raise CallError("This provider does not use buckets")

        return await self.ls({"credentials": credentials}, "")

    @accepts(
        Dict(
            "cloud_sync_ls",
            Int("credentials", required=True),
            Bool("encryption", default=False),
            Bool("filename_encryption", default=False),
            Str("encryption_password", default=""),
            Str("encryption_salt", default=""),
            Dict("attributes", required=True, additional_attrs=True),
            Str("args", default=""),
        ))
    async def list_directory(self, cloud_sync):
        """
        List contents of a remote bucket / directory.

        If the remote supports buckets, the path is constructed from the "bucket" and "folder" keys in `attributes`.
        If the remote does not support buckets, the path is constructed from the "folder" key only.
        "folder" is the directory name and "bucket" is the bucket name on the remote.

        Path examples:

        S3 Service
        `bucketname/directory/name`

        Dropbox Service
        `directory/name`


        `credentials` is a valid id of a Cloud Sync Credential which will be used to connect to the provider.
        """
        verrors = ValidationErrors()

        await self._basic_validate(verrors, "cloud_sync", dict(cloud_sync))

        if verrors:
            raise verrors

        credentials = await self._get_credentials(cloud_sync["credentials"])

        path = get_remote_path(REMOTES[credentials["provider"]],
                               cloud_sync["attributes"])

        return await self.ls(dict(cloud_sync, credentials=credentials), path)

    @private
    async def ls(self, config, path):
        async with RcloneConfig(config) as config:
            proc = await run([
                "rclone", "--config", config.config_path, "lsjson",
                "remote:" + path
            ],
                             check=False,
                             encoding="utf8")
            if proc.returncode == 0:
                return json.loads(proc.stdout)
            else:
                raise CallError(proc.stderr)

    @item_method
    @accepts(Int("id"))
    @job(lock=lambda args: "cloud_sync:{}".format(args[-1]),
         lock_queue_size=1,
         logs=True)
    async def sync(self, job, id):
        """
        Run the cloud_sync job `id`, syncing the local data to remote.
        """

        cloud_sync = await self._get_instance(id)

        credentials = cloud_sync["credentials"]

        local_path = cloud_sync["path"]
        local_direction = FsLockDirection.READ if cloud_sync[
            "direction"] == "PUSH" else FsLockDirection.WRITE

        remote_path = get_remote_path(REMOTES[credentials["provider"]],
                                      cloud_sync["attributes"])
        remote_direction = FsLockDirection.READ if cloud_sync[
            "direction"] == "PULL" else FsLockDirection.WRITE

        directions = {
            FsLockDirection.READ: "reading",
            FsLockDirection.WRITE: "writing",
        }

        job.set_progress(
            0,
            f"Locking local path {local_path!r} for {directions[local_direction]}"
        )
        async with self.local_fs_lock_manager.lock(local_path,
                                                   local_direction):
            job.set_progress(
                0,
                f"Locking remote path {remote_path!r} for {directions[remote_direction]}"
            )
            async with self.remote_fs_lock_manager.lock(
                    f"{credentials['id']}/{remote_path}", remote_direction):
                job.set_progress(0, "Starting")
                try:
                    await rclone(self.middleware, job, cloud_sync)
                    await self.middleware.call("alert.oneshot_delete",
                                               "CloudSyncTaskFailed",
                                               cloud_sync["id"])
                except Exception:
                    await self.middleware.call(
                        "alert.oneshot_create", "CloudSyncTaskFailed", {
                            "id": cloud_sync["id"],
                            "name": cloud_sync["description"],
                        })
                    raise

    @item_method
    @accepts(Int("id"))
    async def abort(self, id):
        """
        Aborts cloud sync task.
        """

        cloud_sync = await self._get_instance(id)

        if cloud_sync["job"] is None:
            return False

        if cloud_sync["job"]["state"] not in ["WAITING", "RUNNING"]:
            return False

        await self.middleware.call("core.job_abort", cloud_sync["job"]["id"])
        return True

    @accepts()
    async def providers(self):
        """
        Returns a list of dictionaries of supported providers for Cloud Sync Tasks.

        `credentials_schema` is JSON schema for credentials attributes.

        `task_schema` is JSON schema for task attributes.

        `buckets` is a boolean value which is set to "true" if the provider supports buckets.

        Example of a single provider:

        [
            {
                "name": "AMAZON_CLOUD_DRIVE",
                "title": "Amazon Cloud Drive",
                "credentials_schema": [
                    {
                        "property": "client_id",
                        "schema": {
                            "title": "Amazon Application Client ID",
                            "_required_": true,
                            "type": "string"
                        }
                    },
                    {
                        "property": "client_secret",
                        "schema": {
                            "title": "Application Key",
                            "_required_": true,
                            "type": "string"
                        }
                    }
                ],
                "credentials_oauth": null,
                "buckets": false,
                "bucket_title": "Bucket",
                "task_schema": []
            }
        ]
        """
        return sorted([{
            "name":
            provider.name,
            "title":
            provider.title,
            "credentials_schema": [{
                "property": field.name,
                "schema": field.to_json_schema()
            } for field in provider.credentials_schema],
            "credentials_oauth":
            f"{OAUTH_URL}/{provider.name.lower()}"
            if provider.credentials_oauth else None,
            "buckets":
            provider.buckets,
            "bucket_title":
            provider.bucket_title,
            "task_schema": [{
                "property": field.name,
                "schema": field.to_json_schema()
            } for field in provider.task_schema +
                            self.common_task_schema(provider)],
        } for provider in REMOTES.values()],
                      key=lambda provider: provider["title"].lower())

    def common_task_schema(self, provider):
        schema = []

        if provider.fast_list:
            schema.append(
                Bool("fast_list",
                     default=False,
                     title="Use --fast-list",
                     description=textwrap.dedent("""\
                Use fewer transactions in exchange for more RAM. This may also speed up or slow down your
                transfer. See [rclone documentation](https://rclone.org/docs/#fast-list) for more details.
            """).rstrip()))

        return schema
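
The list_directory docstring above explains how "bucket" and "folder" in `attributes` compose the remote path. The sketch below is a hypothetical illustration (not from the original code) of such a call for a bucket-based provider, assuming a `middleware` client object and an existing cloud credential with id 1.

async def list_s3_folder(middleware):
    # Lists the contents of mybucket/backups (both names hypothetical) using credential id 1.
    return await middleware.call('cloudsync.list_directory', {
        'credentials': 1,
        'attributes': {'bucket': 'mybucket', 'folder': 'backups'},
    })
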
Example #24
class IdmapRIDService(CRUDService):
    class Config:
        datastore = 'directoryservice.idmap_rid'
        datastore_prefix = 'idmap_rid_'
        datastore_extend = 'idmap.common_backend_extend'
        namespace = 'idmap.rid'

    @accepts(
        Dict('idmap_rid_create',
             Dict(
                 'domain',
                 Int('id'),
                 Str('idmap_domain_name'),
                 Str('idmap_domain_dns_domain_name'),
             ),
             Int('range_low',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Int('range_high',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             register=True))
    async def do_create(self, data):
        """
        Create an entry in the idmap_rid backend table.
        """
        verrors = ValidationErrors()
        verrors.add_child(
            'idmap_rid_create', await
            self.middleware.call('idmap._common_validate', 'rid', data))
        if verrors:
            raise verrors

        data = await self.middleware.call('idmap.common_backend_compress',
                                          data)
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("idmap_rid_create", "idmap_rid_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Update an entry in the idmap backend table by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        verrors.add_child(
            'idmap_rid_update', await
            self.middleware.call('idmap._common_validate', 'rid', new))

        if verrors:
            raise verrors

        new = await self.middleware.call('idmap.common_backend_compress', new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete an idmap backend mapping by id.
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
Example #25
class RsyncTaskService(CRUDService):

    class Config:
        datastore = 'tasks.rsync'
        datastore_prefix = 'rsync_'
        datastore_extend = 'rsynctask.rsync_task_extend'

    @private
    async def rsync_task_extend(self, data):
        data['extra'] = list(filter(None, re.split(r"\s+", data["extra"])))
        for field in ('mode', 'direction'):
            data[field] = data[field].upper()
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_rsync_task(self, data, schema):
        verrors = ValidationErrors()

        # Windows users can have spaces in their usernames
        # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808

        username = data.get('user')
        if ' ' in username:
            verrors.add(f'{schema}.user', 'User names cannot have spaces')
            raise verrors

        user = None
        with contextlib.suppress(KeyError):
            user = await self.middleware.call('dscache.get_uncached_user', username)

        if not user:
            verrors.add(f'{schema}.user', f'Provided user "{username}" does not exist')
            raise verrors

        remote_host = data.get('remotehost')
        if not remote_host:
            verrors.add(f'{schema}.remotehost', 'Please specify a remote host')

        if data.get('extra'):
            data['extra'] = ' '.join(data['extra'])
        else:
            data['extra'] = ''

        mode = data.get('mode')
        if not mode:
            verrors.add(f'{schema}.mode', 'This field is required')

        remote_module = data.get('remotemodule')
        if mode == 'MODULE' and not remote_module:
            verrors.add(f'{schema}.remotemodule', 'This field is required')

        if mode == 'SSH':
            remote_port = data.get('remoteport')
            if not remote_port:
                verrors.add(f'{schema}.remoteport', 'This field is required')

            remote_path = data.get('remotepath')
            if not remote_path:
                verrors.add(f'{schema}.remotepath', 'This field is required')

            search = os.path.join(user['pw_dir'], '.ssh', 'id_[edr]*')
            exclude_from_search = os.path.join(user['pw_dir'], '.ssh', 'id_[edr]*pub')
            key_files = set(glob.glob(search)) - set(glob.glob(exclude_from_search))
            if not key_files:
                verrors.add(
                    f'{schema}.user',
                    'In order to use rsync over SSH you need a user '
                    'with a private key (DSA/ECDSA/RSA) set up in the home directory.'
                )
            else:
                for file in glob.glob(search):
                    if '.pub' not in file:
                        # file holds a private key and its permissions should be 600
                        if os.stat(file).st_mode & 0o077 != 0:
                            verrors.add(
                                f'{schema}.user',
                                f'Permissions {oct(os.stat(file).st_mode & 0o777)} for {file} are too open. Please '
                                f'correct them by running chmod 600 {file}'
                            )

            if (
                data.get('validate_rpath') and
                remote_path and
                remote_host and
                remote_port
            ):
                if '@' in remote_host:
                    remote_username, remote_host = remote_host.rsplit('@', 1)
                else:
                    remote_username = username

                try:
                    with (await asyncio.wait_for(asyncssh.connect(
                            remote_host,
                            port=remote_port,
                            username=remote_username,
                            client_keys=key_files,
                            known_hosts=None
                    ), timeout=5)) as conn:

                        await conn.run(f'test -d {shlex.quote(remote_path)}', check=True)

                except asyncio.TimeoutError:

                    verrors.add(
                        f'{schema}.remotehost',
                        'SSH timeout occurred. Remote path cannot be validated.'
                    )

                except OSError as e:

                    if e.errno == 113:
                        verrors.add(
                            f'{schema}.remotehost',
                            f'Connection to the remote host {remote_host} on port {remote_port} failed.'
                        )
                    else:
                        verrors.add(
                            f'{schema}.remotehost',
                            e.__str__()
                        )

                except asyncssh.DisconnectError as e:

                    verrors.add(
                        f'{schema}.remotehost',
                        f'Disconnect Error [error code {e.code}] was generated when trying to '
                        f'communicate with remote host {remote_host} and remote user {remote_username}.'
                    )

                except asyncssh.ProcessError as e:

                    if e.code == '1':
                        verrors.add(
                            f'{schema}.remotepath',
                            'The remote path you specified does not exist or is not a directory. '
                            'Either create it yourself on the remote machine or uncheck the '
                            'validate_rpath field.'
                        )
                    else:
                        verrors.add(
                            f'{schema}.remotepath',
                            f'Connection to Remote Host was successful but failed to verify '
                            f'Remote Path. {e.__str__()}'
                        )

                except asyncssh.Error as e:

                    if e.__class__.__name__ in e.__str__():
                        exception_reason = e.__str__()
                    else:
                        exception_reason = e.__class__.__name__ + ' ' + e.__str__()
                    verrors.add(
                        f'{schema}.remotepath',
                        f'Remote Path could not be validated. An exception was raised. {exception_reason}'
                    )
            elif data.get('validate_rpath'):
                verrors.add(
                    f'{schema}.remotepath',
                    'Remote path could not be validated because of missing fields'
                )

        data.pop('validate_rpath', None)

        # Keeping compatibility with legacy UI
        for field in ('mode', 'direction'):
            data[field] = data[field].lower()

        return verrors, data

    @accepts(Dict(
        'rsync_task_create',
        Str('path', required=True, max_length=RSYNC_PATH_LIMIT),
        Str('user', required=True),
        Str('remotehost'),
        Int('remoteport'),
        Str('mode', enum=['MODULE', 'SSH'], default='MODULE'),
        Str('remotemodule'),
        Str('remotepath'),
        Bool('validate_rpath'),
        Str('direction', enum=['PULL', 'PUSH'], default='PUSH'),
        Str('desc'),
        Cron(
            'schedule',
            defaults={'minute': '00'},
        ),
        Bool('recursive'),
        Bool('times'),
        Bool('compress'),
        Bool('archive'),
        Bool('delete'),
        Bool('quiet'),
        Bool('preserveperm'),
        Bool('preserveattr'),
        Bool('delayupdates'),
        List('extra', items=[Str('extra')]),
        Bool('enabled'),
        register=True,
    ))
    async def do_create(self, data):
        """
        Create a Rsync Task.

        See the comment in Rsyncmod about `path` length limits.

        `remotehost` is the IP address or hostname of the remote system. If the username differs on the remote host,
        the "username@remote_host" format should be used.

        `mode` selects the operating mechanism for rsync, i.e. rsync module mode or rsync SSH mode.

        `remotemodule` is the name of the remote module; this attribute must be specified when `mode` is set to MODULE.

        `remotepath` specifies the path on the remote system.

        `validate_rpath` is a boolean which, when set, validates the existence of the remote path.

        `direction` specifies if data should be PULLED or PUSHED from the remote system.

        `compress` when set reduces the size of the data which is to be transmitted.

        `archive` when set makes rsync run recursively, preserving symlinks, permissions, modification times, group,
        and special files.

        `delete` when set deletes files in the destination directory which do not exist in the source directory.

        `preserveperm` when set preserves original file permissions.

        .. examples(websocket)::

          Create a Rsync Task which pulls data from a remote system every 5 minutes.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "rsynctask.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "desc": "Test rsync task",
                    "user": "******",
                    "mode": "MODULE",
                    "remotehost": "[email protected]",
                    "compress": true,
                    "archive": true,
                    "direction": "PULL",
                    "path": "/mnt/vol1/rsync_dataset",
                    "remotemodule": "remote_module1"
                }]
            }
        """
        verrors, data = await self.validate_rsync_task(data, 'rsync_task_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )
        await self.middleware.call('service.restart', 'cron')

        return await self._get_instance(data['id'])

    @accepts(
        Int('id', validators=[Range(min=1)]),
        Patch('rsync_task_create', 'rsync_task_update', ('attr', {'update': True}))
    )
    async def do_update(self, id, data):
        """
        Update Rsync Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)], options={'get': True})

        new = old.copy()
        new.update(data)

        verrors, data = await self.validate_rsync_task(new, 'rsync_task_update')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )
        await self.middleware.call('service.restart', 'cron')

        return await self.query(filters=[('id', '=', id)], options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete Rsync Task of `id`.
        """
        res = await self.middleware.call('datastore.delete', self._config.datastore, id)
        await self.middleware.call('service.restart', 'cron')
        return res

    @private
    async def commandline(self, id):
        """
        Helper method to generate the rsync command avoiding code duplication.
        """
        rsync = await self._get_instance(id)
        path = shlex.quote(rsync['path'])

        line = [
            '/usr/bin/lockf', '-s', '-t', '0', '-k', path, '/usr/local/bin/rsync'
        ]
        for name, flag in (
            ('archive', '-a'),
            ('compress', '-z'),
            ('delayupdates', '--delay-updates'),
            ('delete', '--delete-delay'),
            ('preserveattr', '-X'),
            ('preserveperm', '-p'),
            ('recursive', '-r'),
            ('times', '-t'),
        ):
            if rsync[name]:
                line.append(flag)
        if rsync['extra']:
            line.append(' '.join(rsync['extra']))

        # Do not use username if one is specified in host field
        # See #5096 for more details
        if '@' in rsync['remotehost']:
            remote = rsync['remotehost']
        else:
            remote = f'"{rsync["user"]}"@{rsync["remotehost"]}'

        if rsync['mode'] == 'MODULE':
            module_args = [path, f'{remote}::"{rsync["remotemodule"]}"']
            if rsync['direction'] != 'PUSH':
                module_args.reverse()
            line += module_args
        else:
            line += [
                '-e',
                f'"ssh -p {rsync["remoteport"]} -o BatchMode=yes -o StrictHostKeyChecking=yes"'
            ]
            path_args = [path, f'{remote}:"{shlex.quote(rsync["remotepath"])}"']
            if rsync['direction'] != 'PUSH':
                path_args.reverse()
            line += path_args

        if rsync['quiet']:
            line += ['>', '/dev/null', '2>&1']

        return ' '.join(line)
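
    # Illustrative note (hypothetical values, not from the original code): for an SSH-mode PUSH
    # task with path '/mnt/tank/data', user 'backup', remotehost '10.0.0.5', remoteport 22,
    # remotepath '/backup', and only 'archive' and 'compress' enabled, commandline() above
    # would return roughly:
    #   /usr/bin/lockf -s -t 0 -k /mnt/tank/data /usr/local/bin/rsync -a -z
    #   -e "ssh -p 22 -o BatchMode=yes -o StrictHostKeyChecking=yes"
    #   /mnt/tank/data "backup"@10.0.0.5:"/backup"
    # joined into a single space-separated string.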

    @item_method
    @accepts(Int('id'))
    @job(lock=lambda args: args[-1], logs=True)
    def run(self, job, id):
        """
        Job to run rsync task of `id`.

        Output is saved to job log excerpt as well as syslog.
        """
        rsync = self.middleware.call_sync('rsynctask._get_instance', id)
        commandline = self.middleware.call_sync('rsynctask.commandline', id)

        cp = run_command_with_user_context(
            commandline, rsync['user'], lambda v: job.logs_fd.write(v)
        )

        for klass in ('RsyncSuccess', 'RsyncFailed') if not rsync['quiet'] else ():
            self.middleware.call_sync('alert.oneshot_delete', klass, rsync['id'])

        if cp.returncode != 0:
            if not rsync['quiet']:
                self.middleware.call_sync('alert.oneshot_create', 'RsyncFailed', rsync)

            raise CallError(
                f'rsync command returned {cp.returncode}. Check logs for further information.'
            )
        elif not rsync['quiet']:
            self.middleware.call_sync('alert.oneshot_create', 'RsyncSuccess', rsync)
Example #26
File: alert.py Project: MrYHM/freenas
class AlertServiceService(CRUDService):
    class Config:
        datastore = "system.alertservice"
        datastore_extend = "alertservice._extend"
        datastore_order_by = ["name"]

    @accepts()
    async def list_types(self):
        return [{
            "name": name,
            "title": factory.title,
        } for name, factory in sorted(ALERT_SERVICES_FACTORIES.items(),
                                      key=lambda i: i[1].title.lower())]

    @private
    async def _extend(self, service):
        try:
            service["type__title"] = ALERT_SERVICES_FACTORIES[
                service["type"]].title
        except KeyError:
            service["type__title"] = "<Unknown>"

        return service

    @private
    async def _compress(self, service):
        return service

    @private
    async def _validate(self, service, schema_name):
        verrors = ValidationErrors()

        factory = ALERT_SERVICES_FACTORIES.get(service["type"])
        if factory is None:
            verrors.add(f"{schema_name}.type", "This field has invalid value")
            raise verrors

        try:
            factory.validate(service["attributes"])
        except ValidationErrors as e:
            verrors.add_child(f"{schema_name}.attributes", e)

        validate_settings(verrors, f"{schema_name}.settings",
                          service["settings"])

        if verrors:
            raise verrors

    @accepts(
        Dict(
            "alert_service_create",
            Str("name"),
            Str("type"),
            Dict("attributes", additional_attrs=True),
            Bool("enabled"),
            Dict("settings", additional_attrs=True),
            register=True,
        ))
    async def do_create(self, data):
        await self._validate(data, "alert_service_create")

        data["id"] = await self.middleware.call("datastore.insert",
                                                self._config.datastore, data)

        await self._extend(data)

        return data

    @accepts(Int("id"),
             Patch(
                 "alert_service_create",
                 "alert_service_update",
                 ("attr", {
                     "update": True
                 }),
             ))
    async def do_update(self, id, data):
        old = await self.middleware.call(
            "datastore.query", self._config.datastore, [("id", "=", id)], {
                "extend": self._config.datastore_extend,
                "get": True
            })

        new = old.copy()
        new.update(data)

        await self._validate(data, "alert_service_update")

        await self._compress(data)

        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, data)

        await self._extend(new)

        return new

    @accepts(Int("id"))
    async def do_delete(self, id):
        return await self.middleware.call("datastore.delete",
                                          self._config.datastore, id)

    @accepts(
        Patch(
            "alert_service_create",
            "alert_service_test",
            ("attr", {
                "update": True
            }),
        ))
    async def test(self, data):
        await self._validate(data, "alert_service_test")

        factory = ALERT_SERVICES_FACTORIES.get(data["type"])
        if factory is None:
            self.logger.error("Alert service %r does not exist", data["type"])
            return False

        try:
            alert_service = factory(self.middleware, data["attributes"])
        except Exception:
            self.logger.error(
                "Error creating alert service %r with parameters=%r",
                data["type"],
                data["attributes"],
                exc_info=True)
            return False

        test_alert = Alert(
            title="Test alert",
            node="A",
            datetime=datetime.utcnow(),
            level=AlertLevel.INFO,
        )

        try:
            await alert_service.send([test_alert], [], [test_alert])
        except Exception:
            self.logger.error("Error in alert service %r",
                              data["type"],
                              exc_info=True)
            return False

        return True
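
To illustrate the test() flow above, here is a hypothetical sketch (not part of the original code), assuming a `middleware` client object and that the service is exposed under the `alertservice` namespace. Real factories usually require factory-specific `attributes`, so the empty placeholder dicts below may fail validation; this only shows the shape of the call.

async def test_first_alert_service_type(middleware):
    # Pick any available alert service type and try sending a test alert with placeholder settings.
    types = await middleware.call('alertservice.list_types')
    return await middleware.call('alertservice.test', {
        'name': 'test',
        'type': types[0]['name'],
        'attributes': {},   # factory-specific fields would go here
        'enabled': True,
        'settings': {},
    })
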
Example #27
class UserService(CRUDService):
    class Config:
        datastore = 'account.bsdusers'
        datastore_extend = 'user.user_extend'
        datastore_prefix = 'bsdusr_'

    @private
    async def user_extend(self, user):

        # Get group membership
        user['groups'] = [
            gm['group']['id'] for gm in await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership', [(
                    'user', '=', user['id'])], {'prefix': 'bsdgrpmember_'})
        ]

        # Get authorized keys
        keysfile = f'{user["home"]}/.ssh/authorized_keys'
        user['sshpubkey'] = None
        if os.path.exists(keysfile):
            try:
                with open(keysfile, 'r') as f:
                    user['sshpubkey'] = f.read()
            except Exception:
                pass
        return user

    @accepts(
        Dict(
            'user_create',
            Int('uid'),
            Str('username', required=True),
            Int('group'),
            Bool('group_create', default=False),
            Str('home', default='/nonexistent'),
            Str('home_mode', default='755'),
            Str('shell', default='/bin/csh'),
            Str('full_name', required=True),
            Str('email'),
            Str('password'),
            Bool('password_disabled', default=False),
            Bool('locked', default=False),
            Bool('microsoft_account', default=False),
            Bool('sudo', default=False),
            Str('sshpubkey'),
            List('groups'),
            Dict('attributes', additional_attrs=True),
            register=True,
        ))
    async def do_create(self, data):

        verrors = ValidationErrors()

        if (not data.get('group') and not data.get('group_create')) or (
                data.get('group') is not None and data.get('group_create')):
            verrors.add(
                'group', f'Enter either a group name or create a new group to '
                'continue.', errno.EINVAL)

        await self.__common_validation(verrors, data)

        if data.get('sshpubkey') and not data['home'].startswith('/mnt'):
            verrors.add(
                'sshpubkey',
                'The home directory is not writable. Leave this field blank.')

        if verrors:
            raise verrors

        groups = data.pop('groups') or []
        create = data.pop('group_create')

        if create:
            group = await self.middleware.call(
                'group.query', [('group', '=', data['username'])])
            if group:
                group = group[0]
            else:
                group = await self.middleware.call('group.create',
                                                   {'name': data['username']})
                group = (await self.middleware.call('group.query',
                                                    [('id', '=', group)]))[0]
            data['group'] = group['id']
        else:
            group = await self.middleware.call('group.query',
                                               [('id', '=', data['group'])])
            if not group:
                raise CallError(f'Group {data["group"]} not found')
            group = group[0]

        # Is this a new directory or not? Let's not nuke existing directories,
        # e.g. /, /root, /mnt/tank/my-dataset, etc ;).
        new_homedir = False
        home_mode = data.pop('home_mode')
        if data['home'] and data['home'] != '/nonexistent':
            try:
                os.makedirs(data['home'], mode=int(home_mode, 8))
                os.chown(data['home'], data['uid'], group['gid'])
            except FileExistsError:
                if not os.path.isdir(data['home']):
                    raise CallError(
                        'Path for home directory already '
                        'exists and is not a directory', errno.EEXIST)

                # If it exists, ensure the user is owner
                os.chown(data['home'], data['uid'], group['gid'])
            except OSError as oe:
                raise CallError('Failed to create the home directory '
                                f'({data["home"]}) for user: {oe}')
            else:
                new_homedir = True
            if os.stat(data['home']).st_dev == os.stat('/mnt').st_dev:
                raise CallError(
                    f'The path for the home directory "({data["home"]})" '
                    'must include a volume or dataset.')

        if not data.get('uid'):
            data['uid'] = await self.get_next_uid()

        pk = None  # Make sure pk exists to rollback in case of an error
        try:
            password = await self.__set_password(data)
            sshpubkey = data.pop('sshpubkey',
                                 None)  # datastore does not have sshpubkey

            pk = await self.middleware.call('datastore.insert',
                                            'account.bsdusers', data,
                                            {'prefix': 'bsdusr_'})

            await self.__set_groups(pk, groups)

        except Exception:
            if pk is not None:
                await self.middleware.call('datastore.delete',
                                           'account.bsdusers', pk)
            if new_homedir:
                # Be as atomic as possible when creating the user if
                # commands failed to execute cleanly.
                shutil.rmtree(data['home'])
            raise

        await self.middleware.call('service.reload', 'user')

        await self.__set_smbpasswd(data['username'], password)

        if os.path.exists(data['home']):
            for f in os.listdir(SKEL_PATH):
                if f.startswith('dot'):
                    dest_file = os.path.join(data['home'], f[3:])
                else:
                    dest_file = os.path.join(data['home'], f)
                if not os.path.exists(dest_file):
                    shutil.copyfile(os.path.join(SKEL_PATH, f), dest_file)
                    os.chown(dest_file, data['uid'], group['gid'])

            data['sshpubkey'] = sshpubkey
            try:
                await self.__update_sshpubkey(data['home'], data,
                                              group['group'])
            except PermissionError as e:
                self.logger.warn('Failed to update authorized keys',
                                 exc_info=True)
                raise CallError(f'Failed to update authorized keys: {e}')

        return pk

    @accepts(
        Int('id'),
        Patch(
            'user_create',
            'user_update',
            ('attr', {
                'update': True
            }),
            ('rm', {
                'name': 'group_create'
            }),
        ),
    )
    async def do_update(self, pk, data):

        user = await self._get_instance(pk)

        verrors = ValidationErrors()

        if 'group' in data:
            group = await self.middleware.call('datastore.query',
                                               'account.bsdgroups',
                                               [('id', '=', data['group'])])
            if not group:
                verrors.add('group', f'Group {data["group"]} not found',
                            errno.ENOENT)
            group = group[0]
        else:
            group = user['group']
            user['group'] = group['id']

        await self.__common_validation(verrors, data, pk=pk)

        home = data.get('home') or user['home']
        # root user (uid 0) is an exception to the rule
        if data.get('sshpubkey'
                    ) and not home.startswith('/mnt') and user['uid'] != 0:
            verrors.add('sshpubkey',
                        'Home directory is not writable, leave this blank.')

        # Do not allow attributes to be changed for builtin user
        if user['builtin']:
            for i in ('group', 'home', 'home_mode', 'uid', 'username'):
                if i in data:
                    verrors.add(i, 'This attribute cannot be changed')

        if verrors:
            raise verrors

        # Copy the home directory if it changed
        if ('home' in data
                and data['home'] not in (user['home'], '/nonexistent')
                and not data['home'].startswith(f'{user["home"]}/')):
            home_copy = True
            home_old = user['home']
        else:
            home_copy = False

        # After this point user dict has values from data
        user.update(data)

        if home_copy and not os.path.isdir(user['home']):
            try:
                os.makedirs(user['home'])
                os.chown(user['home'], user['uid'], group['bsdgrp_gid'])
            except OSError:
                self.logger.warn('Failed to chown homedir', exc_info=True)
            if not os.path.isdir(user['home']):
                raise CallError(f'{user["home"]} is not a directory')

        home_mode = user.pop('home_mode', None)
        if user['builtin']:
            home_mode = None

        def set_home_mode():
            if home_mode is not None:
                try:
                    os.chmod(user['home'], int(home_mode, 8))
                except OSError:
                    self.logger.warn('Failed to set homedir mode',
                                     exc_info=True)

        try:
            await self.__update_sshpubkey(
                home_old if home_copy else user['home'],
                user,
                group['bsdgrp_group'],
            )
        except PermissionError as e:
            self.logger.warn('Failed to update authorized keys', exc_info=True)
            raise CallError(f'Failed to update authorized keys: {e}')

        if home_copy:

            def do_home_copy():
                try:
                    subprocess.run(
                        f"/usr/bin/su - {user['username']} -c '/bin/cp -a {home_old}/ {user['home']}/'",
                        shell=True,
                        check=True)
                except subprocess.CalledProcessError as e:
                    self.logger.warn(f"Failed to copy homedir: {e}")
                set_home_mode()

            asyncio.ensure_future(
                self.middleware.run_in_io_thread(do_home_copy))
        else:
            set_home_mode()

        user.pop('sshpubkey', None)
        password = await self.__set_password(user)

        if 'groups' in user:
            groups = user.pop('groups')
            await self.__set_groups(pk, groups)

        await self.middleware.call('datastore.update', 'account.bsdusers', pk,
                                   user, {'prefix': 'bsdusr_'})

        await self.middleware.call('service.reload', 'user')

        await self.__set_smbpasswd(user['username'], password)

        return pk

    @accepts(Int('id'), Dict('options', Bool('delete_group', default=True)))
    async def do_delete(self, pk, options=None):

        user = await self._get_instance(pk)

        if user['builtin']:
            raise CallError('Cannot delete a built-in user', errno.EINVAL)

        if options['delete_group'] and not user['group']['bsdgrp_builtin']:
            count = await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership',
                [('group', '=', user['group']['id'])], {
                    'prefix': 'bsdgrpmember_',
                    'count': True
                })
            count2 = await self.middleware.call(
                'datastore.query', 'account.bsdusers',
                [('group', '=', user['group']['id']), ('id', '!=', pk)], {
                    'prefix': 'bsdusr_',
                    'count': True
                })
            if count == 0 and count2 == 0:
                try:
                    await self.middleware.call('group.delete',
                                               user['group']['id'])
                except Exception:
                    self.logger.warn(
                        f'Failed to delete primary group of {user["username"]}',
                        exc_info=True)

        await run('smbpasswd', '-x', user['username'], check=False)

        if await self.middleware.call('notifier.common', 'system',
                                      'domaincontroller_enabled'):
            await self.middleware.call('notifier.samba4', 'user_delete',
                                       [user['username']])

        # TODO: add a hook in CIFS service
        cifs = await self.middleware.call('datastore.query', 'services.cifs',
                                          [], {'prefix': 'cifs_srv_'})
        if cifs:
            cifs = cifs[0]
            if cifs['guest'] == user['username']:
                await self.middleware.call('datastore.update', 'services.cifs',
                                           cifs['id'], {'guest': 'nobody'},
                                           {'prefix': 'cifs_srv_'})

        await self.middleware.call('datastore.delete', 'account.bsdusers', pk)
        await self.middleware.call('service.reload', 'user')

        return pk
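
    # Hedged usage note (not from the original code): the `options` dict above allows keeping the
    # user's non-builtin primary group when deleting, e.g.
    #   await middleware.call('user.delete', pk, {'delete_group': False})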

    @item_method
    @accepts(
        Int('id'),
        Str('key'),
        Any('value'),
    )
    async def set_attribute(self, pk, key, value):
        """
        Set user general purpose `attributes` dictionary `key` to `value`.

        e.g. Setting key="foo" value="var" will result in {"attributes": {"foo": "bar"}}
        """
        user = await self._get_instance(pk)
        user.pop('group')

        user['attributes'][key] = value
        await self.middleware.call('datastore.update', 'account.bsdusers', pk,
                                   user, {'prefix': 'bsdusr_'})

        return True

    @item_method
    @accepts(
        Int('id'),
        Str('key'),
    )
    async def pop_attribute(self, pk, key):
        """
        Remove user general purpose `attributes` dictionary `key`.
        """
        user = await self._get_instance(pk)
        user.pop('group')

        if key in user['attributes']:
            user['attributes'].pop(key)
            await self.middleware.call('datastore.update', 'account.bsdusers',
                                       pk, user, {'prefix': 'bsdusr_'})
            return True
        else:
            return False

    @accepts()
    async def get_next_uid(self):
        """
        Get the next available/free uid.
        """
        last_uid = 999
        for i in await self.middleware.call('datastore.query',
                                            'account.bsdusers',
                                            [('builtin', '=', False)], {
                                                'order_by': ['uid'],
                                                'prefix': 'bsdusr_'
                                            }):
            # If the difference between the last uid and the current one is
            # bigger than 1, it means we have a gap and can use it.
            if i['uid'] - last_uid > 1:
                return last_uid + 1
            last_uid = i['uid']
        return last_uid + 1
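
    # Illustrative walk-through of the gap search above (hypothetical uids): with non-builtin uids
    # [1000, 1001, 1005], the first two differences are 1, but 1005 - 1001 > 1, so the method
    # returns 1002; when there is no gap at all it falls through to last_uid + 1.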

    async def __common_validation(self, verrors, data, pk=None):

        exclude_filter = [('id', '!=', pk)] if pk else []

        if 'username' in data:
            pw_checkname(verrors, 'username', data['username'])

            if await self.middleware.call(
                    'datastore.query', 'account.bsdusers',
                [('username', '=', data['username'])] + exclude_filter,
                {'prefix': 'bsdusr_'}):
                verrors.add(
                    'username',
                    f'The username "{data["username"]}" already exists.',
                    errno.EEXIST)

        password = data.get('password')
        if password and '?' in password:
            # See bug #4098
            verrors.add(
                'password',
                'An SMB issue prevents creating passwords containing a '
                'question mark (?).', errno.EINVAL)
        elif not pk and not password and not data.get('password_disabled'):
            verrors.add('password', 'Password is required')
        elif data.get('password_disabled') and password:
            verrors.add(
                'password_disabled',
                'Leave "Password" blank when "Disable password login" '
                'is checked.')

        if 'home' in data:
            if ':' in data['home']:
                verrors.add('home',
                            '"Home Directory" cannot contain colons (:).')
            if not data['home'].startswith(
                    '/mnt/') and data['home'] != '/nonexistent':
                verrors.add(
                    'home', '"Home Directory" must begin with /mnt/ or set to '
                    '/nonexistent.')

        if 'groups' in data:
            groups = data.get('groups') or []
            if groups and len(groups) > 64:
                verrors.add(
                    'groups',
                    'A user cannot belong to more than 64 auxiliary groups.')

        if 'full_name' in data and ':' in data['full_name']:
            verrors.add('full_name',
                        'The ":" character is not allowed in a "Full Name".')

    async def __set_password(self, data):
        if 'password' not in data:
            return
        password = data.pop('password')
        if password:
            data['unixhash'] = crypted_password(password)
            # See http://samba.org.ru/samba/docs/man/manpages/smbpasswd.5.html
            data[
                'smbhash'] = f'{data["username"]}:{data["uid"]}:{"X" * 32}:{nt_password(password)}:[U          ]:LCT-{int(time.time()):X}:'
        else:
            data['unixhash'] = '*'
            data['smbhash'] = '*'
        return password

    async def __set_smbpasswd(self, username, password):
        """
        Samba passwords are currently set with smbpasswd, which can only
        happen after the user already exists in master.passwd. That is why
        setting the password is split into two methods/steps.
        """
        if not password:
            return
        proc = await Popen(['smbpasswd', '-D', '0', '-s', '-a', username],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           stdin=subprocess.PIPE)
        await proc.communicate(input=f'{password}\n{password}\n'.encode())

    async def __set_groups(self, pk, groups):

        groups = set(groups)
        existing_ids = set()
        for gm in await self.middleware.call('datastore.query',
                                             'account.bsdgroupmembership',
                                             [('user', '=', pk)],
                                             {'prefix': 'bsdgrpmember_'}):
            if gm['id'] not in groups:
                await self.middleware.call('datastore.delete',
                                           'account.bsdgroupmembership',
                                           gm['id'])
            else:
                existing_ids.add(gm['id'])

        for _id in groups - existing_ids:
            group = await self.middleware.call('datastore.query',
                                               'account.bsdgroups',
                                               [('id', '=', _id)],
                                               {'prefix': 'bsdgrp_'})
            if not group:
                raise CallError(f'Group {_id} not found', errno.ENOENT)
            await self.middleware.call('datastore.insert',
                                       'account.bsdgroupmembership', {
                                           'group': _id,
                                           'user': pk
                                       }, {'prefix': 'bsdgrpmember_'})

    async def __update_sshpubkey(self, homedir, user, group):
        if 'sshpubkey' not in user:
            return
        if not os.path.isdir(homedir):
            return

        sshpath = f'{homedir}/.ssh'
        keysfile = f'{sshpath}/authorized_keys'

        pubkey = user.get('sshpubkey') or ''
        pubkey = pubkey.strip()
        if pubkey == '':
            try:
                os.unlink(keysfile)
            except OSError:
                pass
            return

        oldpubkey = ''
        try:
            with open(keysfile, 'r') as f:
                oldpubkey = f.read().strip()
        except Exception:
            pass

        if pubkey == oldpubkey:
            return

        if not os.path.isdir(sshpath):
            os.mkdir(sshpath, mode=0o700)
        if not os.path.isdir(sshpath):
            raise CallError(f'{sshpath} is not a directory')
        with open(keysfile, 'w') as f:
            f.write(pubkey)
            f.write('\n')
        os.chmod(keysfile, 0o600)
        await run('/usr/sbin/chown',
                  '-R',
                  f'{user["username"]}:{group}',
                  sshpath,
                  check=False)
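
A note on the get_next_uid method above: it walks non-builtin users ordered by uid and returns the first free uid above 999, reusing gaps left by deleted accounts. The same gap-finding idea as a minimal standalone sketch (a plain Python list stands in for the datastore query, and the floor value is an assumption mirroring the 999 constant above):

def next_free_uid(existing_uids, floor=1000):
    """Return the first unused uid >= floor, reusing gaps between existing uids."""
    last_uid = floor - 1
    for uid in sorted(existing_uids):
        if uid < floor:
            # Ignore system uids below the floor, mirroring the builtin filter above.
            continue
        if uid - last_uid > 1:
            # A jump larger than 1 means an unused uid sits in the gap.
            return last_uid + 1
        last_uid = uid
    return last_uid + 1

# next_free_uid([1000, 1001, 1003]) -> 1002
# next_free_uid([]) -> 1000
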
Example #28
class UserService(CRUDService):
    class Config:
        datastore = 'account.bsdusers'
        datastore_extend = 'user.user_extend'
        datastore_prefix = 'bsdusr_'

    @private
    async def user_extend(self, user):

        # Normalize email, empty is really null
        if user['email'] == '':
            user['email'] = None

        # Get group membership
        user['groups'] = [
            gm['group']['id'] for gm in await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership', [(
                    'user', '=', user['id'])], {'prefix': 'bsdgrpmember_'})
        ]

        # Get authorized keys
        keysfile = f'{user["home"]}/.ssh/authorized_keys'
        user['sshpubkey'] = None
        if os.path.exists(keysfile):
            try:
                with open(keysfile, 'r') as f:
                    user['sshpubkey'] = f.read()
            except Exception:
                pass
        return user

    @private
    async def user_compress(self, user):
        if 'local' in user:
            user.pop('local')
        return user

    @filterable
    async def query(self, filters=None, options=None):
        """
        Query users with `query-filters` and `query-options`. As a performance optimization, only local users
        will be queried by default.

        Users from directory services such as NIS, LDAP, or Active Directory will be included in query results
        if the option `{'extra': {'search_dscache': True}}` is specified.
        """
        if not filters:
            filters = []
        filters += self._config.datastore_filters or []

        options = options or {}
        options['extend'] = self._config.datastore_extend
        options['extend_context'] = self._config.datastore_extend_context
        options['prefix'] = self._config.datastore_prefix

        datastore_options = options.copy()
        datastore_options.pop('count', None)
        datastore_options.pop('get', None)

        extra = options.get('extra', {})
        dssearch = extra.pop('search_dscache', False)

        if dssearch:
            return await self.middleware.call('dscache.query', 'USERS',
                                              filters, options)

        result = await self.middleware.call('datastore.query',
                                            self._config.datastore, [],
                                            datastore_options)
        for entry in result:
            entry.update({'local': True})
        return await self.middleware.run_in_thread(filter_list, result,
                                                   filters, options)

    @accepts(
        Dict(
            'user_create',
            Int('uid'),
            Str('username', required=True, max_length=16),
            Int('group'),
            Bool('group_create', default=False),
            Str('home', default='/nonexistent'),
            Str('home_mode', default='755'),
            Str('shell', default='/bin/csh'),
            Str('full_name', required=True),
            Str('email', validators=[Email()], null=True, default=None),
            Str('password', private=True),
            Bool('password_disabled', default=False),
            Bool('locked', default=False),
            Bool('microsoft_account', default=False),
            Bool('sudo', default=False),
            Str('sshpubkey', null=True, max_length=None),
            List('groups', default=[]),
            Dict('attributes', additional_attrs=True),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a new user.

        If `uid` is not provided it is automatically filled with the next one available.

        `group` is required if `group_create` is false.

        `password` is required if `password_disabled` is false.

        Available choices for `shell` can be retrieved with `user.shell_choices`.

        `attributes` is a general-purpose object for storing arbitrary user information.
        """
        verrors = ValidationErrors()

        if (not data.get('group') and not data.get('group_create')) or (
                data.get('group') is not None and data.get('group_create')):
            verrors.add(
                'user_create.group',
                'Enter either a group name or create a new group to '
                'continue.', errno.EINVAL)

        await self.__common_validation(verrors, data, 'user_create')

        if data.get('sshpubkey') and not data['home'].startswith('/mnt'):
            verrors.add(
                'user_create.sshpubkey',
                'The home directory is not writable. Leave this field blank.')

        verrors.check()

        groups = data.pop('groups')
        create = data.pop('group_create')

        if create:
            group = await self.middleware.call(
                'group.query', [('group', '=', data['username'])])
            if group:
                group = group[0]
            else:
                group = await self.middleware.call('group.create',
                                                   {'name': data['username']})
                group = (await self.middleware.call('group.query',
                                                    [('id', '=', group)]))[0]
            data['group'] = group['id']
        else:
            group = await self.middleware.call('group.query',
                                               [('id', '=', data['group'])])
            if not group:
                raise CallError(f'Group {data["group"]} not found')
            group = group[0]

        # Is this a new directory or not? Let's not nuke existing directories,
        # e.g. /, /root, /mnt/tank/my-dataset, etc ;).
        new_homedir = False
        home_mode = data.pop('home_mode')
        if data['home'] and data['home'] != '/nonexistent':
            try:
                try:
                    os.makedirs(data['home'], mode=int(home_mode, 8))
                    new_homedir = True
                    await self.middleware.call(
                        'filesystem.setperm', {
                            'path': data['home'],
                            'mode': home_mode,
                            'uid': data['uid'],
                            'gid': group['gid'],
                            'options': {
                                'stripacl': True
                            }
                        })
                except FileExistsError:
                    if not os.path.isdir(data['home']):
                        raise CallError(
                            'Path for home directory already '
                            'exists and is not a directory', errno.EEXIST)

                    # If it exists, ensure the user is owner.
                    await self.middleware.call(
                        'filesystem.chown', {
                            'path': data['home'],
                            'uid': data['uid'],
                            'gid': group['gid'],
                        })
                except OSError as oe:
                    raise CallError('Failed to create the home directory '
                                    f'({data["home"]}) for user: {oe}')
                if os.stat(data['home']).st_dev == os.stat('/mnt').st_dev:
                    raise CallError(
                        f'The path for the home directory ({data["home"]}) '
                        'must include a volume or dataset.')
            except Exception:
                if new_homedir:
                    shutil.rmtree(data['home'])
                raise

        if not data.get('uid'):
            data['uid'] = await self.get_next_uid()

        pk = None  # Make sure pk exists to rollback in case of an error
        data = await self.user_compress(data)
        try:
            await self.__set_password(data)
            sshpubkey = data.pop('sshpubkey',
                                 None)  # datastore does not have sshpubkey

            pk = await self.middleware.call('datastore.insert',
                                            'account.bsdusers', data,
                                            {'prefix': 'bsdusr_'})

            await self.__set_groups(pk, groups)

        except Exception:
            if pk is not None:
                await self.middleware.call('datastore.delete',
                                           'account.bsdusers', pk)
            if new_homedir:
                # Remove the home directory we just created so a failed
                # user creation does not leave stray state behind.
                shutil.rmtree(data['home'])
            raise

        await self.middleware.call('service.reload', 'user')

        await self.__set_smbpasswd(data['username'])

        if os.path.exists(data['home']):
            for f in os.listdir(SKEL_PATH):
                if f.startswith('dot'):
                    dest_file = os.path.join(data['home'], f[3:])
                else:
                    dest_file = os.path.join(data['home'], f)
                if not os.path.exists(dest_file):
                    shutil.copyfile(os.path.join(SKEL_PATH, f), dest_file)
                    await self.middleware.call(
                        'filesystem.chown', {
                            'path': dest_file,
                            'uid': data['uid'],
                            'gid': group['gid'],
                            'options': {
                                'recursive': True
                            }
                        })

            data['sshpubkey'] = sshpubkey
            try:
                await self.__update_sshpubkey(data['home'], data,
                                              group['group'])
            except PermissionError as e:
                self.logger.warn('Failed to update authorized keys',
                                 exc_info=True)
                raise CallError(f'Failed to update authorized keys: {e}')

        return pk

    @accepts(
        Int('id'),
        Patch(
            'user_create',
            'user_update',
            ('attr', {
                'update': True
            }),
            ('rm', {
                'name': 'group_create'
            }),
        ),
    )
    async def do_update(self, pk, data):
        """
        Update attributes of an existing user.
        """

        user = await self._get_instance(pk)

        verrors = ValidationErrors()

        if 'group' in data:
            group = await self.middleware.call('datastore.query',
                                               'account.bsdgroups',
                                               [('id', '=', data['group'])])
            if not group:
                verrors.add('user_update.group',
                            f'Group {data["group"]} not found', errno.ENOENT)
            else:
                group = group[0]
        else:
            group = user['group']
            user['group'] = group['id']

        await self.__common_validation(verrors, data, 'user_update', pk=pk)

        home = data.get('home') or user['home']
        has_home = home != '/nonexistent'
        # root user (uid 0) is an exception to the rule
        if data.get('sshpubkey'
                    ) and not home.startswith('/mnt') and user['uid'] != 0:
            verrors.add('user_update.sshpubkey',
                        'Home directory is not writable, leave this blank.')

        # Do not allow attributes to be changed for builtin user
        if user['builtin']:
            for i in ('group', 'home', 'home_mode', 'uid', 'username'):
                if i in data:
                    verrors.add(f'user_update.{i}',
                                'This attribute cannot be changed')

        verrors.check()

        # Copy the home directory if it changed
        if (has_home and 'home' in data and data['home'] != user['home']
                and not data['home'].startswith(f'{user["home"]}/')):
            home_copy = True
            home_old = user['home']
        else:
            home_copy = False

        # After this point user dict has values from data
        user.update(data)

        if home_copy and not os.path.isdir(user['home']):
            try:
                os.makedirs(user['home'])
                await self.middleware.call(
                    'filesystem.chown', {
                        'path': user['home'],
                        'uid': user['uid'],
                        'gid': group['bsdgrp_gid'],
                    })
            except OSError:
                self.logger.warn('Failed to chown homedir', exc_info=True)
            if not os.path.isdir(user['home']):
                raise CallError(f'{user["home"]} is not a directory')

        home_mode = user.pop('home_mode', None)
        if user['builtin']:
            home_mode = None

        def set_home_mode():
            if home_mode is not None:
                try:
                    # Strip ACL before chmod. This is required when aclmode = restricted
                    setfacl = subprocess.run(
                        ['/bin/setfacl', '-b', user['home']],
                        capture_output=True, check=False)
                    if setfacl.returncode != 0:
                        self.logger.debug('Failed to strip ACL: %s',
                                          setfacl.stderr.decode())
                    os.chmod(user['home'], int(home_mode, 8))
                except OSError:
                    self.logger.warn('Failed to set homedir mode',
                                     exc_info=True)

        try:
            await self.__update_sshpubkey(
                home_old if home_copy else user['home'],
                user,
                group['bsdgrp_group'],
            )
        except PermissionError as e:
            self.logger.warn('Failed to update authorized keys', exc_info=True)
            raise CallError(f'Failed to update authorized keys: {e}')

        if home_copy:

            def do_home_copy():
                try:
                    subprocess.run(
                        f"/usr/bin/su - {user['username']} -c '/bin/cp -a {home_old}/ {user['home']}/'",
                        shell=True,
                        check=True)
                except subprocess.CalledProcessError as e:
                    self.logger.warn(f"Failed to copy homedir: {e}")
                set_home_mode()

            asyncio.ensure_future(self.middleware.run_in_thread(do_home_copy))
        elif has_home:
            set_home_mode()

        user.pop('sshpubkey', None)
        await self.__set_password(user)

        if 'groups' in user:
            groups = user.pop('groups')
            await self.__set_groups(pk, groups)

        user = await self.user_compress(user)
        await self.middleware.call('datastore.update', 'account.bsdusers', pk,
                                   user, {'prefix': 'bsdusr_'})

        await self.middleware.call('service.reload', 'user')

        await self.__set_smbpasswd(user['username'])

        return pk

    @accepts(Int('id'), Dict('options', Bool('delete_group', default=True)))
    async def do_delete(self, pk, options=None):
        """
        Delete user `id`.

        The `delete_group` option deletes the user primary group if it is not being used by
        any other user.
        """

        user = await self._get_instance(pk)

        if user['builtin']:
            raise CallError('Cannot delete a built-in user', errno.EINVAL)

        if options['delete_group'] and not user['group']['bsdgrp_builtin']:
            count = await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership',
                [('group', '=', user['group']['id'])], {
                    'prefix': 'bsdgrpmember_',
                    'count': True
                })
            count2 = await self.middleware.call(
                'datastore.query', 'account.bsdusers',
                [('group', '=', user['group']['id']), ('id', '!=', pk)], {
                    'prefix': 'bsdusr_',
                    'count': True
                })
            if count == 0 and count2 == 0:
                try:
                    await self.middleware.call('group.delete',
                                               user['group']['id'])
                except Exception:
                    self.logger.warn(
                        f'Failed to delete primary group of {user["username"]}',
                        exc_info=True)

        await run('smbpasswd', '-x', user['username'], check=False)

        # TODO: add a hook in CIFS service
        cifs = await self.middleware.call('datastore.query', 'services.cifs',
                                          [], {'prefix': 'cifs_srv_'})
        if cifs:
            cifs = cifs[0]
            if cifs['guest'] == user['username']:
                await self.middleware.call('datastore.update', 'services.cifs',
                                           cifs['id'], {'guest': 'nobody'},
                                           {'prefix': 'cifs_srv_'})

        await self.middleware.call('datastore.delete', 'account.bsdusers', pk)
        await self.middleware.call('service.reload', 'user')

        return pk

    @accepts()
    def shell_choices(self):
        """
        Return the available shell choices to be used in `user.create` and `user.update`.
        """
        with open('/etc/shells', 'r') as f:
            shells = [x.rstrip() for x in f.readlines() if x.startswith('/')]
        return {
            shell: os.path.basename(shell)
            for shell in shells + ['/usr/sbin/nologin']
        }

    @accepts(
        Dict('get_user_obj', Str('username', default=None),
             Int('uid', default=None)))
    async def get_user_obj(self, data):
        """
        Returns a dictionary containing the struct passwd information for the user specified by
        either username or uid. Bypasses the user cache.
        """
        return await self.middleware.call('dscache.get_uncached_user',
                                          data['username'], data['uid'])

    @item_method
    @accepts(
        Int('id'),
        Str('key'),
        Any('value'),
    )
    async def set_attribute(self, pk, key, value):
        """
        Set user general purpose `attributes` dictionary `key` to `value`.

        e.g. Setting key="foo" value="bar" will result in {"attributes": {"foo": "bar"}}
        """
        user = await self._get_instance(pk)

        user['attributes'][key] = value

        await self.middleware.call('datastore.update', 'account.bsdusers', pk,
                                   {'attributes': user['attributes']},
                                   {'prefix': 'bsdusr_'})

        return True

    @item_method
    @accepts(
        Int('id'),
        Str('key'),
    )
    async def pop_attribute(self, pk, key):
        """
        Remove user general purpose `attributes` dictionary `key`.
        """
        user = await self._get_instance(pk)

        if key in user['attributes']:
            user['attributes'].pop(key)

            await self.middleware.call('datastore.update', 'account.bsdusers',
                                       pk, {'attributes': user['attributes']},
                                       {'prefix': 'bsdusr_'})
            return True
        else:
            return False

    @accepts()
    async def get_next_uid(self):
        """
        Get the next available/free uid.
        """
        last_uid = 999
        for i in await self.middleware.call('datastore.query',
                                            'account.bsdusers',
                                            [('builtin', '=', False)], {
                                                'order_by': ['uid'],
                                                'prefix': 'bsdusr_'
                                            }):
            # If the difference between the last uid and the current one is
            # bigger than 1, it means we have a gap and can use it.
            if i['uid'] - last_uid > 1:
                return last_uid + 1
            last_uid = i['uid']
        return last_uid + 1

    @no_auth_required
    @accepts()
    async def has_root_password(self):
        """
        Return whether the root user has a valid password set.

        This is used when the system is installed without a password and must be set on
        first use/login.
        """
        return (await self.middleware.call(
            'datastore.query', 'account.bsdusers', [
                ('bsdusr_username', '=', 'root')
            ], {'get': True}))['bsdusr_unixhash'] != '*'

    @no_auth_required
    @accepts(Str('password'))
    @pass_app
    async def set_root_password(self, app, password):
        """
        Set password for root user if it is not already set.
        """
        if not app.authenticated and await self.middleware.call(
                'user.has_root_password'):
            raise CallError(
                'You cannot call this method anonymously if root already has a password',
                errno.EACCES)

        root = await self.middleware.call('user.query',
                                          [('username', '=', 'root')],
                                          {'get': True})
        await self.middleware.call('user.update', root['id'],
                                   {'password': password})

    async def __common_validation(self, verrors, data, schema, pk=None):

        exclude_filter = [('id', '!=', pk)] if pk else []

        if 'username' in data:
            pw_checkname(verrors, f'{schema}.username', data['username'])

            if await self.middleware.call(
                    'datastore.query', 'account.bsdusers',
                [('username', '=', data['username'])] + exclude_filter,
                {'prefix': 'bsdusr_'}):
                verrors.add(
                    f'{schema}.username',
                    f'The username "{data["username"]}" already exists.',
                    errno.EEXIST)

        password = data.get('password')
        if password and '?' in password:
            # See bug #4098
            verrors.add(
                f'{schema}.password',
                'An SMB issue prevents creating passwords containing a '
                'question mark (?).', errno.EINVAL)
        elif not pk and not password and not data.get('password_disabled'):
            verrors.add(f'{schema}.password', 'Password is required')
        elif data.get('password_disabled') and password:
            verrors.add(
                f'{schema}.password_disabled',
                'Leave "Password" blank when "Disable password login" is checked.'
            )

        if 'home' in data:
            if ':' in data['home']:
                verrors.add(f'{schema}.home',
                            '"Home Directory" cannot contain colons (:).')
            if not data['home'].startswith(
                    '/mnt/') and data['home'] != '/nonexistent':
                verrors.add(
                    f'{schema}.home',
                    '"Home Directory" must begin with /mnt/ or set to '
                    '/nonexistent.')

        if 'home_mode' in data:
            try:
                o = int(data['home_mode'], 8)
                assert o & 0o777 == o
            except (AssertionError, ValueError, TypeError):
                verrors.add(
                    f'{schema}.home_mode',
                    'Please provide a valid value for home_mode attribute')
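        # e.g. home_mode '755' parses to 0o755 and is accepted, while '1777' is
        # rejected above because bits outside 0o777 (here the sticky bit) are not allowed.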

        if 'groups' in data:
            groups = data.get('groups') or []
            if groups and len(groups) > 64:
                verrors.add(
                    f'{schema}.groups',
                    'A user cannot belong to more than 64 auxiliary groups.')

        if 'full_name' in data and ':' in data['full_name']:
            verrors.add(f'{schema}.full_name',
                        'The ":" character is not allowed in a "Full Name".')

    async def __set_password(self, data):
        if 'password' not in data:
            return
        password = data.pop('password')
        if password:
            data['unixhash'] = crypted_password(password)
            # See http://samba.org.ru/samba/docs/man/manpages/smbpasswd.5.html
            data[
                'smbhash'] = f'{data["username"]}:{data["uid"]}:{"X" * 32}:{nt_password(password)}:[U         ]:LCT-{int(time.time()):X}:'
        else:
            data['unixhash'] = '*'
            data['smbhash'] = '*'
        return password

    async def __set_smbpasswd(self, username):
        """
        Update or create an entry in Samba's passdb.tdb file. The update only
        happens if the account's nt_password or 'locked' state has changed.
        Samba's passdb python library raises an exception if a corresponding
        Unix user does not exist, which is why setting the password is split
        into two methods/steps.
        """
        await self.middleware.call('smb.update_passdb_user', username)

    async def __set_groups(self, pk, groups):

        groups = set(groups)
        existing_ids = set()
        for gm in await self.middleware.call('datastore.query',
                                             'account.bsdgroupmembership',
                                             [('user', '=', pk)],
                                             {'prefix': 'bsdgrpmember_'}):
            if gm['id'] not in groups:
                await self.middleware.call('datastore.delete',
                                           'account.bsdgroupmembership',
                                           gm['id'])
            else:
                existing_ids.add(gm['id'])

        for _id in groups - existing_ids:
            group = await self.middleware.call('datastore.query',
                                               'account.bsdgroups',
                                               [('id', '=', _id)],
                                               {'prefix': 'bsdgrp_'})
            if not group:
                raise CallError(f'Group {_id} not found', errno.ENOENT)
            await self.middleware.call('datastore.insert',
                                       'account.bsdgroupmembership', {
                                           'group': _id,
                                           'user': pk
                                       }, {'prefix': 'bsdgrpmember_'})

    async def __update_sshpubkey(self, homedir, user, group):
        if 'sshpubkey' not in user:
            return
        if not os.path.isdir(homedir):
            return

        sshpath = f'{homedir}/.ssh'
        keysfile = f'{sshpath}/authorized_keys'

        pubkey = user.get('sshpubkey') or ''
        pubkey = pubkey.strip()
        if pubkey == '':
            try:
                os.unlink(keysfile)
            except OSError:
                pass
            return

        oldpubkey = ''
        try:
            with open(keysfile, 'r') as f:
                oldpubkey = f.read().strip()
        except Exception:
            pass

        if pubkey == oldpubkey:
            return

        if not os.path.isdir(sshpath):
            os.mkdir(sshpath, mode=0o700)
        if not os.path.isdir(sshpath):
            raise CallError(f'{sshpath} is not a directory')

        # Make extra sure to enforce the correct mode on the .ssh directory.
        # Stripping the ACL allows subsequent chmod calls to succeed even if
        # the dataset aclmode is restricted.
        await self.middleware.call(
            'filesystem.setperm', {
                'path': sshpath,
                'mode': str(700),
                'uid': user['uid'],
                'gid': group,
                'options': {
                    'recursive': True,
                    'stripacl': True
                }
            })

        with open(keysfile, 'w') as f:
            f.write(pubkey)
            f.write('\n')
        await self.middleware.call('filesystem.setperm', {
            'path': keysfile,
            'mode': str(600)
        })
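
The __update_sshpubkey method above compares the stored key against ~/.ssh/authorized_keys and rewrites the file only when it changed, enforcing restrictive permissions. A simplified standalone sketch of that flow, using plain os calls instead of the filesystem.setperm middleware job and leaving out ownership handling:

import os

def write_authorized_keys(homedir, pubkey):
    """Create or remove ~/.ssh/authorized_keys, skipping no-op rewrites."""
    sshpath = os.path.join(homedir, '.ssh')
    keysfile = os.path.join(sshpath, 'authorized_keys')

    pubkey = (pubkey or '').strip()
    if not pubkey:
        # An empty key means "remove any existing authorized_keys file".
        try:
            os.unlink(keysfile)
        except OSError:
            pass
        return

    try:
        with open(keysfile) as f:
            if f.read().strip() == pubkey:
                return  # Key unchanged; nothing to do.
    except OSError:
        pass

    os.makedirs(sshpath, mode=0o700, exist_ok=True)
    with open(keysfile, 'w') as f:
        f.write(pubkey + '\n')
    os.chmod(keysfile, 0o600)
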
Example #29
File: smart.py Project: wesleywwf/freenas
class SMARTTestService(CRUDService):

    class Config:
        datastore = 'tasks.smarttest'
        datastore_extend = 'smart.test.smart_test_extend'
        datastore_prefix = 'smarttest_'
        namespace = 'smart.test'
        cli_namespace = 'task.smart_test'

    ENTRY = Patch(
        'smart_task_create', 'smart_task_entry',
        ('add', Int('id')),
    )

    @private
    async def smart_test_extend(self, data):
        disks = data.pop('disks')
        data['disks'] = [disk['disk_identifier'] for disk in disks]
        test_type = {
            'L': 'LONG',
            'S': 'SHORT',
            'C': 'CONVEYANCE',
            'O': 'OFFLINE',
        }
        data['type'] = test_type[data.pop('type')]
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        smart_tests = await self.query(filters=[('type', '=', data['type'])])
        configured_disks = [d for test in smart_tests for d in test['disks']]
        disks_dict = await self.disk_choices()

        disks = data.get('disks')
        used_disks = []
        invalid_disks = []
        for disk in disks:
            if disk in configured_disks:
                used_disks.append(disks_dict[disk])
            if disk not in disks_dict.keys():
                invalid_disks.append(disk)

        if used_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks already have tests for this type: {", ".join(used_disks)}'
            )

        if invalid_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks are invalid: {", ".join(invalid_disks)}'
            )

        return verrors

    @accepts(Bool('full_disk', default=False))
    async def disk_choices(self, full_disk):
        """
        Returns disk choices for S.M.A.R.T. test.

        `full_disk` will return full disk objects instead of just names.
        """
        return {
            disk['identifier']: disk if full_disk else disk['name']
            for disk in await self.middleware.call('disk.query', [['devname', '!^', 'nv']])
        }

    @accepts(
        Dict(
            'smart_task_create',
            Cron(
                'schedule',
                exclude=['minute']
            ),
            Str('desc'),
            Bool('all_disks', default=False),
            List('disks', items=[Str('disk')]),
            Str('type', enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'], required=True),
            register=True
        )
    )
    async def do_create(self, data):
        """
        Create a SMART Test Task.

        `disks` is a list of valid disks which should be monitored in this task.

        `type` is specified to represent the type of SMART test to be executed.

        When `all_disks` is enabled, the task covers all disks and `disks` is not required.

        .. examples(websocket)::

          Create a SMART Test Task which executes after every 30 minutes.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "smart.test.create",
                "params": [{
                    "schedule": {
                        "minute": "30",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "all_disks": true,
                    "type": "OFFLINE",
                    "disks": []
                }]
            }
        """
        data['type'] = data.pop('type')[0]
        verrors = await self.validate_data(data, 'smart_test_create')

        if data['all_disks']:
            if data.get('disks'):
                verrors.add(
                    'smart_test_create.disks',
                    'This test is already enabled for all disks'
                )
        else:
            if not data.get('disks'):
                verrors.add(
                    'smart_test_create.disks',
                    'This field is required'
                )

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return await self.get_instance(data['id'])

    async def do_update(self, id, data):
        """
        Update SMART Test Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)], options={'get': True})
        new = old.copy()
        new.update(data)

        new['type'] = new.pop('type')[0]
        old['type'] = old.pop('type')[0]
        new_disks = [disk for disk in new['disks'] if disk not in old['disks']]
        deleted_disks = [disk for disk in old['disks'] if disk not in new['disks']]
        if old['type'] == new['type']:
            new['disks'] = new_disks
        verrors = await self.validate_data(new, 'smart_test_update')

        new['disks'] = [disk for disk in chain(new_disks, old['disks']) if disk not in deleted_disks]

        if new['all_disks']:
            if new.get('disks'):
                verrors.add(
                    'smart_test_update.disks',
                    'This test is already enabled for all disks'
                )
        else:
            if not new.get('disks'):
                verrors.add(
                    'smart_test_update.disks',
                    'This field is required'
                )

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return await self.get_instance(id)

    async def do_delete(self, id):
        """
        Delete SMART Test Task of `id`.
        """
        response = await self.middleware.call(
            'datastore.delete',
            self._config.datastore,
            id
        )

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return response

    @accepts(
        List(
            'disks', items=[
                Dict(
                    'disk_run',
                    Str('identifier', required=True),
                    Str('mode', enum=['FOREGROUND', 'BACKGROUND'], default='BACKGROUND'),
                    Str('type', enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'], required=True),
                )
            ]
        )
    )
    @returns(List('smart_manual_test', items=[Dict(
        'smart_manual_test_disk_response',
        Str('disk', required=True),
        Str('identifier', required=True),
        Str('error', required=True, null=True),
        Datetime('expected_result_time'),
        Int('job'),
    )]))
    async def manual_test(self, disks):
        """
        Run manual SMART tests for `disks`.

        `type` indicates what type of SMART test will be run and must be specified.
        """
        verrors = ValidationErrors()
        test_disks_list = []
        if not disks:
            verrors.add(
                'disks',
                'Please specify at least one disk.'
            )
        else:
            disks_choices = await self.disk_choices(True)
            devices = await self.middleware.call('device.get_storage_devices_topology')

            for index, disk in enumerate(disks):
                if current_disk := disks_choices.get(disk['identifier']):
                    test_disks_list.append({
                        'disk': current_disk['name'],
                        **disk
                    })
                else:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'{disk["identifier"]} is not valid. Please provide a valid disk identifier.'
                    )
                    continue

                if current_disk['name'] is None:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'Test cannot be performed for {disk["identifier"]} disk. Failed to retrieve name.'
                    )

                device = devices.get(current_disk['name'])
                if not device:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'Test cannot be performed for {disk["identifier"]}. Unable to retrieve disk details.'
                    )

        verrors.check()

        return await asyncio_map(self.__manual_test, test_disks_list, 16)
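
The SMART task service above stores the test type as a single character in the datastore ('L', 'S', 'C', 'O') and exposes the full enum name through the API: smart_test_extend maps the character to the name, while do_create and do_update take the first letter of the name to go back. A small sketch of that round trip, assuming the same four test types:

DB_TO_API = {'L': 'LONG', 'S': 'SHORT', 'C': 'CONVEYANCE', 'O': 'OFFLINE'}

def smart_type_to_api(db_code):
    """Expand the single-character code stored in the datastore."""
    return DB_TO_API[db_code]

def smart_type_to_db(api_value):
    """Collapse the API enum value to its first letter, as do_create does."""
    return api_value[0]

# smart_type_to_api('S') -> 'SHORT'
# smart_type_to_db('SHORT') -> 'S'
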
Example #30
class SharingSMBService(CRUDService):
    class Config:
        namespace = 'sharing.smb'
        datastore = 'sharing.cifs_share'
        datastore_prefix = 'cifs_'
        datastore_extend = 'sharing.smb.extend'

    @accepts(
        Dict('sharingsmb_create',
             Str('path', required=True),
             Bool('home', default=False),
             Str('name'),
             Str('comment'),
             Bool('ro', default=False),
             Bool('browsable', default=True),
             Bool('recyclebin', default=False),
             Bool('showhiddenfiles', default=False),
             Bool('guestok', default=False),
             Bool('guestonly', default=False),
             Bool('abe', default=False),
             List('hostsallow', items=[IPAddr('ip', network=True)],
                  default=[]),
             List('hostsdeny', items=[IPAddr('ip', network=True)], default=[]),
             List('vfsobjects',
                  default=['zfs_space', 'zfsacl', 'streams_xattr']),
             Int('storage_task'),
             Str('auxsmbconf'),
             Bool('default_permissions'),
             register=True))
    async def do_create(self, data):
        verrors = ValidationErrors()
        path = data['path']

        default_perms = data.pop('default_permissions', True)

        await self.clean(data, 'sharingsmb_create', verrors)
        await self.validate(data, 'sharingsmb_create', verrors)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(data)
        await self.set_storage_tasks(data)
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.extend(data)  # We should do this in the insert call ?

        await self._service_change('cifs', 'reload')
        await self.apply_default_perms(default_perms, path, data['home'])

        return data

    @accepts(Int('id'),
             Patch('sharingsmb_create', 'sharingsmb_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        verrors = ValidationErrors()
        path = data.get('path')
        default_perms = data.pop('default_permissions', False)

        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })

        new = old.copy()
        new.update(data)

        await self.clean(new, 'sharingsmb_update', verrors, id=id)
        await self.validate(new, 'sharingsmb_update', verrors, old=old)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.set_storage_tasks(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.extend(new)  # same here ?

        await self._service_change('cifs', 'reload')
        await self.apply_default_perms(default_perms, path, new['home'])

        return new

    @accepts(Int('id'))
    async def do_delete(self, id):
        share = await self._get_instance(id)
        result = await self.middleware.call('datastore.delete',
                                            self._config.datastore, id)
        await self.middleware.call('notifier.sharesec_delete', share['name'])
        await self._service_change('cifs', 'reload')
        return result

    @private
    async def clean(self, data, schema_name, verrors, id=None):
        data['name'] = await self.name_exists(data, schema_name, verrors, id)

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        home_result = await self.home_exists(data['home'], schema_name,
                                             verrors, old)

        if home_result:
            verrors.add(f'{schema_name}.home',
                        'Only one share is allowed to be a home share.')
        elif not home_result and not data['path']:
            verrors.add(f'{schema_name}.path', 'This field is required.')

        if data['path']:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   f"{schema_name}.path",
                                                   data['path'])

        if data.get('name') and data['name'] == 'global':
            verrors.add(
                f'{schema_name}.name',
                'Global is a reserved section name, please select another one')

    @private
    async def home_exists(self, home, schema_name, verrors, old=None):
        home_filters = [('home', '=', True)]
        home_result = None

        if home:
            if old and old['id'] is not None:
                id = old['id']

                if not old['home']:
                    home_filters.append(('id', '!=', id))
                    # The user already had this set as the home share
                    home_result = await self.middleware.call(
                        'datastore.query', self._config.datastore,
                        home_filters,
                        {'prefix': self._config.datastore_prefix})

        return home_result

    @private
    async def name_exists(self, data, schema_name, verrors, id=None):
        name = data['name']
        path = data['path']
        name_filters = [('name', '=', name)]
        path_filters = [('path', '=', path)]

        if path and not name:
            name = path.rsplit('/', 1)[-1]

        if id is not None:
            name_filters.append(('id', '!=', id))
            path_filters.append(('id', '!=', id))

        name_result = await self.middleware.call(
            'datastore.query', self._config.datastore, name_filters,
            {'prefix': self._config.datastore_prefix})
        path_result = await self.middleware.call(
            'datastore.query', self._config.datastore, path_filters,
            {'prefix': self._config.datastore_prefix})

        if name_result:
            verrors.add(f'{schema_name}.name',
                        'A share with this name already exists.')

        if path_result:
            verrors.add(f'{schema_name}.path',
                        'A share with this path already exists.')

        return name

    @private
    async def extend(self, data):
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()

        return data

    @private
    async def compress(self, data):
        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])

        return data

    @private
    async def apply_default_perms(self, default_perms, path, is_home):
        if default_perms:
            try:
                (owner,
                 group) = await self.middleware.call('notifier.mp_get_owner',
                                                     path)
            except Exception:
                (owner, group) = ('root', 'wheel')

            await self.middleware.call('notifier.winacl_reset', path, owner,
                                       group, None, not is_home)

    @accepts(Str('path', required=True))
    async def get_storage_tasks(self, path):
        zfs_datasets = await self.middleware.call(
            'zfs.dataset.query', [('type', '=', 'FILESYSTEM')])
        task_list = []
        task_dict = {}

        for ds in zfs_datasets:
            tasks = []
            name = ds['name']
            mountpoint = ds['properties']['mountpoint']['parsed']

            if path == mountpoint:
                tasks = await self.middleware.call(
                    'datastore.query', 'storage.task',
                    [['task_filesystem', '=', name]])
            elif path.startswith(f'{mountpoint}/'):
                tasks = await self.middleware.call(
                    'datastore.query', 'storage.task',
                    [['task_filesystem', '=', name],
                     ['task_recursive', '=', 'True']])

            task_list.extend(tasks)

        for task in task_list:
            task_id = task['id']
            fs = task['task_filesystem']
            retcount = task['task_ret_count']
            retunit = task['task_ret_unit']
            _interval = task['task_interval']
            interval = dict(await
                            self.middleware.call('notifier.choices',
                                                 'TASK_INTERVAL'))[_interval]

            msg = f'{fs} - every {interval} - {retcount}{retunit}'

            task_dict[task_id] = msg

        return task_dict

    @private
    async def set_storage_tasks(self, data):
        task = data.get('storage_task', None)
        home = data['home']
        path = data['path']
        task_list = []

        if not task:
            if path:
                task_list = await self.get_storage_tasks(path=path)
            elif home:
                task_list = await self.get_storage_tasks(home=home)

        if task_list:
            data['storage_task'] = list(task_list.keys())[0]

        return data

    @accepts()
    def vfsobjects_choices(self):
        vfs_modules_path = '/usr/local/lib/shared-modules/vfs'
        vfs_modules = []
        vfs_exclude = {'shadow_copy2', 'recycle', 'aio_pthread'}

        if os.path.exists(vfs_modules_path):
            vfs_modules.extend(
                filter(
                    lambda m: m not in vfs_exclude,
                    map(lambda f: f.rpartition('.')[0],
                        os.listdir(vfs_modules_path))))
        else:
            vfs_modules.extend(['streams_xattr'])

        return vfs_modules
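
SharingSMBService stores hostsallow and hostsdeny as whitespace-separated strings in the datastore and converts them to lists at the API boundary, as the extend and compress methods above show. A minimal sketch of that round trip on plain dicts:

def extend_hosts(row):
    """Datastore row -> API form: split the stored strings into lists."""
    row['hostsallow'] = row['hostsallow'].split()
    row['hostsdeny'] = row['hostsdeny'].split()
    return row

def compress_hosts(data):
    """API form -> datastore row: join the lists back into strings."""
    data['hostsallow'] = ' '.join(data['hostsallow'])
    data['hostsdeny'] = ' '.join(data['hostsdeny'])
    return data

# compress_hosts({'hostsallow': ['192.168.0.0/24'], 'hostsdeny': []})
# -> {'hostsallow': '192.168.0.0/24', 'hostsdeny': ''}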