Example #1
class KeyValueService(Service):
    class Config:
        private = True

    @accepts(Str('key'))
    async def has_key(self, key):
        try:
            await self.get(key)
            return True
        except KeyError:
            return False

    @accepts(Str('key'), Any('default', null=True, default=None))
    async def get(self, key, default):
        try:
            row = await self.middleware.call(
                "datastore.query", "system.keyvalue",
                [["key", "=", key]], {"get": True})
            return json.loads(row["value"])
        except IndexError:
            if default is not None:
                return default

            raise KeyError(key)

    @accepts(
        Str('key'),
        Any('value'),
        Dict('options', additional_attrs=True),
    )
    async def set(self, key, value, options):
        try:
            row = await self.middleware.call("datastore.query",
                                             "system.keyvalue",
                                             [["key", "=", key]],
                                             {"get": True})
        except IndexError:
            await self.middleware.call("datastore.insert", "system.keyvalue", {
                "key": key,
                "value": json.dumps(value)
            }, options)
        else:
            await self.middleware.call("datastore.update", "system.keyvalue",
                                       row["id"], {"value": json.dumps(value)},
                                       options)

        return value

    @accepts(
        Str('key'),
        Dict('options', additional_attrs=True),
    )
    async def delete(self, key, options):
        await self.middleware.call("datastore.delete", "system.keyvalue",
                                   [["key", "=", key]], options)
Example #2
class CacheService(Service):
    class Config:
        private = True

    def __init__(self, *args, **kwargs):
        super(CacheService, self).__init__(*args, **kwargs)
        self.__cache = {}

    @accepts(Str('key'))
    def has_key(self, key):
        """
        Check if given `key` is in cache.
        """
        return key in self.__cache

    @accepts(Str('key'))
    def get(self, key):
        return self.__cache[key]

    @accepts(Str('key'), Any('value'))
    def put(self, key, value):
        self.__cache[key] = value

    @accepts(Str('key'))
    def pop(self, key):
        return self.__cache.pop(key, None)
Example #3
    def register(self, name, description, private=False, returns=None):
        if name in self._events:
            raise ValueError(f'Event {name!r} already registered.')
        self._events[name] = {
            'description': description,
            'accepts': [],
            'returns': [returns] if returns else [Any(name, null=True)],
        }
        if private:
            self.__events_private.add(name)
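A hypothetical call site (the `registry` object and the event schema below are illustrative assumptions, not taken from the example):

# assuming `registry` is the object exposing register() shown above
registry.register(
    'alert.added',                         # unique event name
    'Sent when a new alert is raised.',    # human-readable description
    private=False,
    returns=Dict('alert', Int('id'), Str('msg'), additional_attrs=True),
)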
Example #4
    def __new__(cls, name, bases, attrs):
        klass = super().__new__(cls, name, bases, attrs)
        if name == 'EventSource' and bases == ():
            return klass

        for attr, default_name in (('ACCEPTS', name.lower()),
                                   ('RETURNS', f'{name.lower()}_returns')):
            doc_type = getattr(klass, attr)
            if doc_type == NotImplementedError:
                doc_type = Any(null=True)
            if not doc_type.name:
                doc_type.name = default_name
            setattr(klass, attr, [doc_type])

        return klass
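A sketch of what this metaclass does to a subclass, assuming an EventSource base class built on it (the class name is illustrative):

class Reporting(EventSource):
    # Both schemas are unnamed, so __new__ above fills in names derived
    # from the class name and wraps each schema in a list:
    ACCEPTS = Any(null=True)   # becomes [Any('reporting', null=True)]
    RETURNS = Any(null=True)   # becomes [Any('reporting_returns', null=True)]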
Example #5
class CacheService(Service):
    class Config:
        private = True

    def __init__(self, *args, **kwargs):
        super(CacheService, self).__init__(*args, **kwargs)
        self.__cache = {}

    @accepts(Str('key'))
    def has_key(self, key):
        """
        Check if given `key` is in cache.
        """
        return key in self.__cache

    @accepts(Str('key'))
    def get(self, key):
        """
        Get `key` from cache.

        Raises:
            KeyError: not found in the cache
        """
        return self.__cache[key]

    @accepts(Str('key'), Any('value'))
    def put(self, key, value):
        """
        Put `key` of `value` in the cache.
        """
        self.__cache[key] = value

    @accepts(Str('key'))
    def pop(self, key):
        """
        Removes and returns `key` from cache.
        """
        return self.__cache.pop(key, None)
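A short usage sketch (assumed call sites; by convention the service would register under the 'cache' namespace):

def demo(middleware):
    middleware.call_sync('cache.put', 'interfaces', ['em0', 'em1'])
    if middleware.call_sync('cache.has_key', 'interfaces'):
        nics = middleware.call_sync('cache.get', 'interfaces')
    middleware.call_sync('cache.pop', 'interfaces')  # returns None if absent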
Example #6
class UserService(CRUDService):
    class Config:
        datastore = 'account.bsdusers'
        datastore_extend = 'user.user_extend'
        datastore_prefix = 'bsdusr_'

    @private
    async def user_extend(self, user):

        # Normalize email, empty is really null
        if user['email'] == '':
            user['email'] = None

        # Get group membership
        user['groups'] = [
            gm['group']['id'] for gm in await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership', [(
                    'user', '=', user['id'])], {'prefix': 'bsdgrpmember_'})
        ]

        # Get authorized keys
        keysfile = f'{user["home"]}/.ssh/authorized_keys'
        user['sshpubkey'] = None
        if os.path.exists(keysfile):
            try:
                with open(keysfile, 'r') as f:
                    user['sshpubkey'] = f.read()
            except Exception:
                pass
        return user

    @private
    async def user_compress(self, user):
        if 'local' in user:
            user.pop('local')
        if 'id_type_both' in user:
            user.pop('id_type_both')
        return user

    @filterable
    async def query(self, filters=None, options=None):
        """
        Query users with `query-filters` and `query-options`. As a performance optimization, only local users
        will be queried by default.

        Users from directory services such as NIS, LDAP, or Active Directory will be included in query results
        if the option `{'extra': {'search_dscache': True}}` is specified.
        """
        if not filters:
            filters = []

        options = options or {}
        options['extend'] = self._config.datastore_extend
        options['extend_context'] = self._config.datastore_extend_context
        options['prefix'] = self._config.datastore_prefix

        datastore_options = options.copy()
        datastore_options.pop('count', None)
        datastore_options.pop('get', None)

        extra = options.get('extra', {})
        dssearch = extra.pop('search_dscache', False)

        if dssearch:
            return await self.middleware.call('dscache.query', 'USERS',
                                              filters, options)

        result = await self.middleware.call('datastore.query',
                                            self._config.datastore, [],
                                            datastore_options)
        for entry in result:
            entry.update({'local': True, 'id_type_both': False})
        return await self.middleware.run_in_thread(filter_list, result,
                                                   filters, options)

    @accepts(
        Dict(
            'user_create',
            Int('uid'),
            Str('username', required=True, max_length=16),
            Int('group'),
            Bool('group_create', default=False),
            Str('home', default='/nonexistent'),
            Str('home_mode', default='755'),
            Str('shell', default='/bin/csh' if IS_FREEBSD else '/usr/bin/zsh'),
            Str('full_name', required=True),
            Str('email', validators=[Email()], null=True, default=None),
            Str('password', private=True),
            Bool('password_disabled', default=False),
            Bool('locked', default=False),
            Bool('microsoft_account', default=False),
            Bool('smb', default=True),
            Bool('sudo', default=False),
            Str('sshpubkey', null=True, max_length=None),
            List('groups', default=[]),
            Dict('attributes', additional_attrs=True),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a new user.

        If `uid` is not provided it is automatically filled with the next one available.

        `group` is required if `group_create` is false.

        `password` is required if `password_disabled` is false.

        Available choices for `shell` can be retrieved with `user.shell_choices`.

        `attributes` is a general-purpose object for storing arbitrary user information.

        `smb` specifies whether the user should be allowed access to SMB shares. The user
        will also automatically be added to the `builtin_users` group.
        """
        verrors = ValidationErrors()

        if (not data.get('group') and not data.get('group_create')) or (
                data.get('group') is not None and data.get('group_create')):
            verrors.add(
                'user_create.group',
                'Enter either a group name or create a new group to '
                'continue.', errno.EINVAL)

        await self.__common_validation(verrors, data, 'user_create')

        if data.get('sshpubkey') and not data['home'].startswith('/mnt'):
            verrors.add(
                'user_create.sshpubkey',
                'The home directory is not writable. Leave this field blank.')

        verrors.check()

        groups = data.pop('groups')
        create = data.pop('group_create')

        if create:
            group = await self.middleware.call(
                'group.query', [('group', '=', data['username'])])
            if group:
                group = group[0]
            else:
                group = await self.middleware.call('group.create', {
                    'name': data['username'],
                    'smb': False
                })
                group = (await self.middleware.call('group.query',
                                                    [('id', '=', group)]))[0]
            data['group'] = group['id']
        else:
            group = await self.middleware.call('group.query',
                                               [('id', '=', data['group'])])
            if not group:
                raise CallError(f'Group {data["group"]} not found')
            group = group[0]

        if data['smb']:
            groups.append(
                (await self.middleware.call('group.query',
                                            [('group', '=', 'builtin_users')],
                                            {'get': True}))['id'])

        # Is this a new directory or not? Let's not nuke existing directories,
        # e.g. /, /root, /mnt/tank/my-dataset, etc ;).
        new_homedir = False
        home_mode = data.pop('home_mode')
        if data['home'] and data['home'] != '/nonexistent':
            try:
                try:
                    os.makedirs(data['home'], mode=int(home_mode, 8))
                    new_homedir = True
                    await self.middleware.call(
                        'filesystem.setperm', {
                            'path': data['home'],
                            'mode': home_mode,
                            'uid': data['uid'],
                            'gid': group['gid'],
                            'options': {
                                'stripacl': True
                            }
                        })
                except FileExistsError:
                    if not os.path.isdir(data['home']):
                        raise CallError(
                            'Path for home directory already '
                            'exists and is not a directory', errno.EEXIST)

                    # If it exists, ensure the user is owner.
                    await self.middleware.call(
                        'filesystem.chown', {
                            'path': data['home'],
                            'uid': data['uid'],
                            'gid': group['gid'],
                        })
                except OSError as oe:
                    raise CallError('Failed to create the home directory '
                                    f'({data["home"]}) for user: {oe}')
            except Exception:
                if new_homedir:
                    shutil.rmtree(data['home'])
                raise

        if not data.get('uid'):
            data['uid'] = await self.get_next_uid()

        pk = None  # Make sure pk exists to rollback in case of an error
        data = await self.user_compress(data)
        try:
            await self.__set_password(data)
            sshpubkey = data.pop('sshpubkey',
                                 None)  # datastore does not have sshpubkey

            pk = await self.middleware.call('datastore.insert',
                                            'account.bsdusers', data,
                                            {'prefix': 'bsdusr_'})

            await self.__set_groups(pk, groups)

        except Exception:
            if pk is not None:
                await self.middleware.call('datastore.delete',
                                           'account.bsdusers', pk)
            if new_homedir:
                # Be as atomic as possible when creating the user if
                # commands failed to execute cleanly.
                shutil.rmtree(data['home'])
            raise

        await self.middleware.call('service.reload', 'user')

        if data['smb']:
            await self.__set_smbpasswd(data['username'])

        if os.path.exists(data['home']):
            for f in os.listdir(SKEL_PATH):
                if f.startswith('dot'):
                    dest_file = os.path.join(data['home'], f[3:])
                else:
                    dest_file = os.path.join(data['home'], f)
                if not os.path.exists(dest_file):
                    shutil.copyfile(os.path.join(SKEL_PATH, f), dest_file)
                    await self.middleware.call(
                        'filesystem.chown', {
                            'path': dest_file,
                            'uid': data['uid'],
                            'gid': group['gid'],
                            'options': {
                                'recursive': True
                            }
                        })

            data['sshpubkey'] = sshpubkey
            try:
                await self.update_sshpubkey(data['home'], data, group['group'])
            except PermissionError as e:
                self.logger.warn('Failed to update authorized keys',
                                 exc_info=True)
                raise CallError(f'Failed to update authorized keys: {e}')

        return pk

    @accepts(
        Int('id'),
        Patch(
            'user_create',
            'user_update',
            ('attr', {
                'update': True
            }),
            ('rm', {
                'name': 'group_create'
            }),
        ),
    )
    async def do_update(self, pk, data):
        """
        Update attributes of an existing user.
        """

        user = await self._get_instance(pk)

        verrors = ValidationErrors()

        if 'group' in data:
            group = await self.middleware.call('datastore.query',
                                               'account.bsdgroups',
                                               [('id', '=', data['group'])])
            if not group:
                verrors.add('user_update.group',
                            f'Group {data["group"]} not found', errno.ENOENT)
                verrors.check()
            group = group[0]
        else:
            group = user['group']
            user['group'] = group['id']

        await self.__common_validation(verrors, data, 'user_update', pk=pk)

        home = data.get('home') or user['home']
        has_home = home != '/nonexistent'
        # root user (uid 0) is an exception to the rule
        if data.get('sshpubkey'
                    ) and not home.startswith('/mnt') and user['uid'] != 0:
            verrors.add('user_update.sshpubkey',
                        'Home directory is not writable, leave this blank')

        # Do not allow attributes to be changed for builtin user
        if user['builtin']:
            for i in ('group', 'home', 'home_mode', 'uid', 'username'):
                if i in data:
                    verrors.add(f'user_update.{i}',
                                'This attribute cannot be changed')

        verrors.check()

        must_change_pdb_entry = False
        for k in ('username', 'password', 'locked'):
            new_val = data.get(k)
            old_val = user.get(k)
            if new_val is not None and old_val != new_val:
                if k == 'username':
                    try:
                        await self.middleware.call("smb.remove_passdb_user",
                                                   old_val)
                    except Exception:
                        self.logger.debug(
                            "Failed to remove passdb entry for user [%s]",
                            old_val,
                            exc_info=True)

                must_change_pdb_entry = True

        # Copy the home directory if it changed
        if (has_home and 'home' in data and data['home'] != user['home']
                and not data['home'].startswith(f'{user["home"]}/')):
            home_copy = True
            home_old = user['home']
        else:
            home_copy = False

        # After this point user dict has values from data
        user.update(data)

        if home_copy and not os.path.isdir(user['home']):
            try:
                os.makedirs(user['home'])
                await self.middleware.call(
                    'filesystem.chown', {
                        'path': user['home'],
                        'uid': user['uid'],
                        'gid': group['bsdgrp_gid'],
                    })
            except OSError:
                self.logger.warn('Failed to chown homedir', exc_info=True)
            if not os.path.isdir(user['home']):
                raise CallError(f'{user["home"]} is not a directory')

        home_mode = user.pop('home_mode', None)
        if user['builtin']:
            home_mode = None

        def set_home_mode():
            if home_mode is not None:
                try:
                    # Strip ACL before chmod. This is required when aclmode = restricted
                    setfacl = subprocess.run(
                        ['/bin/setfacl', '-b', user['home']], check=False)
                    if setfacl.returncode != 0 and setfacl.stderr:
                        self.logger.debug('Failed to strip ACL: %s',
                                          setfacl.stderr.decode())
                    os.chmod(user['home'], int(home_mode, 8))
                except OSError:
                    self.logger.warn('Failed to set homedir mode',
                                     exc_info=True)

        try:
            update_sshpubkey_args = [
                home_old if home_copy else user['home'],
                user,
                group['bsdgrp_group'],
            ]
            await self.update_sshpubkey(*update_sshpubkey_args)
        except PermissionError as e:
            self.logger.warn('Failed to update authorized keys', exc_info=True)
            raise CallError(f'Failed to update authorized keys: {e}')
        else:
            if user['uid'] == 0:
                if await self.middleware.call('failover.licensed'):
                    try:
                        await self.middleware.call('failover.call_remote',
                                                   'user.update_sshpubkey',
                                                   update_sshpubkey_args)
                    except Exception:
                        self.logger.error(
                            'Failed to sync root ssh pubkey to standby node',
                            exc_info=True)

        if home_copy:

            def do_home_copy():
                try:
                    command = f"/bin/cp -a {shlex.quote(home_old) + '/'} {shlex.quote(user['home'] + '/')}"
                    subprocess.run(
                        ["/usr/bin/su", "-", user["username"], "-c", command],
                        check=True)
                except subprocess.CalledProcessError as e:
                    self.logger.warn(f"Failed to copy homedir: {e}")
                set_home_mode()

            asyncio.ensure_future(self.middleware.run_in_thread(do_home_copy))
        elif has_home:
            asyncio.ensure_future(self.middleware.run_in_thread(set_home_mode))

        user.pop('sshpubkey', None)
        await self.__set_password(user)

        if 'groups' in user:
            groups = user.pop('groups')
            await self.__set_groups(pk, groups)

        user = await self.user_compress(user)
        await self.middleware.call('datastore.update', 'account.bsdusers', pk,
                                   user, {'prefix': 'bsdusr_'})

        await self.middleware.call('service.reload', 'user')
        if user['smb'] and must_change_pdb_entry:
            await self.__set_smbpasswd(user['username'])

        return pk

    @accepts(Int('id'), Dict('options', Bool('delete_group', default=True)))
    async def do_delete(self, pk, options=None):
        """
        Delete user `id`.

        The `delete_group` option deletes the user primary group if it is not being used by
        any other user.
        """

        user = await self._get_instance(pk)

        if user['builtin']:
            raise CallError('Cannot delete a built-in user', errno.EINVAL)

        if options['delete_group'] and not user['group']['bsdgrp_builtin']:
            count = await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership',
                [('group', '=', user['group']['id'])], {
                    'prefix': 'bsdgrpmember_',
                    'count': True
                })
            count2 = await self.middleware.call(
                'datastore.query', 'account.bsdusers',
                [('group', '=', user['group']['id']), ('id', '!=', pk)], {
                    'prefix': 'bsdusr_',
                    'count': True
                })
            if count == 0 and count2 == 0:
                try:
                    await self.middleware.call('group.delete',
                                               user['group']['id'])
                except Exception:
                    self.logger.warn(
                        f'Failed to delete primary group of {user["username"]}',
                        exc_info=True)

        if user['smb']:
            await run('smbpasswd', '-x', user['username'], check=False)

        # TODO: add a hook in CIFS service
        cifs = await self.middleware.call('datastore.query', 'services.cifs',
                                          [], {'prefix': 'cifs_srv_'})
        if cifs:
            cifs = cifs[0]
            if cifs['guest'] == user['username']:
                await self.middleware.call('datastore.update', 'services.cifs',
                                           cifs['id'], {'guest': 'nobody'},
                                           {'prefix': 'cifs_srv_'})

        await self.middleware.call('datastore.delete', 'account.bsdusers', pk)
        await self.middleware.call('service.reload', 'user')

        return pk

    @accepts(Int('user_id', default=None, null=True))
    def shell_choices(self, user_id=None):
        """
        Return the available shell choices to be used in `user.create` and `user.update`.

        If `user_id` is provided, shell choices are filtered to ensure the user can access the returned shells.
        """
        user = self.middleware.call_sync('user.get_instance',
                                         user_id) if user_id else None
        with open('/etc/shells', 'r') as f:
            shells = [x.rstrip() for x in f.readlines() if x.startswith('/')]
        return {
            shell: os.path.basename(shell)
            for shell in (shells + ['/usr/sbin/nologin'])
            if 'netcli' not in shell or (user and user['username'] == 'root')
        }

    @accepts(
        Dict('get_user_obj', Str('username', default=None),
             Int('uid', default=None)))
    async def get_user_obj(self, data):
        """
        Returns a dictionary containing information from struct passwd for the user specified by
        either the username or uid. Bypasses the user cache.
        """
        return await self.middleware.call('dscache.get_uncached_user',
                                          data['username'], data['uid'])

    @item_method
    @accepts(
        Int('id'),
        Str('key'),
        Any('value'),
    )
    async def set_attribute(self, pk, key, value):
        """
        Set user general purpose `attributes` dictionary `key` to `value`.

        e.g. Setting key="foo" value="bar" will result in {"attributes": {"foo": "bar"}}
        """
        user = await self._get_instance(pk)

        user['attributes'][key] = value

        await self.middleware.call('datastore.update', 'account.bsdusers', pk,
                                   {'attributes': user['attributes']},
                                   {'prefix': 'bsdusr_'})

        return True

    @item_method
    @accepts(
        Int('id'),
        Str('key'),
    )
    async def pop_attribute(self, pk, key):
        """
        Remove user general purpose `attributes` dictionary `key`.
        """
        user = await self._get_instance(pk)

        if key in user['attributes']:
            user['attributes'].pop(key)

            await self.middleware.call('datastore.update', 'account.bsdusers',
                                       pk, {'attributes': user['attributes']},
                                       {'prefix': 'bsdusr_'})
            return True
        else:
            return False

    @accepts()
    async def get_next_uid(self):
        """
        Get the next available/free uid.
        """
        last_uid = 999
        for i in await self.middleware.call('datastore.query',
                                            'account.bsdusers',
                                            [('builtin', '=', False)], {
                                                'order_by': ['uid'],
                                                'prefix': 'bsdusr_'
                                            }):
            # If the difference between the last uid and the current one is
            # bigger than 1, it means we have a gap and can use it.
            if i['uid'] - last_uid > 1:
                return last_uid + 1
            last_uid = i['uid']
        return last_uid + 1

    @no_auth_required
    @accepts()
    async def has_root_password(self):
        """
        Return whether the root user has a valid password set.

        This is used when the system is installed without a password and one must be set on
        first use/login.
        """
        return (await self.middleware.call(
            'datastore.query', 'account.bsdusers', [
                ('bsdusr_username', '=', 'root')
            ], {'get': True}))['bsdusr_unixhash'] != '*'

    @no_auth_required
    @accepts(Str('password'),
             Dict(
                 'options',
                 Dict(
                     'ec2',
                     Str('instance_id', required=True),
                 ),
                 update=True,
             ))
    @pass_app()
    async def set_root_password(self, app, password, options):
        """
        Set password for root user if it is not already set.
        """
        if not app.authenticated:
            if await self.middleware.call('user.has_root_password'):
                raise CallError(
                    'You cannot call this method anonymously if root already has a password',
                    errno.EACCES)

            if await self.middleware.call('system.environment') == 'EC2':
                if 'ec2' not in options:
                    raise CallError(
                        'You need to specify instance ID when setting initial root password on EC2 instance',
                        errno.EACCES,
                    )

                if options['ec2']['instance_id'] != await self.middleware.call(
                        'ec2.instance_id'):
                    raise CallError('Incorrect EC2 instance ID', errno.EACCES)

        root = await self.middleware.call('user.query',
                                          [('username', '=', 'root')],
                                          {'get': True})
        await self.middleware.call('user.update', root['id'],
                                   {'password': password})

    async def __common_validation(self, verrors, data, schema, pk=None):

        exclude_filter = [('id', '!=', pk)] if pk else []

        if 'username' in data:
            pw_checkname(verrors, f'{schema}.username', data['username'])

            if await self.middleware.call(
                    'datastore.query', 'account.bsdusers',
                [('username', '=', data['username'])] + exclude_filter,
                {'prefix': 'bsdusr_'}):
                verrors.add(
                    f'{schema}.username',
                    f'The username "{data["username"]}" already exists.',
                    errno.EEXIST)
            if data.get('smb'):
                smb_users = await self.middleware.call(
                    'datastore.query', 'account.bsdusers',
                    [('smb', '=', True)] + exclude_filter,
                    {'prefix': 'bsdusr_'})

                if any(
                        filter(
                            lambda x: data['username'].casefold() == x[
                                'username'].casefold(), smb_users)):
                    verrors.add(
                        f'{schema}.smb',
                        f'Username "{data["username"]}" conflicts with existing SMB user. Note that SMB '
                        f'usernames are case-insensitive.',
                        errno.EEXIST,
                    )

        password = data.get('password')
        if password and '?' in password:
            # See bug #4098
            verrors.add(
                f'{schema}.password',
                'An SMB issue prevents creating passwords containing a '
                'question mark (?).', errno.EINVAL)
        elif not pk and not password and not data.get('password_disabled'):
            verrors.add(f'{schema}.password', 'Password is required')
        elif data.get('password_disabled') and password:
            verrors.add(
                f'{schema}.password_disabled',
                'Leave "Password" blank when "Disable password login" is checked.'
            )

        if 'home' in data:
            if ':' in data['home']:
                verrors.add(f'{schema}.home',
                            '"Home Directory" cannot contain colons (:).')
            if data['home'] != '/nonexistent':
                if not data['home'].startswith('/mnt/'):
                    verrors.add(
                        f'{schema}.home',
                        '"Home Directory" must begin with /mnt/ or set to '
                        '/nonexistent.')
                elif not any(
                        data['home'] == i['path']
                        or data['home'].startswith(i['path'] + '/')
                        for i in await self.middleware.call('pool.query')):
                    verrors.add(
                        f'{schema}.home',
                        f'The path for the home directory ({data["home"]}) '
                        'must include a volume or dataset.')
                elif await self.middleware.call('filesystem.path_is_encrypted',
                                                data['home']):
                    verrors.add(
                        f'{schema}.home',
                        'Path component for "Home Directory" is currently encrypted and locked'
                    )

        if 'home_mode' in data:
            try:
                o = int(data['home_mode'], 8)
                assert o & 0o777 == o
            except (AssertionError, ValueError, TypeError):
                verrors.add(
                    f'{schema}.home_mode',
                    'Please provide a valid value for home_mode attribute')

        if 'groups' in data:
            groups = data.get('groups') or []
            if groups and len(groups) > 64:
                verrors.add(
                    f'{schema}.groups',
                    'A user cannot belong to more than 64 auxiliary groups.')

        if 'full_name' in data and ':' in data['full_name']:
            verrors.add(f'{schema}.full_name',
                        'The ":" character is not allowed in a "Full Name".')

        if 'shell' in data and data['shell'] not in await self.middleware.call(
                'user.shell_choices', pk):
            verrors.add(f'{schema}.shell', 'Please select a valid shell.')

    async def __set_password(self, data):
        if 'password' not in data:
            return
        password = data.pop('password')
        if password:
            data['unixhash'] = crypted_password(password)
            # See http://samba.org.ru/samba/docs/man/manpages/smbpasswd.5.html
            data['smbhash'] = (
                f'{data["username"]}:{data["uid"]}:{"X" * 32}:'
                f'{nt_password(password)}:[U         ]:LCT-{int(time.time()):X}:'
            )
        else:
            data['unixhash'] = '*'
            data['smbhash'] = '*'
        return password

    async def __set_smbpasswd(self, username):
        """
        This method will update or create an entry in samba's passdb.tdb file.
        Update will only happen if the account's nt_password has changed or
        if the account's 'locked' state has changed. Samba's passdb python
        library will raise an exception if a corresponding Unix user does not
        exist. That is the reason we have two methods/steps to set password.
        """
        await self.middleware.call('smb.update_passdb_user', username)

    async def __set_groups(self, pk, groups):

        groups = set(groups)
        existing_ids = set()
        for gm in await self.middleware.call('datastore.query',
                                             'account.bsdgroupmembership',
                                             [('user', '=', pk)],
                                             {'prefix': 'bsdgrpmember_'}):
            if gm['group']['id'] not in groups:
                await self.middleware.call('datastore.delete',
                                           'account.bsdgroupmembership',
                                           gm['id'])
            else:
                existing_ids.add(gm['group']['id'])

        for _id in groups - existing_ids:
            group = await self.middleware.call('datastore.query',
                                               'account.bsdgroups',
                                               [('id', '=', _id)],
                                               {'prefix': 'bsdgrp_'})
            if not group:
                raise CallError(f'Group {_id} not found', errno.ENOENT)
            await self.middleware.call('datastore.insert',
                                       'account.bsdgroupmembership', {
                                           'group': _id,
                                           'user': pk
                                       }, {'prefix': 'bsdgrpmember_'})

    @private
    async def update_sshpubkey(self, homedir, user, group):
        if 'sshpubkey' not in user:
            return
        if not os.path.isdir(homedir):
            return

        sshpath = f'{homedir}/.ssh'
        keysfile = f'{sshpath}/authorized_keys'
        gid = -1

        pubkey = user.get('sshpubkey') or ''
        pubkey = pubkey.strip()
        if pubkey == '':
            try:
                os.unlink(keysfile)
            except OSError:
                pass
            return

        oldpubkey = ''
        try:
            with open(keysfile, 'r') as f:
                oldpubkey = f.read().strip()
        except Exception:
            pass

        if pubkey == oldpubkey:
            return

        if not os.path.isdir(sshpath):
            os.mkdir(sshpath, mode=0o700)
        if not os.path.isdir(sshpath):
            raise CallError(f'{sshpath} is not a directory')

        # Make extra sure to enforce correct mode on .ssh directory.
        # stripping the ACL will allow subsequent chmod calls to succeed even if
        # dataset aclmode is restricted.
        try:
            gid = (await self.middleware.call('group.get_group_obj',
                                              {'groupname': group}))['gr_gid']
        except Exception:
            # leaving gid at -1 avoids altering the GID value.
            self.logger.debug("Failed to convert %s to gid",
                              group,
                              exc_info=True)

        await self.middleware.call(
            'filesystem.setperm', {
                'path': sshpath,
                'mode': '700',
                'uid': user['uid'],
                'gid': gid,
                'options': {
                    'recursive': True,
                    'stripacl': True
                }
            })

        with open(keysfile, 'w') as f:
            f.write(pubkey)
            f.write('\n')
        await self.middleware.call('filesystem.setperm', {
            'path': keysfile,
            'mode': '600'
        })
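A hedged sketch of driving this CRUD service from elsewhere in the middleware (values are illustrative; only fields defined in the user_create schema above are used):

async def demo(middleware):
    # group_create avoids having to pass an existing group id
    pk = await middleware.call('user.create', {
        'username': 'jdoe',
        'full_name': 'John Doe',
        'group_create': True,
        'password': 'changeme',
    })
    # free-form metadata lives in the general-purpose 'attributes' dict
    await middleware.call('user.set_attribute', pk, 'theme', 'dark')
    await middleware.call('user.delete', pk, {'delete_group': True})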
Example #7
class ZFSDatasetService(CRUDService):
    class Config:
        namespace = 'zfs.dataset'
        private = True
        process_pool = True

    def locked_datasets(self, names=None):
        query_filters = []
        if names is not None:
            names_optimized = []
            for name in sorted(names, key=len):
                if not any(
                        name.startswith(f'{existing_name}/')
                        for existing_name in names_optimized):
                    names_optimized.append(name)

            query_filters.append(['id', 'in', names_optimized])

        result = self.flatten_datasets(
            self.query(
                query_filters,
                {
                    'extra': {
                        'flat': False,  # So child datasets are also queried
                        'properties':
                        ['encryption', 'keystatus', 'mountpoint']
                    },
                }))

        post_filters = [['encrypted', '=', True]]

        try:
            about_to_lock_dataset = self.middleware.call_sync(
                'cache.get', 'about_to_lock_dataset')
        except KeyError:
            about_to_lock_dataset = None

        post_filters.append([
            'OR', [['key_loaded', '=', False]] +
            ([['id', '=', about_to_lock_dataset],
              ['id', '^', f'{about_to_lock_dataset}/']]
             if about_to_lock_dataset else [])
        ])

        return [{
            'id': dataset['id'],
            'mountpoint': dataset['properties'].get('mountpoint', {}).get('value'),
        } for dataset in filter_list(result, post_filters)]

    def flatten_datasets(self, datasets):
        return sum(
            [[deepcopy(ds)] + self.flatten_datasets(ds.get('children') or [])
             for ds in datasets], [])

    @filterable
    def query(self, filters, options):
        """
        In `query-options` we can provide `extra` arguments which control which data should be retrieved
        for a dataset.

        `query-options.extra.snapshots` is a boolean which when set will retrieve snapshots for the dataset in question
        by adding a snapshots key to the dataset data.

        `query-options.extra.retrieve_children` is a boolean set to true by default. When set to true, will retrieve
        all children datasets which can cause a performance penalty. When set to false, will not retrieve children
        datasets which does not incur the performance penalty.

        `query-options.extra.properties` is a list of properties which should be retrieved. If null ( by default ),
        it would retrieve all properties, if empty, it will retrieve no property.

        We provide 2 ways how zfs.dataset.query returns dataset's data. First is a flat structure ( default ), which
        means that all the datasets in the system are returned as separate objects which also contain all the data
        their is for their children. This retrieval type is slightly slower because of duplicates which exist in
        each object.
        Second type is hierarchical where only top level datasets are returned in the list and they contain all the
        children there are for them in `children` key. This retrieval type is slightly faster.
        These options are controlled by `query-options.extra.flat` attribute which defaults to true.

        `query-options.extra.user_properties` controls if user defined properties of datasets should be retrieved
        or not.

        While we provide a way to exclude all properties from data retrieval, we introduce a single attribute
        `query-options.extra.retrieve_properties` which if set to false will make sure that no property is retrieved
        whatsoever and overrides any other property retrieval attribute.
        """
        options = options or {}
        extra = options.get('extra', {}).copy()
        props = extra.get('properties', None)
        flat = extra.get('flat', True)
        user_properties = extra.get('user_properties', True)
        retrieve_properties = extra.get('retrieve_properties', True)
        retrieve_children = extra.get('retrieve_children', True)
        snapshots = extra.get('snapshots')
        snapshots_recursive = extra.get('snapshots_recursive')
        snapshots_properties = extra.get('snapshots_properties', [])
        if not retrieve_properties:
            # This is a short hand version where consumer can specify that they don't want any property to
            # be retrieved
            user_properties = False
            props = []

        with libzfs.ZFS() as zfs:
            # Handle the `id` filter specially to avoid getting all datasets
            kwargs = dict(props=props,
                          user_props=user_properties,
                          snapshots=snapshots,
                          retrieve_children=retrieve_children,
                          snapshots_recursive=snapshots_recursive,
                          snapshot_props=snapshots_properties)
            if filters and filters[0][0] == 'id':
                if filters[0][1] == '=':
                    kwargs['datasets'] = [filters[0][2]]
                if filters[0][1] == 'in':
                    kwargs['datasets'] = filters[0][2]

            datasets = zfs.datasets_serialized(**kwargs)
            if flat:
                datasets = self.flatten_datasets(datasets)
            else:
                datasets = list(datasets)

        return filter_list(datasets, filters, options)
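    # Illustrative query (assumed call site elsewhere in the middleware):
    # fetch only encryption-related properties, hierarchically:
    #
    #     middleware.call_sync('zfs.dataset.query', [], {
    #         'extra': {
    #             'flat': False,   # hierarchical output
    #             'properties': ['encryption', 'keystatus'],
    #         },
    #     })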

    def query_for_quota_alert(self):
        return [{
            k: v
            for k, v in dataset['properties'].items() if k in [
                "name", "quota", "available", "refquota", "usedbydataset",
                "mounted", "mountpoint", "org.freenas:quota_warning",
                "org.freenas:quota_critical", "org.freenas:refquota_warning",
                "org.freenas:refquota_critical"
            ]
        } for dataset in self.query()]

    def common_load_dataset_checks(self, ds):
        self.common_encryption_checks(ds)
        if ds.key_loaded:
            raise CallError(f'{ds.name} key is already loaded')

    def common_encryption_checks(self, ds):
        if not ds.encrypted:
            raise CallError(f'{ds.name} is not encrypted')

    def path_to_dataset(self, path):
        with libzfs.ZFS() as zfs:
            try:
                zh = zfs.get_dataset_by_path(path)
                ds_name = zh.name
            except libzfs.ZFSException:
                ds_name = None

        return ds_name

    def get_quota(self, ds, quota_type):
        if quota_type == 'dataset':
            dataset = self.query([('id', '=', ds)], {'get': True})
            return [{
                'quota_type': 'DATASET',
                'id': ds,
                'name': ds,
                'quota': int(dataset['properties']['quota']['rawvalue']),
                'refquota': int(dataset['properties']['refquota']['rawvalue']),
                'used_bytes': int(dataset['properties']['used']['rawvalue']),
            }]

        quota_list = []
        quota_get = subprocess.run(
            [
                'zfs', f'{quota_type}space', '-H', '-n', '-p', '-o',
                'name,used,quota,objquota,objused', ds
            ],
            capture_output=True,
            check=False,
        )
        if quota_get.returncode != 0:
            raise CallError(
                f'Failed to get {quota_type} quota for {ds}: [{quota_get.stderr.decode()}]'
            )

        for quota in quota_get.stdout.decode().splitlines():
            m = quota.split('\t')
            if len(m) != 5:
                self.logger.debug('Invalid %s quota: %s', quota_type.lower(),
                                  quota)
                continue

            entry = {
                'quota_type': quota_type.upper(),
                'id': int(m[0]),
                'name': None,
                'quota': int(m[2]),
                'used_bytes': int(m[1]),
                'used_percent': 0,
                'obj_quota': int(m[3]) if m[3] != '-' else 0,
                'obj_used': int(m[4]) if m[4] != '-' else 0,
                'obj_used_percent': 0,
            }
            if entry['quota'] > 0:
                entry['used_percent'] = entry['used_bytes'] / entry['quota'] * 100

            if entry['obj_quota'] > 0:
                entry['obj_used_percent'] = entry['obj_used'] / entry['obj_quota'] * 100

            try:
                if entry['quota_type'] == 'USER':
                    entry['name'] = (self.middleware.call_sync(
                        'user.get_user_obj', {'uid': entry['id']}))['pw_name']
                else:
                    entry['name'] = (self.middleware.call_sync(
                        'group.get_group_obj',
                        {'gid': entry['id']}))['gr_name']

            except Exception:
                self.logger.debug('Unable to resolve %s id %d to name',
                                  quota_type.lower(), entry['id'])

            quota_list.append(entry)

        return quota_list

    def set_quota(self, ds, quota_list):
        cmd = ['zfs', 'set']
        cmd.extend(quota_list)
        cmd.append(ds)
        quota_set = subprocess.run(cmd, capture_output=True, check=False)
        if quota_set.returncode != 0:
            raise CallError(
                f'Failed to set userspace quota on {ds}: [{quota_set.stderr.decode()}]'
            )

    @accepts(
        Str('id'),
        Dict(
            'load_key_options',
            Bool('mount', default=True),
            Bool('recursive', default=False),
            Any('key', default=None, null=True),
            Str('key_location', default=None, null=True),
        ),
    )
    def load_key(self, id, options):
        mount_ds = options.pop('mount')
        recursive = options.pop('recursive')
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_load_dataset_checks(ds)
                ds.load_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to load key for {id}', exc_info=True)
            raise CallError(f'Failed to load key for {id}: {e}')
        else:
            if mount_ds:
                self.mount(id, {'recursive': recursive})

    @accepts(Str('name'), List('params', private=True))
    @job()
    def bulk_process(self, job, name, params):
        f = getattr(self, name, None)
        if not f:
            raise CallError(f'{name} method not found in zfs.dataset')

        statuses = []
        for i in params:
            result = error = None
            try:
                result = f(*i)
            except Exception as e:
                error = str(e)
            finally:
                statuses.append({'result': result, 'error': error})

        return statuses

    @accepts(Str('id'),
             Dict(
                 'check_key',
                 Any('key', default=None, null=True),
                 Str('key_location', default=None, null=True),
             ))
    def check_key(self, id, options):
        """
        Returns `true` if the `key` is valid, `false` otherwise.
        """
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_encryption_checks(ds)
                return ds.check_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to check key for {id}', exc_info=True)
            raise CallError(f'Failed to check key for {id}: {e}')

    @accepts(Str('id'),
             Dict(
                 'unload_key_options',
                 Bool('recursive', default=False),
                 Bool('force_umount', default=False),
                 Bool('umount', default=False),
             ))
    def unload_key(self, id, options):
        force = options.pop('force_umount')
        if options.pop('umount') and self.middleware.call_sync(
                'zfs.dataset.query', [['id', '=', id]], {
                    'extra': {
                        'retrieve_children': False
                    },
                    'get': True
                })['properties'].get('mountpoint', {}).get('value',
                                                           'none') != 'none':
            self.umount(id, {'force': force})
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_encryption_checks(ds)
                if not ds.key_loaded:
                    raise CallError(f'{id}\'s key is not loaded')
                ds.unload_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to unload key for {id}', exc_info=True)
            raise CallError(f'Failed to unload key for {id}: {e}')

    @accepts(
        Str('id'),
        Dict(
            'change_key_options',
            Dict('encryption_properties', Str('keyformat'), Str('keylocation'),
                 Int('pbkdf2iters')),
            Bool('load_key', default=True),
            Any('key', default=None, null=True),
        ),
    )
    def change_key(self, id, options):
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_encryption_checks(ds)
                ds.change_key(props=options['encryption_properties'],
                              load_key=options['load_key'],
                              key=options['key'])
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to change key for {id}', exc_info=True)
            raise CallError(f'Failed to change key for {id}: {e}')

    @accepts(Str('id'),
             Dict(
                 'change_encryption_root_options',
                 Bool('load_key', default=True),
             ))
    def change_encryption_root(self, id, options):
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                ds.change_key(load_key=options['load_key'], inherit=True)
        except libzfs.ZFSException as e:
            raise CallError(f'Failed to change encryption root for {id}: {e}')

    @accepts(
        Dict(
            'dataset_create',
            Bool('create_ancestors', default=False),
            Str('name', required=True),
            Str('type', enum=['FILESYSTEM', 'VOLUME'], default='FILESYSTEM'),
            Dict(
                'properties',
                Bool('sparse'),
                additional_attrs=True,
            ),
        ))
    def do_create(self, data):
        """
        Creates a ZFS dataset.
        """

        verrors = ValidationErrors()

        if '/' not in data['name']:
            verrors.add('name', 'You need a full name, e.g. pool/newdataset')

        if verrors:
            raise verrors

        properties = data.get('properties') or {}
        sparse = properties.pop('sparse', False)
        params = {}

        for k, v in data['properties'].items():
            params[k] = v

        # it's important that we set xattr=sa for various
        # performance reasons related to ea handling
        # pool.dataset.create already sets this by default
        # so mirror the behavior here
        if data['type'] == 'FILESYSTEM' and 'xattr' not in params:
            params['xattr'] = 'sa'

        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(data['name'].split('/')[0])
                pool.create(
                    data['name'],
                    params,
                    fstype=getattr(libzfs.DatasetType, data['type']),
                    sparse_vol=sparse,
                    create_ancestors=data['create_ancestors'],
                )
        except libzfs.ZFSException as e:
            self.logger.error('Failed to create dataset', exc_info=True)
            raise CallError(f'Failed to create dataset: {e}')
        else:
            return data

    @accepts(
        Str('id'),
        Dict(
            'dataset_update',
            Dict(
                'properties',
                additional_attrs=True,
            ),
        ),
    )
    def do_update(self, id, data):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(id)

                if 'properties' in data:
                    properties = data['properties'].copy()
                    # Re-insert quota/refquota at the end of the dict so that,
                    # relying on insertion order, they are set last, after any
                    # reservations
                    for k in ['quota', 'refquota']:
                        if k in properties:
                            properties[k] = properties.pop(k)  # moved to the end
                    self.update_zfs_object_props(properties, dataset)

        except libzfs.ZFSException as e:
            self.logger.error('Failed to update dataset', exc_info=True)
            raise CallError(f'Failed to update dataset: {e}')
        else:
            return data

    def update_zfs_object_props(self, properties, zfs_object):
        for k, v in properties.items():
            # If prop already exists we just update it,
            # otherwise create a user property
            prop = zfs_object.properties.get(k)
            try:
                if prop:
                    if v.get('source') == 'INHERIT':
                        prop.inherit(recursive=v.get('recursive', False))
                    elif 'value' in v and (prop.value != v['value']
                                           or prop.source.name == 'INHERITED'):
                        prop.value = v['value']
                    elif 'parsed' in v and (prop.parsed != v['parsed'] or
                                            prop.source.name == 'INHERITED'):
                        prop.parsed = v['parsed']
                else:
                    if v.get('source') == 'INHERIT':
                        pass
                    else:
                        if 'value' not in v:
                            raise ValidationError(
                                'properties',
                                f'properties.{k} needs a "value" attribute')
                        if ':' not in k:
                            raise ValidationError(
                                'properties',
                                'User property needs a colon (:) in its name'
                            )
                        prop = libzfs.ZFSUserProperty(v['value'])
                        zfs_object.properties[k] = prop
            except libzfs.ZFSException as e:
                raise ZFSSetPropertyError(k, str(e))

    @accepts(Str('id'),
             Dict(
                 'options',
                 Bool('force', default=False),
                 Bool('recursive', default=False),
             ))
    def do_delete(self, id, options):
        force = options['force']
        recursive = options['recursive']

        args = []
        if force:
            args += ['-f']
        if recursive:
            args += ['-r']

        # If the dataset has a partially received state (receive_resume_token), abort the
        # receive first, or `zfs destroy` will say "cannot destroy 'pool/dataset': dataset
        # already exists"
        recv_run = subprocess.run(['zfs', 'recv', '-A', id],
                                  stdout=subprocess.DEVNULL,
                                  stderr=subprocess.DEVNULL)
        # Destroying may take a long time, lets not use py-libzfs as it will block
        # other ZFS operations.
        try:
            subprocess.run(
                ['zfs', 'destroy'] + args + [id],
                text=True,
                capture_output=True,
                check=True,
            )
        except subprocess.CalledProcessError as e:
            if recv_run.returncode == 0 and e.stderr.strip().endswith(
                    'dataset does not exist'):
                # This operation might have deleted this dataset if it was created by `zfs recv` operation
                return
            self.logger.error('Failed to delete dataset', exc_info=True)
            error = e.stderr.strip()
            errno_ = errno.EFAULT
            if "Device busy" in error or "dataset is busy" in error:
                errno_ = errno.EBUSY
            raise CallError(f'Failed to delete dataset: {error}', errno_)
        return True
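    # With both options set, this shells out to e.g.
    #   zfs destroy -f -r tank/mydataset
    # ('tank/mydataset' being an illustrative dataset id).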

    @accepts(Str('name'), Dict('options', Bool('recursive', default=False)))
    def mount(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                if options['recursive']:
                    dataset.mount_recursive()
                else:
                    dataset.mount()
        except libzfs.ZFSException as e:
            self.logger.error('Failed to mount dataset', exc_info=True)
            raise CallError(f'Failed to mount dataset: {e}')

    @accepts(Str('name'), Dict('options', Bool('force', default=False)))
    def umount(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.umount(force=options['force'])
        except libzfs.ZFSException as e:
            self.logger.error('Failed to umount dataset', exc_info=True)
            raise CallError(f'Failed to umount dataset: {e}')

    @accepts(Str('name'),
             Dict('options', Str('new_name', required=True, empty=False),
                  Bool('recursive', default=False)))
    def rename(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.rename(options['new_name'],
                               recursive=options['recursive'])
        except libzfs.ZFSException as e:
            self.logger.error('Failed to rename dataset', exc_info=True)
            raise CallError(f'Failed to rename dataset: {e}')

    def promote(self, name):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.promote()
        except libzfs.ZFSException as e:
            self.logger.error('Failed to promote dataset', exc_info=True)
            raise CallError(f'Failed to promote dataset: {e}')

    def inherit(self, name, prop, recursive=False):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                zprop = dataset.properties.get(prop)
                if not zprop:
                    raise CallError(f'Property {prop!r} not found.',
                                    errno.ENOENT)
                zprop.inherit(recursive=recursive)
        except libzfs.ZFSException as e:
            raise CallError(str(e))
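    # A hedged usage sketch for the methods above, assuming the service is
    # exposed through the middleware under a 'zfs.dataset' namespace (the
    # namespace and dataset names are illustrative):
    #
    #   middleware.call('zfs.dataset.mount', 'tank/test', {'recursive': True})
    #   middleware.call('zfs.dataset.rename', 'tank/test',
    #                   {'new_name': 'tank/demo', 'recursive': False})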
Example #8
class UserService(CRUDService):
    class Config:
        datastore = 'account.bsdusers'
        datastore_extend = 'user.user_extend'
        datastore_prefix = 'bsdusr_'

    @private
    async def user_extend(self, user):

        # Get group membership
        user['groups'] = [
            gm['group']['id'] for gm in await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership', [(
                    'user', '=', user['id'])], {'prefix': 'bsdgrpmember_'})
        ]

        # Get authorized keys
        keysfile = f'{user["home"]}/.ssh/authorized_keys'
        user['sshpubkey'] = None
        if os.path.exists(keysfile):
            try:
                with open(keysfile, 'r') as f:
                    user['sshpubkey'] = f.read()
            except Exception:
                pass
        return user

    @accepts(
        Dict(
            'user_create',
            Int('uid'),
            Str('username', required=True),
            Int('group'),
            Bool('group_create', default=False),
            Str('home', default='/nonexistent'),
            Str('home_mode', default='755'),
            Str('shell', default='/bin/csh'),
            Str('full_name', required=True),
            Str('email', validators=[Email()], null=True, default=None),
            Str('password', private=True),
            Bool('password_disabled', default=False),
            Bool('locked', default=False),
            Bool('microsoft_account', default=False),
            Bool('sudo', default=False),
            Str('sshpubkey', null=True),
            List('groups', default=[]),
            Dict('attributes', additional_attrs=True),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a new user.

        If `uid` is not provided it is automatically filled with the next one available.

        `group` is required if `group_create` is false.

        `password` is required if `password_disabled` is false.

        Available choices for `shell` can be retrieved with `user.shell_choices`.

        `attributes` is a general-purpose object for storing arbitrary user information.
        """
        verrors = ValidationErrors()

        if (not data.get('group') and not data.get('group_create')) or (
                data.get('group') is not None and data.get('group_create')):
            verrors.add(
                'user_create.group',
                'Enter either a group name or create a new group to '
                'continue.', errno.EINVAL)

        await self.__common_validation(verrors, data, 'user_create')

        if data.get('sshpubkey') and not data['home'].startswith('/mnt'):
            verrors.add(
                'user_create.sshpubkey',
                'The home directory is not writable. Leave this field blank.')

        verrors.check()

        groups = data.pop('groups')
        create = data.pop('group_create')

        if create:
            group = await self.middleware.call(
                'group.query', [('group', '=', data['username'])])
            if group:
                group = group[0]
            else:
                group = await self.middleware.call('group.create',
                                                   {'name': data['username']})
                group = (await self.middleware.call('group.query',
                                                    [('id', '=', group)]))[0]
            data['group'] = group['id']
        else:
            group = await self.middleware.call('group.query',
                                               [('id', '=', data['group'])])
            if not group:
                raise CallError(f'Group {data["group"]} not found')
            group = group[0]

        # Is this a new directory or not? Let's not nuke existing directories,
        # e.g. /, /root, /mnt/tank/my-dataset, etc ;).
        new_homedir = False
        home_mode = data.pop('home_mode')
        if data['home'] and data['home'] != '/nonexistent':
            try:
                try:
                    os.makedirs(data['home'], mode=int(home_mode, 8))
                    new_homedir = True
                    os.chown(data['home'], data['uid'], group['gid'])
                except FileExistsError:
                    if not os.path.isdir(data['home']):
                        raise CallError(
                            'Path for home directory already '
                            'exists and is not a directory', errno.EEXIST)

                    # If it exists, ensure the user is owner
                    os.chown(data['home'], data['uid'], group['gid'])
                except OSError as oe:
                    raise CallError('Failed to create the home directory '
                                    f'({data["home"]}) for user: {oe}')
                if os.stat(data['home']).st_dev == os.stat('/mnt').st_dev:
                    raise CallError(
                        f'The path for the home directory ({data["home"]}) '
                        'must include a volume or dataset.')
            except Exception:
                if new_homedir:
                    shutil.rmtree(data['home'])
                raise

        if not data.get('uid'):
            data['uid'] = await self.get_next_uid()

        pk = None  # Make sure pk exists to rollback in case of an error
        try:
            password = await self.__set_password(data)
            sshpubkey = data.pop('sshpubkey',
                                 None)  # datastore does not have sshpubkey

            pk = await self.middleware.call('datastore.insert',
                                            'account.bsdusers', data,
                                            {'prefix': 'bsdusr_'})

            await self.__set_groups(pk, groups)

        except Exception:
            if pk is not None:
                await self.middleware.call('datastore.delete',
                                           'account.bsdusers', pk)
            if new_homedir:
                # Be as atomic as possible when creating the user if
                # commands failed to execute cleanly.
                shutil.rmtree(data['home'])
            raise

        await self.middleware.call('service.reload', 'user')

        await self.__set_smbpasswd(data['username'], password)

        if os.path.exists(data['home']):
            for f in os.listdir(SKEL_PATH):
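                # Skeleton files are typically named like "dot.cshrc";
                # stripping the leading "dot" installs them as ".cshrc"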
                if f.startswith('dot'):
                    dest_file = os.path.join(data['home'], f[3:])
                else:
                    dest_file = os.path.join(data['home'], f)
                if not os.path.exists(dest_file):
                    shutil.copyfile(os.path.join(SKEL_PATH, f), dest_file)
                    os.chown(dest_file, data['uid'], group['gid'])

            data['sshpubkey'] = sshpubkey
            try:
                await self.__update_sshpubkey(data['home'], data,
                                              group['group'])
            except PermissionError as e:
                self.logger.warn('Failed to update authorized keys',
                                 exc_info=True)
                raise CallError(f'Failed to update authorized keys: {e}')

        return pk
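    # A hedged example payload for `do_create`, using only fields from the
    # schema above (all values are illustrative):
    #
    #   {
    #       'username': 'jdoe',
    #       'full_name': 'Jane Doe',
    #       'group_create': True,
    #       'password': 'changeme',
    #       'home': '/mnt/tank/home/jdoe',
    #       'home_mode': '700',
    #       'shell': '/bin/csh',
    #   }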

    @accepts(
        Int('id'),
        Patch(
            'user_create',
            'user_update',
            ('attr', {
                'update': True
            }),
            ('rm', {
                'name': 'group_create'
            }),
        ),
    )
    async def do_update(self, pk, data):

        user = await self._get_instance(pk)

        verrors = ValidationErrors()

        if 'group' in data:
            group = await self.middleware.call('datastore.query',
                                               'account.bsdgroups',
                                               [('id', '=', data['group'])])
            if not group:
                verrors.add('user_update.group',
                            f'Group {data["group"]} not found', errno.ENOENT)
            group = group[0]
        else:
            group = user['group']
            user['group'] = group['id']

        await self.__common_validation(verrors, data, 'user_update', pk=pk)

        home = data.get('home') or user['home']
        # root user (uid 0) is an exception to the rule
        if data.get('sshpubkey'
                    ) and not home.startswith('/mnt') and user['uid'] != 0:
            verrors.add('user_update.sshpubkey',
                        'Home directory is not writable, leave this blank')

        # Do not allow attributes to be changed for builtin user
        if user['builtin']:
            for i in ('group', 'home', 'home_mode', 'uid', 'username'):
                if i in data:
                    verrors.add(f'user_update.{i}',
                                'This attribute cannot be changed')

        verrors.check()

        # Copy the home directory if it changed
        if ('home' in data
                and data['home'] not in (user['home'], '/nonexistent')
                and not data['home'].startswith(f'{user["home"]}/')):
            home_copy = True
            home_old = user['home']
        else:
            home_copy = False

        # After this point user dict has values from data
        user.update(data)

        if home_copy and not os.path.isdir(user['home']):
            try:
                os.makedirs(user['home'])
                os.chown(user['home'], user['uid'], group['bsdgrp_gid'])
            except OSError:
                self.logger.warn('Failed to chown homedir', exc_info=True)
            if not os.path.isdir(user['home']):
                raise CallError(f'{user["home"]} is not a directory')

        home_mode = user.pop('home_mode', None)
        if user['builtin']:
            home_mode = None

        def set_home_mode():
            if home_mode is not None:
                try:
                    os.chmod(user['home'], int(home_mode, 8))
                except OSError:
                    self.logger.warn('Failed to set homedir mode',
                                     exc_info=True)

        try:
            await self.__update_sshpubkey(
                home_old if home_copy else user['home'],
                user,
                group['bsdgrp_group'],
            )
        except PermissionError as e:
            self.logger.warn('Failed to update authorized keys', exc_info=True)
            raise CallError(f'Failed to update authorized keys: {e}')

        if home_copy:

            def do_home_copy():
                try:
                    subprocess.run(
                        f"/usr/bin/su - {user['username']} -c '/bin/cp -a {home_old}/ {user['home']}/'",
                        shell=True,
                        check=True)
                except subprocess.CalledProcessError as e:
                    self.logger.warn(f"Failed to copy homedir: {e}")
                set_home_mode()

            asyncio.ensure_future(self.middleware.run_in_thread(do_home_copy))
        else:
            set_home_mode()

        user.pop('sshpubkey', None)
        password = await self.__set_password(user)

        if 'groups' in user:
            groups = user.pop('groups')
            await self.__set_groups(pk, groups)

        await self.middleware.call('datastore.update', 'account.bsdusers', pk,
                                   user, {'prefix': 'bsdusr_'})

        await self.middleware.call('service.reload', 'user')

        await self.__set_smbpasswd(user['username'], password)

        return pk

    @accepts(Int('id'), Dict('options', Bool('delete_group', default=True)))
    async def do_delete(self, pk, options=None):
        """
        Delete user `id`.

        The `delete_group` option deletes the user primary group if it is not being used by
        any other user.
        """

        user = await self._get_instance(pk)

        if user['builtin']:
            raise CallError('Cannot delete a built-in user', errno.EINVAL)

        if options['delete_group'] and not user['group']['bsdgrp_builtin']:
            count = await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership',
                [('group', '=', user['group']['id'])], {
                    'prefix': 'bsdgrpmember_',
                    'count': True
                })
            count2 = await self.middleware.call(
                'datastore.query', 'account.bsdusers',
                [('group', '=', user['group']['id']), ('id', '!=', pk)], {
                    'prefix': 'bsdusr_',
                    'count': True
                })
            if count == 0 and count2 == 0:
                try:
                    await self.middleware.call('group.delete',
                                               user['group']['id'])
                except Exception:
                    self.logger.warn(
                        f'Failed to delete primary group of {user["username"]}',
                        exc_info=True)

        await run('smbpasswd', '-x', user['username'], check=False)

        # TODO: add a hook in CIFS service
        cifs = await self.middleware.call('datastore.query', 'services.cifs',
                                          [], {'prefix': 'cifs_srv_'})
        if cifs:
            cifs = cifs[0]
            if cifs['guest'] == user['username']:
                await self.middleware.call('datastore.update', 'services.cifs',
                                           cifs['id'], {'guest': 'nobody'},
                                           {'prefix': 'cifs_srv_'})

        await self.middleware.call('datastore.delete', 'account.bsdusers', pk)
        await self.middleware.call('service.reload', 'user')

        return pk

    @accepts()
    def shell_choices(self):
        """
        Return the available shell choices to be used in `user.create` and `user.update`.
        """
        with open('/etc/shells', 'r') as f:
            shells = [x.rstrip() for x in f.readlines() if x.startswith('/')]
        return {
            shell: os.path.basename(shell)
            for shell in shells + ['/usr/sbin/nologin']
        }

    @item_method
    @accepts(
        Int('id'),
        Str('key'),
        Any('value'),
    )
    async def set_attribute(self, pk, key, value):
        """
        Set user general purpose `attributes` dictionary `key` to `value`.

        e.g. Setting key="foo" value="bar" will result in {"attributes": {"foo": "bar"}}
        """
        user = await self._get_instance(pk)

        user['attributes'][key] = value

        await self.middleware.call('datastore.update', 'account.bsdusers', pk,
                                   {'attributes': user['attributes']},
                                   {'prefix': 'bsdusr_'})

        return True

    @item_method
    @accepts(
        Int('id'),
        Str('key'),
    )
    async def pop_attribute(self, pk, key):
        """
        Remove user general purpose `attributes` dictionary `key`.
        """
        user = await self._get_instance(pk)

        if key in user['attributes']:
            user['attributes'].pop(key)

            await self.middleware.call('datastore.update', 'account.bsdusers',
                                       pk, {'attributes': user['attributes']},
                                       {'prefix': 'bsdusr_'})
            return True
        else:
            return False

    @accepts()
    async def get_next_uid(self):
        """
        Get the next available/free uid.
        """
        last_uid = 999
        for i in await self.middleware.call('datastore.query',
                                            'account.bsdusers',
                                            [('builtin', '=', False)], {
                                                'order_by': ['uid'],
                                                'prefix': 'bsdusr_'
                                            }):
            # If the difference between the last uid and the current one is
            # bigger than 1, it means we have a gap and can use it.
            if i['uid'] - last_uid > 1:
                return last_uid + 1
            last_uid = i['uid']
        return last_uid + 1
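    # e.g. with non-builtin uids [1000, 1001, 1005] the loop walks
    # 999 -> 1000 -> 1001, detects the gap before 1005 and returns 1002;
    # with no non-builtin users at all it returns 1000.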

    @no_auth_required
    @accepts()
    async def has_root_password(self):
        """
        Return whether the root user has a valid password set.

        This is used when the system is installed without a password and must be set on
        first use/login.
        """
        return (await self.middleware.call(
            'datastore.query', 'account.bsdusers', [
                ('bsdusr_username', '=', 'root')
            ], {'get': True}))['bsdusr_unixhash'] != '*'
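    # (A stored unixhash of '*' is the sentinel written by __set_password
    # when no password is set, so comparing against it answers the question.)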

    @no_auth_required
    @accepts(Str('password'))
    @pass_app
    async def set_root_password(self, app, password):
        """
        Set password for root user if it is not already set.
        """
        if not app.authenticated and await self.middleware.call(
                'user.has_root_password'):
            raise CallError(
                'You cannot call this method anonymously if root already has a password',
                errno.EACCES)

        root = await self.middleware.call('user.query',
                                          [('username', '=', 'root')],
                                          {'get': True})
        await self.middleware.call('user.update', root['id'],
                                   {'password': password})

    async def __common_validation(self, verrors, data, schema, pk=None):

        exclude_filter = [('id', '!=', pk)] if pk else []

        if 'username' in data:
            pw_checkname(verrors, f'{schema}.username', data['username'])

            if await self.middleware.call(
                    'datastore.query', 'account.bsdusers',
                [('username', '=', data['username'])] + exclude_filter,
                {'prefix': 'bsdusr_'}):
                verrors.add(
                    f'{schema}.username',
                    f'The username "{data["username"]}" already exists.',
                    errno.EEXIST)

        password = data.get('password')
        if password and '?' in password:
            # See bug #4098
            verrors.add(
                f'{schema}.password',
                'An SMB issue prevents creating passwords containing a '
                'question mark (?).', errno.EINVAL)
        elif not pk and not password and not data.get('password_disabled'):
            verrors.add(f'{schema}.password', 'Password is required')
        elif data.get('password_disabled') and password:
            verrors.add(
                f'{schema}.password_disabled',
                'Leave "Password" blank when "Disable password login" is checked.'
            )

        if 'home' in data:
            if ':' in data['home']:
                verrors.add(f'{schema}.home',
                            '"Home Directory" cannot contain colons (:).')
            if not data['home'].startswith(
                    '/mnt/') and data['home'] != '/nonexistent':
                verrors.add(
                    f'{schema}.home',
                    '"Home Directory" must begin with /mnt/ or set to '
                    '/nonexistent.')

        if 'home_mode' in data:
            try:
                o = int(data['home_mode'], 8)
                assert o & 0o777 == o
            except (AssertionError, ValueError, TypeError):
                verrors.add(
                    f'{schema}.home_mode',
                    'Please provide a valid value for home_mode attribute')

        if 'groups' in data:
            groups = data.get('groups') or []
            if groups and len(groups) > 64:
                verrors.add(
                    f'{schema}.groups',
                    'A user cannot belong to more than 64 auxiliary groups.')

        if 'full_name' in data and ':' in data['full_name']:
            verrors.add(f'{schema}.full_name',
                        'The ":" character is not allowed in a "Full Name".')

    async def __set_password(self, data):
        if 'password' not in data:
            return
        password = data.pop('password')
        if password:
            data['unixhash'] = crypted_password(password)
            # See http://samba.org.ru/samba/docs/man/manpages/smbpasswd.5.html
            data[
                'smbhash'] = f'{data["username"]}:{data["uid"]}:{"X" * 32}:{nt_password(password)}:[U          ]:LCT-{int(time.time()):X}:'
        else:
            data['unixhash'] = '*'
            data['smbhash'] = '*'
        return password

    async def __set_smbpasswd(self, username, password):
        """
        Currently the way we set samba passwords is using smbpasswd
        and that can only happen after the user exists in master.passwd.
        That is the reason we have two methods/steps to set password.
        """
        if not password:
            return
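        # smbpasswd -s reads the new password twice (entry + confirmation)
        # from stdin, hence the doubled password below.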
        proc = await Popen(['smbpasswd', '-D', '0', '-s', '-a', username],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           stdin=subprocess.PIPE)
        await proc.communicate(input=f'{password}\n{password}\n'.encode())

    async def __set_groups(self, pk, groups):

        groups = set(groups)
        existing_ids = set()
        for gm in await self.middleware.call('datastore.query',
                                             'account.bsdgroupmembership',
                                             [('user', '=', pk)],
                                             {'prefix': 'bsdgrpmember_'}):
            if gm['id'] not in groups:
                await self.middleware.call('datastore.delete',
                                           'account.bsdgroupmembership',
                                           gm['id'])
            else:
                existing_ids.add(gm['id'])

        for _id in groups - existing_ids:
            group = await self.middleware.call('datastore.query',
                                               'account.bsdgroups',
                                               [('id', '=', _id)],
                                               {'prefix': 'bsdgrp_'})
            if not group:
                raise CallError(f'Group {_id} not found', errno.ENOENT)
            await self.middleware.call('datastore.insert',
                                       'account.bsdgroupmembership', {
                                           'group': _id,
                                           'user': pk
                                       }, {'prefix': 'bsdgrpmember_'})

    async def __update_sshpubkey(self, homedir, user, group):
        if 'sshpubkey' not in user:
            return
        if not os.path.isdir(homedir):
            return

        sshpath = f'{homedir}/.ssh'
        keysfile = f'{sshpath}/authorized_keys'

        pubkey = user.get('sshpubkey') or ''
        pubkey = pubkey.strip()
        if pubkey == '':
            try:
                os.unlink(keysfile)
            except OSError:
                pass
            return

        oldpubkey = ''
        try:
            with open(keysfile, 'r') as f:
                oldpubkey = f.read().strip()
        except Exception:
            pass

        if pubkey == oldpubkey:
            return

        if not os.path.isdir(sshpath):
            os.mkdir(sshpath, mode=0o700)
        if not os.path.isdir(sshpath):
            raise CallError(f'{sshpath} is not a directory')
        with open(keysfile, 'w') as f:
            f.write(pubkey)
            f.write('\n')
        os.chmod(keysfile, 0o600)
        await run('/usr/sbin/chown',
                  '-R',
                  f'{user["username"]}:{group}',
                  sshpath,
                  check=False)
Example #9
class DatastoreService(Service):
    class Config:
        private = True

    def _filters_to_queryset(self, filters, field_prefix=None):
        opmap = {
            '=': 'exact',
            '!=': 'exact',
            '>': 'gt',
            '>=': 'gte',
            '<': 'lt',
            '<=': 'lte',
            '~': 'regex',
            'in': 'in',
            'nin': 'in',
        }

        rv = []
        for f in filters:
            if not isinstance(f, (list, tuple)):
                raise ValueError('Filter must be a list: {0}'.format(f))
            if len(f) == 3:
                name, op, value = f
                # id is special
                if field_prefix and name != 'id':
                    name = field_prefix + name
                if op not in opmap:
                    raise Exception("Invalid operation: {0}".format(op))
                q = Q(**{'{0}__{1}'.format(name, opmap[op]): value})
                if op in ('!=', 'nin'):
                    q.negate()
                rv.append(q)
            elif len(f) == 2:
                op, value = f
                if op == 'OR':
                    or_value = None
                    for value in self._filters_to_queryset(
                            value, field_prefix=field_prefix):
                        if or_value is None:
                            or_value = value
                        else:
                            or_value |= value
                    rv.append(or_value)
                else:
                    raise ValueError('Invalid operation: {0}'.format(op))
            else:
                raise Exception("Invalid filter {0}".format(f))
        return rv
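    # Examples of the translation performed above (illustrative):
    #   ['uid', '>=', 1000]        -> Q(uid__gte=1000)
    #   ['username', '!=', 'root'] -> ~Q(username__exact='root')
    #   ['OR', [['uid', '=', 0], ['username', '=', 'root']]]
    #                              -> Q(uid__exact=0) | Q(username__exact='root')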

    def __get_model(self, name):
        """Helper method to get Model for given name
        e.g. network.interfaces -> Interfaces
        """
        app, model = name.split('.', 1)
        return apps.get_model(app, model)

    def __queryset_serialize(self, qs, extend=None, field_prefix=None):
        for i in qs:
            yield django_modelobj_serialize(self.middleware,
                                            i,
                                            extend=extend,
                                            field_prefix=field_prefix)

    @accepts(
        Str('name'),
        List('query-filters', register=True),
        Dict(
            'query-options',
            Str('extend'),
            Dict('extra', additional_attrs=True),
            List('order_by'),
            Bool('count'),
            Bool('get'),
            Str('prefix'),
            register=True,
        ),
    )
    def query(self, name, filters=None, options=None):
        """Query for items in a given collection `name`.

        `filters` is a list in which each entry can be in one of the following formats:

            entry: simple_filter | conjunction
            simple_filter: '[' attribute_name, OPERATOR, value ']'
            conjunction: '[' CONJUNCTION, '[' simple_filter (',' simple_filter)* ']]'

            OPERATOR: ('=' | '!=' | '>' | '>=' | '<' | '<=' | '~' | 'in' | 'nin')
            CONJUNCTION: 'OR'

        e.g.

        `['OR', [ ['username', '=', 'root' ], ['uid', '=', 0] ] ]`

        `[ ['username', '=', 'root' ] ]`

        .. examples(websocket)::

          Querying for username "root" and returning a single item:

            :::javascript
            {
              "id": "d51da71b-bb48-4b8b-a8f7-6046fcc892b4",
              "msg": "method",
              "method": "datastore.query",
              "params": ["account.bsdusers", [ ["username", "=", "root" ] ], {"get": true}]
            }
        """
        model = self.__get_model(name)
        if options is None:
            options = {}
        else:
            # We do not want to make changes to original options
            # which might happen with "prefix"
            options = options.copy()

        qs = model.objects.all()

        extra = options.get('extra')
        if extra:
            qs = qs.extra(**extra)

        prefix = options.get('prefix')

        if filters:
            qs = qs.filter(*self._filters_to_queryset(filters, prefix))

        order_by = options.get('order_by')
        if order_by:
            if prefix:
                # Do not change original order_by
                order_by = order_by[:]
                for i, order in enumerate(order_by):
                    if order.startswith('-'):
                        order_by[i] = '-' + prefix + order[1:]
                    else:
                        order_by[i] = prefix + order
            qs = qs.order_by(*order_by)

        if options.get('count') is True:
            return qs.count()

        result = []
        for i in self.__queryset_serialize(qs,
                                           extend=options.get('extend'),
                                           field_prefix=options.get('prefix')):
            result.append(i)

        if options.get('get') is True:
            return result[0]

        return result

    @accepts(Str('name'), Ref('query-options'))
    def config(self, name, options=None):
        """
        Get configuration settings object for a given `name`.

        This is a shortcut for `query(name, {"get": true})`.
        """
        if options is None:
            options = {}
        options['get'] = True
        return self.query(name, None, options)

    @accepts(Str('name'), Dict('data', additional_attrs=True),
             Dict('options', Str('prefix')))
    def insert(self, name, data, options=None):
        """
        Insert a new entry to `name`.
        """
        data = data.copy()
        many_to_many_fields_data = {}
        options = options or {}
        prefix = options.get('prefix')
        model = self.__get_model(name)
        for field in chain(model._meta.fields, model._meta.many_to_many):
            if prefix:
                name = field.name.replace(prefix, '')
            else:
                name = field.name
            if name not in data:
                continue
            if isinstance(field, ForeignKey) and data[name] is not None:
                data[name] = field.rel.to.objects.get(pk=data[name])
            if isinstance(field, ManyToManyField):
                many_to_many_fields_data[field.name] = data.pop(name)
            else:

                # field.name is with prefix (if there's one) - we update data dict accordingly with db field names
                data[field.name] = data.pop(name)

        obj = model(**data)
        obj.save()

        for k, v in list(many_to_many_fields_data.items()):
            field = getattr(obj, k)
            field.add(*v)

        return obj.pk
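    # Illustrative call, following the prefix convention used elsewhere in
    # this document (model and field names are assumptions):
    #   insert('account.bsdgroups', {'group': 'staff', 'gid': 2001},
    #          {'prefix': 'bsdgrp_'})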

    @accepts(Str('name'), Any('id'), Dict('data', additional_attrs=True),
             Dict('options', Str('prefix')))
    def update(self, name, id, data, options=None):
        """
        Update an entry `id` in `name`.
        """
        data = data.copy()
        many_to_many_fields_data = {}
        options = options or {}
        prefix = options.get('prefix')
        model = self.__get_model(name)
        obj = model.objects.get(pk=id)
        for field in chain(model._meta.fields, model._meta.many_to_many):
            if prefix:
                name = field.name.replace(prefix, '')
            else:
                name = field.name
            if name not in data:
                continue
            if isinstance(field, ForeignKey):
                data[name] = field.rel.to.objects.get(
                    pk=data[name]) if data[name] is not None else None
            if isinstance(field, ManyToManyField):
                many_to_many_fields_data[field.name] = data.pop(name)
            else:
                setattr(obj, field.name, data.pop(name))

        obj.save()

        for k, v in list(many_to_many_fields_data.items()):
            field = getattr(obj, k)
            field.clear()
            field.add(*v)

        return obj.pk

    @accepts(Str('name'), Any('id_or_filters'))
    def delete(self, name, id_or_filters):
        """
        Delete an entry `id` in `name`.
        """
        model = self.__get_model(name)
        if isinstance(id_or_filters, list):
            qs = model.objects.all()
            qs.filter(*self._filters_to_queryset(id_or_filters, None)).delete()
        else:
            model.objects.get(pk=id_or_filters).delete()
        return True
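    # Both call forms are accepted (values illustrative):
    #   delete('account.bsdusers', 42)                      # by primary key
    #   delete('account.bsdusers', [['uid', '=', 1001]])    # by a filter list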

    def sql(self, query, params=None):
        cursor = connection.cursor()
        rv = None
        try:
            if params is None:
                res = cursor.executelocal(query)
            else:
                res = cursor.executelocal(query, params)
            rv = [
                dict([(res.description[i][0], value)
                      for i, value in enumerate(row)])
                for row in cursor.fetchall()
            ]
        except OperationalError as err:
            raise CallError(err)
        finally:
            cursor.close()
        return rv

    @accepts(List('queries'))
    def restore(self, queries):
        """
        Receives a list of SQL queries (usually a database dump)
        and executes it within a transaction.
        """
        return connection.dump_recv(queries)

    @accepts()
    def dump(self):
        """
        Dumps the database, returning a list of SQL commands.
        """
        # FIXME: This could return a few hundred KB of data,
        # we need to investigate a way of doing that in chunks.
        return connection.dump()
Example #10
class AlertService(Service):
    def __init__(self, middleware):
        super().__init__(middleware)

        self.blocked_sources = defaultdict(set)
        self.sources_locks = {}

        self.blocked_failover_alerts_until = 0

    @private
    async def load(self):
        is_freenas = await self.middleware.call("system.is_freenas")

        main_sources_dir = os.path.join(get_middlewared_dir(), "alert",
                                        "source")
        sources_dirs = [
            os.path.join(overlay_dir, "alert", "source")
            for overlay_dir in self.middleware.overlay_dirs
        ]
        sources_dirs.insert(0, main_sources_dir)
        for sources_dir in sources_dirs:
            for module in load_modules(sources_dir):
                for cls in load_classes(
                        module, AlertSource,
                    (FilePresenceAlertSource, ThreadedAlertSource)):
                    source = cls(self.middleware)
                    ALERT_SOURCES[source.name] = source

        main_services_dir = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), os.path.pardir,
            "alert", "service")
        services_dirs = [
            os.path.join(overlay_dir, "alert", "service")
            for overlay_dir in self.middleware.overlay_dirs
        ]
        services_dirs.insert(0, main_services_dir)
        for services_dir in services_dirs:
            for module in load_modules(services_dir):
                for cls in load_classes(
                        module, _AlertService,
                    (ThreadedAlertService, ProThreadedAlertService)):
                    ALERT_SERVICES_FACTORIES[cls.name()] = cls

    @private
    async def initialize(self, load=True):
        is_freenas = await self.middleware.call("system.is_freenas")

        self.node = "A"
        if not is_freenas:
            if await self.middleware.call("failover.node") == "B":
                self.node = "B"

        self.alerts = []
        if load:
            for alert in await self.middleware.call("datastore.query",
                                                    "system.alert"):
                del alert["id"]

                try:
                    alert["klass"] = AlertClass.class_by_name[alert["klass"]]
                except KeyError:
                    self.logger.info("Alert class %r is no longer present",
                                     alert["klass"])
                    continue

                alert["_uuid"] = alert.pop("uuid")
                alert["_source"] = alert.pop("source")
                alert["_key"] = alert.pop("key")
                alert["_text"] = alert.pop("text")

                alert = Alert(**alert)

                if not any(a.uuid == alert.uuid for a in self.alerts):
                    self.alerts.append(alert)

        self.alert_source_last_run = defaultdict(lambda: datetime.min)

        self.policies = {
            "IMMEDIATELY": AlertPolicy(),
            "HOURLY": AlertPolicy(lambda d: (d.date(), d.hour)),
            "DAILY": AlertPolicy(lambda d: (d.date())),
            "NEVER": AlertPolicy(lambda d: None),
        }
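        # The key functions bucket alert timestamps per policy: HOURLY groups
        # by (date, hour), DAILY by date, and NEVER collapses everything to
        # None so that policy never reports changes; IMMEDIATELY relies on
        # AlertPolicy's default key.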
        for policy in self.policies.values():
            policy.receive_alerts(datetime.utcnow(), self.alerts)

    @private
    async def terminate(self):
        await self.flush_alerts()

    @accepts()
    async def list_policies(self):
        """
        List all alert policies which indicate the frequency of the alerts.
        """
        return POLICIES

    @accepts()
    async def list_categories(self):
        """
        List all types of alerts which the system can issue.
        """

        product_type = await self.middleware.call("alert.product_type")

        classes = [
            alert_class for alert_class in AlertClass.classes
            if product_type in alert_class.products
            and not alert_class.exclude_from_list
        ]

        return [{
            "id":
            alert_category.name,
            "title":
            alert_category_names[alert_category],
            "classes":
            sorted([{
                "id": alert_class.name,
                "title": alert_class.title,
                "level": alert_class.level.name,
            } for alert_class in classes
                    if alert_class.category == alert_category],
                   key=lambda klass: klass["title"])
        } for alert_category in AlertCategory if any(
            alert_class.category == alert_category for alert_class in classes)]

    @private
    async def list_sources(self):
        # TODO: this is a deprecated method for backward compatibility

        return [{
            "name": klass["id"],
            "title": klass["title"],
        } for klass in sum(
            [v["classes"] for v in await self.list_categories()], [])]

    @accepts()
    async def list(self):
        """
        List all types of alerts including active/dismissed currently in the system.
        """

        as_ = AlertSerializer(self.middleware)

        return [
            await as_.serialize(alert) for alert in sorted(
                self.alerts,
                key=lambda alert: (alert.klass.title, alert.datetime))
        ]

    @private
    async def node_map(self):
        nodes = {
            'A': 'Controller A',
            'B': 'Controller B',
        }
        if not await self.middleware.call('system.is_freenas'
                                          ) and await self.middleware.call(
                                              'failover.licensed'):
            node = await self.middleware.call('failover.node')
            status = await self.middleware.call('failover.status')
            if status == 'MASTER':
                if node == 'A':
                    nodes = {
                        'A': 'Active Controller (A)',
                        'B': 'Standby Controller (B)',
                    }
                else:
                    nodes = {
                        'A': 'Standby Controller (A)',
                        'B': 'Active Controller (B)',
                    }
            else:
                nodes[node] = f'{status.title()} Controller ({node})'

        return nodes

    def __alert_by_uuid(self, uuid):
        try:
            return [a for a in self.alerts if a.uuid == uuid][0]
        except IndexError:
            return None

    @accepts(Str("uuid"))
    async def dismiss(self, uuid):
        """
        Dismiss the alert identified by `uuid`.
        """

        alert = self.__alert_by_uuid(uuid)
        if alert is None:
            return

        if issubclass(alert.klass, DismissableAlertClass):
            related_alerts, unrelated_alerts = bisect(
                lambda a: (a.node, a.klass) == (alert.node, alert.klass),
                self.alerts)
            left_alerts = await alert.klass(self.middleware
                                            ).dismiss(related_alerts, alert)
            for deleted_alert in related_alerts:
                if deleted_alert not in left_alerts:
                    self._delete_on_dismiss(deleted_alert)
        elif issubclass(
                alert.klass,
                OneShotAlertClass) and not alert.klass.deleted_automatically:
            self._delete_on_dismiss(alert)
        else:
            alert.dismissed = True
            await self._send_alert_changed_event(alert)

    def _delete_on_dismiss(self, alert):
        self.alerts.remove(alert)

        for policy in self.policies.values():
            policy.last_key_value_alerts.pop(alert.uuid, None)

        self._send_alert_deleted_event(alert)

    @accepts(Str("uuid"))
    async def restore(self, uuid):
        """
        Restore the alert identified by `uuid` which had been dismissed.
        """

        alert = self.__alert_by_uuid(uuid)
        if alert is None:
            return

        alert.dismissed = False

        await self._send_alert_changed_event(alert)

    async def _send_alert_changed_event(self, alert):
        as_ = AlertSerializer(self.middleware)
        self.middleware.send_event('alert.list',
                                   'CHANGED',
                                   id=alert.uuid,
                                   fields=await as_.serialize(alert))

    def _send_alert_deleted_event(self, alert):
        self.middleware.send_event('alert.list',
                                   'CHANGED',
                                   id=alert.uuid,
                                   cleared=True)

    @periodic(60)
    @private
    @job(lock="process_alerts", transient=True, lock_queue_size=1)
    async def process_alerts(self, job):
        if not await self.__should_run_or_send_alerts():
            return

        valid_alerts = copy.deepcopy(self.alerts)
        await self.__run_alerts()

        self.__expire_alerts()

        if not await self.__should_run_or_send_alerts():
            self.alerts = valid_alerts
            return

        await self.middleware.call("alert.send_alerts")

    @private
    @job(lock="process_alerts", transient=True)
    async def send_alerts(self, job):
        classes = (await
                   self.middleware.call("alertclasses.config"))["classes"]

        now = datetime.utcnow()
        for policy_name, policy in self.policies.items():
            gone_alerts, new_alerts = policy.receive_alerts(now, self.alerts)

            for alert_service_desc in await self.middleware.call(
                    "datastore.query", "system.alertservice",
                [["enabled", "=", True]]):
                service_level = AlertLevel[alert_service_desc["level"]]

                service_alerts = [
                    alert for alert in self.alerts
                    if (get_alert_level(alert, classes).value >= service_level.
                        value and get_alert_policy(alert, classes) != "NEVER")
                ]
                service_gone_alerts = [
                    alert for alert in gone_alerts
                    if (get_alert_level(alert,
                                        classes).value >= service_level.value
                        and get_alert_policy(alert, classes) == policy_name)
                ]
                service_new_alerts = [
                    alert for alert in new_alerts
                    if (get_alert_level(alert,
                                        classes).value >= service_level.value
                        and get_alert_policy(alert, classes) == policy_name)
                ]
                for gone_alert in list(service_gone_alerts):
                    for new_alert in service_new_alerts:
                        if gone_alert.klass == new_alert.klass and gone_alert.key == new_alert.key:
                            service_gone_alerts.remove(gone_alert)
                            service_new_alerts.remove(new_alert)
                            break

                if not service_gone_alerts and not service_new_alerts:
                    continue

                factory = ALERT_SERVICES_FACTORIES.get(
                    alert_service_desc["type"])
                if factory is None:
                    self.logger.error("Alert service %r does not exist",
                                      alert_service_desc["type"])
                    continue

                try:
                    alert_service = factory(self.middleware,
                                            alert_service_desc["attributes"])
                except Exception:
                    self.logger.error(
                        "Error creating alert service %r with parameters=%r",
                        alert_service_desc["type"],
                        alert_service_desc["attributes"],
                        exc_info=True)
                    continue

                alerts = [
                    alert for alert in service_alerts if not alert.dismissed
                ]
                service_gone_alerts = [
                    alert for alert in service_gone_alerts
                    if not alert.dismissed
                ]
                service_new_alerts = [
                    alert for alert in service_new_alerts
                    if not alert.dismissed
                ]

                if alerts or service_gone_alerts or service_new_alerts:
                    try:
                        await alert_service.send(alerts, service_gone_alerts,
                                                 service_new_alerts)
                    except Exception:
                        self.logger.error("Error in alert service %r",
                                          alert_service_desc["type"],
                                          exc_info=True)

            if policy_name == "IMMEDIATELY":
                as_ = AlertSerializer(self.middleware)
                for alert in gone_alerts:
                    self._send_alert_deleted_event(alert)
                for alert in new_alerts:
                    self.middleware.send_event('alert.list',
                                               'ADDED',
                                               id=alert.uuid,
                                               fields=await
                                               as_.serialize(alert))

                for alert in new_alerts:
                    if alert.mail:
                        await self.middleware.call("mail.send", alert.mail)

                if not await self.middleware.call("system.is_freenas"):
                    new_hardware_alerts = [
                        alert for alert in new_alerts if alert.klass.hardware
                    ]
                    if new_hardware_alerts:
                        if await self.middleware.call(
                                "support.is_available_and_enabled"):
                            support = await self.middleware.call(
                                "support.config")
                            msg = [
                                f"* {alert.formatted}"
                                for alert in new_hardware_alerts
                            ]

                            serial = (await self.middleware.call("system.info")
                                      )["system_serial"]

                            for name, verbose_name in await self.middleware.call(
                                    "support.fields"):
                                value = support[name]
                                if value:
                                    msg += [
                                        "",
                                        "{}: {}".format(verbose_name, value)
                                    ]

                            msg = "\n".join(msg)

                            job = await self.middleware.call(
                                "support.new_ticket", {
                                    "title": "Automatic alert (%s)" % serial,
                                    "body": msg,
                                    "attach_debug": False,
                                    "category": "Hardware",
                                    "criticality": "Loss of Functionality",
                                    "environment": "Production",
                                    "name": "Automatic Alert",
                                    "email": "*****@*****.**",
                                    "phone": "-",
                                })
                            await job.wait()
                            if job.error:
                                await self.middleware.call(
                                    "alert.oneshot_create",
                                    "AutomaticAlertFailed", {
                                        "serial": serial,
                                        "alert": msg,
                                        "error": str(job.error)
                                    })

    def __uuid(self):
        return str(uuid.uuid4())

    async def __should_run_or_send_alerts(self):
        if await self.middleware.call('system.state') != 'READY':
            return False

        if (not await self.middleware.call('system.is_freenas')
                and await self.middleware.call('failover.licensed')
                and (await self.middleware.call('failover.status') == 'BACKUP'
                     or await self.middleware.call('failover.in_progress'))):
            return False

        return True

    async def __run_alerts(self):
        master_node = "A"
        backup_node = "B"
        product_type = await self.middleware.call("alert.product_type")
        run_on_backup_node = False
        run_failover_related = False
        if product_type == "ENTERPRISE":
            if await self.middleware.call("failover.licensed"):
                if await self.middleware.call("failover.node") == "B":
                    master_node = "B"
                    backup_node = "A"
                try:
                    remote_version = await self.middleware.call(
                        "failover.call_remote", "system.version")
                    remote_system_state = await self.middleware.call(
                        "failover.call_remote", "system.state")
                    remote_failover_status = await self.middleware.call(
                        "failover.call_remote", "failover.status")
                except Exception:
                    pass
                else:
                    if remote_version == await self.middleware.call(
                            "system.version"):
                        if remote_system_state == "READY" and remote_failover_status == "BACKUP":
                            run_on_backup_node = True

            run_failover_related = time.monotonic(
            ) > self.blocked_failover_alerts_until

        for k, source_lock in list(self.sources_locks.items()):
            if source_lock.expires_at <= time.monotonic():
                await self.unblock_source(k)

        for alert_source in ALERT_SOURCES.values():
            if product_type not in alert_source.products:
                continue

            if alert_source.failover_related and not run_failover_related:
                continue

            if not alert_source.schedule.should_run(
                    datetime.utcnow(),
                    self.alert_source_last_run[alert_source.name]):
                continue

            self.alert_source_last_run[alert_source.name] = datetime.utcnow()

            alerts_a = [
                alert for alert in self.alerts if alert.node == master_node
                and alert.source == alert_source.name
            ]
            locked = False
            if self.blocked_sources[alert_source.name]:
                self.logger.debug(
                    "Not running alert source %r because it is blocked",
                    alert_source.name)
                locked = True
            else:
                self.logger.trace("Running alert source: %r",
                                  alert_source.name)

                try:
                    alerts_a = await self.__run_source(alert_source.name)
                except UnavailableException:
                    pass
            for alert in alerts_a:
                alert.node = master_node

            alerts_b = []
            if run_on_backup_node and alert_source.run_on_backup_node:
                try:
                    alerts_b = [
                        alert for alert in self.alerts
                        if alert.node == backup_node
                        and alert.source == alert_source.name
                    ]
                    try:
                        if not locked:
                            alerts_b = await self.middleware.call(
                                "failover.call_remote", "alert.run_source",
                                [alert_source.name])

                            alerts_b = [
                                Alert(
                                    **{k: v for k, v in alert.items()
                                       if k in ("args", "datetime", "last_occurrence",
                                                "dismissed", "mail")},
                                    klass=AlertClass.class_by_name[alert["klass"]],
                                    _source=alert["source"],
                                    _key=alert["key"],
                                )
                                for alert in alerts_b
                            ]
                    except CallError as e:
                        if e.errno in [
                                errno.ECONNABORTED, errno.ECONNREFUSED,
                                errno.ECONNRESET, errno.EHOSTDOWN,
                                errno.ETIMEDOUT,
                                CallError.EALERTCHECKERUNAVAILABLE
                        ]:
                            pass
                        else:
                            raise
                except ReserveFDException:
                    self.logger.debug('Failed to reserve a privileged port')
                except Exception:
                    alerts_b = [
                        Alert(AlertSourceRunFailedOnBackupNodeAlertClass,
                              args={
                                  "source_name": alert_source.name,
                                  "traceback": traceback.format_exc(),
                              },
                              _source=alert_source.name)
                    ]

            for alert in alerts_b:
                alert.node = backup_node

            for alert in alerts_a + alerts_b:
                self.__handle_alert(alert)

            self.alerts = (
                [a for a in self.alerts if a.source != alert_source.name] +
                alerts_a + alerts_b)

    def __handle_alert(self, alert):
        try:
            existing_alert = [
                a for a in self.alerts if (a.node, a.source, a.klass,
                                           a.key) == (alert.node, alert.source,
                                                      alert.klass, alert.key)
            ][0]
        except IndexError:
            existing_alert = None

        if existing_alert is None:
            alert.uuid = self.__uuid()
        else:
            alert.uuid = existing_alert.uuid
        if existing_alert is None:
            alert.datetime = alert.datetime or datetime.utcnow()
            if alert.datetime.tzinfo is not None:
                alert.datetime = alert.datetime.astimezone(
                    timezone.utc).replace(tzinfo=None)
        else:
            alert.datetime = existing_alert.datetime
        alert.last_occurrence = datetime.utcnow()
        if existing_alert is None:
            alert.dismissed = False
        else:
            alert.dismissed = existing_alert.dismissed

    def __expire_alerts(self):
        self.alerts = list(
            filter(lambda alert: not self.__should_expire_alert(alert),
                   self.alerts))

    def __should_expire_alert(self, alert):
        if issubclass(alert.klass, OneShotAlertClass):
            if alert.klass.expires_after is not None:
                return alert.last_occurrence < datetime.utcnow() - alert.klass.expires_after

        return False

    @private
    async def run_source(self, source_name):
        try:
            return [
                dict(alert.__dict__, klass=alert.klass.name)
                for alert in await self.__run_source(source_name)
            ]
        except UnavailableException:
            raise CallError("This alert checker is unavailable",
                            CallError.EALERTCHECKERUNAVAILABLE)

    @private
    async def block_source(self, source_name, timeout=3600):
        if source_name not in ALERT_SOURCES:
            raise CallError("Invalid alert source")

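        # Each block is represented by a random lock token, so several callers
        # can block the same source independently; the source stays blocked
        # until every token is removed via unblock_source() or its timeout
        # expires (expired locks are reaped in __run_alerts).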
        lock = str(uuid.uuid4())
        self.blocked_sources[source_name].add(lock)
        self.sources_locks[lock] = AlertSourceLock(source_name,
                                                   time.monotonic() + timeout)
        return lock

    @private
    async def unblock_source(self, lock):
        source_lock = self.sources_locks.pop(lock, None)
        if source_lock:
            self.blocked_sources[source_lock.source_name].remove(lock)

    @private
    async def block_failover_alerts(self):
        # This value comes from support's observations of how long an M-series boot can take.
        self.blocked_failover_alerts_until = time.monotonic() + 900

    async def __run_source(self, source_name):
        alert_source = ALERT_SOURCES[source_name]

        try:
            alerts = (await alert_source.check()) or []
        except UnavailableException:
            raise
        except Exception as e:
            if isinstance(e, CallError) and e.errno in [
                    errno.ECONNABORTED, errno.ECONNREFUSED, errno.ECONNRESET,
                    errno.EHOSTDOWN, errno.ETIMEDOUT
            ]:
                alerts = [
                    Alert(AlertSourceRunFailedAlertClass,
                          args={
                              "source_name": alert_source.name,
                              "traceback": str(e),
                          })
                ]
            else:
                alerts = [
                    Alert(AlertSourceRunFailedAlertClass,
                          args={
                              "source_name": alert_source.name,
                              "traceback": traceback.format_exc(),
                          })
                ]
        else:
            if not isinstance(alerts, list):
                alerts = [alerts]

        for alert in alerts:
            alert.source = source_name

        return alerts

    @periodic(3600, run_on_start=False)
    @private
    async def flush_alerts(self):
        if (not await self.middleware.call('system.is_freenas')
                and await self.middleware.call('failover.licensed')
                and await self.middleware.call('failover.status') == 'BACKUP'):
            return

        await self.middleware.call("datastore.delete", "system.alert", [])

        for alert in self.alerts:
            d = alert.__dict__.copy()
            d["klass"] = d["klass"].name
            del d["mail"]
            await self.middleware.call("datastore.insert", "system.alert", d)

    @private
    @accepts(Str("klass"), Any("args", null=True))
    @job(lock="process_alerts", transient=True)
    async def oneshot_create(self, job, klass, args):
        try:
            klass = AlertClass.class_by_name[klass]
        except KeyError:
            raise CallError(f"Invalid alert class: {klass!r}")

        if not issubclass(klass, OneShotAlertClass):
            raise CallError(
                f"Alert class {klass!r} is not a one-shot alert class")

        alert = await klass(self.middleware).create(args)
        if alert is None:
            return

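        # One-shot alerts are not produced by a periodic alert source, so they
        # carry an empty source name.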
        alert.source = ""
        alert.klass = alert.klass

        alert.node = self.node

        self.__handle_alert(alert)

        self.alerts = [a
                       for a in self.alerts if a.uuid != alert.uuid] + [alert]

        await self.middleware.call("alert.send_alerts")

    @private
    @accepts(Str("klass"), Any("query", null=True))
    @job(lock="process_alerts", transient=True)
    async def oneshot_delete(self, job, klass, query):
        try:
            klass = AlertClass.class_by_name[klass]
        except KeyError:
            raise CallError(f"Invalid alert source: {klass!r}")

        if not issubclass(klass, OneShotAlertClass):
            raise CallError(
                f"Alert class {klass!r} is not a one-shot alert class")

        related_alerts, unrelated_alerts = bisect(
            lambda a: (a.node, a.klass) == (self.node, klass), self.alerts)
        left_alerts = await klass(self.middleware).delete(related_alerts, query)
        deleted = False
        for deleted_alert in related_alerts:
            if deleted_alert not in left_alerts:
                self.alerts.remove(deleted_alert)
                deleted = True

        if deleted:
            await self.middleware.call("alert.send_alerts")

    @private
    def alert_source_clear_run(self, name):
        alert_source = ALERT_SOURCES.get(name)
        if not alert_source:
            raise CallError("Alert source {name!r} not found.", errno.ENOENT)

        self.alert_source_last_run[alert_source.name] = datetime.min

    @private
    async def product_type(self):
        product_type = await self.middleware.call("system.product_type")
        # FIXME
        if product_type == "SCALE":
            product_type = "CORE"
        return product_type
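
A minimal usage sketch for the source-blocking API above, as it might be called from another middleware service; the source name 'ZpoolCapacity' and the 'pool.do_upgrade' method are hypothetical placeholders:

async def upgrade_pool(middleware):
    # Block the capacity checker while the pool is being upgraded so that
    # transient states do not raise spurious alerts; the lock auto-expires
    # after the timeout even if unblock_source is never reached.
    lock = await middleware.call('alert.block_source', 'ZpoolCapacity', 600)
    try:
        await middleware.call('pool.do_upgrade')
    finally:
        await middleware.call('alert.unblock_source', lock)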
Example #11
class ConsulService(Service):

    INFLUXDB_API = [
        'host', 'username', 'password', 'database', 'series-name', 'enabled'
    ]
    SLACK_API = [
        'cluster-name', 'url', 'channel', 'username', 'icon-url', 'detailed',
        'enabled'
    ]
    MATTERMOST_API = [
        'cluster', 'url', 'username', 'password', 'team', 'channel', 'enabled'
    ]
    PAGERDUTY_API = ['service-key', 'client-name', 'enabled']
    HIPCHAT_API = [
        'from', 'cluster-name', 'base-url', 'room-id', 'auth-token', 'enabled'
    ]
    OPSGENIE_API = ['cluster-name', 'api-key', 'enabled']
    AWSSNS_API = ['region', 'topic-arn', 'enabled']
    VICTOROPS_API = ['api-key', 'routing-key', 'enabled']

    @accepts(Str('key'), Any('value'))
    async def set_kv(self, key, value):
        """
        Sets `key` with `value` in Consul KV.

        Returns:
                    bool: True if the value was set successfully, otherwise False.
        """
        c = consul.aio.Consul()
        try:
            return await c.kv.put(str(key), str(value))
        except Exception as err:
            logger.error('===> Consul set_kv error: %s', err)
            return False

    @accepts(Str('key'))
    async def get_kv(self, key):
        """
        Gets value of `key` in Consul KV.

        Returns:
                    str: The value, or an empty string if the key does not exist.
        """
        c = consul.aio.Consul()
        index, data = await c.kv.get(key)
        if data is not None:
            return data['Value'].decode("utf-8")
        else:
            return ""

    @accepts(Str('key'))
    async def delete_kv(self, key):
        """
        Delete a `key` in Consul KV.

        Returns:
                    bool: True if the key was deleted, otherwise False.
        """
        c = consul.aio.Consul()
        try:
            return await c.kv.delete(str(key))
        except Exception as err:
            logger.error('===> Consul delete_kv error: %s', err)
            return False

    @accepts()
    async def reload(self):
        """
        Reload the Consul agent.

        Returns:
                    bool: True if the reload succeeded, otherwise False.
        """
        returncode = await (await Popen(['consul', 'reload'],
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)).wait()
        if returncode == 0:
            logger.info("===> Reloaded Consul (exit code %d)", returncode)
            return True
        else:
            return False

    @accepts()
    async def create_fake_alert(self):
        seed = random.randrange(100000)
        fake_fd = "/usr/local/etc/consul.d/fake.json"
        fake_alert = {
            "service": {
                "name": "fake-" + str(seed),
                "tags": ["primary"],
                "address": "",
                "port": 65535,
                "enableTagOverride": False,
                "checks": [{
                    "tcp": "localhost:65535",
                    "interval": "10s",
                    "timeout": "3s"
                }]
            }
        }
        with open(fake_fd, 'w') as fd:
            fd.write(json.dumps(fake_alert))

        return await self.reload()

    @accepts()
    async def remove_fake_alert(self):
        fake_fd = "/usr/local/etc/consul.d/fake.json"
        try:
            os.remove(fake_fd)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        return await self.reload()

    def _convert_keys(self, data):
        """
        Transforms keys that contain "_" into keys with "-".

        Returns:
                    dict: The same data with "-" instead of "_" in its keys.
        """
        for key in list(data.keys()):
            new_key = key.replace("_", "-")
            if new_key != key:
                data[new_key] = data[key]
                del data[key]

        return data

    def _api_keywords(self, api_list, data):
        """
        Helper to convert the API list into a dict.

        Returns:
                    dict: Keyed by the API list entries, with values taken from `data`.
        """
        new_dict = {k: data.get(k, None) for k in api_list}

        return new_dict

    async def _insert_keys(self, prefix, data, api_keywords):
        """
        Helper to insert keys into consul.

        Note: because 'from' is a reserved word in Python, we cannot use it
        directly; instead we use 'hfrom' and convert it here.
        """
        new_dict = self._api_keywords(api_keywords, data)

        for k, v in list(new_dict.items()):
            if k == 'hfrom':
                k = 'from'
            await self.set_kv(prefix + k, v)

    async def _delete_keys(self, prefix, data, api_keywords):
        """
        Helper to delete keys from consul.

        Note: the same 'from' handling applies, as explained in _insert_keys().
        """
        new_dict = self._api_keywords(api_keywords, data)

        for k in list(new_dict.keys()):
            if k == 'hfrom':
                k = 'from'
            await self.delete_kv(prefix + k)

    async def do_create(self, data):
        """
        Helper to insert keys into consul based on the service API.
        """
        consul_prefix = 'consul-alerts/config/notifiers/'
        cdata = self._convert_keys(data)

        alert_service = data.pop('consulalert-type')
        consul_prefix = consul_prefix + alert_service.lower() + '/'

        if alert_service == 'InfluxDB':
            await self._insert_keys(consul_prefix, cdata, self.INFLUXDB_API)
        elif alert_service == 'Slack':
            await self._insert_keys(consul_prefix, cdata, self.SLACK_API)
        elif alert_service == 'Mattermost':
            await self._insert_keys(consul_prefix, cdata, self.MATTERMOST_API)
        elif alert_service == 'PagerDuty':
            await self._insert_keys(consul_prefix, cdata, self.PAGERDUTY_API)
        elif alert_service == 'HipChat':
            await self._insert_keys(consul_prefix, cdata, self.HIPCHAT_API)
        elif alert_service == 'OpsGenie':
            await self._insert_keys(consul_prefix, cdata, self.OPSGENIE_API)
        elif alert_service == 'AWS-SNS':
            await self._insert_keys(consul_prefix, cdata, self.AWSSNS_API)
        elif alert_service == 'VictorOps':
            await self._insert_keys(consul_prefix, cdata, self.VICTOROPS_API)

    async def do_delete(self, alert_service, data):
        """
        Helper to delete the keys from consul based on the service API.
        """
        consul_prefix = 'consul-alerts/config/notifiers/' + alert_service.lower() + '/'
        cdata = self._convert_keys(data)

        if alert_service == 'InfluxDB':
            await self._delete_keys(consul_prefix, cdata, self.INFLUXDB_API)
        elif alert_service == 'Slack':
            await self._delete_keys(consul_prefix, cdata, self.SLACK_API)
        elif alert_service == 'Mattermost':
            await self._delete_keys(consul_prefix, cdata, self.MATTERMOST_API)
        elif alert_service == 'PagerDuty':
            await self._delete_keys(consul_prefix, cdata, self.PAGERDUTY_API)
        elif alert_service == 'HipChat':
            await self._delete_keys(consul_prefix, cdata, self.HIPCHAT_API)
        elif alert_service == 'OpsGenie':
            await self._delete_keys(consul_prefix, cdata, self.OPSGENIE_API)
        elif alert_service == 'AWS-SNS':
            await self._delete_keys(consul_prefix, cdata, self.AWSSNS_API)
        elif alert_service == 'VictorOps':
            await self._delete_keys(consul_prefix, cdata, self.VICTOROPS_API)
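
A short round-trip sketch for the Consul KV helpers above, assuming a Consul agent is reachable locally; the key name is illustrative:

async def kv_roundtrip(consul_service):
    key = 'consul-alerts/config/notifiers/slack/enabled'
    # set_kv stringifies both key and value before storing them
    assert await consul_service.set_kv(key, True)
    # get_kv returns the decoded string value, or '' when the key is absent
    assert await consul_service.get_kv(key) == 'True'
    assert await consul_service.delete_kv(key)
    assert await consul_service.get_kv(key) == ''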
Example #12
class CatalogService(CRUDService):
    class Config:
        datastore = 'services.catalog'
        datastore_extend = 'catalog.catalog_extend'
        datastore_extend_context = 'catalog.catalog_extend_context'
        datastore_primary_key = 'label'
        datastore_primary_key_type = 'string'
        cli_namespace = 'app.catalog'

    ENTRY = Dict(
        'catalog_entry',
        Str('label',
            required=True,
            validators=[Match(r'^\w+[\w.-]*$')],
            max_length=60),
        Str('repository', required=True, empty=False),
        Str('branch', required=True, empty=False),
        Str('location', required=True),
        Str('id', required=True),
        List('preferred_trains'),
        Dict('trains', additional_attrs=True),
        Bool('healthy'),
        Bool('error'),
        Bool('builtin'),
        Bool('cached'),
        Dict(
            'caching_progress',
            Str('description', null=True),
            Any('extra', null=True),
            Float('percent', null=True),
            null=True,
        ),
        Dict('caching_job', null=True, additional_attrs=True),
    )

    @private
    async def catalog_extend_context(self, rows, extra):
        k8s_dataset = (await self.middleware.call('kubernetes.config'))['dataset']
        if k8s_dataset:
            catalogs_dir = os.path.join('/mnt', k8s_dataset, 'catalogs')
        else:
            catalogs_dir = f'{TMP_IX_APPS_DIR}/catalogs'
        context = {
            'catalogs_dir': catalogs_dir,
            'extra': extra or {},
            'catalogs_context': {},
        }
        if extra.get('item_details'):
            item_sync_params = await self.middleware.call(
                'catalog.sync_items_params')
            item_jobs = await self.middleware.call(
                'core.get_jobs',
                [['method', '=', 'catalog.items'], ['state', '=', 'RUNNING']])
            for row in rows:
                label = row['label']
                catalog_info = {
                    'item_job': await self.middleware.call(
                        'catalog.items', label, {
                            'cache': True,
                            'cache_only': await self.official_catalog_label() != row['label'],
                            'retrieve_all_trains': extra.get('retrieve_all_trains', True),
                            'trains': extra.get('trains', []),
                        }),
                    'cached': label == OFFICIAL_LABEL
                    or await self.middleware.call('catalog.cached', label, False)
                    or await self.middleware.call('catalog.cached', label, True),
                    'normalized_progress': None,
                }
                if not catalog_info['cached']:
                    caching_job = filter_list(
                        item_jobs,
                        [['arguments', '=', [row['label'], item_sync_params]]])
                    if not caching_job:
                        caching_job_obj = await self.middleware.call(
                            'catalog.items', label, item_sync_params)
                        caching_job = caching_job_obj.__encode__()
                    else:
                        caching_job = caching_job[0]

                    catalog_info['normalized_progress'] = {
                        'caching_job': caching_job,
                        'caching_progress': caching_job['progress'],
                    }
                context['catalogs_context'][label] = catalog_info

        return context

    @private
    async def normalize_data_from_item_job(self, label, catalog_context):
        normalized = {
            'trains': {},
            'cached': catalog_context['cached'],
            'healthy': False,
            'error': True,
            'caching_progress': None,
            'caching_job': None,
        }
        item_job = catalog_context['item_job']
        await item_job.wait()
        if not item_job.error:
            normalized.update({
                'trains': item_job.result,
                'healthy': all(
                    app['healthy'] for train in item_job.result
                    for app in item_job.result[train].values()
                ),
                'cached': label == OFFICIAL_LABEL
                or await self.middleware.call('catalog.cached', label, False)
                or await self.middleware.call('catalog.cached', label, True),
                'error': False,
                'caching_progress': None,
                'caching_job': None,
            })
        return normalized

    @private
    async def catalog_extend(self, catalog, context):
        catalog.update({
            'location': os.path.join(
                context['catalogs_dir'],
                convert_repository_to_path(catalog['repository'],
                                           catalog['branch'])),
            'id': catalog['label'],
        })
        extra = context['extra']
        if extra.get('item_details'):
            catalog_context = context['catalogs_context'][catalog['label']]
            catalog.update(await self.normalize_data_from_item_job(
                catalog['id'], catalog_context))
            if catalog['cached']:
                return catalog
            else:
                catalog.update(catalog_context['normalized_progress'])
        return catalog

    @private
    async def common_validation(self, catalog, schema, data):
        found_trains = set(catalog['trains'])
        diff = set(data['preferred_trains']) - found_trains
        verrors = ValidationErrors()
        if diff:
            verrors.add(
                f'{schema}.preferred_trains',
                f'{", ".join(diff)} trains were not found in catalog.')
        if not data['preferred_trains']:
            verrors.add(
                f'{schema}.preferred_trains',
                'At least 1 preferred train must be specified for a catalog.')

        verrors.check()

    @accepts(
        Patch(
            'catalog_entry',
            'catalog_create',
            ('add', Bool('force', default=False)),
            ('rm', {'name': 'id'}),
            ('rm', {'name': 'trains'}),
            ('rm', {'name': 'healthy'}),
            ('rm', {'name': 'error'}),
            ('rm', {'name': 'builtin'}),
            ('rm', {'name': 'location'}),
            ('rm', {'name': 'cached'}),
            ('rm', {'name': 'caching_progress'}),
            ('rm', {'name': 'caching_job'}),
        ),
    )
    @job(lock=lambda args: f'catalog_create_{args[0]["label"]}')
    async def do_create(self, job, data):
        """
        `catalog_create.preferred_trains` specifies trains which will be displayed in the UI directly for a user.
        """
        verrors = ValidationErrors()
        # We normalize the label
        data['label'] = data['label'].upper()

        if await self.query([['id', '=', data['label']]]):
            verrors.add('catalog_create.label',
                        'A catalog with specified label already exists',
                        errno=errno.EEXIST)

        if await self.query([['repository', '=', data['repository']],
                             ['branch', '=', data['branch']]]):
            for k in ('repository', 'branch'):
                verrors.add(
                    f'catalog_create.{k}',
                    'A catalog with same repository/branch already exists',
                    errno=errno.EEXIST)

        verrors.check()

        if not data['preferred_trains']:
            data['preferred_trains'] = ['stable']

        if not data.pop('force'):
            job.set_progress(40, f'Validating {data["label"]!r} catalog')
            # We will validate the catalog now to ensure it's valid wrt contents / format
            path = os.path.join(
                TMP_IX_APPS_DIR, 'validate_catalogs',
                convert_repository_to_path(data['repository'], data['branch']))
            try:
                await self.middleware.call('catalog.update_git_repository', {
                    **data, 'location': path
                }, True)
                await self.middleware.call(
                    'catalog.validate_catalog_from_path', path)
                await self.common_validation(
                    {
                        'trains':
                        await self.middleware.call(
                            'catalog.retrieve_train_names', path)
                    }, 'catalog_create', data)
            except CallError as e:
                verrors.add('catalog_create.label',
                            f'Failed to validate catalog: {e}')
            finally:
                await self.middleware.run_in_thread(shutil.rmtree,
                                                    path,
                                                    ignore_errors=True)
        else:
            job.set_progress(50, 'Skipping validation of catalog')

        verrors.check()

        job.set_progress(60, 'Completed Validation')

        await self.middleware.call('datastore.insert', self._config.datastore,
                                   data)
        job.set_progress(70, f'Successfully added {data["label"]!r} catalog')

        job.set_progress(80, f'Syncing {data["label"]} catalog')
        sync_job = await self.middleware.call('catalog.sync', data['label'])
        await sync_job.wait()
        if sync_job.error:
            raise CallError(
                f'Catalog was added successfully but failed to sync: {sync_job.error}'
            )

        job.set_progress(100, f'Successfully synced {data["label"]!r} catalog')

        return await self.get_instance(data['label'])

    @accepts(Str('id'),
             Dict('catalog_update', List('preferred_trains'), update=True))
    async def do_update(self, id, data):
        catalog = await self.query([['id', '=', id]], {
            'extra': {
                'item_details': True
            },
            'get': True
        })
        await self.common_validation(catalog, 'catalog_update', data)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, data)

        return await self.get_instance(id)

    def do_delete(self, id):
        catalog = self.middleware.call_sync('catalog.get_instance', id)
        if catalog['builtin']:
            raise CallError('Builtin catalogs cannot be deleted')

        ret = self.middleware.call_sync('datastore.delete',
                                        self._config.datastore, id)

        if os.path.exists(catalog['location']):
            shutil.rmtree(catalog['location'], ignore_errors=True)

        # Let's delete any unhealthy alert if we had one
        self.middleware.call_sync('alert.oneshot_delete', 'CatalogNotHealthy',
                                  id)
        self.middleware.call_sync('alert.oneshot_delete', 'CatalogSyncFailed',
                                  id)

        # Remove cached content of the catalog in question so that if a catalog is created again
        # with same label but different repo/branch, we don't reuse old cache
        self.middleware.call_sync('cache.pop', get_cache_key(id, True))
        self.middleware.call_sync('cache.pop', get_cache_key(id, False))

        return ret

    @private
    async def official_catalog_label(self):
        return OFFICIAL_LABEL
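
Given the schema above, creating a catalog through the CRUD API might look like the following sketch; the label and repository are made-up values, and `client` stands for any middleware client whose call() supports jobs:

def create_example_catalog(client):
    # do_create upper-cases the label, validates the repository unless
    # 'force' is set, and then syncs the new catalog.
    return client.call('catalog.create', {
        'label': 'MY-CATALOG',
        'repository': 'https://github.com/example/charts.git',
        'branch': 'main',
        'preferred_trains': ['stable'],
        'force': False,
    }, job=True)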
Example #13
class UserService(CRUDService):

    class Config:
        datastore = 'account.bsdusers'
        datastore_extend = 'user.user_extend'
        datastore_prefix = 'bsdusr_'

    @private
    async def user_extend(self, user):

        # Get group membership
        user['groups'] = [gm['group']['id'] for gm in await self.middleware.call('datastore.query', 'account.bsdgroupmembership', [('user', '=', user['id'])], {'prefix': 'bsdgrpmember_'})]

        # Get authorized keys
        keysfile = f'{user["home"]}/.ssh/authorized_keys'
        user['sshpubkey'] = None
        if os.path.exists(keysfile):
            try:
                with open(keysfile, 'r') as f:
                    user['sshpubkey'] = f.read()
            except Exception:
                pass
        return user

    @accepts(Dict(
        'user_create',
        Int('uid'),
        Str('username', required=True),
        Int('group'),
        Bool('group_create', default=False),
        Str('home', default='/nonexistent'),
        Str('home_mode', default='755'),
        Str('shell', default='/bin/csh'),
        Str('full_name', required=True),
        Str('email'),
        Str('password'),
        Bool('password_disabled', default=False),
        Bool('locked', default=False),
        Bool('microsoft_account', default=False),
        Bool('sudo', default=False),
        Str('sshpubkey'),
        List('groups'),
        Dict('attributes', additional_attrs=True),
        register=True,
    ))
    async def do_create(self, data):

        verrors = ValidationErrors()

        if (
            not data.get('group') and not data.get('group_create')
        ) or (
            data.get('group') is not None and data.get('group_create')
        ):
            verrors.add('group', 'You must either provide a group or set group_create, but not both', errno.EINVAL)

        await self.__common_validation(verrors, data)

        if data.get('sshpubkey') and not data['home'].startswith('/mnt'):
            verrors.add('sshpubkey', 'Home directory is not writable, leave this blank')

        if verrors:
            raise verrors

        groups = data.pop('groups') or []
        create = data.pop('group_create')

        if create:
            group = await self.middleware.call('group.query', [('group', '=', data['username'])])
            if group:
                group = group[0]
            else:
                group = await self.middleware.call('group.create', {'name': data['username']})
                group = (await self.middleware.call('group.query', [('id', '=', group)]))[0]
            data['group'] = group['id']
        else:
            group = await self.middleware.call('group.query', [('id', '=', data['group'])])
            if not group:
                raise CallError(f'Group {data["group"]} not found')
            group = group[0]

        # Is this a new directory or not? Let's not nuke existing directories,
        # e.g. /, /root, /mnt/tank/my-dataset, etc ;).
        new_homedir = False
        home_mode = data.pop('home_mode')
        if data['home'] != '/nonexistent':
            try:
                os.makedirs(data['home'], mode=int(home_mode, 8))
                os.chown(data['home'], data['uid'], group['gid'])
            except FileExistsError:
                if not os.path.isdir(data['home']):
                    raise CallError(
                        'Path for home directory already '
                        'exists and is not a directory',
                        errno.EEXIST
                    )
            except OSError as oe:
                raise CallError(
                    'Failed to create the home directory '
                    f'({data["home"]}) for user: {oe}'
                )
            else:
                new_homedir = True
            if os.stat(data['home']).st_dev == os.stat('/mnt').st_dev:
                raise CallError(
                    f'Path for the home directory ({data["home"]}) '
                    'must be under a volume or dataset'
                )

        if not data.get('uid'):
            data['uid'] = await self.get_next_uid()

        pk = None  # Make sure pk exists to rollback in case of an error
        try:

            password = await self.__set_password(data)

            sshpubkey = data.pop('sshpubkey', None)  # datastore does not have sshpubkey
            pk = await self.middleware.call('datastore.insert', 'account.bsdusers', data, {'prefix': 'bsdusr_'})

            await self.__set_groups(pk, groups)

        except Exception:
            if pk is not None:
                await self.middleware.call('datastore.delete', 'account.bsdusers', pk)
            if new_homedir:
                # Be as atomic as possible when creating the user if
                # commands failed to execute cleanly.
                shutil.rmtree(data['home'])
            raise

        await self.middleware.call('service.reload', 'user')

        await self.__set_smbpasswd(data['username'], password)

        if os.path.exists(data['home']):
            for f in os.listdir(SKEL_PATH):
                if f.startswith('dot'):
                    dest_file = os.path.join(data['home'], f[3:])
                else:
                    dest_file = os.path.join(data['home'], f)
                if not os.path.exists(dest_file):
                    shutil.copyfile(os.path.join(SKEL_PATH, f), dest_file)
                    os.chown(dest_file, data['uid'], group['gid'])

        data['sshpubkey'] = sshpubkey
        await self.__update_sshpubkey(data, group['group'])

        return pk

    @accepts(
        Int('id'),
        Patch(
            'user_create',
            'user_update',
            ('attr', {'update': True}),
            ('rm', {'name': 'group_create'}),
        ),
    )
    async def do_update(self, pk, data):

        user = await self._get_instance(pk)

        verrors = ValidationErrors()

        if 'group' in data:
            group = await self.middleware.call('datastore.query', 'account.bsdgroups', [('id', '=', data['group'])])
            if not group:
                verrors.add('group', f'Group {data["group"]} not found', errno.ENOENT)
            else:
                group = group[0]
        else:
            group = user['group']
            user['group'] = group['id']

        await self.__common_validation(verrors, data, pk=pk)

        home = data.get('home') or user['home']
        # root user (uid 0) is an exception to the rule
        if data.get('sshpubkey') and not home.startswith('/mnt') and user['uid'] != 0:
            verrors.add('sshpubkey', 'Home directory is not writable, leave this blank')

        # Do not allow attributes to be changed for builtin user
        if user['builtin']:
            for i in ('group', 'home', 'home_mode', 'uid', 'username'):
                if i in data:
                    verrors.add(i, 'This attribute cannot be changed')

        if verrors:
            raise verrors

        # Copy the home directory if it changed
        if (
            'home' in data and
            data['home'] not in (user['home'], '/nonexistent') and
            not data["home"].startswith(f'{user["home"]}/')
        ):
            home_copy = True
            home_old = user['home']
        else:
            home_copy = False

        user.update(data)

        password = await self.__set_password(user)

        await self.__update_sshpubkey(user, group['bsdgrp_group'])
        user.pop('sshpubkey', None)

        home_mode = user.pop('home_mode', None)
        if home_mode is not None:
            if not user['builtin'] and os.path.exists(user['home']):
                try:
                    os.chmod(user['home'], int(home_mode, 8))
                except OSError:
                    self.logger.warning('Failed to set homedir mode', exc_info=True)

        if home_copy:
            def do_home_copy():
                subprocess.run(f"su - {user['username']} -c '/bin/cp -a {home_old}/* {user['home']}/'")
            asyncio.ensure_future(self.middleware.run_in_thread(do_home_copy))

        if 'groups' in user:
            groups = user.pop('groups')
            await self.__set_groups(pk, groups)

        await self.middleware.call('datastore.update', 'account.bsdusers', pk, user, {'prefix': 'bsdusr_'})

        await self.middleware.call('service.reload', 'user')

        await self.__set_smbpasswd(user['username'], password)

        return pk

    @accepts(Int('id'), Dict('options', Bool('delete_group', default=True)))
    async def do_delete(self, pk, options=None):

        user = await self._get_instance(pk)

        if user['builtin']:
            raise CallError('Cannot delete a built-in user', errno.EINVAL)

        if options['delete_group'] and not user['group']['bsdgrp_builtin']:
            count = await self.middleware.call('datastore.query', 'account.bsdgroupmembership', [('group', '=', user['group']['id'])], {'prefix': 'bsdgrpmember_', 'count': True})
            count2 = await self.middleware.call('datastore.query', 'account.bsdusers', [('group', '=', user['group']['id']), ('id', '!=', pk)], {'prefix': 'bsdusr_', 'count': True})
            if count == 0 and count2 == 0:
                try:
                    await self.middleware.call('group.delete', user['group']['id'])
                except Exception:
                    self.logger.warning(f'Failed to delete primary group of {user["username"]}', exc_info=True)

        await run('smbpasswd', '-x', user['username'], check=False)

        if await self.middleware.call('notifier.common', 'system', 'domaincontroller_enabled'):
            await self.middleware.call('notifier.samba4', 'user_delete', user['username'])

        # TODO: add a hook in CIFS service
        cifs = await self.middleware.call('datastore.query', 'services.cifs', [], {'prefix': 'cifs_srv_'})
        if cifs:
            cifs = cifs[0]
            if cifs['guest'] == user['username']:
                await self.middleware.call('datastore.update', 'services.cifs', cifs['id'], {'guest': 'nobody'}, {'prefix': 'cifs_srv_'})

        await self.middleware.call('datastore.delete', 'account.bsdusers', pk)
        await self.middleware.call('service.reload', 'user')

        return pk

    @item_method
    @accepts(
        Int('id'),
        Str('key'),
        Any('value'),
    )
    async def set_attribute(self, pk, key, value):
        """
        Set user general purpose `attributes` dictionary `key` to `value`.

        e.g. Setting key="foo" value="var" will result in {"attributes": {"foo": "bar"}}
        """
        user = await self._get_instance(pk)
        user.pop('group')

        user['attributes'][key] = value
        await self.middleware.call('datastore.update', 'account.bsdusers', pk, user, {'prefix': 'bsdusr_'})

        return True

    @item_method
    @accepts(
        Int('id'),
        Str('key'),
    )
    async def pop_attribute(self, pk, key):
        """
        Remove user general purpose `attributes` dictionary `key`.
        """
        user = await self._get_instance(pk)
        user.pop('group')

        if key in user['attributes']:
            user['attributes'].pop(key)
            await self.middleware.call('datastore.update', 'account.bsdusers', pk, user, {'prefix': 'bsdusr_'})
            return True
        else:
            return False

    @accepts()
    async def get_next_uid(self):
        """
        Get the next available/free uid.
        """
        last_uid = 999
        for i in await self.middleware.call('datastore.query', 'account.bsdusers', [('builtin', '=', False)], {'order_by': ['uid'], 'prefix': 'bsdusr_'}):
            # If the difference between the last uid and the current one is
            # bigger than 1, it means we have a gap and can use it.
            if i['uid'] - last_uid > 1:
                return last_uid + 1
            last_uid = i['uid']
        return last_uid + 1

    async def __common_validation(self, verrors, data, pk=None):

        exclude_filter = [('id', '!=', pk)] if pk else []

        if 'username' in data:
            pw_checkname(verrors, 'username', data['username'])

            if await self.middleware.call('datastore.query', 'account.bsdusers', [('username', '=', data['username'])] + exclude_filter, {'prefix': 'bsdusr_'}):
                verrors.add('username', f'A user with the username "{data["username"]}" already exists', errno.EEXIST)

        password = data.get('password')
        if password and '?' in password:
            # See bug #4098
            verrors.add(
                'password',
                'Passwords containing a question mark (?) are currently not '
                'allowed due to problems with SMB.',
                errno.EINVAL
            )
        elif not pk and not password and not data.get('password_disabled'):
            verrors.add('password', 'Password is required')
        elif data.get('password_disabled') and password:
            verrors.add('password_disabled', 'Password disabled, leave password blank')

        if 'home' in data:
            if ':' in data['home']:
                verrors.add('home', 'Home directory cannot contain colons')
            if not data['home'].startswith('/mnt/') and data['home'] != '/nonexistent':
                verrors.add('home', 'Home directory has to start with /mnt/ or be /nonexistent')

        if 'groups' in data:
            groups = data.get('groups') or []
            if groups and len(groups) > 64:
                verrors.add('groups', 'A user cannot belong to more than 64 auxiliary groups')

        if 'full_name' in data and ':' in data['full_name']:
            verrors.add('full_name', '":" character is not allowed in Full Name')

    async def __set_password(self, data):
        if 'password' not in data:
            return
        password = data.pop('password')
        if password:
            data['unixhash'] = crypted_password(password)
            # See http://samba.org.ru/samba/docs/man/manpages/smbpasswd.5.html
            data['smbhash'] = f'{data["username"]}:{data["uid"]}:{"X" * 32}:{nt_password(password)}:[U          ]:LCT-{int(time.time()):X}:'
        else:
            data['unixhash'] = '*'
            data['smbhash'] = '*'
        return password

    async def __set_smbpasswd(self, username, password):
        """
        Currently the way we set samba passwords is using smbpasswd
        and that can only happen after the user exists in master.passwd.
        That is the reason we have two methods/steps to set password.
        """
        if not password:
            return
        proc = await Popen(['smbpasswd', '-D', '0', '-s', '-a', username], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
        await proc.communicate(input=f'{password}\n{password}\n'.encode())

    async def __set_groups(self, pk, groups):

        groups = set(groups)
        existing_ids = set()
        for gm in await self.middleware.call('datastore.query', 'account.bsdgroupmembership', [('user', '=', pk)], {'prefix': 'bsdgrpmember_'}):
            if gm['id'] not in groups:
                await self.middleware.call('datastore.delete', 'account.bsdgroupmembership', gm['id'])
            else:
                existing_ids.add(gm['id'])

        for _id in groups - existing_ids:
            group = await self.middleware.call('datastore.query', 'account.bsdgroups', [('id', '=', _id)], {'prefix': 'bsdgrp_'})
            if not group:
                raise CallError(f'Group {_id} not found', errno.ENOENT)
            await self.middleware.call(
                'datastore.insert',
                'account.bsdgroupmembership',
                {'group': _id, 'user': pk},
                {'prefix': 'bsdgrpmember_'}
            )

    async def __update_sshpubkey(self, user, group):
        if 'sshpubkey' not in user:
            return
        keysfile = f'{user["home"]}/.ssh/authorized_keys'
        pubkey = user.get('sshpubkey')
        if pubkey is None:
            if os.path.exists(keysfile):
                try:
                    os.unlink(keysfile)
                except OSError:
                    pass
        else:
            oldpubkey = ''
            try:
                with open(keysfile, 'r') as f:
                    oldpubkey = f.read()
            except Exception:
                pass
            pubkey = pubkey.strip() + '\n'
            if pubkey != oldpubkey:
                sshpath = f'{user["home"]}/.ssh'
                if not os.path.isdir(sshpath):
                    os.makedirs(sshpath)
                    os.chmod(sshpath, 0o700)
                if not os.path.isdir(sshpath):
                    raise CallError(f'{sshpath} is not a directory')
                if pubkey == '\n' and os.path.exists(keysfile):
                    os.unlink(keysfile)
                else:
                    with open(keysfile, 'w') as f:
                        f.write(pubkey)
                    os.chmod(keysfile, 0o700)
                    await run('chown', '-R', f'{user["username"]}:{group}', sshpath, check=False)
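
The gap-finding logic in get_next_uid() above can be shown in isolation; this standalone sketch mirrors the method for a plain list of existing non-builtin UIDs:

def next_free_uid(existing_uids, start=1000):
    # Walk the sorted UIDs and take the first gap wider than one;
    # otherwise return the UID right after the last one.
    last_uid = start - 1
    for uid in sorted(existing_uids):
        if uid - last_uid > 1:
            return last_uid + 1
        last_uid = uid
    return last_uid + 1

assert next_free_uid([1000, 1001, 1003]) == 1002
assert next_free_uid([]) == 1000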
Example #14
class AlertService(Service):
    def __init__(self, middleware):
        super().__init__(middleware)

        self.node = "A"

        self.alerts = defaultdict(lambda: defaultdict(dict))

        self.alert_source_last_run = defaultdict(lambda: datetime.min)

        self.policies = {
            "IMMEDIATELY": AlertPolicy(),
            "HOURLY": AlertPolicy(lambda d: (d.date(), d.hour)),
            "DAILY": AlertPolicy(lambda d: (d.date())),
            "NEVER": AlertPolicy(lambda d: None),
        }

    @private
    async def initialize(self):
        if not await self.middleware.call("system.is_freenas"):
            if await self.middleware.call("failover.node") == "B":
                self.node = "B"

        for alert in await self.middleware.call("datastore.query",
                                                "system.alert"):
            del alert["id"]
            alert["level"] = AlertLevel(alert["level"])

            alert = Alert(**alert)

            self.alerts[alert.node][alert.source][alert.key] = alert

        for policy in self.policies.values():
            policy.receive_alerts(datetime.utcnow(), self.alerts)

        main_sources_dir = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), os.path.pardir,
            "alert", "source")
        sources_dirs = [
            os.path.join(overlay_dir, "alert", "source")
            for overlay_dir in self.middleware.overlay_dirs
        ]
        sources_dirs.insert(0, main_sources_dir)
        for sources_dir in sources_dirs:
            for module in load_modules(sources_dir):
                for cls in load_classes(
                        module, AlertSource,
                    (FilePresenceAlertSource, ThreadedAlertSource,
                     OneShotAlertSource)):
                    source = cls(self.middleware)
                    ALERT_SOURCES[source.name] = source

        main_services_dir = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), os.path.pardir,
            "alert", "service")
        services_dirs = [
            os.path.join(overlay_dir, "alert", "service")
            for overlay_dir in self.middleware.overlay_dirs
        ]
        services_dirs.insert(0, main_services_dir)
        for services_dir in services_dirs:
            for module in load_modules(services_dir):
                for cls in load_classes(
                        module, _AlertService,
                    (ThreadedAlertService, ProThreadedAlertService)):
                    ALERT_SERVICES_FACTORIES[cls.name()] = cls

    @private
    async def terminate(self):
        await self.flush_alerts()

    @accepts()
    async def list_policies(self):
        return POLICIES

    @accepts()
    async def list_sources(self):
        return [{
            "name": source.name,
            "title": source.title,
        } for source in sorted(ALERT_SOURCES.values(),
                               key=lambda source: source.title.lower())]

    @accepts()
    def list(self):
        return [
            dict(alert.__dict__,
                 id=f"{alert.node};{alert.source};{alert.key}",
                 level=alert.level.name,
                 formatted=alert.formatted,
                 one_shot=isinstance(ALERT_SOURCES.get(alert.source, None),
                                     OneShotAlertSource))
            for alert in sorted(self.__get_all_alerts(),
                                key=lambda alert: alert.title)
        ]

    @accepts(Str("id"))
    async def dismiss(self, id):
        node, source, key = id.split(";", 2)
        try:
            alert = self.alerts[node][source][key]
        except KeyError:
            return

        alert_source = ALERT_SOURCES.get(source)
        if alert_source and isinstance(alert_source, DismissableAlertSource):
            self.alerts[node][source] = {
                alert.key: alert
                for alert in await alert_source.dismiss(
                    self.alerts[node][source], key)
            }
        elif alert_source and isinstance(alert_source, OneShotAlertSource):
            self.alerts[node][source].pop(key, None)
        else:
            alert.dismissed = True

    @accepts(Str("id"))
    def restore(self, id):
        node, source, key = id.split(";", 2)
        try:
            alert = self.alerts[node][source][key]
        except KeyError:
            return
        alert.dismissed = False

    @periodic(60)
    @job(lock="process_alerts", transient=True)
    async def process_alerts(self, job):
        if not await self.middleware.call("system.ready"):
            return

        if (not await self.middleware.call('system.is_freenas')
                and await self.middleware.call('failover.licensed')
                and await self.middleware.call('failover.status') == 'BACKUP'):
            return

        await self.__run_alerts()

        await self.middleware.call("alert.send_alerts")

    @private
    @job(lock="process_alerts", transient=True)
    async def send_alerts(self, job):
        default_settings = (await self.middleware.call("alertdefaultsettings.config"))["settings"]

        all_alerts = self.__get_all_alerts()

        now = datetime.now()
        for policy_name, policy in self.policies.items():
            gone_alerts, new_alerts = policy.receive_alerts(now, self.alerts)

            for alert_service_desc in await self.middleware.call(
                    "datastore.query", "system.alertservice"):
                service_settings = dict(default_settings,
                                        **alert_service_desc["settings"])

                service_gone_alerts = [
                    alert for alert in gone_alerts if service_settings.get(
                        alert.source, DEFAULT_POLICY) == policy_name
                ]
                service_new_alerts = [
                    alert for alert in new_alerts if service_settings.get(
                        alert.source, DEFAULT_POLICY) == policy_name
                ]

                if not service_gone_alerts and not service_new_alerts:
                    continue

                factory = ALERT_SERVICES_FACTORIES.get(
                    alert_service_desc["type"])
                if factory is None:
                    self.logger.error("Alert service %r does not exist",
                                      alert_service_desc["type"])
                    continue

                try:
                    alert_service = factory(self.middleware,
                                            alert_service_desc["attributes"])
                except Exception:
                    self.logger.error(
                        "Error creating alert service %r with parameters=%r",
                        alert_service_desc["type"],
                        alert_service_desc["attributes"],
                        exc_info=True)
                    continue

                if all_alerts or service_gone_alerts or service_new_alerts:
                    try:
                        await alert_service.send(all_alerts,
                                                 service_gone_alerts,
                                                 service_new_alerts)
                    except Exception:
                        self.logger.error("Error in alert service %r",
                                          alert_service_desc["type"],
                                          exc_info=True)

            if policy_name == "IMMEDIATELY":
                for alert in new_alerts:
                    if alert.mail:
                        await self.middleware.call("mail.send", alert.mail)

                if not await self.middleware.call("system.is_freenas"):
                    new_hardware_alerts = [
                        alert for alert in new_alerts
                        if ALERT_SOURCES[alert.source].hardware
                    ]
                    if new_hardware_alerts:
                        if await self.middleware.call(
                                "support.is_available_and_enabled"):
                            support = await self.middleware.call(
                                "support.config")
                            msg = [
                                f"* {alert.formatted}"
                                for alert in new_hardware_alerts
                            ]

                            serial = (await self.middleware.call("system.info")
                                      )["system_serial"]

                            for name, verbose_name in await self.middleware.call(
                                    "support.fields"):
                                value = support[name]
                                if value:
                                    msg += [
                                        "",
                                        "{}: {}".format(verbose_name, value)
                                    ]

                            try:
                                await self.middleware.call(
                                    "support.new_ticket", {
                                        "title":
                                        "Automatic alert (%s)" % serial,
                                        "body": "\n".join(msg),
                                        "attach_debug": False,
                                        "category": "Hardware",
                                        "criticality": "Loss of Functionality",
                                        "environment": "Production",
                                        "name": "Automatic Alert",
                                        "email": "*****@*****.**",
                                        "phone": "-",
                                    })
                            except Exception:
                                self.logger.error(
                                    "Failed to create a support ticket",
                                    exc_info=True)

    async def __run_alerts(self):
        master_node = "A"
        backup_node = "B"
        run_on_backup_node = False
        if not await self.middleware.call("system.is_freenas"):
            if await self.middleware.call("failover.licensed"):
                master_node = await self.middleware.call("failover.node")
                try:
                    backup_node = await self.middleware.call(
                        "failover.call_remote", "failover.node")
                    remote_version = await self.middleware.call(
                        "failover.call_remote", "system.version")
                    remote_failover_status = await self.middleware.call(
                        "failover.call_remote", "failover.status")
                except Exception:
                    pass
                else:
                    if remote_version == await self.middleware.call(
                            "system.version"):
                        if remote_failover_status == "BACKUP":
                            run_on_backup_node = True

        for alert_source in ALERT_SOURCES.values():
            if isinstance(alert_source, OneShotAlertSource):
                continue

            if not alert_source.schedule.should_run(
                    datetime.utcnow(),
                    self.alert_source_last_run[alert_source.name]):
                continue

            self.alert_source_last_run[alert_source.name] = datetime.utcnow()

            self.logger.trace("Running alert source: %r", alert_source.name)

            try:
                alerts_a = await self.__run_source(alert_source.name)
            except UnavailableException:
                alerts_a = list(self.alerts["A"][alert_source.name].values())
            for alert in alerts_a:
                alert.node = master_node

            alerts_b = []
            if run_on_backup_node and alert_source.run_on_backup_node:
                try:
                    try:
                        alerts_b = await self.middleware.call(
                            "failover.call_remote", "alert.run_source",
                            [alert_source.name])
                    except CallError as e:
                        if e.errno == CallError.EALERTCHECKERUNAVAILABLE:
                            alerts_b = list(
                                self.alerts["B"][alert_source.name].values())
                        else:
                            raise
                    else:
                        alerts_b = [
                            Alert(**dict(alert,
                                         level=(AlertLevel(alert["level"])
                                                if alert["level"] is not None
                                                else alert["level"])))
                            for alert in alerts_b
                        ]
                except Exception:
                    alerts_b = [
                        Alert(
                            title=("Unable to run alert source "
                                   "%(source_name)r on backup node\n"
                                   "%(traceback)s"),
                            args={
                                "source_name": alert_source.name,
                                "traceback": traceback.format_exc(),
                            },
                            key="__remote_call_exception__",
                            level=AlertLevel.CRITICAL)
                    ]
            for alert in alerts_b:
                alert.node = backup_node

            for alert in alerts_a + alerts_b:
                self.__handle_alert(alert_source, alert)

            self.alerts["A"][alert_source.name] = {
                alert.key: alert
                for alert in alerts_a
            }
            self.alerts["B"][alert_source.name] = {
                alert.key: alert
                for alert in alerts_b
            }

    def __handle_alert(self, alert_source, alert):
        existing_alert = self.alerts[alert.node][alert_source.name].get(
            alert.key)

        alert.source = alert_source.name
        if existing_alert is None:
            alert.datetime = alert.datetime or datetime.utcnow()
        else:
            alert.datetime = existing_alert.datetime
        alert.level = alert.level or alert_source.level
        alert.title = alert.title or alert_source.title
        if existing_alert is None:
            alert.dismissed = False
        else:
            alert.dismissed = existing_alert.dismissed

    @private
    async def run_source(self, source_name):
        try:
            return [
                dict(alert.__dict__,
                     level=alert.level.value
                     if alert.level is not None else alert.level)
                for alert in await self.__run_source(source_name)
            ]
        except UnavailableException:
            raise CallError("This alert checker is unavailable",
                            CallError.EALERTCHECKERUNAVAILABLE)

    async def __run_source(self, source_name):
        alert_source = ALERT_SOURCES[source_name]

        try:
            alerts = (await alert_source.check()) or []
        except UnavailableException:
            raise
        except Exception:
            alerts = [
                Alert(
                    title=("Unable to run alert source %(source_name)r\n"
                           "%(traceback)s"),
                    args={
                        "source_name": alert_source.name,
                        "traceback": traceback.format_exc(),
                    },
                    key="__unhandled_exception__",
                    level=AlertLevel.CRITICAL)
            ]
        else:
            if not isinstance(alerts, list):
                alerts = [alerts]

        return alerts

    @periodic(3600)
    async def flush_alerts(self):
        if (not await self.middleware.call('system.is_freenas')
                and await self.middleware.call('failover.licensed')
                and await self.middleware.call('failover.status') == 'BACKUP'):
            return

        await self.middleware.call("datastore.delete", "system.alert", [])

        for alert in self.__get_all_alerts():
            d = alert.__dict__.copy()
            d["level"] = d["level"].value
            del d["mail"]
            await self.middleware.call("datastore.insert", "system.alert", d)

    def __get_all_alerts(self):
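        # Flatten {node: {source: {key: alert}}} into a single list of alerts.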
        return sum([
            sum([list(vv.values()) for vv in v.values()], [])
            for v in self.alerts.values()
        ], [])

    @private
    @accepts(Str("source"), Any("args", null=True))
    @job(lock="process_alerts", transient=True)
    async def oneshot_create(self, job, source, args):
        try:
            alert_source = ALERT_SOURCES[source]
        except KeyError:
            raise CallError(f"Invalid alert source: {source!r}")

        if not isinstance(alert_source, OneShotAlertSource):
            raise CallError(
                f"Alert source {source!r} is not a one-shot alert source")

        alert = await alert_source.create(args)
        if alert is None:
            return

        alert.node = self.node

        self.__handle_alert(alert_source, alert)

        self.alerts[alert.node][alert_source.name][alert.key] = alert

        await self.middleware.call("alert.send_alerts")

    @private
    @accepts(Str("source"), Any("query", null=True))
    @job(lock="process_alerts", transient=True)
    async def oneshot_delete(self, job, source, query):
        try:
            alert_source = ALERT_SOURCES[source]
        except KeyError:
            raise CallError(f"Invalid alert source: {source!r}")

        if not isinstance(alert_source, OneShotAlertSource):
            raise CallError(
                f"Alert source {source!r} is not a one-shot alert source")

        alerts = set(alert.key for alert in await alert_source.delete(
            self.alerts[self.node][alert_source.name].values(), query))
        for k in list(self.alerts[self.node][alert_source.name].keys()):
            if k not in alerts:
                self.alerts[self.node][alert_source.name].pop(k, None)

        await self.middleware.call("alert.send_alerts")

    @private
    def alert_source_clear_run(self, name):
        alert_source = ALERT_SOURCES.get(name)
        if not alert_source:
            raise CallError("Alert source {name!r} not found.", errno.ENOENT)

        self.alert_source_last_run[alert_source.name] = datetime.min
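
Before moving on, the state carry-over in `__handle_alert` above deserves a closer look: an alert that fires again must keep its original timestamp and its dismissed flag. A minimal standalone sketch of that rule (`SimpleAlert` and `merge_alert` are illustrative names, not part of the middleware):

from datetime import datetime


class SimpleAlert:
    def __init__(self, key, dt=None, dismissed=None):
        self.key = key
        self.datetime = dt
        self.dismissed = dismissed


def merge_alert(existing, new):
    # A re-raised alert keeps its first-seen timestamp and the user's
    # dismissed flag; a genuinely new alert gets fresh values.
    if existing is None:
        new.datetime = new.datetime or datetime.utcnow()
        new.dismissed = False
    else:
        new.datetime = existing.datetime
        new.dismissed = existing.dismissed
    return new


old = {"disk_failed": SimpleAlert("disk_failed", datetime(2020, 1, 1), True)}
merged = merge_alert(old.get("disk_failed"), SimpleAlert("disk_failed"))
assert merged.dismissed is True  # dismissal survives the re-check
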
Example #15
class AlertService(Service):
    def __init__(self, middleware):
        super().__init__(middleware)

        self.node = "A"

        self.alerts = []

        self.alert_source_last_run = defaultdict(lambda: datetime.min)

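        # Each policy maps "now" to a bucket key; alerts are re-evaluated for
        # sending whenever that key changes: every run for IMMEDIATELY, once
        # per hour for HOURLY, once per day for DAILY, and never for NEVER.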
        self.policies = {
            "IMMEDIATELY": AlertPolicy(),
            "HOURLY": AlertPolicy(lambda d: (d.date(), d.hour)),
            "DAILY": AlertPolicy(lambda d: (d.date())),
            "NEVER": AlertPolicy(lambda d: None),
        }

    @private
    async def initialize(self):
        if not await self.middleware.call("system.is_freenas"):
            if await self.middleware.call("failover.node") == "B":
                self.node = "B"

        main_sources_dir = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), os.path.pardir,
            "alert", "source")
        sources_dirs = [
            os.path.join(overlay_dir, "alert", "source")
            for overlay_dir in self.middleware.overlay_dirs
        ]
        sources_dirs.insert(0, main_sources_dir)
        for sources_dir in sources_dirs:
            for module in load_modules(sources_dir):
                for cls in load_classes(
                        module, AlertSource,
                    (FilePresenceAlertSource, ThreadedAlertSource)):
                    source = cls(self.middleware)
                    ALERT_SOURCES[source.name] = source

        main_services_dir = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), os.path.pardir,
            "alert", "service")
        services_dirs = [
            os.path.join(overlay_dir, "alert", "service")
            for overlay_dir in self.middleware.overlay_dirs
        ]
        services_dirs.insert(0, main_services_dir)
        for services_dir in services_dirs:
            for module in load_modules(services_dir):
                for cls in load_classes(
                        module, _AlertService,
                    (ThreadedAlertService, ProThreadedAlertService)):
                    ALERT_SERVICES_FACTORIES[cls.name()] = cls

        for alert in await self.middleware.call("datastore.query",
                                                "system.alert"):
            del alert["id"]

            try:
                alert["klass"] = AlertClass.class_by_name[alert["klass"]]
            except KeyError:
                self.logger.info("Alert class %r is no longer present",
                                 alert["klass"])
                continue

            alert["_uuid"] = alert.pop("uuid")
            alert["_source"] = alert.pop("source")
            alert["_key"] = alert.pop("key")
            alert["_text"] = alert.pop("text")

            alert = Alert(**alert)

            self.alerts.append(alert)

        for policy in self.policies.values():
            policy.receive_alerts(datetime.utcnow(), self.alerts)

    @private
    async def terminate(self):
        await self.flush_alerts()

    @accepts()
    async def list_policies(self):
        """
        List all alert policies which indicate the frequency of the alerts.
        """
        return POLICIES

    @accepts()
    async def list_categories(self):
        """
        List all alert categories and the alert classes the system can issue.
        """

        return [{
            "id": alert_category.name,
            "title": alert_category_names[alert_category],
            "classes": sorted(
                [{
                    "id": alert_class.name,
                    "title": alert_class.title,
                } for alert_class in AlertClass.classes
                 if alert_class.category == alert_category],
                key=lambda klass: klass["title"]),
        } for alert_category in AlertCategory
          if any(alert_class.category == alert_category
                 for alert_class in AlertClass.classes)]

    @private
    async def list_sources(self):
        # TODO: this is a deprecated method for backward compatibility

        return [{
            "name": klass["id"],
            "title": klass["title"],
        } for klass in sum(
            [v["classes"] for v in await self.list_categories()], [])]

    @accepts()
    async def list(self):
        """
        List all alerts currently in the system, both active and dismissed.
        """

        classes = (await self.middleware.call(
            "alertclasses.config"))["classes"]

        return [
            dict(alert.__dict__,
                 id=alert.uuid,
                 klass=alert.klass.name,
                 level=classes.get(alert.klass.name,
                                   {}).get("level", alert.klass.level.name),
                 formatted=alert.formatted,
                 one_shot=issubclass(alert.klass, OneShotAlertClass)) for alert
            in sorted(self.alerts,
                      key=lambda alert: (alert.klass.title, alert.datetime))
        ]

    def __alert_by_uuid(self, uuid):
        try:
            return [a for a in self.alerts if a.uuid == uuid][0]
        except IndexError:
            return None

    @accepts(Str("uuid"))
    async def dismiss(self, uuid):
        """
        Dismiss the alert with the given `uuid`.
        """

        alert = self.__alert_by_uuid(uuid)
        if alert is None:
            return

        if issubclass(alert.klass, DismissableAlertClass):
            related_alerts, unrelated_alerts = bisect(
                lambda a: (a.node, a.klass) == (alert.node, alert.klass),
                self.alerts)
            self.alerts = (unrelated_alerts + await alert.klass(
                self.middleware).dismiss(related_alerts, alert))
        elif issubclass(alert.klass, OneShotAlertClass):
            self.alerts = [a for a in self.alerts if a.uuid != uuid]
        else:
            alert.dismissed = True

    @accepts(Str("uuid"))
    def restore(self, uuid):
        """
        Restore a previously dismissed alert with the given `uuid`.
        """

        alert = self.__alert_by_uuid(uuid)
        if alert is None:
            return

        alert.dismissed = False

    @periodic(60)
    @private
    @job(lock="process_alerts", transient=True)
    async def process_alerts(self, job):
        if not await self.middleware.call("system.ready"):
            return

        if (not await self.middleware.call('system.is_freenas')
                and await self.middleware.call('failover.licensed')
                and await self.middleware.call('failover.status') == 'BACKUP'):
            return

        await self.__run_alerts()

        await self.middleware.call("alert.send_alerts")

    @private
    @job(lock="process_alerts", transient=True)
    async def send_alerts(self, job):
        classes = (await self.middleware.call(
            "alertclasses.config"))["classes"]

        now = datetime.now()
        for policy_name, policy in self.policies.items():
            gone_alerts, new_alerts = policy.receive_alerts(now, self.alerts)

            for alert_service_desc in await self.middleware.call(
                    "datastore.query", "system.alertservice"):
                service_gone_alerts = [
                    alert for alert in gone_alerts
                    if (AlertLevel[classes.get(alert.klass.name, {}).get(
                        "level", alert.klass.level.name)].value >= AlertLevel[
                            alert_service_desc["level"]].value
                        and classes.get(alert.klass.name, {}).get(
                            "policy", DEFAULT_POLICY) == policy_name)
                ]
                service_new_alerts = [
                    alert for alert in new_alerts
                    if (AlertLevel[classes.get(alert.klass.name, {}).get(
                        "level", alert.klass.level.name)].value >= AlertLevel[
                            alert_service_desc["level"]].value
                        and classes.get(alert.klass.name, {}).get(
                            "policy", DEFAULT_POLICY) == policy_name)
                ]

                if not service_gone_alerts and not service_new_alerts:
                    continue

                factory = ALERT_SERVICES_FACTORIES.get(
                    alert_service_desc["type"])
                if factory is None:
                    self.logger.error("Alert service %r does not exist",
                                      alert_service_desc["type"])
                    continue

                try:
                    alert_service = factory(self.middleware,
                                            alert_service_desc["attributes"])
                except Exception:
                    self.logger.error(
                        "Error creating alert service %r with parameters=%r",
                        alert_service_desc["type"],
                        alert_service_desc["attributes"],
                        exc_info=True)
                    continue

                if self.alerts or service_gone_alerts or service_new_alerts:
                    try:
                        await alert_service.send(self.alerts,
                                                 service_gone_alerts,
                                                 service_new_alerts)
                    except Exception:
                        self.logger.error("Error in alert service %r",
                                          alert_service_desc["type"],
                                          exc_info=True)

            if policy_name == "IMMEDIATELY":
                for alert in new_alerts:
                    if alert.mail:
                        await self.middleware.call("mail.send", alert.mail)

                if not await self.middleware.call("system.is_freenas"):
                    new_hardware_alerts = [
                        alert for alert in new_alerts if alert.klass.hardware
                    ]
                    if new_hardware_alerts:
                        if await self.middleware.call(
                                "support.is_available_and_enabled"):
                            support = await self.middleware.call(
                                "support.config")
                            msg = [
                                f"* {alert.formatted}"
                                for alert in new_hardware_alerts
                            ]

                            system_info = await self.middleware.call(
                                "system.info")
                            serial = system_info["system_serial"]

                            for name, verbose_name in await self.middleware.call(
                                    "support.fields"):
                                value = support[name]
                                if value:
                                    msg += [
                                        "",
                                        "{}: {}".format(verbose_name, value)
                                    ]

                            try:
                                await self.middleware.call(
                                    "support.new_ticket", {
                                        "title":
                                        "Automatic alert (%s)" % serial,
                                        "body": "\n".join(msg),
                                        "attach_debug": False,
                                        "category": "Hardware",
                                        "criticality": "Loss of Functionality",
                                        "environment": "Production",
                                        "name": "Automatic Alert",
                                        "email": "*****@*****.**",
                                        "phone": "-",
                                    })
                            except Exception:
                                self.logger.error(
                                    f"Failed to create a support ticket",
                                    exc_info=True)

    def __uuid(self):
        return str(uuid.uuid4())

    async def __run_alerts(self):
        master_node = "A"
        backup_node = "B"
        run_on_backup_node = False
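        # Only poll the passive node when both nodes run the same software
        # version and the remote side actually reports BACKUP status.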
        if not await self.middleware.call("system.is_freenas"):
            if await self.middleware.call("failover.licensed"):
                master_node = await self.middleware.call("failover.node")
                try:
                    backup_node = await self.middleware.call(
                        "failover.call_remote", "failover.node")
                    remote_version = await self.middleware.call(
                        "failover.call_remote", "system.version")
                    remote_failover_status = await self.middleware.call(
                        "failover.call_remote", "failover.status")
                except Exception:
                    pass
                else:
                    if remote_version == await self.middleware.call(
                            "system.version"):
                        if remote_failover_status == "BACKUP":
                            run_on_backup_node = True

        for alert_source in ALERT_SOURCES.values():
            if not alert_source.schedule.should_run(
                    datetime.utcnow(),
                    self.alert_source_last_run[alert_source.name]):
                continue

            self.alert_source_last_run[alert_source.name] = datetime.utcnow()

            self.logger.trace("Running alert source: %r", alert_source.name)

            try:
                alerts_a = await self.__run_source(alert_source.name)
            except UnavailableException:
                alerts_a = [
                    alert for alert in self.alerts if alert.node == master_node
                    and alert.source == alert_source.name
                ]
            for alert in alerts_a:
                alert.node = master_node

            alerts_b = []
            if run_on_backup_node and alert_source.run_on_backup_node:
                try:
                    try:
                        alerts_b = await self.middleware.call(
                            "failover.call_remote", "alert.run_source",
                            [alert_source.name])
                    except CallError as e:
                        if e.errno == CallError.EALERTCHECKERUNAVAILABLE:
                            alerts_b = [
                                alert for alert in self.alerts
                                if alert.node == backup_node
                                and alert.source == alert_source.name
                            ]
                        else:
                            raise
                    else:
                        alerts_b = [
                            Alert(**dict(
                                alert,
                                klass=AlertClass.class_by_name[alert["klass"]],
                                _uuid=alert.pop("id"),
                                _source=alert.pop("source"),
                                _key=alert.pop("key"),
                                _text=alert.pop("text"))) for alert in alerts_b
                        ]
                except Exception:
                    alerts_b = [
                        Alert(AlertSourceRunFailedOnBackupNodeAlertClass,
                              args={
                                  "source_name": alert_source.name,
                                  "traceback": traceback.format_exc(),
                              },
                              _source=alert_source.name)
                    ]
            for alert in alerts_b:
                alert.node = backup_node

            for alert in alerts_a + alerts_b:
                self.__handle_alert(alert)

            self.alerts = (
                [a for a in self.alerts if a.source != alert_source.name] +
                alerts_a + alerts_b)

    def __handle_alert(self, alert):
        try:
            existing_alert = [
                a for a in self.alerts
                if (a.node, a.source, a.klass, a.key) ==
                (alert.node, alert.source, alert.klass, alert.key)
            ][0]
        except IndexError:
            existing_alert = None

        if existing_alert is None:
            alert.uuid = self.__uuid()
        else:
            alert.uuid = existing_alert.uuid
        if existing_alert is None:
            alert.datetime = alert.datetime or datetime.utcnow()
        else:
            alert.datetime = existing_alert.datetime
        if existing_alert is None:
            alert.dismissed = False
        else:
            alert.dismissed = existing_alert.dismissed

    @private
    async def run_source(self, source_name):
        try:
            return [
                dict(alert.__dict__, klass=alert.klass.name)
                for alert in await self.__run_source(source_name)
            ]
        except UnavailableException:
            raise CallError("This alert checker is unavailable",
                            CallError.EALERTCHECKERUNAVAILABLE)

    async def __run_source(self, source_name):
        alert_source = ALERT_SOURCES[source_name]

        try:
            alerts = (await alert_source.check()) or []
        except UnavailableException:
            raise
        except Exception:
            alerts = [
                Alert(AlertSourceRunFailedAlertClass,
                      args={
                          "source_name": alert_source.name,
                          "traceback": traceback.format_exc(),
                      })
            ]
        else:
            if not isinstance(alerts, list):
                alerts = [alerts]

        for alert in alerts:
            alert.source = source_name

        return alerts

    @periodic(3600)
    @private
    async def flush_alerts(self):
        if (not await self.middleware.call('system.is_freenas')
                and await self.middleware.call('failover.licensed')
                and await self.middleware.call('failover.status') == 'BACKUP'):
            return

        await self.middleware.call("datastore.delete", "system.alert", [])

        for alert in self.alerts:
            d = alert.__dict__.copy()
            d["klass"] = d["klass"].name
            del d["mail"]
            await self.middleware.call("datastore.insert", "system.alert", d)

    @private
    @accepts(Str("klass"), Any("args", null=True))
    @job(lock="process_alerts", transient=True)
    async def oneshot_create(self, job, klass, args):
        try:
            klass = AlertClass.class_by_name[klass]
        except KeyError:
            raise CallError(f"Invalid alert source: {klass!r}")

        if not issubclass(klass, OneShotAlertClass):
            raise CallError(
                f"Alert class {klass!r} is not a one-shot alert source")

        alert = await klass(self.middleware).create(args)
        if alert is None:
            return

        alert.source = ""

        alert.node = self.node

        self.__handle_alert(alert)

        self.alerts = [a for a in self.alerts
                       if a.uuid != alert.uuid] + [alert]

        await self.middleware.call("alert.send_alerts")

    @private
    @accepts(Str("klass"), Any("query", null=True))
    @job(lock="process_alerts", transient=True)
    async def oneshot_delete(self, job, klass, query):
        try:
            klass = AlertClass.class_by_name[klass]
        except KeyError:
            raise CallError(f"Invalid alert source: {klass!r}")

        if not issubclass(klass, OneShotAlertClass):
            raise CallError(
                f"Alert class {klass!r} is not a one-shot alert source")

        related_alerts, unrelated_alerts = bisect(
            lambda a: (a.node, a.klass) == (self.node, klass), self.alerts)
        self.alerts = (
            unrelated_alerts +
            await klass(self.middleware).delete(related_alerts, query))

        await self.middleware.call("alert.send_alerts")

    @private
    def alert_source_clear_run(self, name):
        alert_source = ALERT_SOURCES.get(name)
        if not alert_source:
            raise CallError("Alert source {name!r} not found.", errno.ENOENT)

        self.alert_source_last_run[alert_source.name] = datetime.min
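
`AlertPolicy` is constructed above but never shown. Judging from the key functions passed to it and from `policy.receive_alerts(now, alerts)` returning `(gone_alerts, new_alerts)`, a sketch of what such a class might look like follows; this is an assumption for illustration, not the middleware's actual implementation:

class AlertPolicySketch:
    def __init__(self, key=lambda now: now):
        # The key function buckets time: identity for IMMEDIATELY,
        # (date, hour) for HOURLY, date for DAILY, None for NEVER.
        self.key = key
        self.last_key_value = None
        self.last_alerts = {}

    def receive_alerts(self, now, alerts):
        gone_alerts, new_alerts = [], []
        key = self.key(now)
        if key != self.last_key_value:
            current = {alert.uuid: alert for alert in alerts}
            gone_alerts = [a for u, a in self.last_alerts.items()
                           if u not in current]
            new_alerts = [a for u, a in current.items()
                          if u not in self.last_alerts]
            self.last_key_value = key
            self.last_alerts = current
        return gone_alerts, new_alerts
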
Example #16
class DatastoreService(Service):
    class Config:
        private = True

    def _filters_to_queryset(self, filters, field_prefix=None):
        opmap = {
            '=': 'exact',
            '!=': 'exact',
            '>': 'gt',
            '>=': 'gte',
            '<': 'lt',
            '<=': 'lte',
            '~': 'regex',
            'in': 'in',
            'nin': 'in',
            '^': 'startswith',
            '$': 'endswith',
        }

        rv = []
        for f in filters:
            if not isinstance(f, (list, tuple)):
                raise ValueError('Filter must be a list: {0}'.format(f))
            if len(f) == 3:
                name, op, value = f
                # id is special
                if field_prefix and name != 'id':
                    name = field_prefix + name
                if op not in opmap:
                    raise ValueError("Invalid operation: {0}".format(op))
                q = Q(**{'{0}__{1}'.format(name, opmap[op]): value})
                if op in ('!=', 'nin'):
                    q.negate()
                rv.append(q)
            elif len(f) == 2:
                op, value = f
                if op == 'OR':
                    or_value = None
                    for q in self._filters_to_queryset(
                            value, field_prefix=field_prefix):
                        if or_value is None:
                            or_value = q
                        else:
                            or_value |= q
                    rv.append(or_value)
                else:
                    raise ValueError('Invalid operation: {0}'.format(op))
            else:
                raise ValueError("Invalid filter {0}".format(f))
        return rv

    def __get_model(self, name):
        """Helper method to get Model for given name
        e.g. network.interfaces -> Interfaces
        """
        app, model = name.split('.', 1)
        return apps.get_model(app, model)

    def __queryset_serialize(self, qs, extend, extend_context, field_prefix,
                             select):
        if extend_context:
            extend_context_value = self.middleware.call_sync(extend_context)
        else:
            extend_context_value = None
        for i in qs:
            yield django_modelobj_serialize(
                self.middleware,
                i,
                extend=extend,
                extend_context=extend_context,
                extend_context_value=extend_context_value,
                field_prefix=field_prefix,
                select=select)

    @accepts(
        Str('name'),
        List('query-filters', default=None, null=True, register=True),
        Dict(
            'query-options',
            Str('extend', default=None, null=True),
            Str('extend_context', default=None, null=True),
            Str('prefix', default=None, null=True),
            Dict('extra', additional_attrs=True),
            List('order_by', default=[]),
            List('select', default=[]),
            Bool('count', default=False),
            Bool('get', default=False),
            Int('limit', default=0),
            Int('offset', default=0),
            default=None,
            null=True,
            register=True,
        ),
    )
    def query(self, name, filters=None, options=None):
        """Query for items in a given collection `name`.

        `filters` is a list where each entry can be in one of the following formats:

            entry: simple_filter | conjunction
            simple_filter: '[' attribute_name, OPERATOR, value ']'
            conjunction: '[' CONJUNCTION, '[' simple_filter (',' simple_filter)* ']]'

            OPERATOR: ('=' | '!=' | '>' | '>=' | '<' | '<=' | '~' | 'in' | 'nin')
            CONJUNCTION: 'OR'

        e.g.

        `['OR', [ ['username', '=', 'root' ], ['uid', '=', 0] ] ]`

        `[ ['username', '=', 'root' ] ]`

        .. examples(websocket)::

          Querying for username "root" and returning a single item:

            :::javascript
            {
              "id": "d51da71b-bb48-4b8b-a8f7-6046fcc892b4",
              "msg": "method",
              "method": "datastore.query",
              "params": ["account.bsdusers", [ ["username", "=", "root" ] ], {"get": true}]
            }
        """
        model = self.__get_model(name)
        if options is None:
            options = {}
        else:
            # We do not want to make changes to original options
            # which might happen with "prefix"
            options = options.copy()

        qs = model.objects.all()

        extra = options.get('extra')
        if extra:
            qs = qs.extra(**extra)

        prefix = options.get('prefix')

        if filters:
            qs = qs.filter(*self._filters_to_queryset(filters, prefix))

        order_by = options.get('order_by')
        if order_by:
            if prefix:
                # Do not change original order_by
                order_by = order_by[:]
                for i, order in enumerate(order_by):
                    if order.startswith('-'):
                        order_by[i] = '-' + prefix + order[1:]
                    else:
                        order_by[i] = prefix + order
            qs = qs.order_by(*order_by)

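        # `count` short-circuits serialization entirely; `offset`/`limit` are
        # plain queryset slices, which Django translates to SQL OFFSET/LIMIT.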
        if options.get('count') is True:
            return qs.count()

        if options.get('offset'):
            qs = qs[options['offset']:]

        if options.get('limit'):
            qs = qs[:options['limit']]

        result = []
        for i in self.__queryset_serialize(
                qs,
                options.get('extend'),
                options.get('extend_context'),
                options.get('prefix'),
                options.get('select'),
        ):
            result.append(i)

        if options.get('get') is True:
            try:
                return result[0]
            except IndexError:
                raise MatchNotFound()

        return result

    @accepts(Str('name'), Ref('query-options'))
    def config(self, name, options=None):
        """
        Get configuration settings object for a given `name`.

        This is a shortcut for `query(name, {"get": true})`.
        """
        if options is None:
            options = {}
        options['get'] = True
        return self.query(name, None, options)

    def validate_data_keys(self, data, model, schema, prefix):
        verrors = ValidationErrors()
        fields = list(
            map(lambda f: f.name.replace(prefix or '', '', 1),
                chain(model._meta.fields, model._meta.many_to_many)))

        # _id is a special condition in filter where the key in question can be a related descriptor in django
        # i.e share_id - so we remove _id and check if the field is present in `fields` list
        for key in data:
            names = (key, key[:-3] if key.endswith('_id') else key)
            if all(name not in fields for name in names):
                verrors.add(f'{schema}.{key}', f'{key} field not recognized')

        verrors.check()

    @accepts(Str('name'), Dict('data', additional_attrs=True),
             Dict('options', Str('prefix', null=True)))
    def insert(self, name, data, options=None):
        """
        Insert a new entry to `name`.
        """
        data = data.copy()
        many_to_many_fields_data = {}
        options = options or {}
        prefix = options.get('prefix')
        model = self.__get_model(name)
        self.validate_data_keys(data, model, 'datastore_insert', prefix)

        for field in chain(model._meta.fields, model._meta.many_to_many):
            if prefix:
                name = field.name.replace(prefix, '', 1)
            else:
                name = field.name
            if name not in data:
                continue
            if isinstance(field, ForeignKey) and data[name] is not None:
                data[name] = field.rel.to.objects.get(pk=data[name])
            if isinstance(field, ManyToManyField):
                many_to_many_fields_data[field.name] = data.pop(name)
            else:
                # field.name is with prefix (if there's one) - we update data dict accordingly with db field names
                data[field.name] = data.pop(name)

        obj = model(**data)
        obj.save()

        for k, v in list(many_to_many_fields_data.items()):
            field = getattr(obj, k)
            field.add(*v)

        return obj.pk

    @accepts(Str('name'), Any('id'), Dict('data', additional_attrs=True),
             Dict('options', Str('prefix', null=True)))
    def update(self, name, id, data, options=None):
        """
        Update an entry `id` in `name`.
        """
        data = data.copy()
        many_to_many_fields_data = {}
        options = options or {}
        prefix = options.get('prefix')
        model = self.__get_model(name)
        self.validate_data_keys(data, model, 'datastore_update', prefix)

        if isinstance(id, (list, tuple)):
            obj = model.objects.filter(*self._filters_to_queryset(id))
            if obj.count() != 1:
                raise CallError(f'{obj.count()} found, expecting one.')
            obj = obj[0]
        else:
            obj = model.objects.get(pk=id)
        for field in chain(model._meta.fields, model._meta.many_to_many):
            if prefix:
                name = field.name.replace(prefix, '', 1)
            else:
                name = field.name
            if name not in data:
                continue
            if isinstance(field, ForeignKey):
                data[name] = field.rel.to.objects.get(
                    pk=data[name]) if data[name] is not None else None
            if isinstance(field, ManyToManyField):
                many_to_many_fields_data[field.name] = data.pop(name)
            else:
                setattr(obj, field.name, data.pop(name))

        obj.save()

        for k, v in list(many_to_many_fields_data.items()):
            field = getattr(obj, k)
            field.clear()
            field.add(*v)

        return obj.pk

    @accepts(Str('name'), Any('id_or_filters'))
    def delete(self, name, id_or_filters):
        """
        Delete the entry (or entries) matched by `id_or_filters` in `name`.
        """
        model = self.__get_model(name)
        if isinstance(id_or_filters, list):
            qs = model.objects.all()
            qs.filter(*self._filters_to_queryset(id_or_filters, None)).delete()
        else:
            model.objects.get(pk=id_or_filters).delete()
        return True

    def sql(self, query, params=None):
        cursor = connection.cursor()
        try:
            if params is None:
                res = cursor.executelocal(query)
            else:
                res = cursor.executelocal(query, params)
            rv = [
                dict([(res.description[i][0], value)
                      for i, value in enumerate(row)])
                for row in cursor.fetchall()
            ]
        except OperationalError as err:
            raise CallError(err)
        finally:
            cursor.close()
        return rv

    @accepts(List('queries'))
    def restore(self, queries):
        """
        Receives a list of SQL queries (usually a database dump)
        and executes them within a transaction.
        """
        return connection.dump_recv(queries)

    @accepts()
    def dump(self):
        """
        Dumps the database, returning a list of SQL commands.
        """
        # FIXME: This could return a few hundred KB of data,
        # we need to investigate a way of doing that in chunks.
        return connection.dump()

    @accepts()
    async def dump_json(self):
        models = []
        for model in django.apps.apps.get_models():
            if not model.__module__.startswith("freenasUI."):
                continue

            try:
                entries = await self.middleware.call(
                    "datastore.sql", f"SELECT * FROM {model._meta.db_table}")
            except CallError as e:
                self.logger.debug("%r", e)
                continue

            models.append({
                "table_name": model._meta.db_table,
                "verbose_name": str(model._meta.verbose_name),
                "fields": [{
                    "name": field.column,
                    "verbose_name": str(field.verbose_name),
                    "database_type": field.db_type(connection),
                } for field in model._meta.get_fields()
                  if not field.is_relation],
                "entries": entries,
            })

        return models
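
To make the translation concrete: the filter lists accepted by `query` map onto Django `Q` objects via the `opmap` table in `_filters_to_queryset`. A standalone sketch of the equivalences (plain Django, no middleware required):

from django.db.models import Q

# ['OR', [['username', '=', 'root'], ['uid', '=', 0]]] becomes roughly:
q_or = Q(username__exact='root') | Q(uid__exact=0)

# [['username', '=', 'root']] becomes a single positive Q:
q_simple = Q(username__exact='root')

# Negative operators ('!=', 'nin') build the same Q, then negate it:
q_neg = Q(uid__exact=0)
q_neg.negate()
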
Example #17
class DatastoreService(Service, FilterMixin, SchemaMixin):
    class Config:
        private = True

    @accepts(
        Str('name'),
        Dict('data', additional_attrs=True),
        Dict(
            'options',
            Bool('ha_sync', default=True),
            Str('prefix', default=''),
        ),
    )
    async def insert(self, name, data, options):
        """
        Insert a new entry to `name`.
        """
        table = self._get_table(name)
        insert, relationships = self._extract_relationships(
            table, options['prefix'], data)

        for column in table.c:
            if column.default is not None:
                insert.setdefault(column.name, column.default.arg)
            if not column.nullable:
                if isinstance(column.type, (types.String, types.Text)):
                    insert.setdefault(column.name, '')

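        # Integer primary keys are auto-generated by the database, so we fetch
        # last_insert_rowid() (SQLite semantics); for any other PK type the
        # caller must have supplied the value in `data`.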
        pk_column = self._get_pk(table)
        return_last_insert_rowid = type(pk_column.type) == sqltypes.Integer
        result = await self.middleware.call(
            'datastore.execute_write',
            table.insert().values(**insert),
            {
                'ha_sync': options['ha_sync'],
                'return_last_insert_rowid': return_last_insert_rowid,
            },
        )
        if return_last_insert_rowid:
            pk = result
        else:
            pk = insert[pk_column.name]

        await self._handle_relationships(pk, relationships)

        await self.middleware.call('datastore.send_insert_events', name,
                                   insert)

        return pk

    @accepts(
        Str('name'),
        Any('id_or_filters'),
        Dict('data', additional_attrs=True),
        Dict(
            'options',
            Bool('ha_sync', default=True),
            Str('prefix', default=''),
        ),
    )
    async def update(self, name, id_or_filters, data, options):
        """
        Update the entry matched by `id_or_filters` in `name`.
        """
        table = self._get_table(name)
        data = data.copy()

        if isinstance(id_or_filters, list):
            rows = await self.middleware.call('datastore.query', name,
                                              id_or_filters,
                                              {'prefix': options['prefix']})
            if len(rows) != 1:
                raise RuntimeError(f'{len(rows)} found, expecting one')

            id = rows[0][self._get_pk(table).name]
        else:
            id = id_or_filters

        for column in table.c:
            if column.foreign_keys:
                if column.name[:-3] in data:
                    data[column.name] = data.pop(column.name[:-3])

        update, relationships = self._extract_relationships(
            table, options['prefix'], data)

        if update:
            result = await self.middleware.call(
                'datastore.execute_write',
                table.update().values(**update).where(
                    self._where_clause(table, id,
                                       {'prefix': options['prefix']})),
                {
                    'ha_sync': options['ha_sync'],
                },
            )
            if result.rowcount != 1:
                raise RuntimeError('No rows were updated')

            await self.middleware.call('datastore.send_update_events', name,
                                       id)

        await self._handle_relationships(id, relationships)

        return id

    def _extract_relationships(self, table, prefix, data):
        relationships = self._get_relationships(table)

        insert = {}
        insert_relationships = []
        for k, v in data.items():
            relationship = relationships.get(prefix + k)
            if relationship:
                insert_relationships.append((relationship, v))
            else:
                insert[self._get_col(table, k, prefix).name] = v

        return insert, insert_relationships

    async def _handle_relationships(self, pk, relationships):
        for relationship, values in relationships:
            assert len(relationship.synchronize_pairs) == 1
            assert len(relationship.secondary_synchronize_pairs) == 1

            local_pk, relationship_local_pk = relationship.synchronize_pairs[0]
            remote_pk, relationship_remote_pk = relationship.secondary_synchronize_pairs[0]

            await self.middleware.call(
                'datastore.execute_write',
                relationship_local_pk.table.delete().where(
                    relationship_local_pk == pk))

            for value in values:
                await self.middleware.call(
                    'datastore.execute_write',
                    relationship_local_pk.table.insert().values({
                        relationship_local_pk.name: pk,
                        relationship_remote_pk.name: value,
                    }))

    def _where_clause(self, table, id_or_filters, options):
        if isinstance(id_or_filters, list):
            return and_(*self._filters_to_queryset(id_or_filters, table,
                                                   options['prefix'], {}))
        else:
            return self._get_pk(table) == id_or_filters

    @accepts(
        Str('name'),
        Any('id_or_filters'),
        Dict(
            'options',
            Bool('ha_sync', default=True),
            Str('prefix', default=''),
        ),
    )
    async def delete(self, name, id_or_filters, options):
        """
        Delete the entry (or entries) matched by `id_or_filters` in `name`.
        """
        table = self._get_table(name)

        await self.middleware.call(
            'datastore.execute_write',
            table.delete().where(
                self._where_clause(table, id_or_filters,
                                   {'prefix': options['prefix']})),
            {
                'ha_sync': options['ha_sync'],
            },
        )

        # FIXME: Sending events for batch deletes not implemented yet
        if not isinstance(id_or_filters, list):
            await self.middleware.call('datastore.send_delete_events', name,
                                       id_or_filters)

        return True
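
The split performed by `_extract_relationships` is the crux of `insert`/`update` above: scalar values go straight into the row, many-to-many values are deferred to `_handle_relationships`. A standalone sketch over plain dicts (the names and the association-table stub are illustrative, not the SQLAlchemy metadata the real code inspects):

def extract_relationships_sketch(relationships, prefix, data):
    """Split `data` into scalar column values and many-to-many value lists."""
    insert = {}
    m2m = []
    for key, value in data.items():
        if prefix + key in relationships:
            # Many-to-many: handled later with a delete-then-insert pass over
            # the association table (see _handle_relationships above).
            m2m.append((relationships[prefix + key], value))
        else:
            insert[prefix + key] = value
    return insert, m2m


insert, m2m = extract_relationships_sketch(
    relationships={"svc_groups": "association-table-stub"},
    prefix="svc_",
    data={"name": "smb", "groups": [1, 2]},
)
assert insert == {"svc_name": "smb"}
assert m2m == [("association-table-stub", [1, 2])]
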
Example #18
class FailoverService(Service):

    CLIENT = RemoteClient()

    @private
    async def remote_ip(self):
        node = await self.middleware.call('failover.node')
        if node == 'A':
            remote = '169.254.10.2'
        elif node == 'B':
            remote = '169.254.10.1'
        else:
            raise CallError(f'Node {node} invalid for call_remote',
                            errno.EHOSTUNREACH)
        return remote

    @accepts(
        Str('method'),
        List('args'),
        Dict(
            'options',
            Int('timeout'),
            Bool('job', default=False),
            Bool('job_return', default=None, null=True),
            Any('callback'),
        ),
    )
    def call_remote(self, method, args, options):
        """
        Call a method in the other node.
        """
        options = options or {}
        job_return = options.get('job_return')
        if job_return is not None:
            options['job'] = 'RETURN'
        try:
            return self.CLIENT.call(method, *args, **options)
        except CallTimeout:
            raise CallError('Call timeout', errno.ETIMEDOUT)

    @private
    def get_remote_os_version(self):

        if self.CLIENT.remote_ip is not None:
            return self.CLIENT.get_remote_os_version()

    @private
    def sendfile(self, token, src, dst):
        self.CLIENT.sendfile(token, src, dst)

    @private
    async def ensure_remote_client(self):
        if self.CLIENT.remote_ip is not None:
            return
        try:
            self.CLIENT.remote_ip = await self.middleware.call(
                'failover.remote_ip')
            self.CLIENT.middleware = self.middleware
            start_daemon_thread(target=self.CLIENT.run)
        except CallError:
            pass

    @private
    def remote_connected(self):
        return self.CLIENT.is_connected()

    @private
    def remote_subscribe(self, name, callback):
        self.CLIENT.subscribe(name, callback)

    @private
    def remote_on_connect(self, callback):
        self.CLIENT.register_connect(callback)

    @private
    def remote_on_disconnect(self, callback):
        self.CLIENT.register_disconnect(callback)
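
A quick usage sketch of `call_remote` as declared above, assuming a `middleware` handle is in scope; `system.info` is just an example of a method that exists on the peer:

# Call "system.info" on the other HA node, giving up after 10 seconds.
# Passing job_return=True would set options['job'] = 'RETURN', as seen in
# the body of call_remote above.
remote_info = middleware.call_sync(
    'failover.call_remote', 'system.info', [], {'timeout': 10})
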
Example #19
class VMWareService(CRUDService):
    class Config:
        datastore = 'storage.vmwareplugin'
        cli_namespace = 'storage.vmware'

    @private
    async def validate_data(self, data, schema_name):
        verrors = ValidationErrors()

        await resolve_hostname(self.middleware, verrors,
                               f'{schema_name}.hostname', data['hostname'])

        if data['filesystem'] not in (
                await self.middleware.call('pool.filesystem_choices')):
            verrors.add(f'{schema_name}.filesystem', 'Invalid ZFS filesystem')

        datastore = data.get('datastore')
        try:
            ds = await self.middleware.run_in_thread(
                self.get_datastores, {
                    'hostname': data.get('hostname'),
                    'username': data.get('username'),
                    'password': data.get('password'),
                })

            if datastore not in ds:
                verrors.add(
                    f'{schema_name}.datastore',
                    f'Datastore "{datastore}" not found on the server')
        except Exception as e:
            verrors.add(f'{schema_name}.datastore',
                        'Failed to connect: ' + str(e))

        if verrors:
            raise verrors

    @accepts(
        Dict('vmware_create',
             Str('datastore', required=True),
             Str('filesystem', required=True),
             Str('hostname', required=True),
             Str('password', private=True, required=True),
             Str('username', required=True),
             register=True))
    async def do_create(self, data):
        """
        Create VMWare snapshot.

        `hostname` is a valid IP address / hostname of a VMWare host. When clustering, this is the vCenter server for
        the cluster.

        `username` and `password` are the credentials used to authorize access to the VMWare host.

        `datastore` is a valid datastore name which exists on the VMWare host.
        """
        await self.validate_data(data, 'vmware_create')

        data['id'] = await self.middleware.call('datastore.insert',
                                                self._config.datastore, data)

        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch('vmware_create', 'vmware_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update VMWare snapshot of `id`.
        """
        old = await self._get_instance(id)
        new = old.copy()

        new.update(data)

        await self.validate_data(new, 'vmware_update')

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
        )

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete VMWare snapshot of `id`.
        """

        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        return response

    @accepts(
        Dict(
            'vmware-creds',
            Str('hostname', required=True),
            Str('username', required=True),
            Str('password', private=True, required=True),
        ))
    def get_datastores(self, data):
        """
        Get datastores from VMWare.
        """
        return sorted(list(self.__get_datastores(data).keys()))

    @accepts(
        Dict(
            'vmware-creds',
            Str('hostname', required=True),
            Str('username', required=True),
            Str('password', private=True, required=True),
        ))
    def match_datastores_with_datasets(self, data):
        """
        Requests datastores from the vCenter server and tries to match them with local filesystems.

        Returns a list of datastores, a list of local filesystems and guessed relationship between them.

        .. examples(websocket)::

            :::javascript
            {
              "id": "d51da71b-bb48-4b8b-a8f7-6046fcc892b4",
              "msg": "method",
              "method": "vmware.match_datastores_with_datasets",
              "params": [{"hostname": "10.215.7.104", "username": "******", "password": "******"}]
            }

            returns

            {
              "datastores": [
                {
                  "name": "10.215.7.102",
                  "description": "NFS mount '/mnt/tank' on 10.215.7.102",
                  "filesystems": ["tank"]
                },
                {
                  "name": "datastore1",
                  "description": "mpx.vmhba0:C0:T0:L0",
                  "filesystems": []
                },
                {
                  "name": "zvol",
                  "description": "iSCSI extent naa.6589cfc000000b3f0a891a2c4e187594",
                  "filesystems": ["tank/vol"]
                }
              ],
              "filesystems": [
                {
                  "type": "FILESYSTEM",
                  "name": "tank",
                  "description": "NFS mount '/mnt/tank' on 10.215.7.102"
                },
                {
                  "type": "VOLUME",
                  "name": "tank/vol",
                  "description": "iSCSI extent naa.6589cfc000000b3f0a891a2c4e187594"
                }
              ]
            }
        """

        datastores = []
        for k, v in self.__get_datastores(data).items():
            if v["type"] == "NFS":
                description = f"NFS mount {v['remote_path']!r} on {' or '.join(v['remote_hostnames'])}"
                matches = [
                    f"{hostname}:{v['remote_path']}"
                    for hostname in v["remote_hostnames"]
                ]
            elif v["type"] == "VMFS":
                description = (f"iSCSI extent {', '.join(v['extent'])}" if any(
                    extent.startswith("naa.")
                    for extent in v["extent"]) else ", ".join(v["extent"]))
                matches = v["extent"]
            else:
                continue

            datastores.append({
                "name": k,
                "description": description,
                "matches": matches,
            })

        ip_addresses = sum([[
            alias["address"] for alias in interface["state"]["aliases"]
            if alias["type"] in ["INET", "INET6"]
        ] for interface in self.middleware.call_sync("interface.query")], [])
        iscsi_extents = defaultdict(list)
        for extent in self.middleware.call_sync("iscsi.extent.query"):
            if extent["path"].startswith("zvol/"):
                zvol = extent["path"][len("zvol/"):]
                iscsi_extents[zvol].append(f"naa.{extent['naa'][2:]}")
        filesystems = []
        for fs in self.middleware.call_sync("pool.dataset.query", [
            ("pool", "in",
             [vol["name"] for vol in self.middleware.call_sync("pool.query")]),
        ]):
            if fs["type"] == "FILESYSTEM":
                filesystems.append({
                    "type":
                    "FILESYSTEM",
                    "name":
                    fs["name"],
                    "description":
                    f"NFS mount {fs['mountpoint']!r} on {' or '.join(ip_addresses)}",
                    "matches": [
                        f"{ip_address}:{fs['mountpoint']}"
                        for ip_address in ip_addresses
                    ],
                })

            if fs["type"] == "VOLUME":
                filesystems.append({
                    "type":
                    "VOLUME",
                    "name":
                    fs["name"],
                    "description":
                    (f"iSCSI extent {', '.join(iscsi_extents[fs['name']])}"
                     if iscsi_extents[fs["name"]] else "Not shared via iSCSI"),
                    "matches":
                    iscsi_extents[fs["name"]],
                })

        for datastore in datastores:
            datastore["filesystems"] = [
                filesystem["name"] for filesystem in filesystems
                if set(filesystem["matches"]) & set(datastore["matches"])
            ]
            datastore.pop("matches")

        for filesystem in filesystems:
            filesystem.pop("matches")

        return {
            "datastores": sorted(datastores, key=lambda datastore: datastore["name"]),
            "filesystems": sorted(filesystems, key=lambda filesystem: filesystem["name"]),
        }
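    # The correlation above is a plain set intersection on the interim
    # "matches" keys. A minimal sketch with hypothetical values:
    #
    #     datastore = {"name": "ds", "matches": ["10.0.0.1:/mnt/tank"]}
    #     filesystem = {"name": "tank", "matches": ["10.0.0.1:/mnt/tank"]}
    #     if set(filesystem["matches"]) & set(datastore["matches"]):
    #         # "tank" would be listed under this datastore's "filesystems"
    #         pass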

    def __get_datastores(self, data):
        self.middleware.call_sync('network.general.will_perform_activity',
                                  'vmware')

        try:
            ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ssl_context.verify_mode = ssl.CERT_NONE
            server_instance = connect.SmartConnect(
                host=data['hostname'],
                user=data['username'],
                pwd=data['password'],
                sslContext=ssl_context,
            )
        except (vim.fault.InvalidLogin, vim.fault.NoPermission,
                vim.fault.RestrictedVersion) as e:
            raise CallError(e.msg, errno.EPERM)
        except vmodl.RuntimeFault as e:
            raise CallError(e.msg)
        except (socket.gaierror, socket.error, OSError) as e:
            raise CallError(str(e), e.errno)

        content = server_instance.RetrieveContent()
        objview = content.viewManager.CreateContainerView(
            content.rootFolder, [vim.HostSystem], True)

        esxi_hosts = objview.view
        objview.Destroy()

        datastores = {}
        for esxi_host in esxi_hosts:
            storage_system = esxi_host.configManager.storageSystem

            if storage_system.fileSystemVolumeInfo is None:
                continue

            for host_mount_info in storage_system.fileSystemVolumeInfo.mountInfo:
                if host_mount_info.volume.type == 'VMFS':
                    datastores[host_mount_info.volume.name] = {
                        'type': host_mount_info.volume.type,
                        'uuid': host_mount_info.volume.uuid,
                        'capacity': host_mount_info.volume.capacity,
                        'vmfs_version': host_mount_info.volume.version,
                        'extent': [
                            partition.diskName
                            for partition in host_mount_info.volume.extent
                        ],
                        'local': host_mount_info.volume.local,
                        'ssd': host_mount_info.volume.ssd,
                    }
                elif host_mount_info.volume.type in ('NFS', 'NFS41'):
                    datastores[host_mount_info.volume.name] = {
                        'type': host_mount_info.volume.type,
                        'capacity': host_mount_info.volume.capacity,
                        'remote_host': host_mount_info.volume.remoteHost,
                        'remote_path': host_mount_info.volume.remotePath,
                        'remote_hostnames': host_mount_info.volume.remoteHostNames,
                        'username': host_mount_info.volume.userName,
                    }
                elif host_mount_info.volume.type in ('other', 'OTHER', 'VFFS'):
                    # Ignore VFFS type, it does not store VMs
                    # Ignore other type, it does not seem to be meaningful
                    pass
                else:
                    self.logger.debug(
                        f'Unknown volume type "{host_mount_info.volume.type}": {host_mount_info.volume}'
                    )
                    continue

        connect.Disconnect(server_instance)

        return datastores

    @accepts(Int('pk'))
    async def get_virtual_machines(self, pk):
        """
        Returns Virtual Machines on the VMWare host identified by `pk`.
        """
        await self.middleware.call('network.general.will_perform_activity',
                                   'vmware')

        item = await self.query([('id', '=', pk)], {'get': True})

        ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        ssl_context.verify_mode = ssl.CERT_NONE
        server_instance = connect.SmartConnect(
            host=item['hostname'],
            user=item['username'],
            pwd=item['password'],
            sslContext=ssl_context,
        )

        content = server_instance.RetrieveContent()
        objview = content.viewManager.CreateContainerView(
            content.rootFolder, [vim.VirtualMachine], True)
        vm_view = objview.view
        objview.Destroy()

        vms = {}
        for vm in vm_view:
            data = {
                'uuid': vm.config.uuid,
                'name': vm.name,
                'power_state': vm.summary.runtime.powerState,
            }
            vms[vm.config.uuid] = data
        return vms

    @accepts(Str('dataset'), Bool('recursive'))
    def dataset_has_vms(self, dataset, recursive):
        """
        Returns "true" if `dataset` is configured with a VMWare snapshot
        """
        return len(self._dataset_get_vms(dataset, recursive)) > 0

    def _dataset_get_vms(self, dataset, recursive):
        f = ["filesystem", "=", dataset]
        if recursive:
            f = [
                "OR",
                [
                    f,
                    ["filesystem", "^", dataset + "/"],
                ],
            ]
        return self.middleware.call_sync("vmware.query", [f])

    @private
    def snapshot_begin(self, dataset, recursive):
        self.middleware.call_sync('network.general.will_perform_activity',
                                  'vmware')

        # If there's a VMWare Plugin object for this filesystem
        # snapshot the VMs before taking the ZFS snapshot.
        # Once we've taken the ZFS snapshot we're going to log back in
        # to VMWare and destroy all the VMWare snapshots we created.
        # We do this because having VMWare snapshots in existence impacts
        # the performance of your VMs.
        qs = self._dataset_get_vms(dataset, recursive)

        # Generate a unique snapshot name that won't collide with anything that exists on the VMWare side.
        vmsnapname = str(uuid.uuid4())

        # Generate a helpful description that is visible on the VMWare side.  Since we
        # are going to be creating VMWare snaps, if one gets left dangling this will
        # help determine where it came from.
        vmsnapdescription = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')} TrueNAS Created Snapshot"

        # We keep track of snapshots per VMWare "task" because we are going to iterate
        # over all the VMWare tasks for a given ZFS filesystem, do all the VMWare snapshotting
        # then take the ZFS snapshot, then iterate again over all the VMWare "tasks" and undo
        # all the snaps we created in the first place.
        vmsnapobjs = []
        for vmsnapobj in qs:
            # Data structures that will be used to keep track of VMs that are snapped,
            # as well as VMs we tried to snap and failed, and VMs we realized we couldn't
            # snapshot.
            snapvms = []
            snapvmfails = []
            snapvmskips = []

            try:
                ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                ssl_context.verify_mode = ssl.CERT_NONE
                si = connect.SmartConnect(host=vmsnapobj["hostname"],
                                          user=vmsnapobj["username"],
                                          pwd=vmsnapobj["password"],
                                          sslContext=ssl_context)
                content = si.RetrieveContent()
            except Exception as e:
                self.logger.warning("VMware login to %s failed",
                                    vmsnapobj["hostname"],
                                    exc_info=True)
                self._alert_vmware_login_failed(vmsnapobj, e)
                continue

            # There's no point in even considering VMs that are paused or powered off.
            vm_view = content.viewManager.CreateContainerView(
                content.rootFolder, [vim.VirtualMachine], True)
            for vm in vm_view.view:
                if vm.summary.runtime.powerState != "poweredOn":
                    continue

                if self._doesVMDependOnDataStore(vm, vmsnapobj["datastore"]):
                    try:
                        if self._canSnapshotVM(vm):
                            if not self._findVMSnapshotByName(vm, vmsnapname):
                                # have we already created a snapshot of the VM for this volume
                                # iteration? can happen if the VM uses two datasets (a and b)
                                # where both datasets are mapped to the same ZFS volume in TrueNAS.
                                VimTask.WaitForTask(
                                    vm.CreateSnapshot_Task(
                                        name=vmsnapname,
                                        description=vmsnapdescription,
                                        memory=False,
                                        quiesce=True,
                                    ))
                            else:
                                self.logger.debug(
                                    "Not creating snapshot %s for VM %s because it "
                                    "already exists", vmsnapname, vm)
                        else:
                            # TODO:
                            # we can try to shutdown the VM, if the user provided us an ok to do
                            # so (might need a new list property in obj to know which VMs are
                            # fine to shutdown and a UI to specify such exceptions)
                            # otherwise can skip VM snap and then make a crash-consistent zfs
                            # snapshot for this VM
                            self.logger.info(
                                "Can't snapshot VM %s that depends on "
                                "datastore %s and filesystem %s. "
                                "Possibly using PT devices. Skipping.",
                                vm.name, vmsnapobj["datastore"], dataset)
                            snapvmskips.append(vm.config.uuid)
                    except Exception as e:
                        self.logger.warning("Snapshot of VM %s failed",
                                            vm.name,
                                            exc_info=True)
                        self.middleware.call_sync(
                            "alert.oneshot_create",
                            "VMWareSnapshotCreateFailed", {
                                "hostname": vmsnapobj["hostname"],
                                "vm": vm.name,
                                "snapshot": vmsnapname,
                                "error": self._vmware_exception_message(e),
                            })
                        snapvmfails.append([vm.config.uuid, vm.name])

                    snapvms.append(vm.config.uuid)

            connect.Disconnect(si)

            vmsnapobjs.append({
                "vmsnapobj": vmsnapobj,
                "snapvms": snapvms,
                "snapvmfails": snapvmfails,
                "snapvmskips": snapvmskips,
            })

        # At this point we've completed snapshotting VMs.

        if not vmsnapobjs:
            return None

        return {
            "vmsnapname": vmsnapname,
            "vmsnapobjs": vmsnapobjs,
            "vmsynced": vmsnapobjs and all(
                len(vmsnapobj["snapvms"]) > 0 and len(vmsnapobj["snapvmfails"]) == 0
                for vmsnapobj in vmsnapobjs
            ),
        }
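    # A minimal sketch (hypothetical call sites) of how this pairs with
    # snapshot_end below; the context returned here is passed back once the
    # ZFS snapshot has been taken:
    #
    #     context = self.snapshot_begin("tank/vms", False)
    #     try:
    #         ...  # take the ZFS snapshot of tank/vms
    #     finally:
    #         if context is not None:
    #             self.snapshot_end(context)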

    @private
    def snapshot_end(self, context):
        self.middleware.call_sync('network.general.will_perform_activity',
                                  'vmware')

        vmsnapname = context["vmsnapname"]

        for elem in context["vmsnapobjs"]:
            vmsnapobj = elem["vmsnapobj"]

            try:
                ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                ssl_context.verify_mode = ssl.CERT_NONE
                si = connect.SmartConnect(host=vmsnapobj["hostname"],
                                          user=vmsnapobj["username"],
                                          pwd=vmsnapobj["password"],
                                          sslContext=ssl_context)
                self._delete_vmware_login_failed_alert(vmsnapobj)
            except Exception as e:
                self.logger.warning("VMware login failed to %s",
                                    vmsnapobj["hostname"])
                self._alert_vmware_login_failed(vmsnapobj, e)
                continue

            # vm is an object, so we'll dereference that object anywhere it's user facing.
            for vm_uuid in elem["snapvms"]:
                vm = si.content.searchIndex.FindByUuid(None, vm_uuid, True)
                if not vm:
                    self.logger.debug("Could not find VM %s", vm_uuid)
                    continue
                if [vm_uuid, vm.name] not in elem["snapvmfails"] and vm_uuid not in elem["snapvmskips"]:
                    # The test above is paranoia.  It shouldn't be possible for a vm to
                    # be in more than one of the three dictionaries.
                    snap = self._findVMSnapshotByName(vm, vmsnapname)
                    try:
                        if snap:
                            VimTask.WaitForTask(snap.RemoveSnapshot_Task(True))
                    except Exception as e:
                        self.logger.debug(
                            "Exception removing snapshot %s on %s",
                            vmsnapname,
                            vm.name,
                            exc_info=True)
                        self.middleware.call_sync(
                            "alert.oneshot_create",
                            "VMWareSnapshotDeleteFailed", {
                                "hostname": vmsnapobj["hostname"],
                                "vm": vm.name,
                                "snapshot": vmsnapname,
                                "error": self._vmware_exception_message(e),
                            })

            connect.Disconnect(si)

    @private
    @job()
    def periodic_snapshot_task_begin(self, job, task_id):
        task = self.middleware.call_sync("pool.snapshottask.query",
                                         [["id", "=", task_id]], {"get": True})

        return self.snapshot_begin(task["dataset"], task["recursive"])

    @private
    @accepts(Any("context", private=True))
    @job()
    def periodic_snapshot_task_end(self, job, context):
        return self.snapshot_end(context)

    # Check if a VM is using a certain datastore
    def _doesVMDependOnDataStore(self, vm, dataStore):
        try:
            # simple case, VM config data is on a datastore.
            # not sure how critical it is to snapshot the store that has config data, but best to do so
            for i in vm.datastore:
                if i.info.name.startswith(dataStore):
                    return True
            # check if VM has disks on the data store
            # we check both "diskDescriptor" and "diskExtent" types of files
            for device in vm.config.hardware.device:
                if device.backing is None:
                    continue
                if hasattr(device.backing, 'fileName'):
                    if device.backing.datastore.info.name == dataStore:
                        return True
        except Exception:
            self.logger.debug('Exception in doesVMDependOnDataStore',
                              exc_info=True)

        return False

    # check if VMware can snapshot a VM
    def _canSnapshotVM(self, vm):
        try:
            # check for PCI pass-through devices
            for device in vm.config.hardware.device:
                if isinstance(device, vim.VirtualPCIPassthrough):
                    return False
            # consider supporting more cases of VMs that can't be snapshotted
            # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1006392
        except Exception:
            self.logger.debug('Exception in canSnapshotVM', exc_info=True)

        return True

    def _findVMSnapshotByName(self, vm, snapshotName):
        try:
            if vm.snapshot is None:
                return None

            for tree in vm.snapshot.rootSnapshotList:
                result = self._findVMSnapshotByNameInTree(tree, snapshotName)
                if result:
                    return result
        except Exception:
            self.logger.debug('Exception in _findVMSnapshotByName',
                              exc_info=True)

        return None

    def _findVMSnapshotByNameInTree(self, tree, snapshotName):
        if tree.name == snapshotName:
            return tree.snapshot

        for i in tree.childSnapshotList:
            if i.name == snapshotName:
                return i.snapshot

            if hasattr(i, "childSnapshotList"):
                result = self._findVMSnapshotByNameInTree(i, snapshotName)
                if result:
                    return result

        return None
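    # For illustration: given a hypothetical snapshot tree
    # root "daily" -> child "hourly" -> child "adhoc", searching for "adhoc"
    # recurses through each childSnapshotList and returns the matching
    # `.snapshot` managed object, or None when the name is absent.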

    def _vmware_exception_message(self, e):
        if hasattr(e, "msg"):
            return e.msg
        else:
            return str(e)

    def _alert_vmware_login_failed(self, vmsnapobj, e):
        self.middleware.call_sync(
            "alert.oneshot_create", "VMWareLoginFailed", {
                "hostname": vmsnapobj["hostname"],
                "error": self._vmware_exception_message(e),
            })

    def _delete_vmware_login_failed_alert(self, vmsnapobj):
        self.middleware.call_sync("alert.oneshot_delete", "VMWareLoginFailed",
                                  vmsnapobj["hostname"])
Example #20
class CacheService(Service):
    class Config:
        private = True

    def __init__(self, *args, **kwargs):
        super(CacheService, self).__init__(*args, **kwargs)
        self.__cache = {}
        self.kv_tuple = namedtuple('Cache', ['value', 'timeout'])

    @accepts(Str('key'))
    def has_key(self, key):
        """
        Check if given `key` is in cache.
        """
        return key in self.__cache

    @accepts(Str('key'))
    def get(self, key):
        """
        Get `key` from cache.

        Raises:
            KeyError: not found in the cache
        """

        if self.__cache[key].timeout > 0:
            self.get_timeout(key)

        return self.__cache[key].value

    @accepts(Str('key'), Any('value'), Int('timeout', default=0))
    def put(self, key, value, timeout):
        """
        Put `key` with `value` in the cache. A nonzero `timeout` expires the
        entry that many seconds after insertion.
        """

        if timeout != 0:
            timeout = time.monotonic() + timeout

        v = self.kv_tuple(value=value, timeout=timeout)
        self.__cache[key] = v

    @accepts(Str('key'))
    def pop(self, key):
        """
        Removes and returns `key` from cache.
        """
        cache = self.__cache.pop(key, None)

        if cache is not None:
            cache = cache.value

        return cache

    @private
    def get_timeout(self, key):
        """
        Check if `key` has expired; if so, evict it and raise KeyError.
        """
        now = time.monotonic()
        value, timeout = self.__cache[key]

        if now >= timeout:
            # Bust the cache
            del self.__cache[key]

            raise KeyError(f'{key} has expired')

    @private
    def get_or_put(self, key, timeout, method):
        try:
            return self.get(key)
        except KeyError:
            value = method()
            self.put(key, value, timeout)
            return value
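# A minimal usage sketch, assuming `cache` is an instance of the service above
# (call sites hypothetical): get_or_put only invokes `method` on a cache miss,
# and a nonzero timeout expires the entry via the monotonic clock.
#
#     value = cache.get_or_put("vm_list", 30, fetch_vm_list)
#     cache.has_key("vm_list")   # True until ~30 seconds elapse
#     cache.get("vm_list")       # raises KeyError once expired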
Example #21
class ConsulService(Service):

    INFLUXDB_API = [
        'host', 'username', 'password', 'database', 'series-name', 'enabled'
    ]
    SLACK_API = [
        'cluster-name', 'url', 'channel', 'username', 'icon-url', 'detailed',
        'enabled'
    ]
    MATTERMOST_API = [
        'cluster', 'url', 'username', 'password', 'team', 'channel', 'enabled'
    ]
    PAGERDUTY_API = ['service-key', 'client-name', 'enabled']
    HIPCHAT_API = [
        'from', 'cluster-name', 'base-url', 'room-id', 'auth-token', 'enabled'
    ]
    OPSGENIE_API = ['cluster-name', 'api-key', 'enabled']
    AWSSNS_API = ['region', 'topic-arn', 'enabled']
    VICTOROPS_API = ['api-key', 'routing-key', 'enabled']

    @accepts(Str('key'), Any('value'))
    def set_kv(self, key, value):
        """
        Sets `key` with `value` in Consul KV.

        Returns:
            bool: True if the value was set successfully, otherwise False.
        """
        c = consul.Consul()
        return c.kv.put(str(key), str(value))

    @accepts(Str('key'))
    def get_kv(self, key):
        """
        Gets value of `key` in Consul KV.

        Returns:
            str: The value, or an empty string if the key does not exist.
        """
        c = consul.Consul()
        index, data = c.kv.get(key)
        if data is not None:
            return data['Value'].decode("utf-8")
        else:
            return ""

    @accepts(Str('key'))
    def delete_kv(self, key):
        """
        Delete a `key` in Consul KV.

        Returns:
            bool: True if the key was deleted successfully, otherwise False.
        """
        c = consul.Consul()
        return c.kv.delete(str(key))
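    # A minimal round-trip sketch (hypothetical key); note that set_kv
    # stringifies its value, so get_kv always hands back a str:
    #
    #     self.set_kv("demo", 42)    # stored as "42"
    #     self.get_kv("demo")        # -> "42"
    #     self.delete_kv("demo")     # -> True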

    def _convert_keys(self, data):
        """
        Transforms keys that contain "_" into keys that use "-".

        Returns:
            dict: The same data with "-" instead of "_" in key names.
        """
        # Iterate over a copy of the keys: mutating a dict while iterating
        # over its live key view raises RuntimeError in Python 3.
        for key in list(data):
            new_key = key.replace("_", "-")
            if new_key != key:
                data[new_key] = data[key]
                del data[key]

        return data
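    # For illustration (hypothetical input):
    #
    #     self._convert_keys({"cluster_name": "prod", "enabled": True})
    #     # -> {"cluster-name": "prod", "enabled": True}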

    def _api_keywords(self, api_list, data):
        """
        Helper to convert the API list into a dict.

        Returns:
            dict: Keys from `api_list` mapped to their values in `data`.
        """
        new_dict = {k: data.get(k, None) for k in api_list}

        return new_dict

    def _insert_keys(self, prefix, data, api_keywords):
        """
        Helper to insert keys into consul.

        Note: because 'from' is a reserved word in Python, we can't use it
        directly; instead we use 'hfrom' and convert it later.
        """
        new_dict = self._api_keywords(api_keywords, data)

        for k, v in new_dict.items():
            if k == 'hfrom':
                k = 'from'
            self.set_kv(prefix + k, v)
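    # For illustration, when do_create() below runs for the Slack service,
    # this writes keys such as (values come from the caller's data):
    #
    #     consul-alerts/config/notifiers/slack/url
    #     consul-alerts/config/notifiers/slack/channel
    #     consul-alerts/config/notifiers/slack/enabled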

    def _delete_keys(self, prefix, data, api_keywords):
        """
        Helper to delete keys from consul.

        Note: the same applies to 'from', as explained in _insert_keys().
        """
        new_dict = self._api_keywords(api_keywords, data)

        for k in new_dict.keys():
            if k == 'hfrom':
                k = 'from'
            self.delete_kv(prefix + k)

    def do_create(self, data):
        """
        Helper to insert keys into consul based on the service API.
        """
        consul_prefix = 'consul-alerts/config/notifiers/'
        cdata = self._convert_keys(data)

        alert_service = data.pop('consulalert-type')
        consul_prefix = consul_prefix + alert_service.lower() + '/'

        if alert_service == 'InfluxDB':
            self._insert_keys(consul_prefix, cdata, self.INFLUXDB_API)
        elif alert_service == 'Slack':
            self._insert_keys(consul_prefix, cdata, self.SLACK_API)
        elif alert_service == 'Mattermost':
            self._insert_keys(consul_prefix, cdata, self.MATTERMOST_API)
        elif alert_service == 'PagerDuty':
            self._insert_keys(consul_prefix, cdata, self.PAGERDUTY_API)
        elif alert_service == 'HipChat':
            self._insert_keys(consul_prefix, cdata, self.HIPCHAT_API)
        elif alert_service == 'OpsGenie':
            self._insert_keys(consul_prefix, cdata, self.OPSGENIE_API)
        elif alert_service == 'AWS-SNS':
            self._insert_keys(consul_prefix, cdata, self.AWSSNS_API)
        elif alert_service == 'VictorOps':
            self._insert_keys(consul_prefix, cdata, self.VICTOROPS_API)

    def do_delete(self, alert_service, data):
        """
        Helper to delete the keys from consul based on the service API.
        """
        consul_prefix = 'consul-alerts/config/notifiers/' + alert_service.lower() + '/'
        cdata = self._convert_keys(data)

        if alert_service == 'InfluxDB':
            self._delete_keys(consul_prefix, cdata, self.INFLUXDB_API)
        elif alert_service == 'Slack':
            self._delete_keys(consul_prefix, cdata, self.SLACK_API)
        elif alert_service == 'Mattermost':
            self._delete_keys(consul_prefix, cdata, self.MATTERMOST_API)
        elif alert_service == 'PagerDuty':
            self._delete_keys(consul_prefix, cdata, self.PAGERDUTY_API)
        elif alert_service == 'HipChat':
            self._delete_keys(consul_prefix, cdata, self.HIPCHAT_API)
        elif alert_service == 'OpsGenie':
            self._delete_keys(consul_prefix, cdata, self.OPSGENIE_API)
        elif alert_service == 'AWS-SNS':
            self._delete_keys(consul_prefix, cdata, self.AWSSNS_API)
        elif alert_service == 'VictorOps':
            self._delete_keys(consul_prefix, cdata, self.VICTOROPS_API)