Example No. 1
class TunableService(CRUDService):
    class Config:
        datastore = 'system.tunable'
        datastore_prefix = 'tun_'
        datastore_extend = 'tunable.upper'

    def __init__(self, *args, **kwargs):
        super(TunableService, self).__init__(*args, **kwargs)
        self.__default_sysctl = {}

    @private
    async def get_default_value(self, oid):
        return self.__default_sysctl[oid]

    @private
    async def set_default_value(self, oid, value):
        if oid not in self.__default_sysctl:
            self.__default_sysctl[oid] = value

    @accepts(Dict(
        'tunable_create',
        Str('var', validators=[Match(r'^[\w\.]+$')], required=True),
        Str('value', required=True),
        Str('type', enum=['LOADER', 'RC', 'SYSCTL'], required=True),
        Str('comment'),
        Bool('enabled', default=True),
        register=True
    ))
    async def do_create(self, data):
        """
        Create a Tunable.

        `var` represents the name of the sysctl/loader/rc variable.

        `type` should be one of the following:
        1) LOADER     -     Configure `var` for loader(8)
        2) RC         -     Configure `var` for rc(8)
        3) SYSCTL     -     Configure `var` for sysctl(8)
        """
        await self.clean(data, 'tunable_create')
        await self.validate(data, 'tunable_create')
        await self.lower(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call('service.reload', data['type'])

        return await self._get_instance(data['id'])

    @accepts(
        Int('id'),
        Patch(
            'tunable_create',
            'tunable_update',
            ('attr', {'update': True})
        )
    )
    async def do_update(self, id, data):
        """
        Update Tunable of `id`.
        """
        old = await self._get_instance(id)

        new = old.copy()
        new.update(data)

        await self.clean(new, 'tunable_update', old=old)
        await self.validate(new, 'tunable_update')

        await self.lower(new)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call('service.reload', new['type'])

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete Tunable of `id`.
        """
        tunable = await self._get_instance(id)
        await self.lower(tunable)
        if tunable['type'] == 'sysctl':
            # Restore the default value, if it is possible.
            value_default = None
            try:
                value_default = await self.get_default_value(tunable["var"])
            except KeyError:
                pass
            if value_default is not None:
                ret = subprocess.run(
                    ['sysctl', f'{tunable["var"]}="{value_default}"'],
                    capture_output=True
                )
                if ret.returncode:
                    self.middleware.logger.debug(
                        'Failed to set sysctl %s -> %s: %s',
                        tunable['var'], value_default, ret.stderr.decode(),
                    )

        response = await self.middleware.call(
            'datastore.delete',
            self._config.datastore,
            id
        )

        await self.middleware.call('service.reload', tunable['type'].lower())

        return response

    @private
    async def lower(self, data):
        data['type'] = data['type'].lower()

        return data

    @private
    async def upper(self, data):
        data['type'] = data['type'].upper()

        return data

    @private
    async def clean(self, tunable, schema_name, old=None):
        verrors = ValidationErrors()
        skip_dupe = False
        tun_comment = tunable.get('comment')
        tun_value = tunable['value']
        tun_var = tunable['var']

        if tun_comment is not None:
            tunable['comment'] = tun_comment.strip()

        if '"' in tun_value or "'" in tun_value:
            verrors.add(f"{schema_name}.value",
                        'Quotes in value are not allowed')

        if schema_name == 'tunable_update' and old:
            old_tun_var = old['var']

            if old_tun_var == tun_var:
                # They aren't trying to change to a new name, just updating
                skip_dupe = True

        if not skip_dupe:
            tun_vars = await self.middleware.call(
                'datastore.query', self._config.datastore, [('tun_var', '=',
                                                             tun_var)])

            if tun_vars:
                verrors.add(f"{schema_name}.value",
                            'This variable already exists')

        if verrors:
            raise verrors

        return tunable

    @private
    async def validate(self, tunable, schema_name):
        sysctl_re = \
            re.compile(r'[a-z][a-z0-9_]+\.([a-z0-9_]+\.)*[a-z0-9_]+', re.I)

        loader_re = \
            re.compile(r'[a-z][a-z0-9_]+\.*([a-z0-9_]+\.)*[a-z0-9_]+', re.I)

        verrors = ValidationErrors()
        tun_var = tunable['var'].lower()
        tun_type = tunable['type'].lower()

        if tun_type == 'loader' or tun_type == 'rc':
            err_msg = "Value can start with a letter and end with an alphanumeric. Aphanumeric and underscore" \
                      " characters are allowed"
        else:
            err_msg = 'Value can start with a letter and end with an alphanumeric. A period (.) once is a must.' \
                      ' Alphanumeric and underscore characters are allowed'

        if (
            tun_type in ('loader', 'rc') and
            not loader_re.match(tun_var)
        ) or (
            tun_type == 'sysctl' and
            not sysctl_re.match(tun_var)
        ):
            verrors.add(f"{schema_name}.var", err_msg)

        if verrors:
            raise verrors
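
Usage sketch (not part of the service above): the CRUD methods are normally reached through the middleware client. The snippet below is a minimal, hypothetical example assuming the `middlewared.client.Client` API and an accessible middleware socket.

from middlewared.client import Client

# Hypothetical sketch: create a sysctl tunable matching the 'tunable_create'
# schema above, change its value, then delete it again.
with Client() as c:
    tunable = c.call('tunable.create', {
        'var': 'kern.ipc.somaxconn',   # must match the ^[\w\.]+$ validator
        'value': '1024',               # quotes are rejected by clean()
        'type': 'SYSCTL',
        'comment': 'raise listen queue depth',
        'enabled': True,
    })
    c.call('tunable.update', tunable['id'], {'value': '2048'})
    c.call('tunable.delete', tunable['id'])
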
Example No. 2
class ACLBase(ServicePartBase):

    @accepts(
        Dict(
            'filesystem_acl',
            Str('path', required=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            List(
                'dacl',
                items=[
                    Dict(
                        'aclentry',
                        Str('tag', enum=['owner@', 'group@', 'everyone@', 'USER', 'GROUP']),
                        Int('id', null=True),
                        Str('type', enum=['ALLOW', 'DENY']),
                        Dict(
                            'perms',
                            Bool('READ_DATA'),
                            Bool('WRITE_DATA'),
                            Bool('APPEND_DATA'),
                            Bool('READ_NAMED_ATTRS'),
                            Bool('WRITE_NAMED_ATTRS'),
                            Bool('EXECUTE'),
                            Bool('DELETE_CHILD'),
                            Bool('READ_ATTRIBUTES'),
                            Bool('WRITE_ATTRIBUTES'),
                            Bool('DELETE'),
                            Bool('READ_ACL'),
                            Bool('WRITE_ACL'),
                            Bool('WRITE_OWNER'),
                            Bool('SYNCHRONIZE'),
                            Str('BASIC', enum=['FULL_CONTROL', 'MODIFY', 'READ', 'TRAVERSE']),
                        ),
                        Dict(
                            'flags',
                            Bool('FILE_INHERIT'),
                            Bool('DIRECTORY_INHERIT'),
                            Bool('NO_PROPAGATE_INHERIT'),
                            Bool('INHERIT_ONLY'),
                            Bool('INHERITED'),
                            Str('BASIC', enum=['INHERIT', 'NOINHERIT']),
                        ),
                    ),
                    Dict(
                        'posix1e_ace',
                        Bool('default', default=False),
                        Str('tag', enum=['USER_OBJ', 'GROUP_OBJ', 'USER', 'GROUP', 'OTHER', 'MASK']),
                        Int('id', default=-1),
                        Dict(
                            'perms',
                            Bool('READ', default=False),
                            Bool('WRITE', default=False),
                            Bool('EXECUTE', default=False),
                        ),
                    )
                ],
                default=[]
            ),
            Dict(
                'nfs41_flags',
                Bool('autoinherit', default=False),
                Bool('protected', default=False),
            ),
            Str('acltype', enum=[x.name for x in ACLType], default=ACLType.NFS4.name),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
                Bool('canonicalize', default=True)
            )
        )
    )
    @job(lock="perm_change")
    def setacl(self, job, data):
        """
        Set ACL of a given path. Takes the following parameters:
        `path` full path to directory or file.

        `dacl` ACL entries. Formatting depends on the underlying `acltype`. NFS4ACL requires
        NFSv4 entries. POSIX1e requires POSIX1e entries.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `recursive` apply the ACL recursively

        `traverse` traverse filesystem boundaries (ZFS datasets)

        `stripacl` convert the ACL to a trivial ACL. An ACL is trivial if it can be expressed as a file mode
        without losing any access rules.

        `canonicalize` reorder ACL entries so that they are in canonical form as described
        in the Microsoft documentation MS-DTYP 2.4.5 (ACL). This only applies to NFSv4 ACLs.

        In the case of NFSv4 ACLs, USER_OBJ, GROUP_OBJ, and EVERYONE are replaced with owner@, group@, everyone@
        for consistency with getfacl and setfacl. If one of the aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled.
        """

    @accepts(
        Str('path'),
        Bool('simplified', default=True),
    )
    def getacl(self, path, simplified=True):
        """
        Return ACL of a given path. This may return a POSIX1e ACL or an NFSv4 ACL. The ACL type is indicated
        by the `ACLType` key.

        Errata about ACLType NFSv4:

        `simplified` returns a shortened form of the ACL permset and flags.

        `TRAVERSE` sufficient rights to traverse a directory, but not read contents.

        `READ` sufficient rights to traverse a directory, and read file contents.

        `MODIFY` sufficient rights to traverse, read, write, and modify a file. Equivalent to modify_set.

        `FULL_CONTROL` all permissions.

        If the permissions do not fit within one of the pre-defined simplified permission types, then
        the full ACL entry will be returned.

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of the aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled. We also remove it here to avoid confusion.
        """

    @accepts(
        Dict(
            'filesystem_ownership',
            Str('path', required=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict(
                'options',
                Bool('recursive', default=False),
                Bool('traverse', default=False)
            )
        )
    )
    @job(lock="perm_change")
    def chown(self, job, data):
        """
        Change owner or group of file at `path`.

        `uid` and `gid` specify new owner of the file. If either
        key is absent or None, then existing value on the file is not
        changed.

        `recursive` performs action recursively, but does
        not traverse filesystem mount points.

        If `traverse` and `recursive` are specified, then the chown
        operation will traverse filesystem mount points.
        """

    @accepts(
        Dict(
            'filesystem_permission',
            Str('path', required=True),
            UnixPerm('mode', null=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
            )
        )
    )
    @job(lock="perm_change")
    def setperm(self, job, data):
        """
        Remove extended ACL from specified path.

        If `mode` is specified then the mode will be applied to the
        path and files and subdirectories depending on which `options` are
        selected. Mode should be formatted as string representation of octal
        permissions bits.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `stripacl` setperm will fail if an extended ACL is present on `path`,
        unless `stripacl` is set to True.

        `recursive` remove ACLs recursively, but do not traverse dataset
        boundaries.

        `traverse` remove ACLs from child datasets.

        If no `mode` is set, and `stripacl` is True, then non-trivial ACLs
        will be converted to trivial ACLs. An ACL is trivial if it can be
        expressed as a file mode without losing any access rules.

        """

    @accepts()
    async def default_acl_choices(self):
        """
        Get list of default ACL types.
        """

    @accepts(
        Str('acl_type', default='OPEN', enum=ACLDefault.options()),
        Str('share_type', default='NONE', enum=['NONE', 'AFP', 'SMB', 'NFS']),
    )
    async def get_default_acl(self, acl_type, share_type):
        """
Example No. 3
class RsyncTaskService(TaskPathService):

    share_task_type = 'Rsync'

    class Config:
        datastore = 'tasks.rsync'
        datastore_prefix = 'rsync_'
        datastore_extend = 'rsynctask.rsync_task_extend'
        datastore_extend_context = 'rsynctask.rsync_task_extend_context'
        cli_namespace = 'task.rsync'

    ENTRY = Patch(
        'rsync_task_create',
        'rsync_task_entry',
        ('rm', {
            'name': 'validate_rpath'
        }),
        ('add', Int('id')),
        ('add', Bool('locked')),
        ('add', Dict('job', null=True, additional_attrs=True)),
    )

    @private
    async def rsync_task_extend(self, data, context):
        try:
            data['extra'] = shlex.split(data['extra'].replace('"',
                                                              r'"\"').replace(
                                                                  "'", r'"\"'))
        except ValueError:
            # Handle the case where the extra value was misconfigured by older versions.
            # Fall back to a plain whitespace split when shlex cannot parse it.
            data['extra'] = data['extra'].split()

        for field in ('mode', 'direction'):
            data[field] = data[field].upper()
        Cron.convert_db_format_to_schedule(data)
        data['job'] = context['jobs'].get(data['id'])
        return data

    @private
    async def rsync_task_extend_context(self, rows, extra):
        jobs = {}
        for j in await self.middleware.call("core.get_jobs",
                                            [("method", "=", "rsynctask.run")],
                                            {"order_by": ["id"]}):
            try:
                task_id = int(j["arguments"][0])
            except (IndexError, ValueError):
                continue

            if task_id in jobs and jobs[task_id]["state"] == "RUNNING":
                continue

            jobs[task_id] = j

        return {
            "jobs": jobs,
        }

    @private
    async def validate_rsync_task(self, data, schema):
        verrors = ValidationErrors()

        # Windows users can have spaces in their usernames
        # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808

        username = data.get('user')
        if ' ' in username:
            verrors.add(f'{schema}.user', 'User names cannot have spaces')
            raise verrors

        user = None
        with contextlib.suppress(KeyError):
            user = await self.middleware.call('dscache.get_uncached_user',
                                              username)

        if not user:
            verrors.add(f'{schema}.user',
                        f'Provided user "{username}" does not exist')
            raise verrors

        await self.validate_path_field(data, schema, verrors)

        remote_host = data.get('remotehost')
        if not remote_host:
            verrors.add(f'{schema}.remotehost', 'Please specify a remote host')

        data['extra'] = ' '.join(data['extra'])
        try:
            shlex.split(data['extra'].replace('"',
                                              r'"\"').replace("'", r'"\"'))
        except ValueError as e:
            verrors.add(f'{schema}.extra', f'Please specify valid value: {e}')

        mode = data.get('mode')
        if not mode:
            verrors.add(f'{schema}.mode', 'This field is required')

        remote_module = data.get('remotemodule')
        if mode == 'MODULE' and not remote_module:
            verrors.add(f'{schema}.remotemodule', 'This field is required')

        if mode == 'SSH':
            remote_port = data.get('remoteport')
            if not remote_port:
                verrors.add(f'{schema}.remoteport', 'This field is required')

            remote_path = data.get('remotepath')
            if not remote_path:
                verrors.add(f'{schema}.remotepath', 'This field is required')

            search = os.path.join(user['pw_dir'], '.ssh', 'id_[edr]*')
            exclude_from_search = os.path.join(user['pw_dir'], '.ssh',
                                               'id_[edr]*pub')
            key_files = set(glob.glob(search)) - set(
                glob.glob(exclude_from_search))
            if not key_files:
                verrors.add(
                    f'{schema}.user',
                    'In order to use rsync over SSH you need a user'
                    ' with a private key (DSA/ECDSA/RSA) set up in home dir.')
            else:
                for file in glob.glob(search):
                    if '.pub' not in file:
                        # file holds a private key and its permissions should be 600
                        if os.stat(file).st_mode & 0o077 != 0:
                            verrors.add(
                                f'{schema}.user',
                                f'Permissions {oct(os.stat(file).st_mode & 0o777)} for {file} are too open. Please '
                                f'correct them by running chmod 600 {file}')

            if (data['enabled'] and data['validate_rpath'] and remote_path
                    and remote_host and remote_port):
                if '@' in remote_host:
                    remote_username, remote_host = remote_host.rsplit('@', 1)
                else:
                    remote_username = username

                try:
                    async with await asyncio.wait_for(
                            asyncssh.connect(remote_host,
                                             port=remote_port,
                                             username=remote_username,
                                             client_keys=key_files,
                                             known_hosts=None),
                            timeout=5,
                    ) as conn:
                        await conn.run(f'test -d {shlex.quote(remote_path)}',
                                       check=True)
                except asyncio.TimeoutError:

                    verrors.add(
                        f'{schema}.remotehost',
                        'SSH timeout occurred. Remote path cannot be validated.'
                    )

                except OSError as e:

                    if e.errno == 113:
                        verrors.add(
                            f'{schema}.remotehost',
                            f'Connection to the remote host {remote_host} on port {remote_port} failed.'
                        )
                    else:
                        verrors.add(f'{schema}.remotehost', e.__str__())

                except asyncssh.DisconnectError as e:

                    verrors.add(
                        f'{schema}.remotehost',
                        f'Disconnect Error [error code {e.code}] was generated when trying to '
                        f'communicate with remote host {remote_host} and remote user {remote_username}.'
                    )

                except asyncssh.ProcessError as e:

                    if e.code == '1':
                        verrors.add(
                            f'{schema}.remotepath',
                            'The Remote Path you specified does not exist or is not a directory. '
                            'Either create one yourself on the remote machine or uncheck the '
                            'validate_rpath field')
                    else:
                        verrors.add(
                            f'{schema}.remotepath',
                            f'Connection to Remote Host was successful but failed to verify '
                            f'Remote Path. {e.__str__()}')

                except asyncssh.Error as e:

                    if e.__class__.__name__ in e.__str__():
                        exception_reason = e.__str__()
                    else:
                        exception_reason = e.__class__.__name__ + ' ' + e.__str__(
                        )
                    verrors.add(
                        f'{schema}.remotepath',
                        f'Remote Path could not be validated. An exception was raised. {exception_reason}'
                    )
            elif data['enabled'] and data['validate_rpath']:
                verrors.add(
                    f'{schema}.remotepath',
                    'Remote path could not be validated because of missing fields'
                )

        data.pop('validate_rpath', None)

        # Keeping compatibility with legacy UI
        for field in ('mode', 'direction'):
            data[field] = data[field].lower()

        return verrors, data

    @accepts(
        Dict(
            'rsync_task_create',
            Str('path', required=True, max_length=RSYNC_PATH_LIMIT),
            Str('user', required=True),
            Str('remotehost'),
            Int('remoteport'),
            Str('mode', enum=['MODULE', 'SSH'], default='MODULE'),
            Str('remotemodule'),
            Str('remotepath'),
            Bool('validate_rpath', default=True),
            Str('direction', enum=['PULL', 'PUSH'], default='PUSH'),
            Str('desc'),
            Cron(
                'schedule',
                defaults={'minute': '00'},
            ),
            Bool('recursive'),
            Bool('times'),
            Bool('compress'),
            Bool('archive'),
            Bool('delete'),
            Bool('quiet'),
            Bool('preserveperm'),
            Bool('preserveattr'),
            Bool('delayupdates'),
            List('extra', items=[Str('extra')]),
            Bool('enabled', default=True),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a Rsync Task.

        See the comment in Rsyncmod about `path` length limits.

        `remotehost` is the IP address or hostname of the remote system. If the username differs on the remote
        host, the "username@remote_host" format should be used.

        `mode` represents the different operating mechanisms for rsync, i.e. Rsync Module mode / Rsync SSH mode.

        `remotemodule` is the name of the remote module; this attribute should be specified when `mode` is set to MODULE.

        `remotepath` specifies the path on the remote system.

        `validate_rpath` is a boolean which, when set, validates the existence of the remote path.

        `direction` specifies if data should be PULLED or PUSHED from the remote system.

        `compress` when set reduces the size of the data which is to be transmitted.

        `archive` when set makes rsync run recursively, preserving symlinks, permissions, modification times, group,
        and special files.

        `delete` when set deletes files in the destination directory which do not exist in the source directory.

        `preserveperm` when set preserves original file permissions.

        .. examples(websocket)::

          Create a Rsync Task which pulls data from a remote system every 5 minutes.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "rsynctask.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "desc": "Test rsync task",
                    "user": "******",
                    "mode": "MODULE",
                    "remotehost": "[email protected]",
                    "compress": true,
                    "archive": true,
                    "direction": "PULL",
                    "path": "/mnt/vol1/rsync_dataset",
                    "remotemodule": "remote_module1"
                }]
            }
        """
        verrors, data = await self.validate_rsync_task(data,
                                                       'rsync_task_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return await self.get_instance(data['id'])

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('rsync_task_create', 'rsync_task_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update Rsync Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)],
                               options={'get': True})
        old.pop('job')

        new = old.copy()
        new.update(data)

        verrors, data = await self.validate_rsync_task(new,
                                                       'rsync_task_update')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)
        new.pop(self.locked_field)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return await self.get_instance(id)

    async def do_delete(self, id):
        """
        Delete Rsync Task of `id`.
        """
        res = await self.middleware.call('datastore.delete',
                                         self._config.datastore, id)
        await self.middleware.call('service.restart', 'cron')
        return res

    @private
    async def commandline(self, id):
        """
        Helper method to generate the rsync command avoiding code duplication.
        """
        rsync = await self.get_instance(id)
        path = shlex.quote(rsync['path'])

        line = ['rsync']
        for name, flag in (
            ('archive', '-a'),
            ('compress', '-zz'),
            ('delayupdates', '--delay-updates'),
            ('delete', '--delete-delay'),
            ('preserveattr', '-X'),
            ('preserveperm', '-p'),
            ('recursive', '-r'),
            ('times', '-t'),
        ):
            if rsync[name]:
                line.append(flag)
        if rsync['extra']:
            line.append(' '.join(rsync['extra']))

        # Do not use username if one is specified in host field
        # See #5096 for more details
        if '@' in rsync['remotehost']:
            remote = rsync['remotehost']
        else:
            remote = f'"{rsync["user"]}"@{rsync["remotehost"]}'

        if rsync['mode'] == 'MODULE':
            module_args = [path, f'{remote}::"{rsync["remotemodule"]}"']
            if rsync['direction'] != 'PUSH':
                module_args.reverse()
            line += module_args
        else:
            line += [
                '-e',
                f'"ssh -p {rsync["remoteport"]} -o BatchMode=yes -o StrictHostKeyChecking=yes"'
            ]
            path_args = [
                path, f'{remote}:"{shlex.quote(rsync["remotepath"])}"'
            ]
            if rsync['direction'] != 'PUSH':
                path_args.reverse()
            line += path_args

        if rsync['quiet']:
            line += ['>', '/dev/null', '2>&1']

        return ' '.join(line)

    @item_method
    @accepts(Int('id'))
    @returns()
    @job(lock=lambda args: args[-1], lock_queue_size=1, logs=True)
    def run(self, job, id):
        """
        Job to run rsync task of `id`.

        Output is saved to job log excerpt (not syslog).
        """
        self.middleware.call_sync('network.general.will_perform_activity',
                                  'rsync')

        rsync = self.middleware.call_sync('rsynctask.get_instance', id)
        if rsync['locked']:
            self.middleware.call_sync('rsynctask.generate_locked_alert', id)
            return

        commandline = self.middleware.call_sync('rsynctask.commandline', id)

        cp = run_command_with_user_context(commandline, rsync['user'],
                                           lambda v: job.logs_fd.write(v))

        for klass in ('RsyncSuccess',
                      'RsyncFailed') if not rsync['quiet'] else ():
            self.middleware.call_sync('alert.oneshot_delete', klass,
                                      rsync['id'])

        if cp.returncode not in RsyncReturnCode.nonfatals():
            err = None
            if cp.returncode == RsyncReturnCode.STREAMIO and rsync['compress']:
                err = (
                    "rsync command with compression enabled failed with STREAMIO error. "
                    "This may indicate that remote server lacks support for the new-style "
                    "compression used by TrueNAS.")

            if not rsync['quiet']:
                self.middleware.call_sync(
                    'alert.oneshot_create', 'RsyncFailed', {
                        'id': rsync['id'],
                        'direction': rsync['direction'],
                        'path': rsync['path'],
                    })

            if err:
                msg = f'{err} Check logs for further information'
            else:
                try:
                    rc_name = RsyncReturnCode(cp.returncode).name
                except ValueError:
                    rc_name = 'UNKNOWN'

                msg = (f'rsync command returned {cp.returncode} - {rc_name}. '
                       'Check logs for further information.')
            raise CallError(msg)

        elif not rsync['quiet']:
            self.middleware.call_sync(
                'alert.oneshot_create', 'RsyncSuccess', {
                    'id': rsync['id'],
                    'direction': rsync['direction'],
                    'path': rsync['path'],
                })
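
A hedged client-side sketch of the same API driven from Python instead of the websocket call shown in the docstring; it assumes the `middlewared.client.Client` interface, and the host, user, and path values are placeholders.

from middlewared.client import Client

# Hypothetical sketch: create an SSH-mode pull task and run it once.
# Field names follow the 'rsync_task_create' schema above.
with Client() as c:
    task = c.call('rsynctask.create', {
        'path': '/mnt/tank/backups',
        'user': 'backup',
        'mode': 'SSH',
        'remotehost': 'backup@192.0.2.10',
        'remoteport': 22,
        'remotepath': '/srv/backups',
        'direction': 'PULL',
        'schedule': {'minute': '0', 'hour': '2'},
        'enabled': True,
    })
    # rsynctask.run is a job; job=True waits for it to complete.
    c.call('rsynctask.run', task['id'], job=True)
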
Example No. 4
class BackupS3Service(Service):
    class Config:
        namespace = 'backup.s3'

    @private
    async def get_client(self, id):
        credential = await self.middleware.call('datastore.query',
                                                'system.cloudcredentials',
                                                [('id', '=', id)],
                                                {'get': True})

        client = boto3.client(
            's3',
            aws_access_key_id=credential['attributes'].get('access_key'),
            aws_secret_access_key=credential['attributes'].get('secret_key'),
        )
        return client

    @accepts(Int('id'))
    async def get_buckets(self, id):
        """Returns buckets from a given S3 credential."""
        client = await self.get_client(id)
        buckets = []
        for bucket in client.list_buckets()['Buckets']:
            buckets.append({
                'name': bucket['Name'],
                'creation_date': bucket['CreationDate'],
            })

        return buckets

    @accepts(Int('id'), Str('name'))
    async def get_bucket_location(self, id, name):
        """
        Returns bucket `name` location (region) from credential `id`.
        """
        client = await self.get_client(id)
        response = client.get_bucket_location(Bucket=name)
        return response['LocationConstraint']

    @private
    async def sync(self, job, backup, credential):
        # Use a temporary file to store the rclone config file
        with tempfile.NamedTemporaryFile(mode='w+') as f:
            # Make sure only root can read it as there is sensitive data
            os.chmod(f.name, 0o600)

            f.write(
                textwrap.dedent("""
                [remote]
                type = s3
                env_auth = false
                access_key_id = {access_key}
                secret_access_key = {secret_key}
                region = {region}
                """).format(
                    access_key=credential['attributes']['access_key'],
                    secret_key=credential['attributes']['secret_key'],
                    region=backup['attributes']['region'] or '',
                ))
            f.flush()

            args = [
                '/usr/local/bin/rclone',
                '--config',
                f.name,
                '-v',
                '--stats',
                '1s',
                'sync',
            ]

            remote_path = 'remote:{}{}'.format(
                backup['attributes']['bucket'],
                '/{}'.format(backup['attributes']['folder'])
                if backup['attributes'].get('folder') else '',
            )

            if backup['direction'] == 'PUSH':
                args.extend([backup['path'], remote_path])
            else:
                args.extend([remote_path, backup['path']])

            proc = await Popen(
                args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            check_task = asyncio.ensure_future(rclone_check_progress(
                job, proc))
            await proc.wait()
            if proc.returncode != 0:
                await asyncio.wait_for(check_task, None)
                raise ValueError('rclone failed: {}'.format(
                    check_task.result()))
            return True

    @private
    async def put(self, backup, filename, read_fd):
        client = await self.get_client(backup['id'])
        folder = backup['attributes']['folder'] or ''
        key = os.path.join(folder, filename)
        parts = []
        idx = 1

        try:
            with os.fdopen(read_fd, 'rb') as f:
                mp = client.create_multipart_upload(
                    Bucket=backup['attributes']['bucket'], Key=key)

                while True:
                    chunk = f.read(CHUNK_SIZE)
                    if chunk == b'':
                        break

                    resp = client.upload_part(
                        Bucket=backup['attributes']['bucket'],
                        Key=key,
                        PartNumber=idx,
                        UploadId=mp['UploadId'],
                        ContentLength=len(chunk),
                        Body=chunk)

                    parts.append({'ETag': resp['ETag'], 'PartNumber': idx})

                    idx += 1

                client.complete_multipart_upload(
                    Bucket=backup['attributes']['bucket'],
                    Key=key,
                    UploadId=mp['UploadId'],
                    MultipartUpload={'Parts': parts})
        finally:
            pass

    @private
    async def get(self, backup, filename, write_fd):
        client = await self.get_client(backup['id'])
        folder = backup['attributes']['folder'] or ''
        key = os.path.join(folder, filename)
        obj = client.get_object(Bucket=backup['attributes']['bucket'], Key=key)

        with os.fdopen(write_fd, 'wb') as f:
            while True:
                chunk = obj['Body'].read(CHUNK_SIZE)
                if chunk == b'':
                    break
                f.write(chunk)

    @private
    async def ls(self, cred_id, bucket, path):
        client = await self.get_client(cred_id)
        obj = client.list_objects_v2(
            Bucket=bucket,
            Prefix=path,
        )
        if obj['KeyCount'] == 0:
            return []
        return obj['Contents']
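
The put() method above drives S3 multipart upload by hand. For reference, a minimal standalone sketch of the same create/upload_part/complete sequence with boto3; the bucket, key, and file path are placeholders.

import boto3

CHUNK_SIZE = 5 * 1024 * 1024  # S3 requires parts of at least 5 MiB (except the last one)

def multipart_upload(path, bucket, key):
    # Same pattern as put() above, but reading from a local file.
    client = boto3.client('s3')
    mp = client.create_multipart_upload(Bucket=bucket, Key=key)
    parts = []
    with open(path, 'rb') as f:
        for idx, chunk in enumerate(iter(lambda: f.read(CHUNK_SIZE), b''), start=1):
            resp = client.upload_part(
                Bucket=bucket, Key=key, PartNumber=idx,
                UploadId=mp['UploadId'], Body=chunk,
            )
            parts.append({'ETag': resp['ETag'], 'PartNumber': idx})
    client.complete_multipart_upload(
        Bucket=bucket, Key=key, UploadId=mp['UploadId'],
        MultipartUpload={'Parts': parts},
    )
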
Example No. 5
class BackupGCSService(Service):
    class Config:
        namespace = 'backup.gcs'

    def __get_client(self, id):
        credential = self.middleware.call_sync('datastore.query',
                                               'system.cloudcredentials',
                                               [('id', '=', id)],
                                               {'get': True})

        with tempfile.NamedTemporaryFile(mode='w+') as f:
            # Make sure only root can read it as there is sensitive data
            os.chmod(f.name, 0o600)
            f.write(json.dumps(credential['attributes']['keyfile']))
            f.flush()
            client = storage.Client.from_service_account_json(f.name)

        return client

    @accepts(Int('id'))
    def get_buckets(self, id):
        """Returns buckets from a given B2 credential."""
        client = self.__get_client(id)
        buckets = []
        for i in client.list_buckets():
            buckets.append(i._properties)
        return buckets

    @private
    async def sync(self, job, backup, credential):
        # Use a temporary file to store the rclone config file
        with tempfile.NamedTemporaryFile(
                mode='w+') as f, tempfile.NamedTemporaryFile(
                    mode='w+') as keyf:
            # Make sure only root can read it as there is sensitive data
            os.chmod(f.name, 0o600)
            os.chmod(keyf.name, 0o600)

            keyf.write(json.dumps(credential['attributes']['keyfile']))
            keyf.flush()

            f.write("""[remote]
type = google cloud storage
client_id =
client_secret =
project_number =
service_account_file = {keyfile}
""".format(keyfile=keyf.name, ))
            f.flush()

            args = [
                '/usr/local/bin/rclone',
                '--config',
                f.name,
                '-v',
                '--stats',
                '1s',
                'sync',
            ]

            remote_path = 'remote:{}{}'.format(
                backup['attributes']['bucket'],
                '/{}'.format(backup['attributes']['folder'])
                if backup['attributes'].get('folder') else '',
            )

            if backup['direction'] == 'PUSH':
                args.extend([backup['path'], remote_path])
            else:
                args.extend([remote_path, backup['path']])

            proc = await Popen(
                args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            check_task = asyncio.ensure_future(rclone_check_progress(
                job, proc))
            await proc.wait()
            if proc.returncode != 0:
                await asyncio.wait_for(check_task, None)
                raise ValueError('rclone failed: {}'.format(
                    check_task.result()))
            return True
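
The sync() method above shells out to rclone with a throwaway config file. Below is a rough standalone equivalent of its PUSH branch, written as a hedged sketch; the keyfile contents, bucket, and paths are placeholders.

import json
import subprocess
import tempfile

def rclone_gcs_push(keyfile: dict, bucket: str, local_path: str, folder: str = ''):
    # Hypothetical standalone version of the sync() PUSH branch above.
    with tempfile.NamedTemporaryFile(mode='w+') as keyf, \
            tempfile.NamedTemporaryFile(mode='w+') as conf:
        keyf.write(json.dumps(keyfile))
        keyf.flush()
        conf.write(
            '[remote]\n'
            'type = google cloud storage\n'
            f'service_account_file = {keyf.name}\n'
        )
        conf.flush()
        remote = f'remote:{bucket}' + (f'/{folder}' if folder else '')
        subprocess.run(
            ['rclone', '--config', conf.name, '-v', '--stats', '1s',
             'sync', local_path, remote],
            check=True,
        )
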
Example No. 6
class PoolResilverService(ConfigService):
    class Config:
        namespace = 'pool.resilver'
        datastore = 'storage.resilver'
        datastore_extend = 'pool.resilver.resilver_extend'

    async def resilver_extend(self, data):
        data['begin'] = data['begin'].strftime('%H:%M')
        data['end'] = data['end'].strftime('%H:%M')
        data['weekday'] = [int(v) for v in data['weekday'].split(',')]
        return data

    async def validate_fields_and_update(self, data, schema):
        verrors = ValidationErrors()

        begin = data.get('begin')
        if begin:
            data['begin'] = time(int(begin.split(':')[0]),
                                 int(begin.split(':')[1]))

        end = data.get('end')
        if end:
            data['end'] = time(int(end.split(':')[0]), int(end.split(':')[1]))

        weekdays = data.get('weekday')
        if weekdays:
            if len([day for day in weekdays if day not in range(1, 8)]) > 0:
                verrors.add(
                    f'{schema}.weekday',
                    'Weekdays must be in the range of 1-7 inclusive')
            else:
                data['weekday'] = ','.join([str(day) for day in weekdays])

        return verrors, data

    @accepts(
        Dict('pool_resilver', Str('begin', validators=[Time()]),
             Str('end', validators=[Time()]), Bool('enabled'),
             List('weekday', items=[Int('weekday')])))
    async def do_update(self, data):
        config = await self.config()
        original_config = config.copy()
        config.update(data)

        verrors, new_config = await self.validate_fields_and_update(
            config, 'pool_resilver_update')
        if verrors:
            raise verrors

        # before checking if any changes have been made, original_config needs to be mapped to new_config
        original_config['weekday'] = ','.join(
            [str(day) for day in original_config['weekday']])
        original_config['begin'] = time(
            *(int(value) for value in original_config['begin'].split(':')))
        original_config['end'] = time(
            *(int(value) for value in original_config['end'].split(':')))
        if len(set(original_config.items()) ^ set(new_config.items())) > 0:
            # data has changed
            await self.middleware.call('datastore.update',
                                       self._config.datastore,
                                       new_config['id'], new_config)

            await self.middleware.call('service.restart', 'cron',
                                       {'onetime': False})
            await self.middleware.call('pool.configure_resilver_priority')

        return await self.config()
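
A short, hedged usage sketch of the config update above, again assuming the `middlewared.client.Client` API; the window values are placeholders.

from middlewared.client import Client

# Hypothetical sketch: restrict resilvers to weekday nights between 22:00 and 06:00.
with Client() as c:
    c.call('pool.resilver.update', {
        'enabled': True,
        'begin': '22:00',
        'end': '06:00',
        'weekday': [1, 2, 3, 4, 5],
    })
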
Example No. 7
        return process.returncode == 0

    def _ping6_host(self, host, timeout):
        if osc.IS_LINUX:
            process = run(['ping6', '-w', f'{timeout}', host])
        else:
            process = run(['ping6', '-X', f'{timeout}', host])

        return process.returncode == 0

    @accepts(
        Dict(
            'options',
            Str('type', enum=['ICMP', 'ICMPV4', 'ICMPV6'], default='ICMP'),
            Str('hostname', required=True),
            Int('timeout', validators=[Range(min=1, max=60)], default=4),
        ),
    )
    def ping_remote(self, options):
        """
        Method that will send an ICMP echo request to "hostname"
        and will wait up to "timeout" for a reply.
        """
        ip = None
        ip_found = True
        verrors = ValidationErrors()
        try:
            ip = IpAddress()
            ip(options['hostname'])
            ip = options['hostname']
        except ValueError:
Example No. 8
class SMARTTestService(CRUDService):

    class Config:
        datastore = 'tasks.smarttest'
        datastore_extend = 'smart.test.smart_test_extend'
        datastore_prefix = 'smarttest_'
        namespace = 'smart.test'
        cli_namespace = 'task.smart_test'

    ENTRY = Patch(
        'smart_task_create', 'smart_task_entry',
        ('add', Int('id')),
    )

    @private
    async def smart_test_extend(self, data):
        disks = data.pop('disks')
        data['disks'] = [disk['disk_identifier'] for disk in disks]
        test_type = {
            'L': 'LONG',
            'S': 'SHORT',
            'C': 'CONVEYANCE',
            'O': 'OFFLINE',
        }
        data['type'] = test_type[data.pop('type')]
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        smart_tests = await self.query(filters=[('type', '=', data['type'])])
        configured_disks = [d for test in smart_tests for d in test['disks']]
        disks_dict = await self.disk_choices()

        disks = data.get('disks')
        used_disks = []
        invalid_disks = []
        for disk in disks:
            if disk in configured_disks:
                used_disks.append(disks_dict[disk])
            if disk not in disks_dict.keys():
                invalid_disks.append(disk)

        if used_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks already have tests for this type: {", ".join(used_disks)}'
            )

        if invalid_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks are invalid: {", ".join(invalid_disks)}'
            )

        return verrors

    @accepts(Bool('full_disk', default=False))
    async def disk_choices(self, full_disk):
        """
        Returns disk choices for S.M.A.R.T. test.

        `full_disk` will return full disk objects instead of just names.
        """
        return {
            disk['identifier']: disk if full_disk else disk['name']
            for disk in await self.middleware.call('disk.query', [['devname', '!^', 'nv']])
        }

    @accepts(
        Dict(
            'smart_task_create',
            Cron(
                'schedule',
                exclude=['minute']
            ),
            Str('desc'),
            Bool('all_disks', default=False),
            List('disks', items=[Str('disk')]),
            Str('type', enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'], required=True),
            register=True
        )
    )
    async def do_create(self, data):
        """
        Create a SMART Test Task.

        `disks` is a list of valid disks which should be monitored in this task.

        `type` is specified to represent the type of SMART test to be executed.

        `all_disks` when enabled sets the task to cover all disks in which case `disks` is not required.

        .. examples(websocket)::

          Create a SMART Test Task which executes after every 30 minutes.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "smart.test.create",
                "params": [{
                    "schedule": {
                        "minute": "30",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "all_disks": true,
                    "type": "OFFLINE",
                    "disks": []
                }]
            }
        """
        data['type'] = data.pop('type')[0]
        verrors = await self.validate_data(data, 'smart_test_create')

        if data['all_disks']:
            if data.get('disks'):
                verrors.add(
                    'smart_test_create.disks',
                    'This test is already enabled for all disks'
                )
        else:
            if not data.get('disks'):
                verrors.add(
                    'smart_test_create.disks',
                    'This field is required'
                )

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return await self.get_instance(data['id'])

    async def do_update(self, id, data):
        """
        Update SMART Test Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)], options={'get': True})
        new = old.copy()
        new.update(data)

        new['type'] = new.pop('type')[0]
        old['type'] = old.pop('type')[0]
        new_disks = [disk for disk in new['disks'] if disk not in old['disks']]
        deleted_disks = [disk for disk in old['disks'] if disk not in new['disks']]
        if old['type'] == new['type']:
            new['disks'] = new_disks
        verrors = await self.validate_data(new, 'smart_test_update')

        new['disks'] = [disk for disk in chain(new_disks, old['disks']) if disk not in deleted_disks]

        if new['all_disks']:
            if new.get('disks'):
                verrors.add(
                    'smart_test_update.disks',
                    'This test is already enabled for all disks'
                )
        else:
            if not new.get('disks'):
                verrors.add(
                    'smart_test_update.disks',
                    'This field is required'
                )

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return await self.get_instance(id)

    async def do_delete(self, id):
        """
        Delete SMART Test Task of `id`.
        """
        response = await self.middleware.call(
            'datastore.delete',
            self._config.datastore,
            id
        )

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return response

    @accepts(
        List(
            'disks', items=[
                Dict(
                    'disk_run',
                    Str('identifier', required=True),
                    Str('mode', enum=['FOREGROUND', 'BACKGROUND'], default='BACKGROUND'),
                    Str('type', enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'], required=True),
                )
            ]
        )
    )
    @returns(List('smart_manual_test', items=[Dict(
        'smart_manual_test_disk_response',
        Str('disk', required=True),
        Str('identifier', required=True),
        Str('error', required=True, null=True),
        Datetime('expected_result_time'),
        Int('job'),
    )]))
    async def manual_test(self, disks):
        """
        Run manual SMART tests for `disks`.

        `type` indicates what type of SMART test will be run and must be specified.
        """
        verrors = ValidationErrors()
        test_disks_list = []
        if not disks:
            verrors.add(
                'disks',
                'Please specify at least one disk.'
            )
        else:
            disks_choices = await self.disk_choices(True)
            devices = await self.middleware.call('device.get_storage_devices_topology')

            for index, disk in enumerate(disks):
                if current_disk := disks_choices.get(disk['identifier']):
                    test_disks_list.append({
                        'disk': current_disk['name'],
                        **disk
                    })
                else:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'{disk["identifier"]} is not valid. Please provide a valid disk identifier.'
                    )
                    continue

                if current_disk['name'] is None:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'Test cannot be performed for {disk["identifier"]} disk. Failed to retrieve name.'
                    )

                device = devices.get(current_disk['name'])
                if not device:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'Test cannot be performed for {disk["identifier"]}. Unable to retrieve disk details.'
                    )

        verrors.check()

        return await asyncio_map(self.__manual_test, test_disks_list, 16)
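
A hedged sketch of exercising the endpoints above from the middleware client; it assumes the `middlewared.client.Client` API, and the schedule values are placeholders. Disk identifiers come from disk_choices().

from middlewared.client import Client

# Hypothetical sketch: schedule a weekly LONG test for all eligible disks and
# kick off an immediate manual SHORT test on the same disks.
with Client() as c:
    choices = c.call('smart.test.disk_choices')        # identifier -> disk name
    c.call('smart.test.create', {
        'type': 'LONG',
        'disks': list(choices),
        'schedule': {'hour': '3', 'dom': '*', 'month': '*', 'dow': '7'},
    })
    c.call('smart.test.manual_test', [
        {'identifier': identifier, 'type': 'SHORT'}
        for identifier in choices
    ])
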
Example No. 9
class AuthService(Service):
    def __init__(self, *args, **kwargs):
        super(AuthService, self).__init__(*args, **kwargs)
        self.authtokens = AuthTokens()

    @accepts(Str('username'), Str('password'))
    async def check_user(self, username, password):
        """
        Verify username and password
        """
        if username != 'root':
            return False
        try:
            user = await self.middleware.call(
                'datastore.query', 'account.bsdusers',
                [('bsdusr_username', '=', username)], {'get': True})
        except IndexError:
            return False
        if user['bsdusr_unixhash'] in ('x', '*'):
            return False
        return crypt.crypt(password,
                           user['bsdusr_unixhash']) == user['bsdusr_unixhash']

    @accepts(Int('ttl', required=False), Dict('attrs', additional_attrs=True))
    def generate_token(self, ttl=None, attrs=None):
        """Generate a token to be used for authentication."""
        if ttl is None:
            ttl = 600
        return self.authtokens.new(ttl, attrs=attrs)['id']

    @private
    def get_token(self, token_id):
        return self.authtokens.get_token(token_id)

    @no_auth_required
    @accepts(Str('username'), Str('password'))
    @pass_app
    async def login(self, app, username, password):
        """Authenticate session using username and password.
        Currently only root user is allowed.
        """
        valid = await self.check_user(username, password)
        if valid:
            app.authenticated = True
        return valid

    @accepts()
    @pass_app
    async def logout(self, app):
        """
        Deauthenticates an app and, if a token exists, removes it from the
        session.
        """
        sessionid = app.sessionid
        token = self.authtokens.get_token_by_sessionid(sessionid)
        app.authenticated = False

        if token:
            self.authtokens.pop_token(token["id"])

        return True

    @no_auth_required
    @accepts(Str('token'))
    @pass_app
    def token(self, app, token):
        """Authenticate using a given `token` id."""
        def update_token(app, message):
            """
            On every new message from the registered connection,
            make sure the token is still valid, updating its last-used time or
            removing authentication.
            """
            token = self.authtokens.get_token_by_sessionid(app.sessionid)
            if token is None:
                return
            if int(time.time()) - token['ttl'] < token['last']:
                token['last'] = int(time.time())
            else:
                self.authtokens.pop_token(token['id'])
                app.authenticated = False

        def remove_session(app):
            """
            On connection close, remove session id from token
            """
            self.authtokens.remove_session(app.sessionid)

        token = self.authtokens.get_token(token)
        if token is None:
            return False
        """
        If token exists and is still valid (TTL) do the following:
          - authenticate the connection
          - add the session id to token
          - register connection callbacks to update/remove token
        """
        if int(time.time()) - token['ttl'] < token['last']:
            token['last'] = int(time.time())
            self.authtokens.add_session(app.sessionid, token)
            app.register_callback('on_message', update_token)
            app.register_callback('on_close', remove_session)
            app.authenticated = True
            return True
        else:
            self.authtokens.pop_token(token['id'])
            return False
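
The TTL check above treats a token as valid only while the time since its last use is smaller than its `ttl`. A minimal standalone sketch of that rule (the function and the token dict are illustrative, not part of the middleware API):

import time

def token_is_valid(token, now=None):
    """Return True while the token was last used less than `ttl` seconds ago."""
    now = int(now if now is not None else time.time())
    return now - token['ttl'] < token['last']

# Hypothetical token record: last used 100 seconds ago with a 600 second TTL.
example = {'ttl': 600, 'last': int(time.time()) - 100}
print(token_is_valid(example))  # True; becomes False 600 seconds after 'last'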
Exemplo n.º 10
0
class DiskService(CRUDService):

    class Config:
        datastore = 'storage.disk'
        datastore_prefix = 'disk_'
        datastore_extend = 'disk.disk_extend'
        datastore_extend_context = 'disk.disk_extend_context'
        event_register = False
        event_send = False
        cli_namespace = 'storage.disk'

    @filterable
    async def query(self, filters, options):
        """
        Query disks.

        The following extra options are supported:

             include_expired: true - will also include expired disks (default: false)
             passwords: true - will not hide KMIP password for the disks (default: false)
             pools: true - will join pool name for each disk (default: false)
        """
        filters = filters or []
        options = options or {}
        if not options.get('extra', {}).get('include_expired', False):
            filters += [('expiretime', '=', None)]

        return await super().query(filters, options)
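
    # Example (hypothetical call, not part of this class): passing the extra
    # options documented above, e.g.
    #   disk.query([], {'extra': {'pools': True, 'include_expired': True}})
    # keeps expired disks (the implicit expiretime filter is skipped) and joins
    # the pool name for each disk via the context built in disk_extend_context
    # below.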

    @private
    async def disk_extend(self, disk, context):
        disk.pop('enabled', None)
        for key in ['acousticlevel', 'advpowermgmt', 'hddstandby']:
            disk[key] = disk[key].upper()
        try:
            disk['size'] = int(disk['size'])
        except ValueError:
            disk['size'] = None
        if disk['multipath_name']:
            disk['devname'] = f'multipath/{disk["multipath_name"]}'
        else:
            disk['devname'] = disk['name']
        self._expand_enclosure(disk)
        if context['passwords']:
            if not disk['passwd']:
                disk['passwd'] = context['disks_keys'].get(disk['identifier'], '')
        else:
            disk.pop('passwd')
            disk.pop('kmip_uid')
        disk['pool'] = context['zfs_guid_to_pool'].get(disk['zfs_guid'])
        return disk

    @private
    async def disk_extend_context(self, extra):
        context = {
            'passwords': extra.get('passwords', False),
            'disks_keys': {},

            'pools': extra.get('pools', False),
            'zfs_guid_to_pool': {},
        }

        if context['passwords']:
            context['disks_keys'] = await self.middleware.call('kmip.retrieve_sed_disks_keys')

        if context['pools']:
            for pool in await self.middleware.call('zfs.pool.query'):
                topology = await self.middleware.call('pool.transform_topology_lightweight', pool['groups'])
                for vdev in await self.middleware.call('pool.flatten_topology', topology):
                    if vdev['type'] == 'DISK':
                        context['zfs_guid_to_pool'][vdev['guid']] = pool['name']

        return context

    def _expand_enclosure(self, disk):
        if disk['enclosure_slot'] is not None:
            disk['enclosure'] = {
                'number': disk['enclosure_slot'] // 1000,
                'slot': disk['enclosure_slot'] % 1000
            }
        else:
            disk['enclosure'] = None
        del disk['enclosure_slot']

    def _compress_enclosure(self, disk):
        if disk['enclosure'] is not None:
            disk['enclosure_slot'] = disk['enclosure']['number'] * 1000 + disk['enclosure']['slot']
        else:
            disk['enclosure_slot'] = None
        del disk['enclosure']
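
    # The two helpers above round-trip the enclosure location through a single
    # integer: enclosure_slot = number * 1000 + slot. For example (illustrative
    # values only), enclosure_slot 2005 expands to {'number': 2, 'slot': 5} and
    # compresses back to 2005.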

    @accepts(
        Str('id'),
        Dict(
            'disk_update',
            Bool('togglesmart'),
            Str('acousticlevel', enum=[
                'DISABLED', 'MINIMUM', 'MEDIUM', 'MAXIMUM'
            ]),
            Str('advpowermgmt', enum=[
                'DISABLED', '1', '64', '127', '128', '192', '254'
            ]),
            Str('description'),
            Str('hddstandby', enum=[
                'ALWAYS ON', '5', '10', '20', '30', '60', '120', '180', '240', '300', '330'
            ]),
            Bool('hddstandby_force'),
            Str('passwd', private=True),
            Str('smartoptions'),
            Int('critical', null=True),
            Int('difference', null=True),
            Int('informational', null=True),
            Dict(
                'enclosure',
                Int('number'),
                Int('slot'),
                null=True,
            ),
            update=True
        )
    )
    async def do_update(self, id, data):
        """
        Update disk of `id`.

        Extra options for SMART that are not already supported can be passed via
        `smartoptions`.

        `critical`, `informational` and `difference` are integer values on which alerts for SMART are configured
        if the disk temperature crosses the assigned threshold for each respective attribute.
        If they are set to null, then SMARTD config values are used as defaults.

        An email of log level LOG_CRIT is issued when the disk temperature crosses `critical`.

        An email of log level LOG_INFO is issued when the disk temperature crosses `informational`.

        If the temperature of a disk changes by `difference` degrees Celsius since the last report, SMART reports this.
        """

        old = await self.middleware.call(
            'datastore.query', 'storage.disk', [['identifier', '=', id]], {
                'get': True, 'prefix': self._config.datastore_prefix
            }
        )
        old.pop('enabled', None)
        self._expand_enclosure(old)
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if new['hddstandby_force']:
            if new['hddstandby'] == 'ALWAYS ON':
                verrors.add(
                    'disk_update.hddstandby_force',
                    'This option does not make sense when HDD Standby is not set'
                )

        if verrors:
            raise verrors

        if not new['passwd'] and old['passwd'] != new['passwd']:
            # We want to make sure kmip uid is None in this case
            if new['kmip_uid']:
                asyncio.ensure_future(self.middleware.call('kmip.reset_sed_disk_password', id, new['kmip_uid']))
            new['kmip_uid'] = None

        for key in ['acousticlevel', 'advpowermgmt', 'hddstandby']:
            new[key] = new[key].title()

        self._compress_enclosure(new)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )

        if any(new[key] != old[key] for key in ['hddstandby', 'advpowermgmt', 'acousticlevel']):
            await self.middleware.call('disk.power_management', new['name'])

        if any(
            new[key] != old[key]
            for key in [
                'togglesmart', 'smartoptions', 'hddstandby', 'hddstandby_force',
                'critical', 'difference', 'informational',
            ]
        ):
            if new['togglesmart']:
                await self.middleware.call('disk.toggle_smart_on', new['name'])
            else:
                await self.middleware.call('disk.toggle_smart_off', new['name'])

            await self.middleware.call('disk.update_hddstandby_force')
            await self.middleware.call('disk.update_smartctl_args_for_disks')
            await self.middleware.call('service.restart', 'collectd')
            await self._service_change('smartd', 'restart')
            await self._service_change('snmp', 'restart')

        if new['passwd'] and old['passwd'] != new['passwd']:
            await self.middleware.call('kmip.sync_sed_keys', [id])

        return await self.query([['identifier', '=', id]], {'get': True})

    @private
    async def copy_settings(self, old, new):
        await self.middleware.call('disk.update', new['identifier'], {
            k: v for k, v in old.items() if k in [
                'togglesmart', 'acousticlevel', 'advpowermgmt', 'description', 'hddstandby', 'hddstandby_force',
                'smartoptions', 'critical', 'difference', 'informational',
            ]
        })

        changed = False
        for row in await self.middleware.call('datastore.query', 'tasks.smarttest_smarttest_disks', [
            ['disk_id', '=', old['identifier']],
        ], {'relationships': False}):
            try:
                await self.middleware.call('datastore.insert', 'tasks.smarttest_smarttest_disks', {
                    'smarttest_id': row['smarttest_id'],
                    'disk_id': new['identifier'],
                })
            except IntegrityError:
                pass
            else:
                changed = True

        if changed:
            asyncio.ensure_future(self._service_change('smartd', 'restart'))

    @private
    def get_name(self, disk):
        if disk["multipath_name"]:
            return f"multipath/{disk['multipath_name']}"
        else:
            return disk["name"]

    @accepts(Bool("join_partitions", default=False))
    async def get_unused(self, join_partitions):
        """
        Helper method to get all disks that are not in use, either by the boot
        pool or the user pools.
        """
        disks = await self.query([('devname', 'nin', await self.get_reserved())])

        if join_partitions:
            for disk in disks:
                disk['partitions'] = await self.middleware.call('disk.list_partitions', disk['devname'])

        return disks

    @private
    async def get_reserved(self):
        reserved = list(await self.middleware.call('boot.get_disks'))
        reserved += [i async for i in await self.middleware.call('pool.get_disks')]
        if osc.IS_FREEBSD:
            # FIXME: Make this freebsd specific for now
            reserved += [i async for i in self.__get_iscsi_targets()]
        return reserved

    async def __get_iscsi_targets(self):
        iscsi_target_extent_paths = [
            extent["iscsi_target_extent_path"]
            for extent in await self.middleware.call('datastore.query', 'services.iscsitargetextent',
                                                     [('iscsi_target_extent_type', '=', 'Disk')])
        ]
        for disk in await self.middleware.call('datastore.query', 'storage.disk',
                                               [('disk_identifier', 'in', iscsi_target_extent_paths)]):
            yield disk["disk_name"]

    @private
    async def check_clean(self, disk):
        return not bool(await self.middleware.call('disk.list_partitions', disk))

    @private
    async def sed_unlock_all(self):
        advconfig = await self.middleware.call('system.advanced.config')
        disks = await self.middleware.call('disk.query', [], {'extra': {'passwords': True}})

        # If no SED password was found we can stop here
        if not await self.middleware.call('system.advanced.sed_global_password') and not any(
            [d['passwd'] for d in disks]
        ):
            return

        result = await asyncio_map(lambda disk: self.sed_unlock(disk['name'], disk, advconfig), disks, 16)
        locked = list(filter(lambda x: x['locked'] is True, result))
        if locked:
            disk_names = ', '.join([i['name'] for i in locked])
            self.logger.warn(f'Failed to unlock the following SED disks: {disk_names}')
            raise CallError('Failed to unlock SED disks', errno.EACCES)
        return True

    @private
    async def sed_unlock(self, disk_name, disk=None, _advconfig=None):
        if _advconfig is None:
            _advconfig = await self.middleware.call('system.advanced.config')

        devname = await self.middleware.call('disk.sed_dev_name', disk_name)
        # We need two separate states to tell apart whether the disk was successfully unlocked
        locked = None
        unlocked = None
        password = await self.middleware.call('system.advanced.sed_global_password')

        if disk is None:
            disk = await self.query([('name', '=', disk_name)], {'extra': {'passwords': True}})
            if disk and disk[0]['passwd']:
                password = disk[0]['passwd']
        elif disk.get('passwd'):
            password = disk['passwd']

        rv = {'name': disk_name, 'locked': None}

        if not password:
            # If there is no password, there is no point in continuing
            return rv

        # Try unlocking TCG OPAL using sedutil
        cp = await run('sedutil-cli', '--query', devname, check=False)
        if cp.returncode == 0:
            output = cp.stdout.decode(errors='ignore')
            if 'Locked = Y' in output:
                locked = True
                cp = await run('sedutil-cli', '--setLockingRange', '0', 'RW', password, devname, check=False)
                if cp.returncode == 0:
                    locked = False
                    unlocked = True
                    # If we were able to unlock it, let's set mbrenable to off
                    if osc.IS_LINUX:
                        cp = await run('sedutil-cli', '--setMBREnable', 'off', password, devname, check=False)
                        if cp.returncode:
                            self.logger.error(
                                'Failed to set MBREnable for %r to "off": %s', devname,
                                cp.stderr.decode(), exc_info=True
                            )

            elif 'Locked = N' in output:
                locked = False

        # Try ATA Security if SED was not unlocked and it's not locked by OPAL
        if not unlocked and not locked:
            locked, unlocked = await self.middleware.call('disk.unlock_ata_security', devname, _advconfig, password)

        if osc.IS_FREEBSD and unlocked:
            try:
                # Disk needs to be retasted after unlock
                with open(f'/dev/{disk_name}', 'wb'):
                    pass
            except OSError:
                pass
        elif locked:
            self.logger.error(f'Failed to unlock {disk_name}')
        rv['locked'] = locked
        return rv

    @private
    async def sed_initial_setup(self, disk_name, password):
        """
        NO_SED - Does not support SED
        ACCESS_GRANTED - Already setup and `password` is a valid password
        LOCKING_DISABLED - Locking range is disabled
        SETUP_FAILED - Initial setup call failed
        SUCCESS - Setup successfully completed
        """
        devname = await self.middleware.call('disk.sed_dev_name', disk_name)

        cp = await run('sedutil-cli', '--isValidSED', devname, check=False)
        if b' SED ' not in cp.stdout:
            return 'NO_SED'

        cp = await run('sedutil-cli', '--listLockingRange', '0', password, devname, check=False)
        if cp.returncode == 0:
            output = cp.stdout.decode()
            if RE_SED_RDLOCK_EN.search(output) and RE_SED_WRLOCK_EN.search(output):
                return 'ACCESS_GRANTED'
            else:
                return 'LOCKING_DISABLED'

        try:
            await run('sedutil-cli', '--initialSetup', password, devname)
        except subprocess.CalledProcessError as e:
            self.logger.debug(f'initialSetup failed for {disk_name}:\n{e.stdout}{e.stderr}')
            return 'SETUP_FAILED'

        # OPAL 2.0 disks do not enable locking range on setup like Enterprise does
        try:
            await run('sedutil-cli', '--enableLockingRange', '0', password, devname)
        except subprocess.CalledProcessError as e:
            self.logger.debug(f'enableLockingRange failed for {disk_name}:\n{e.stdout}{e.stderr}')
            return 'SETUP_FAILED'

        return 'SUCCESS'

    def sed_dev_name(self, disk_name):
        if disk_name.startswith("nvd"):
            nvme = get_nsid(f"/dev/{disk_name}")
            return f"/dev/{nvme}"

        return f"/dev/{disk_name}"

    @private
    async def multipath_create(self, name, consumers, mode=None):
        """
        Create an Active/Passive GEOM_MULTIPATH provider
        with name ``name`` using ``consumers`` as the consumers for it

        Modes:
            A - Active/Active
            R - Active/Read
            None - Active/Passive

        Raises:
            CallError if labelling the multipath fails
        """
        cmd = ["/sbin/gmultipath", "label", name] + consumers
        if mode:
            cmd.insert(2, f'-{mode}')
        try:
            await run(cmd, stderr=subprocess.STDOUT, encoding="utf-8", errors="ignore")
        except subprocess.CalledProcessError as e:
            raise CallError(f"Error creating multipath: {e.stdout}")

    async def __multipath_next(self):
        """
        Find out the next available name for a multipath named diskX
        where X is an incrementing number starting from 1

        Returns:
            The string of the multipath name to be created
        """
        await self.middleware.run_in_thread(geom.scan)
        numbers = sorted([
            int(RE_MPATH_NAME.search(g.name).group(1))
            for g in geom.class_by_name('MULTIPATH').geoms if RE_MPATH_NAME.match(g.name)
        ])
        if not numbers:
            numbers = [0]
        for number in range(1, numbers[-1] + 2):
            if number not in numbers:
                break
        else:
            raise ValueError('Could not find multipaths')
        return f'disk{number}'
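
    # Illustrative example of the gap search above: if the existing multipath
    # geoms are named disk1, disk2 and disk4, `numbers` is [1, 2, 4] and the
    # loop yields 'disk3'; with no multipath geoms at all it yields 'disk1'.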

    @private
    @accepts()
    async def multipath_sync(self):
        """
        Synchronize multipath disks

        Every distinct GEOM_DISK that shares an ident (i.e. disk serial)
        in conjunction with the lunid is considered a multipath and will be
        handled by GEOM_MULTIPATH.

        If the disk is not currently in use by some Volume or iSCSI Disk Extent
        then a gmultipath is automatically created and will be available for use.
        """

        await self.middleware.run_in_thread(geom.scan)

        mp_disks = []
        for g in geom.class_by_name('MULTIPATH').geoms:
            for c in g.consumers:
                p_geom = c.provider.geom
                # For now just DISK is allowed
                if p_geom.clazz.name != 'DISK':
                    self.logger.warn(
                        "A consumer that is not a disk (%s) is part of a "
                        "MULTIPATH, currently unsupported by middleware",
                        p_geom.clazz.name
                    )
                    continue
                mp_disks.append(p_geom.name)

        reserved = await self.get_reserved()

        devlist = await camcontrol_list()
        is_freenas = await self.middleware.call('system.is_freenas')

        serials = defaultdict(list)
        active_active = []
        for g in geom.class_by_name('DISK').geoms:
            if not RE_DA.match(g.name) or g.name in reserved or g.name in mp_disks:
                continue
            if not is_freenas:
                descr = g.provider.config.get('descr') or ''
                if (
                    descr == 'STEC ZeusRAM' or
                    descr.startswith('VIOLIN') or
                    descr.startswith('3PAR')
                ):
                    active_active.append(g.name)
            if devlist.get(g.name, {}).get('driver') == 'umass-sim':
                continue
            serial = ''
            v = g.provider.config.get('ident')
            if v:
                # Exclude fake serial numbers e.g. `000000000000` reported by FreeBSD 12.2 USB stack
                if not v.replace('0', ''):
                    continue
                serial = v
            v = g.provider.config.get('lunid')
            if v:
                serial += v
            if not serial:
                continue
            size = g.provider.mediasize
            serials[(serial, size)].append(g.name)
            serials[(serial, size)].sort(key=lambda x: int(x[2:]))

        disks_pairs = [disks for disks in list(serials.values())]
        disks_pairs.sort(key=lambda x: int(x[0][2:]))

        # Mode is Active/Passive for FreeNAS
        mode = None if is_freenas else 'R'
        for disks in disks_pairs:
            if not len(disks) > 1:
                continue
            name = await self.__multipath_next()
            try:
                await self.multipath_create(name, disks, 'A' if disks[0] in active_active else mode)
            except CallError as e:
                self.logger.error("Error creating multipath: %s", e.errmsg)

        # Scan again to take new multipaths into account
        await self.middleware.run_in_thread(geom.scan)
        mp_ids = []
        for g in geom.class_by_name('MULTIPATH').geoms:
            _disks = []
            for c in g.consumers:
                p_geom = c.provider.geom
                # For now just DISK is allowed
                if p_geom.clazz.name != 'DISK':
                    continue
                _disks.append(p_geom.name)

            qs = await self.middleware.call('datastore.query', 'storage.disk', [
                ['OR', [
                    ['disk_name', 'in', _disks],
                    ['disk_multipath_member', 'in', _disks],
                ]],
                ['disk_expiretime', '=', None],
            ])
            if qs:
                diskobj = qs[0]
                mp_ids.append(diskobj['disk_identifier'])
                update = False  # Make sure to not update if nothing changed
                if diskobj['disk_multipath_name'] != g.name:
                    update = True
                    diskobj['disk_multipath_name'] = g.name
                if diskobj['disk_name'] in _disks:
                    _disks.remove(diskobj['disk_name'])
                if _disks and diskobj['disk_multipath_member'] != _disks[-1]:
                    update = True
                    diskobj['disk_multipath_member'] = _disks.pop()
                if update:
                    await self.middleware.call('datastore.update', 'storage.disk', diskobj['disk_identifier'], diskobj)

        # Update all disks which were not identified as MULTIPATH, resetting attributes
        for disk in (await self.middleware.call('datastore.query', 'storage.disk', [('disk_identifier', 'nin', mp_ids)])):
            if disk['disk_multipath_name'] or disk['disk_multipath_member']:
                disk['disk_multipath_name'] = ''
                disk['disk_multipath_member'] = ''
                await self.middleware.call('datastore.update', 'storage.disk', disk['disk_identifier'], disk)

    @private
    async def check_disks_availability(self, verrors, disks, schema):
        """
        Makes sure the disks are present in the system and not reserved
        by anything else (boot, pool, iscsi, etc).

        Returns:
            dict - disk.query entries keyed by devname
        """
        disks_cache = dict(map(
            lambda x: (x['devname'], x),
            await self.middleware.call(
                'disk.query', [('devname', 'in', disks)]
            )
        ))

        disks_set = set(disks)
        disks_not_in_cache = disks_set - set(disks_cache.keys())
        if disks_not_in_cache:
            verrors.add(
                f'{schema}.topology',
                f'The following disks were not found in the system: {", ".join(disks_not_in_cache)}.'
            )

        disks_reserved = await self.middleware.call('disk.get_reserved')
        disks_reserved = disks_set - (disks_set - set(disks_reserved))
        if disks_reserved:
            verrors.add(
                f'{schema}.topology',
                f'The following disks are already in use: {", ".join(disks_reserved)}.'
            )
        return disks_cache

    @private
    async def configure_power_management(self):
        """
        This runs on boot to properly configure all power management options
        (Advanced Power Management, Automatic Acoustic Management and IDLE) for all disks.
        """
        # Do not run power management on ENTERPRISE
        if await self.middleware.call('system.product_type') == 'ENTERPRISE':
            return
        for disk in await self.middleware.call('disk.query'):
            await self.middleware.call('disk.power_management', disk['name'], disk)

    @private
    async def power_management(self, dev, disk=None):
        """
        Actually sets power management for `dev`.
        `disk` is the disk.query entry and is optional, so this can be called with just the disk name.
        """

        if not disk:
            disk = await self.middleware.call('disk.query', [('name', '=', dev)])
            if not disk:
                return
            disk = disk[0]

        return await self.middleware.call('disk.power_management_impl', dev, disk)
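
A hedged sketch of an update payload matching the `disk_update` schema above. The disk identifier and the values are placeholders, and the client import assumes the standard middlewared websocket client is installed on the host:

from middlewared.client import Client  # assumption: available on a FreeNAS/TrueNAS host

payload = {
    'hddstandby': '10',      # spin down after 10 minutes of inactivity
    'advpowermgmt': '127',   # maximum power saving without standby
    'critical': 50,          # LOG_CRIT alert above 50 degrees Celsius
    'informational': 40,     # LOG_INFO alert above 40 degrees Celsius
    'difference': 5,         # report temperature swings of 5 degrees or more
}

with Client() as c:
    # '{serial}XYZ1234' is a made-up identifier, not a real disk.
    c.call('disk.update', '{serial}XYZ1234', payload)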
Exemplo n.º 11
0
            else:
                output['error'] = result

        return {
            'disk': disk['disk'],
            'identifier': disk['identifier'],
            **output
        }

    @filterable
    @filterable_returns(Dict(
        'disk_smart_test_result',
        Str('disk', required=True),
        List('tests', items=[Dict(
            'test_result',
            Int('num', required=True),
            Str('description', required=True),
            Str('status', required=True),
            Str('status_verbose', required=True),
            Float('remaining', required=True),
            Int('lifetime', required=True),
            Str('lba_of_first_error', null=True, required=True),
        )])
    ))
    async def results(self, filters, options):
        """
        Get disk(s) S.M.A.R.T. test(s) results.

        .. examples(websocket)::

          Get all disks tests results
Exemplo n.º 12
0
class MailService(ConfigService):

    oauth_access_token = None
    oauth_access_token_expires_at = None

    class Config:
        datastore = 'system.email'
        datastore_prefix = 'em_'
        datastore_extend = 'mail.mail_extend'
        cli_namespace = 'system.mail'

    @private
    async def mail_extend(self, cfg):
        if cfg['security']:
            cfg['security'] = cfg['security'].upper()
        return cfg

    @accepts(
        Dict(
            'mail_update',
            Str('fromemail', validators=[Email()]),
            Str('fromname'),
            Str('outgoingserver'),
            Int('port'),
            Str('security', enum=['PLAIN', 'SSL', 'TLS']),
            Bool('smtp'),
            Str('user'),
            Str('pass', private=True),
            Dict('oauth',
                 Str('client_id', required=True),
                 Str('client_secret', required=True),
                 Str('refresh_token', required=True),
                 null=True,
                 private=True),
            register=True,
            update=True,
        ))
    async def do_update(self, data):
        """
        Update Mail Service Configuration.

        `fromemail` is used as a sending address which the mail server will use for sending emails.

        `outgoingserver` is the hostname or IP address of SMTP server used for sending an email.

        `security` is the type of encryption desired.

        `smtp` is a boolean value which, when set, indicates that SMTP authentication is enabled and that `user`/`pass`
        are then required attributes.
        """
        config = await self.config()

        new = config.copy()
        new.update(data)
        new['security'] = new['security'].lower()  # Django Model compatibility

        verrors = ValidationErrors()

        if new['smtp'] and new['user'] == '':
            verrors.add(
                'mail_update.user',
                'This field is required when SMTP authentication is enabled',
            )

        self.__password_verify(new['pass'], 'mail_update.pass', verrors)

        if verrors:
            raise verrors

        await self.middleware.call('datastore.update', 'system.email',
                                   config['id'], new, {'prefix': 'em_'})

        await self.middleware.call('mail.gmail_initialize')

        return await self.config()

    def __password_verify(self, password, schema, verrors=None):
        if not password:
            return
        if verrors is None:
            verrors = ValidationErrors()
        # FIXME: smtplib does not support non-ascii password yet
        # https://github.com/python/cpython/pull/8938
        try:
            password.encode('ascii')
        except UnicodeEncodeError:
            verrors.add(
                schema,
                'Only plain text characters (7-bit ASCII) are allowed in passwords. '
                'UTF or composed characters are not allowed.')
        return verrors

    @accepts(
        Dict(
            'mail_message',
            Str('subject', required=True),
            Str('text', max_length=None),
            Str('html', null=True, max_length=None),
            List('to', items=[Str('email')]),
            List('cc', items=[Str('email')]),
            Int('interval', null=True),
            Str('channel', null=True),
            Int('timeout', default=300),
            Bool('attachments', default=False),
            Bool('queue', default=True),
            Dict('extra_headers', additional_attrs=True),
            register=True,
        ), Ref('mail_update'))
    @job(pipes=['input'], check_pipes=False)
    def send(self, job, message, config):
        """
        Sends mail using configured mail settings.

        `text` will be formatted to HTML using Markdown and rendered using the default e-mail template.
        You can supply your own HTML using `html`. If `html` is null, no HTML MIME part will be added to the e-mail.

        If `attachments` is true, a list composed of dicts with the following structure must be provided
        via HTTP upload:
          - headers(list)
            - name(str)
            - value(str)
            - params(dict)
          - content (str)

        [
         {
          "headers": [
           {
            "name": "Content-Transfer-Encoding",
            "value": "base64"
           },
           {
            "name": "Content-Type",
            "value": "application/octet-stream",
            "params": {
             "name": "test.txt"
            }
           }
          ],
          "content": "dGVzdAo="
         }
        ]
        """

        product_name = self.middleware.call_sync('system.product_name')

        gc = self.middleware.call_sync('datastore.config',
                                       'network.globalconfiguration')

        hostname = f'{gc["gc_hostname"]}.{gc["gc_domain"]}'

        message['subject'] = f'{product_name} {hostname}: {message["subject"]}'

        add_html = True
        if 'html' in message and message['html'] is None:
            message.pop('html')
            add_html = False

        if 'text' not in message:
            if 'html' not in message:
                verrors = ValidationErrors()
                verrors.add('mail_message.text',
                            'Text is required when HTML is not set')
                verrors.check()

            message['text'] = html2text.html2text(message['html'])

        if add_html and 'html' not in message:
            lookup = TemplateLookup(directories=[
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             '../assets/templates')
            ],
                                    module_directory="/tmp/mako/templates")

            tmpl = lookup.get_template('mail.html')

            message['html'] = tmpl.render(
                body=html.escape(message['text']).replace('\n', '<br>\n'))

        return self.send_raw(job, message, config)

    @accepts(Ref('mail_message'), Ref('mail_update'))
    @job(pipes=['input'], check_pipes=False)
    @private
    def send_raw(self, job, message, config):
        config = dict(self.middleware.call_sync('mail.config'), **config)

        if config['fromname']:
            from_addr = Header(config['fromname'], 'utf-8')
            try:
                config['fromemail'].encode('ascii')
            except UnicodeEncodeError:
                from_addr.append(f'<{config["fromemail"]}>', 'utf-8')
            else:
                from_addr.append(f'<{config["fromemail"]}>', 'ascii')
        else:
            try:
                config['fromemail'].encode('ascii')
            except UnicodeEncodeError:
                from_addr = Header(config['fromemail'], 'utf-8')
            else:
                from_addr = Header(config['fromemail'], 'ascii')

        interval = message.get('interval')
        if interval is None:
            interval = timedelta()
        else:
            interval = timedelta(seconds=interval)

        sw_name = self.middleware.call_sync('system.info')['version'].split(
            '-', 1)[0]

        channel = message.get('channel')
        if not channel:
            channel = sw_name.lower()
        if interval > timedelta():
            channelfile = '/tmp/.msg.%s' % (channel)
            last_update = datetime.now() - interval
            try:
                last_update = datetime.fromtimestamp(
                    os.stat(channelfile).st_mtime)
            except OSError:
                pass
            timediff = datetime.now() - last_update
            if (timediff >= interval) or (timediff < timedelta()):
                # Make sure mtime is modified
                # We could use os.utime but this is simpler!
                with open(channelfile, 'w') as f:
                    f.write('!')
            else:
                raise CallError(
                    'This message was already sent in the given interval')

        verrors = self.__password_verify(config['pass'], 'mail-config.pass')
        if verrors:
            raise verrors
        to = message.get('to')
        if not to:
            to = [
                self.middleware.call_sync('user.query',
                                          [('username', '=', 'root')],
                                          {'get': True})['email']
            ]
            if not to[0]:
                raise CallError('Email address for root is not configured')

        if message.get('attachments'):
            job.check_pipe("input")

            def read_json():
                f = job.pipes.input.r
                data = b''
                i = 0
                while True:
                    read = f.read(1048576)  # 1MiB
                    if read == b'':
                        break
                    data += read
                    i += 1
                    if i > 50:
                        raise ValueError(
                            'Attachments bigger than 50MB not allowed yet')
                if data == b'':
                    return None
                return json.loads(data)

            attachments = read_json()
        else:
            attachments = None

        if 'html' in message or attachments:
            msg = MIMEMultipart()
            msg.preamble = 'This is a multi-part message in MIME format.'
            if 'html' in message:
                msg2 = MIMEMultipart('alternative')
                msg2.attach(
                    MIMEText(message['text'], 'plain', _charset='utf-8'))
                msg2.attach(MIMEText(message['html'], 'html',
                                     _charset='utf-8'))
                msg.attach(msg2)
            if attachments:
                for attachment in attachments:
                    m = Message()
                    m.set_payload(attachment['content'])
                    for header in attachment.get('headers'):
                        m.add_header(header['name'], header['value'],
                                     **(header.get('params') or {}))
                    msg.attach(m)
        else:
            msg = MIMEText(message['text'], _charset='utf-8')

        msg['Subject'] = message['subject']

        msg['From'] = from_addr
        msg['To'] = ', '.join(to)
        if message.get('cc'):
            msg['Cc'] = ', '.join(message.get('cc'))
        msg['Date'] = formatdate()

        local_hostname = socket.gethostname()

        msg['Message-ID'] = "<%s-%s.%s@%s>" % (
            sw_name.lower(), datetime.utcnow().strftime("%Y%m%d.%H%M%S.%f"),
            base64.urlsafe_b64encode(os.urandom(3)), local_hostname)

        extra_headers = message.get('extra_headers') or {}
        for key, val in list(extra_headers.items()):
            # We already have "Content-Type: multipart/mixed" and setting "Content-Type: text/plain" like some scripts
            # do will break python e-mail module.
            if key.lower() == "content-type":
                continue

            if key in msg:
                msg.replace_header(key, val)
            else:
                msg[key] = val

        syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_MAIL)
        try:
            if config['oauth']:
                self.middleware.call_sync('mail.gmail_send', msg, config)
            else:
                server = self._get_smtp_server(config,
                                               message['timeout'],
                                               local_hostname=local_hostname)
                # NOTE: Don't do this.
                #
                # If smtplib.SMTP* tells you to run connect() first, it's because the
                # mailserver it tried connecting to via the outgoing server argument
                # was unreachable and it tried to connect to 'localhost' and barfed.
                # This is because FreeNAS doesn't run a full MTA.
                # else:
                #    server.connect()
                headers = '\n'.join([f'{k}: {v}' for k, v in msg._headers])
                syslog.syslog(f"sending mail to {', '.join(to)}\n{headers}")
                server.sendmail(from_addr.encode(), to, msg.as_string())
                server.quit()
        except Exception as e:
            # Don't spam syslog with these messages. They should only end up in the
            # test-email pane.
            # We are only interested in ValueError, not subclasses.
            if e.__class__ is ValueError:
                raise CallError(str(e))
            syslog.syslog(f'Failed to send email to {", ".join(to)}: {str(e)}')
            if isinstance(e, smtplib.SMTPAuthenticationError):
                raise CallError(
                    f'Authentication error ({e.smtp_code}): {e.smtp_error}',
                    errno.EAUTH if osc.IS_FREEBSD else errno.EPERM)
            self.logger.warn('Failed to send email: %s', str(e), exc_info=True)
            if message['queue']:
                with MailQueue() as mq:
                    mq.append(msg)
            raise CallError(f'Failed to send email: {e}')
        return True

    def _get_smtp_server(self, config, timeout=300, local_hostname=None):
        self.middleware.call_sync('network.general.will_perform_activity',
                                  'mail')

        if local_hostname is None:
            local_hostname = socket.gethostname()

        if not config['outgoingserver'] or not config['port']:
            # See NOTE below.
            raise ValueError('you must provide an outgoing mailserver and mail'
                             ' server port when sending mail')
        if config['security'] == 'SSL':
            server = smtplib.SMTP_SSL(config['outgoingserver'],
                                      config['port'],
                                      timeout=timeout,
                                      local_hostname=local_hostname)
        else:
            server = smtplib.SMTP(config['outgoingserver'],
                                  config['port'],
                                  timeout=timeout,
                                  local_hostname=local_hostname)
            if config['security'] == 'TLS':
                server.starttls()
        if config['smtp']:
            server.login(config['user'], config['pass'])

        return server

    @periodic(600, run_on_start=False)
    @private
    def send_mail_queue(self):
        with MailQueue() as mq:
            for queue in list(mq.queue):
                try:
                    config = self.middleware.call_sync('mail.config')
                    if config['oauth']:
                        self.middleware.call_sync('mail.gmail_send',
                                                  queue.message, config)
                    else:
                        server = self._get_smtp_server(config)
                        server.sendmail(queue.message['From'].encode(),
                                        queue.message['To'].split(', '),
                                        queue.message.as_string())
                        server.quit()
                except Exception:
                    self.logger.debug('Sending message from queue failed',
                                      exc_info=True)
                    queue.attempts += 1
                    if queue.attempts >= mq.MAX_ATTEMPTS:
                        mq.queue.remove(queue)
                else:
                    mq.queue.remove(queue)
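
The attachment structure that `mail.send` expects when `attachments` is true can be built with the standard library alone; a minimal sketch with placeholder file content:

import base64
import json

# Placeholder content; in practice this is the file being attached.
content = base64.b64encode(b'test\n').decode()

attachments = [{
    'headers': [
        {'name': 'Content-Transfer-Encoding', 'value': 'base64'},
        {'name': 'Content-Type', 'value': 'application/octet-stream',
         'params': {'name': 'test.txt'}},
    ],
    'content': content,
}]

# This JSON document is what gets uploaded through the job's input pipe.
print(json.dumps(attachments, indent=1))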
Exemplo n.º 13
0
class GroupService(CRUDService):
    class Config:
        datastore = 'account.bsdgroups'
        datastore_prefix = 'bsdgrp_'
        datastore_extend = 'group.group_extend'

    @private
    async def group_extend(self, group):
        # Get group membership
        group['users'] = [
            gm['user']['id'] for gm in await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership', [(
                    'group', '=', group['id'])], {'prefix': 'bsdgrpmember_'})
        ]
        group['users'] += [
            gmu['id'] for gmu in await self.middleware.call(
                'datastore.query', 'account.bsdusers', [('bsdusr_group_id',
                                                         '=', group['id'])])
        ]
        return group

    @accepts(
        Dict(
            'group_create',
            Int('gid'),
            Str('name', required=True),
            Bool('sudo', default=False),
            Bool('allow_duplicate_gid', default=False),
            List('users', items=[Int('id')], required=False),
            register=True,
        ))
    async def do_create(self, data):

        verrors = ValidationErrors()
        await self.__common_validation(verrors, data)
        if verrors:
            raise verrors

        if not data.get('gid'):
            data['gid'] = await self.get_next_gid()

        group = data.copy()
        group['group'] = group.pop('name')

        users = group.pop('users', [])

        pk = await self.middleware.call('datastore.insert',
                                        'account.bsdgroups', group,
                                        {'prefix': 'bsdgrp_'})

        for user in users:
            await self.middleware.call('datastore.insert',
                                       'account.bsdgroupmembership', {
                                           'bsdgrpmember_group': pk,
                                           'bsdgrpmember_user': user
                                       })

        await self.middleware.call('notifier.groupmap_add', data['name'],
                                   data['name'])

        await self.middleware.call('service.reload', 'user')

        return pk

    @accepts(
        Int('id'),
        Patch(
            'group_create',
            'group_update',
            ('attr', {
                'update': True
            }),
        ),
    )
    async def do_update(self, pk, data):

        group = await self._get_instance(pk)

        verrors = ValidationErrors()
        await self.__common_validation(verrors, data, pk=pk)
        if verrors:
            raise verrors

        group.update(data)
        delete_groupmap = False
        group.pop('users', None)

        if 'name' in data and data['name'] != group['group']:
            delete_groupmap = group['group']
            group['group'] = group.pop('name')

        await self.middleware.call('datastore.update', 'account.bsdgroups', pk,
                                   group, {'prefix': 'bsdgrp_'})

        if 'users' in data:
            existing = {
                i['bsdgrpmember_user']['id']: i
                for i in await self.middleware.call(
                    'datastore.query', 'account.bsdgroupmembership', [(
                        'bsdgrpmember_group', '=', pk)])
            }
            to_remove = set(existing.keys()) - set(data['users'])
            for i in to_remove:
                await self.middleware.call('datastore.delete',
                                           'account.bsdgroupmembership',
                                           existing[i]['id'])

            to_add = set(data['users']) - set(existing.keys())
            for i in to_add:
                await self.middleware.call('datastore.insert',
                                           'account.bsdgroupmembership', {
                                               'bsdgrpmember_group': pk,
                                               'bsdgrpmember_user': i
                                           })

        if delete_groupmap:
            await self.middleware.call('notifier.groupmap_delete',
                                       delete_groupmap)

        await self.middleware.call('notifier.groupmap_add', group['group'],
                                   group['group'])

        await self.middleware.call('service.reload', 'user')

        return pk

    @accepts(Int('id'), Dict('options', Bool('delete_users', default=False)))
    async def do_delete(self, pk, options=None):

        group = await self._get_instance(pk)

        if group['builtin']:
            raise CallError('A built-in group cannot be deleted.',
                            errno.EACCES)

        if options['delete_users']:
            for i in await self.middleware.call('datastore.query',
                                                'account.bsdusers',
                                                [('group', '=', group['id'])],
                                                {'prefix': 'bsdusr_'}):
                await self.middleware.call('datastore.delete',
                                           'account.bsdusers', i['id'])

        if await self.middleware.call('notifier.common', 'system',
                                      'domaincontroller_enabled'):
            await self.middleware.call('notifier.samba4', 'group_delete',
                                       [group['group']])

        await self.middleware.call('datastore.delete', 'account.bsdgroups', pk)

        await self.middleware.call('service.reload', 'user')

        return pk

    async def get_next_gid(self):
        """
        Get the next available/free gid.
        """
        last_gid = 999
        for i in await self.middleware.call('datastore.query',
                                            'account.bsdgroups',
                                            [('builtin', '=', False)], {
                                                'order_by': ['gid'],
                                                'prefix': 'bsdgrp_'
                                            }):
            # If the difference between the last gid and the current one is
            # bigger than 1, it means we have a gap and can use it.
            if i['gid'] - last_gid > 1:
                return last_gid + 1
            last_gid = i['gid']
        return last_gid + 1
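
    # Illustrative example of the gap search above: with non-builtin gids
    # 1000, 1001 and 1005 the loop stops at 1005 - 1001 > 1 and returns 1002;
    # with no non-builtin groups at all it returns 1000 (last_gid starts at 999).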

    async def __common_validation(self, verrors, data, pk=None):

        exclude_filter = [('id', '!=', pk)] if pk else []

        if 'name' in data:
            existing = await self.middleware.call(
                'datastore.query', 'account.bsdgroups',
                [('group', '=', data['name'])] + exclude_filter,
                {'prefix': 'bsdgrp_'})
            if existing:
                verrors.add(
                    'name',
                    f'A Group with the name "{data["name"]}" already exists.',
                    errno.EEXIST)

            pw_checkname(verrors, 'name', data['name'])

        allow_duplicate_gid = data.pop('allow_duplicate_gid', False)
        if data.get('gid') and not allow_duplicate_gid:
            existing = await self.middleware.call(
                'datastore.query', 'account.bsdgroups',
                [('gid', '=', data['gid'])] + exclude_filter,
                {'prefix': 'bsdgrp_'})
            if existing:
                verrors.add('gid',
                            f'The Group ID "{data["gid"]}" already exists.',
                            errno.EEXIST)

        if 'users' in data:
            existing = set([
                i['id'] for i in await self.middleware.call(
                    'datastore.query', 'account.bsdusers', [('id', 'in',
                                                             data['users'])])
            ])
            notfound = set(data['users']) - existing
            if notfound:
                verrors.add(
                    'users',
                    f'The following users do not exist: {", ".join(map(str, notfound))}'
                )
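
A hedged sketch of a `group.create` call matching the `group_create` schema above. The group name and user ids are placeholders, and the client import is the same assumption as in the earlier disk example:

from middlewared.client import Client  # assumption: available on the host

with Client() as c:
    # 'mediausers' and the user ids are illustrative only.
    pk = c.call('group.create', {
        'name': 'mediausers',
        'sudo': False,
        'users': [41, 42],  # must be existing user ids; checked by __common_validation
    })
    print(f'created group with primary key {pk}')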
Exemplo n.º 14
0
class UserService(CRUDService):
    class Config:
        datastore = 'account.bsdusers'
        datastore_extend = 'user.user_extend'
        datastore_prefix = 'bsdusr_'

    @private
    async def user_extend(self, user):

        # Get group membership
        user['groups'] = [
            gm['group']['id'] for gm in await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership', [(
                    'user', '=', user['id'])], {'prefix': 'bsdgrpmember_'})
        ]

        # Get authorized keys
        keysfile = f'{user["home"]}/.ssh/authorized_keys'
        user['sshpubkey'] = None
        if os.path.exists(keysfile):
            try:
                with open(keysfile, 'r') as f:
                    user['sshpubkey'] = f.read()
            except Exception:
                pass
        return user

    @accepts(
        Dict(
            'user_create',
            Int('uid'),
            Str('username', required=True),
            Int('group'),
            Bool('group_create', default=False),
            Str('home', default='/nonexistent'),
            Str('home_mode', default='755'),
            Str('shell', default='/bin/csh'),
            Str('full_name', required=True),
            Str('email'),
            Str('password'),
            Bool('password_disabled', default=False),
            Bool('locked', default=False),
            Bool('microsoft_account', default=False),
            Bool('sudo', default=False),
            Str('sshpubkey'),
            List('groups'),
            Dict('attributes', additional_attrs=True),
            register=True,
        ))
    async def do_create(self, data):

        verrors = ValidationErrors()

        if (not data.get('group') and not data.get('group_create')) or (
                data.get('group') is not None and data.get('group_create')):
            verrors.add(
                'group', f'Enter either a group name or create a new group to '
                'continue.', errno.EINVAL)

        await self.__common_validation(verrors, data)

        if data.get('sshpubkey') and not data['home'].startswith('/mnt'):
            verrors.add(
                'sshpubkey',
                'The home directory is not writable. Leave this field blank.')

        if verrors:
            raise verrors

        groups = data.pop('groups') or []
        create = data.pop('group_create')

        if create:
            group = await self.middleware.call(
                'group.query', [('group', '=', data['username'])])
            if group:
                group = group[0]
            else:
                group = await self.middleware.call('group.create',
                                                   {'name': data['username']})
                group = (await self.middleware.call('group.query',
                                                    [('id', '=', group)]))[0]
            data['group'] = group['id']
        else:
            group = await self.middleware.call('group.query',
                                               [('id', '=', data['group'])])
            if not group:
                raise CallError(f'Group {data["group"]} not found')
            group = group[0]

        # Is this a new directory or not? Let's not nuke existing directories,
        # e.g. /, /root, /mnt/tank/my-dataset, etc ;).
        new_homedir = False
        home_mode = data.pop('home_mode')
        if data['home'] and data['home'] != '/nonexistent':
            try:
                os.makedirs(data['home'], mode=int(home_mode, 8))
                os.chown(data['home'], data['uid'], group['gid'])
            except FileExistsError:
                if not os.path.isdir(data['home']):
                    raise CallError(
                        'Path for home directory already '
                        'exists and is not a directory', errno.EEXIST)

                # If it exists, ensure the user is owner
                os.chown(data['home'], data['uid'], group['gid'])
            except OSError as oe:
                raise CallError('Failed to create the home directory '
                                f'({data["home"]}) for user: {oe}')
            else:
                new_homedir = True
            if os.stat(data['home']).st_dev == os.stat('/mnt').st_dev:
                raise CallError(
                    f'The path for the home directory "({data["home"]})" '
                    'must include a volume or dataset.')

        if not data.get('uid'):
            data['uid'] = await self.get_next_uid()

        pk = None  # Make sure pk exists to rollback in case of an error
        try:
            password = await self.__set_password(data)
            sshpubkey = data.pop('sshpubkey',
                                 None)  # datastore does not have sshpubkey

            pk = await self.middleware.call('datastore.insert',
                                            'account.bsdusers', data,
                                            {'prefix': 'bsdusr_'})

            await self.__set_groups(pk, groups)

        except Exception:
            if pk is not None:
                await self.middleware.call('datastore.delete',
                                           'account.bsdusers', pk)
            if new_homedir:
                # Be as atomic as possible when creating the user if
                # commands failed to execute cleanly.
                shutil.rmtree(data['home'])
            raise

        await self.middleware.call('service.reload', 'user')

        await self.__set_smbpasswd(data['username'], password)

        if os.path.exists(data['home']):
            for f in os.listdir(SKEL_PATH):
                if f.startswith('dot'):
                    dest_file = os.path.join(data['home'], f[3:])
                else:
                    dest_file = os.path.join(data['home'], f)
                if not os.path.exists(dest_file):
                    shutil.copyfile(os.path.join(SKEL_PATH, f), dest_file)
                    os.chown(dest_file, data['uid'], group['gid'])

            data['sshpubkey'] = sshpubkey
            try:
                await self.__update_sshpubkey(data['home'], data,
                                              group['group'])
            except PermissionError as e:
                self.logger.warn('Failed to update authorized keys',
                                 exc_info=True)
                raise CallError(f'Failed to update authorized keys: {e}')

        return pk

    @accepts(
        Int('id'),
        Patch(
            'user_create',
            'user_update',
            ('attr', {
                'update': True
            }),
            ('rm', {
                'name': 'group_create'
            }),
        ),
    )
    async def do_update(self, pk, data):

        user = await self._get_instance(pk)

        verrors = ValidationErrors()

        if 'group' in data:
            group = await self.middleware.call('datastore.query',
                                               'account.bsdgroups',
                                               [('id', '=', data['group'])])
            if not group:
                verrors.add('group', f'Group {data["group"]} not found',
                            errno.ENOENT)
            else:
                group = group[0]
        else:
            group = user['group']
            user['group'] = group['id']

        await self.__common_validation(verrors, data, pk=pk)

        home = data.get('home') or user['home']
        # root user (uid 0) is an exception to the rule
        if (data.get('sshpubkey') and not home.startswith('/mnt')
                and user['uid'] != 0):
            verrors.add('sshpubkey',
                        'Home directory is not writable, leave this blank')

        # Do not allow attributes to be changed for builtin user
        if user['builtin']:
            for i in ('group', 'home', 'home_mode', 'uid', 'username'):
                if i in data:
                    verrors.add(i, 'This attribute cannot be changed')

        if verrors:
            raise verrors

        # Copy the home directory if it changed
        if ('home' in data
                and data['home'] not in (user['home'], '/nonexistent')
                and not data['home'].startswith(f'{user["home"]}/')):
            home_copy = True
            home_old = user['home']
        else:
            home_copy = False

        # After this point user dict has values from data
        user.update(data)

        if home_copy and not os.path.isdir(user['home']):
            try:
                os.makedirs(user['home'])
                os.chown(user['home'], user['uid'], group['bsdgrp_gid'])
            except OSError:
                self.logger.warn('Failed to chown homedir', exc_info=True)
            if not os.path.isdir(user['home']):
                raise CallError(f'{user["home"]} is not a directory')

        home_mode = user.pop('home_mode', None)
        if user['builtin']:
            home_mode = None

        def set_home_mode():
            if home_mode is not None:
                try:
                    os.chmod(user['home'], int(home_mode, 8))
                except OSError:
                    self.logger.warn('Failed to set homedir mode',
                                     exc_info=True)

        try:
            await self.__update_sshpubkey(
                home_old if home_copy else user['home'],
                user,
                group['bsdgrp_group'],
            )
        except PermissionError as e:
            self.logger.warn('Failed to update authorized keys', exc_info=True)
            raise CallError(f'Failed to update authorized keys: {e}')

        if home_copy:

            def do_home_copy():
                try:
                    subprocess.run(
                        f"/usr/bin/su - {user['username']} -c '/bin/cp -a {home_old}/ {user['home']}/'",
                        shell=True,
                        check=True)
                except subprocess.CalledProcessError as e:
                    self.logger.warn(f"Failed to copy homedir: {e}")
                set_home_mode()

            asyncio.ensure_future(
                self.middleware.run_in_io_thread(do_home_copy))
        else:
            set_home_mode()

        user.pop('sshpubkey', None)
        password = await self.__set_password(user)

        if 'groups' in user:
            groups = user.pop('groups')
            await self.__set_groups(pk, groups)

        await self.middleware.call('datastore.update', 'account.bsdusers', pk,
                                   user, {'prefix': 'bsdusr_'})

        await self.middleware.call('service.reload', 'user')

        await self.__set_smbpasswd(user['username'], password)

        return pk

    @accepts(Int('id'), Dict('options', Bool('delete_group', default=True)))
    async def do_delete(self, pk, options=None):

        user = await self._get_instance(pk)

        if user['builtin']:
            raise CallError('Cannot delete a built-in user', errno.EINVAL)

        if options['delete_group'] and not user['group']['bsdgrp_builtin']:
            count = await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership',
                [('group', '=', user['group']['id'])], {
                    'prefix': 'bsdgrpmember_',
                    'count': True
                })
            count2 = await self.middleware.call(
                'datastore.query', 'account.bsdusers',
                [('group', '=', user['group']['id']), ('id', '!=', pk)], {
                    'prefix': 'bsdusr_',
                    'count': True
                })
            if count == 0 and count2 == 0:
                try:
                    await self.middleware.call('group.delete',
                                               user['group']['id'])
                except Exception:
                    self.logger.warn(
                        f'Failed to delete primary group of {user["username"]}',
                        exc_info=True)

        await run('smbpasswd', '-x', user['username'], check=False)

        if await self.middleware.call('notifier.common', 'system',
                                      'domaincontroller_enabled'):
            await self.middleware.call('notifier.samba4', 'user_delete',
                                       [user['username']])

        # TODO: add a hook in CIFS service
        cifs = await self.middleware.call('datastore.query', 'services.cifs',
                                          [], {'prefix': 'cifs_srv_'})
        if cifs:
            cifs = cifs[0]
            if cifs['guest'] == user['username']:
                await self.middleware.call('datastore.update', 'services.cifs',
                                           cifs['id'], {'guest': 'nobody'},
                                           {'prefix': 'cifs_srv_'})

        await self.middleware.call('datastore.delete', 'account.bsdusers', pk)
        await self.middleware.call('service.reload', 'user')

        return pk

    @item_method
    @accepts(
        Int('id'),
        Str('key'),
        Any('value'),
    )
    async def set_attribute(self, pk, key, value):
        """
        Set user general purpose `attributes` dictionary `key` to `value`.

        e.g. Setting key="foo" value="bar" will result in {"attributes": {"foo": "bar"}}
        """
        user = await self._get_instance(pk)
        user.pop('group')

        user['attributes'][key] = value
        await self.middleware.call('datastore.update', 'account.bsdusers', pk,
                                   user, {'prefix': 'bsdusr_'})

        return True

    @item_method
    @accepts(
        Int('id'),
        Str('key'),
    )
    async def pop_attribute(self, pk, key):
        """
        Remove user general purpose `attributes` dictionary `key`.
        """
        user = await self._get_instance(pk)
        user.pop('group')

        if key in user['attributes']:
            user['attributes'].pop(key)
            await self.middleware.call('datastore.update', 'account.bsdusers',
                                       pk, user, {'prefix': 'bsdusr_'})
            return True
        else:
            return False

    @accepts()
    async def get_next_uid(self):
        """
        Get the next available/free uid.
        """
        last_uid = 999
        for i in await self.middleware.call('datastore.query',
                                            'account.bsdusers',
                                            [('builtin', '=', False)], {
                                                'order_by': ['uid'],
                                                'prefix': 'bsdusr_'
                                            }):
            # If the difference between the last uid and the current one is
            # bigger than 1, it means we have a gap and can use it.
            if i['uid'] - last_uid > 1:
                return last_uid + 1
            last_uid = i['uid']
        return last_uid + 1

    async def __common_validation(self, verrors, data, pk=None):

        exclude_filter = [('id', '!=', pk)] if pk else []

        if 'username' in data:
            pw_checkname(verrors, 'username', data['username'])

            if await self.middleware.call(
                    'datastore.query', 'account.bsdusers',
                [('username', '=', data['username'])] + exclude_filter,
                {'prefix': 'bsdusr_'}):
                verrors.add(
                    'username',
                    f'The username "{data["username"]}" already exists.',
                    errno.EEXIST)

        password = data.get('password')
        if password and '?' in password:
            # See bug #4098
            verrors.add(
                'password',
                'An SMB issue prevents creating passwords containing a '
                'question mark (?).', errno.EINVAL)
        elif not pk and not password and not data.get('password_disabled'):
            verrors.add('password', 'Password is required')
        elif data.get('password_disabled') and password:
            verrors.add(
                'password_disabled',
                'Leave "Password" blank when "Disable password login" '
                'is checked.')

        if 'home' in data:
            if ':' in data['home']:
                verrors.add('home',
                            '"Home Directory" cannot contain colons (:).')
            if not data['home'].startswith(
                    '/mnt/') and data['home'] != '/nonexistent':
                verrors.add(
                    'home', '"Home Directory" must begin with /mnt/ or set to '
                    '/nonexistent.')

        if 'groups' in data:
            groups = data.get('groups') or []
            if groups and len(groups) > 64:
                verrors.add(
                    'groups',
                    'A user cannot belong to more than 64 auxiliary groups.')

        if 'full_name' in data and ':' in data['full_name']:
            verrors.add('full_name',
                        'The ":" character is not allowed in a "Full Name".')

    async def __set_password(self, data):
        if 'password' not in data:
            return
        password = data.pop('password')
        if password:
            data['unixhash'] = crypted_password(password)
            # See http://samba.org.ru/samba/docs/man/manpages/smbpasswd.5.html
            data['smbhash'] = (
                f'{data["username"]}:{data["uid"]}:{"X" * 32}:'
                f'{nt_password(password)}:[U          ]:LCT-{int(time.time()):X}:'
            )
        else:
            data['unixhash'] = '*'
            data['smbhash'] = '*'
        return password

    async def __set_smbpasswd(self, username, password):
        """
        Currently we set Samba passwords using smbpasswd, which can only
        happen after the user exists in master.passwd. That is why setting
        the password is split into two methods/steps.
        """
        if not password:
            return
        proc = await Popen(['smbpasswd', '-D', '0', '-s', '-a', username],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           stdin=subprocess.PIPE)
        await proc.communicate(input=f'{password}\n{password}\n'.encode())

    async def __set_groups(self, pk, groups):

        groups = set(groups)
        existing_ids = set()
        for gm in await self.middleware.call('datastore.query',
                                             'account.bsdgroupmembership',
                                             [('user', '=', pk)],
                                             {'prefix': 'bsdgrpmember_'}):
            if gm['id'] not in groups:
                await self.middleware.call('datastore.delete',
                                           'account.bsdgroupmembership',
                                           gm['id'])
            else:
                existing_ids.add(gm['id'])

        for _id in groups - existing_ids:
            group = await self.middleware.call('datastore.query',
                                               'account.bsdgroups',
                                               [('id', '=', _id)],
                                               {'prefix': 'bsdgrp_'})
            if not group:
                raise CallError(f'Group {_id} not found', errno.ENOENT)
            await self.middleware.call('datastore.insert',
                                       'account.bsdgroupmembership', {
                                           'group': _id,
                                           'user': pk
                                       }, {'prefix': 'bsdgrpmember_'})

    async def __update_sshpubkey(self, homedir, user, group):
        if 'sshpubkey' not in user:
            return
        if not os.path.isdir(homedir):
            return

        sshpath = f'{homedir}/.ssh'
        keysfile = f'{sshpath}/authorized_keys'

        pubkey = user.get('sshpubkey') or ''
        pubkey = pubkey.strip()
        if pubkey == '':
            try:
                os.unlink(keysfile)
            except OSError:
                pass
            return

        oldpubkey = ''
        try:
            with open(keysfile, 'r') as f:
                oldpubkey = f.read().strip()
        except Exception:
            pass

        if pubkey == oldpubkey:
            return

        if not os.path.isdir(sshpath):
            os.mkdir(sshpath, mode=0o700)
        if not os.path.isdir(sshpath):
            raise CallError(f'{sshpath} is not a directory')
        with open(keysfile, 'w') as f:
            f.write(pubkey)
            f.write('\n')
        os.chmod(keysfile, 0o600)
        await run('/usr/sbin/chown',
                  '-R',
                  f'{user["username"]}:{group}',
                  sshpath,
                  check=False)
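
The uid allocation in get_next_uid above simply walks the existing non-builtin uids in ascending order and reuses the first gap it finds. Below is a minimal standalone sketch of that idea, with a hypothetical uid list in place of the datastore query and 1000 assumed as the first non-builtin uid:

# Sketch only: next_free_uid and the sample uids are illustrative, not part of
# the middleware API.
def next_free_uid(existing_uids, start=1000):
    last_uid = start - 1
    for uid in sorted(existing_uids):
        # A gap larger than 1 between consecutive uids means one can be reused.
        if uid - last_uid > 1:
            return last_uid + 1
        last_uid = uid
    return last_uid + 1


if __name__ == '__main__':
    print(next_free_uid([1000, 1001, 1005]))  # -> 1002
    print(next_free_uid([]))                  # -> 1000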
Exemplo n.º 15
0
class PoolService(CRUDService):

    GELI_KEYPATH = '/data/geli'

    class Config:
        datastore = 'storage.volume'
        datastore_extend = 'pool.pool_extend'
        datastore_prefix = 'vol_'

    @accepts()
    async def filesystem_choices(self):
        vol_names = [vol['name'] for vol in (await self.query())]
        return [
            y['name']
            for y in await self.middleware.call('zfs.dataset.query', [(
                'name', 'rnin', '.system'), ('pool', 'in', vol_names)])
        ]

    def _topology(self, x, geom_scan=True):
        """
        Transform topology output from libzfs to add `device` and make `type` uppercase.
        """
        if isinstance(x, dict):
            path = x.get('path')
            if path is not None:
                device = None
                if path.startswith('/dev/'):
                    device = self.middleware.call_sync('disk.label_to_dev',
                                                       path[5:], geom_scan)
                x['device'] = device
            for key in x:
                if key == 'type' and isinstance(x[key], str):
                    x[key] = x[key].upper()
                else:
                    x[key] = self._topology(x[key], False)
        elif isinstance(x, list):
            for i, entry in enumerate(x):
                x[i] = self._topology(x[i], False)
        return x

    @private
    def pool_extend(self, pool):
        """
        If the pool is encrypted, we need to check whether the pool is
        imported or all geli providers exist.
        """
        try:
            zpool = self.middleware.call_sync('zfs.pool.query',
                                              [('id', '=', pool['name'])])[0]
        except Exception:
            zpool = None

        if zpool:
            pool['status'] = zpool['status']
            pool['scan'] = zpool['scan']
            pool['topology'] = self._topology(zpool['groups'])
        else:
            pool.update({
                'status': 'OFFLINE',
                'scan': None,
                'topology': None,
            })

        if pool['encrypt'] > 0:
            if zpool:
                pool['is_decrypted'] = True
            else:
                decrypted = True
                for ed in self.middleware.call_sync(
                        'datastore.query', 'storage.encrypteddisk',
                    [('encrypted_volume', '=', pool['id'])]):
                    if not os.path.exists(
                            f'/dev/{ed["encrypted_provider"]}.eli'):
                        decrypted = False
                        break
                pool['is_decrypted'] = decrypted
        else:
            pool['is_decrypted'] = True
        return pool

    @item_method
    @accepts(Int('id', required=False))
    async def get_disks(self, oid=None):
        """
        Get all disks in use by pools.
        If `id` is provided, only the disks from the given pool `id` will be returned.
        """
        filters = []
        if oid:
            filters.append(('id', '=', oid))
        for pool in await self.query(filters):
            if pool['is_decrypted']:
                async for i in await self.middleware.call(
                        'zfs.pool.get_disks', pool['name']):
                    yield i
            else:
                for encrypted_disk in await self.middleware.call(
                        'datastore.query', 'storage.encrypteddisk',
                    [('encrypted_volume', '=', pool['id'])]):
                    disk = {
                        k[len("disk_"):]: v
                        for k, v in encrypted_disk["encrypted_disk"].items()
                    }
                    name = await self.middleware.call("disk.get_name", disk)
                    if os.path.exists(os.path.join("/dev", name)):
                        yield name

    @item_method
    @accepts(Int('id'))
    async def download_encryption_key(self, oid):
        """
        Download encryption key for a given pool `id`.
        """
        pool = await self.query([('id', '=', oid)], {'get': True})
        if not pool['encryptkey']:
            return None

        job_id, url = await self.middleware.call(
            'core.download', 'filesystem.get',
            [os.path.join(self.GELI_KEYPATH, f"{pool['encryptkey']}.key")],
            'geli.key')
        return url

    @private
    def configure_resilver_priority(self):
        """
        Configure resilver priority based on user selected off-peak hours.
        """
        resilver = self.middleware.call_sync('datastore.config',
                                             'storage.resilver')

        if not resilver['enabled'] or not resilver['weekday']:
            return

        higher_prio = False
        # Use a list (not a lazy map) so membership can be tested more than once.
        weekdays = [int(x) for x in resilver['weekday'].split(',')]
        now = datetime.now()
        now_t = now.time()
        # end overlaps the day
        if resilver['begin'] > resilver['end']:
            if now.isoweekday() in weekdays and now_t >= resilver['begin']:
                higher_prio = True
            else:
                lastweekday = now.isoweekday() - 1
                if lastweekday == 0:
                    lastweekday = 7
                if lastweekday in weekdays and now_t < resilver['end']:
                    higher_prio = True
        # end does not overlap the day
        else:
            if now.isoweekday() in weekdays and now_t >= resilver[
                    'begin'] and now_t < resilver['end']:
                higher_prio = True

        if higher_prio:
            resilver_delay = 0
            resilver_min_time_ms = 9000
            scan_idle = 0
        else:
            resilver_delay = 2
            resilver_min_time_ms = 3000
            scan_idle = 50

        sysctl.filter('vfs.zfs.resilver_delay')[0].value = resilver_delay
        sysctl.filter(
            'vfs.zfs.resilver_min_time_ms')[0].value = resilver_min_time_ms
        sysctl.filter('vfs.zfs.scan_idle')[0].value = scan_idle

    @accepts()
    async def import_find(self):
        """
        Get a list of pools available for import with the following details:
        name, guid, status, hostname.
        """

        existing_guids = [
            i['guid'] for i in await self.middleware.call('pool.query')
        ]

        for pool in await self.middleware.call('zfs.pool.find_import'):
            if pool['status'] == 'UNAVAIL':
                continue
            # Exclude pools with same guid as existing pools (in database)
            # It could be the pool is in the database but was exported/detached for some reason
            # See #6808
            if pool['guid'] in existing_guids:
                continue
            entry = {}
            for i in ('name', 'guid', 'status', 'hostname'):
                entry[i] = pool[i]
            yield entry

    @accepts(
        Dict(
            'pool_import',
            Str('guid', required=True),
            Str('name'),
            Str('passphrase', private=True),
            List('devices', items=[Str('device')]),
        ))
    @job(lock='import_pool', pipes=['input'], check_pipes=False)
    async def import_pool(self, job, data):
        """
        Import a pool.

        Errors:
            ENOENT - Pool not found
        """

        pool = None
        for p in await self.middleware.call('zfs.pool.find_import'):
            if p['guid'] == data['guid']:
                pool = p
                break
        if pool is None:
            raise CallError(f'Pool with guid "{data["guid"]}" not found',
                            errno.ENOENT)

        if data['devices']:
            job.check_pipe("input")
            args = [job.pipes.input.r, data['passphrase'], data['devices']]
        else:
            args = []

        await self.middleware.call('notifier.volume_import',
                                   data.get('name') or pool['name'],
                                   data['guid'], *args)
        return True

    @accepts(Str('volume'), Str('fs_type'),
             Dict('fs_options', additional_attrs=True), Str('dst_path'))
    @job(lock=lambda args: 'volume_import', logs=True)
    async def import_disk(self, job, volume, fs_type, fs_options, dst_path):
        job.set_progress(None, description="Mounting")

        src = os.path.join('/var/run/importcopy/tmpdir',
                           os.path.relpath(volume, '/'))

        if os.path.exists(src):
            os.rmdir(src)

        try:
            os.makedirs(src)

            async with KernelModuleContextManager({
                    "msdosfs": "msdosfs_iconv",
                    "ntfs": "fuse"
            }.get(fs_type)):
                async with MountFsContextManager(self.middleware, volume, src,
                                                 fs_type, fs_options, ["ro"]):
                    job.set_progress(None, description="Importing")

                    line = [
                        '/usr/local/bin/rsync', '--info=progress2',
                        '--modify-window=1', '-rltvh', '--no-perms', src + '/',
                        dst_path
                    ]
                    rsync_proc = await Popen(
                        line,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE,
                        bufsize=0,
                        preexec_fn=os.setsid,
                    )
                    try:
                        progress_buffer = JobProgressBuffer(job)
                        while True:
                            line = await rsync_proc.stdout.readline()
                            job.logs_fd.write(line)
                            if line:
                                try:
                                    line = line.decode("utf-8",
                                                       "ignore").strip()
                                    bits = re.split(r"\s+", line)
                                    if len(bits) == 6 and bits[1].endswith(
                                            "%") and bits[1][:-1].isdigit():
                                        progress_buffer.set_progress(
                                            int(bits[1][:-1]))
                                    elif not line.endswith('/'):
                                        if (line not in [
                                                'sending incremental file list'
                                        ] and 'xfr#' not in line):
                                            progress_buffer.set_progress(
                                                None, extra=line)
                                except Exception:
                                    logger.warning(
                                        'Parsing error in rsync task',
                                        exc_info=True)
                            else:
                                break

                        progress_buffer.flush()
                        await rsync_proc.wait()
                        if rsync_proc.returncode != 0:
                            raise Exception("rsync failed with exit code %r" %
                                            rsync_proc.returncode)
                    except asyncio.CancelledError:
                        rsync_proc.kill()
                        raise

                    job.set_progress(100, description="Done", extra="")
        finally:
            os.rmdir(src)

    @accepts()
    def import_disk_msdosfs_locales(self):
        return [
            locale.strip()
            for locale in subprocess.check_output(["locale", "-a"],
                                                  encoding="utf-8").split("\n")
            if locale.strip()
        ]

    """
    These methods are hacks for old UI which supports only one volume import at a time
    """

    dismissed_import_disk_jobs = set()

    @private
    async def get_current_import_disk_job(self):
        import_jobs = await self.middleware.call(
            'core.get_jobs', [('method', '=', 'pool.import_disk')])
        not_dismissed_import_jobs = [
            job for job in import_jobs
            if job["id"] not in self.dismissed_import_disk_jobs
        ]
        if not_dismissed_import_jobs:
            return not_dismissed_import_jobs[0]

    @private
    async def dismiss_current_import_disk_job(self):
        current_import_job = await self.get_current_import_disk_job()
        if current_import_job:
            self.dismissed_import_disk_jobs.add(current_import_job["id"])
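
The import_disk job above reports progress by scanning rsync's --info=progress2 output for a six-field line whose second field is a percentage. A minimal sketch of that parsing step, with hypothetical sample lines:

# Sketch only: parse_rsync_progress is illustrative and not part of the
# middleware API.
import re


def parse_rsync_progress(line):
    """Return the percentage from an rsync progress line, or None if absent."""
    bits = re.split(r'\s+', line.strip())
    if len(bits) == 6 and bits[1].endswith('%') and bits[1][:-1].isdigit():
        return int(bits[1][:-1])
    return None


if __name__ == '__main__':
    sample = '1,234,567  42%   10.5MB/s    0:00:12 (xfr#3, to-chk=7/20)'
    print(parse_rsync_progress(sample))                           # -> 42
    print(parse_rsync_progress('sending incremental file list'))  # -> None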
Exemplo n.º 16
0
class OpenVPNServerService(SystemServiceService):
    class Config:
        namespace = 'openvpn.server'
        service = 'openvpn_server'
        service_model = 'openvpnserver'
        service_verb = 'restart'
        datastore_extend = 'openvpn.server.server_extend'

    @private
    async def server_extend(self, data):
        data['server_certificate'] = (
            data['server_certificate']['id'] if data['server_certificate'] else None
        )
        data['root_ca'] = data['root_ca']['id'] if data['root_ca'] else None
        data['tls_crypt_auth_enabled'] = bool(data['tls_crypt_auth'])
        return data

    @private
    async def config_valid(self):
        config = await self.config()
        if not config['root_ca']:
            raise CallError('Please configure root_ca first.')
        else:
            if not await self.middleware.call(
                    'certificateauthority.query',
                [['id', '=', config['root_ca']], ['revoked', '=', False]]):
                raise CallError(
                    'Root CA has been revoked. Please select another Root CA.')

        if not config['server_certificate']:
            raise CallError('Please configure server certificate first.')
        else:
            if not await self.middleware.call('certificate.query', [[
                    'id', '=', config['server_certificate']
            ], ['revoked', '=', False]]):
                raise CallError(
                    'Server certificate has been revoked. Please select another Server certificate.'
                )

        if not await self.validate_nobind(config):
            raise CallError(
                'Please enable "nobind" on OpenVPN Client to concurrently run OpenVPN Server/Client '
                'on the same local port without any issues.')

    @accepts()
    async def authentication_algorithm_choices(self):
        """
        Returns a dictionary of valid authentication algorithms which can be used with OpenVPN server.
        """
        return OpenVPN.digests()

    @accepts()
    async def cipher_choices(self):
        """
        Returns a dictionary of valid ciphers which can be used with OpenVPN server.
        """
        return OpenVPN.ciphers()

    @private
    async def validate(self, data, schema_name):
        verrors, data = await OpenVPN.common_validation(
            self.middleware, data, schema_name, 'server')

        if not await self.validate_nobind(data):
            verrors.add(
                f'{schema_name}.nobind',
                'Please enable "nobind" on OpenVPN Client to concurrently run OpenVPN Server/Client '
                'on the same local port without any issues.')

        if ipaddress.ip_address(
                data['server']).version == 4 and data['netmask'] > 32:
            verrors.add(
                f'{schema_name}.netmask',
                'For IPv4 server addresses please provide a netmask value from 0-32.'
            )

        verrors.check()

        return data

    @private
    async def validate_nobind(self, config):
        client_config = await self.middleware.call('openvpn.client.config')
        if (await self.middleware.call('service.started', 'openvpn_client')
                and config['port'] == client_config['port']
                and not client_config['nobind']):
            return False
        else:
            return True

    @private
    async def generate_static_key(self):
        keyfile = tempfile.NamedTemporaryFile(mode='w+', dir='/tmp/')
        await run(['openvpn', '--genkey', '--secret', keyfile.name])
        keyfile.seek(0)
        key = keyfile.read()
        keyfile.close()
        return key.strip()

    @accepts()
    async def renew_static_key(self):
        """
        Reset OpenVPN server's TLS static key which will be used to encrypt/authenticate control channel packets.
        """
        return await self.update({
            'tls_crypt_auth': (await self.generate_static_key()),
            'tls_crypt_auth_enabled':
            True
        })

    @accepts(Int('client_certificate_id'), Str('server_address', null=True))
    async def client_configuration_generation(self,
                                              client_certificate_id,
                                              server_address=None):
        """
        Returns a configuration for OpenVPN client which can be used with any client to connect to FN/TN OpenVPN
        server.

        `client_certificate_id` should be a valid certificate issued for use with OpenVPN client service.

        `server_address`, if specified, auto-fills the remote directive in the OpenVPN configuration so the end
        user can connect to the OpenVPN server without editing the file.
        """
        await self.config_valid()
        config = await self.config()
        root_ca = await self.middleware.call('certificateauthority.query',
                                             [['id', '=', config['root_ca']]],
                                             {'get': True})
        client_cert = await self.middleware.call(
            'certificate.query',
            [['id', '=', client_certificate_id], ['revoked', '=', False]])
        if not client_cert:
            raise CallError(
                'Please provide a client certificate id for a certificate which exists on '
                'the system and hasn\'t been marked as revoked.')
        else:
            client_cert = client_cert[0]
            if (await OpenVPN.common_validation(
                    self.middleware, {
                        **config, 'client_certificate': client_certificate_id
                    }, '', 'client'))[0]:
                raise CallError(
                    'Please ensure provided client certificate exists in Root CA chain '
                    'and has necessary extensions set.')

        client_config = [
            'client',
            f'dev {config["device_type"].lower()}',
            f'proto {config["protocol"].lower()}',
            f'port {config["port"]}',
            f'remote "{server_address or "PLEASE FILL OUT SERVER DOMAIN/IP HERE"}"',
            'user nobody',
            'group nobody',
            'persist-key',
            'persist-tun',
            '<ca>',
            f'{root_ca["certificate"]}',
            '</ca>',
            '<cert>',
            client_cert['certificate'],
            '</cert>',
            '<key>',
            client_cert['privatekey'],
            '</key>',
            'verb 3',
            'remote-cert-tls server',
            f'compress {config["compression"].lower()}'
            if config['compression'] else None,
            f'auth {config["authentication_algorithm"]}'
            if config['authentication_algorithm'] else None,
            f'cipher {config["cipher"]}' if config['cipher'] else None,
        ]

        if config['tls_crypt_auth_enabled']:
            client_config.extend(
                ['<tls-crypt>', config['tls_crypt_auth'], '</tls-crypt>'])

        return '\n'.join(filter(bool, client_config)).strip()

    @accepts(
        Dict('openvpn_server_update',
             Bool('tls_crypt_auth_enabled'),
             Int('netmask', validators=[Range(min=0, max=128)]),
             Int('server_certificate'),
             Int('port', validators=[Port()]),
             Int('root_ca'),
             IPAddr('server'),
             Str('additional_parameters'),
             Str('authentication_algorithm', null=True),
             Str('cipher', null=True),
             Str('compression', null=True, enum=['LZO', 'LZ4']),
             Str('device_type', enum=['TUN', 'TAP']),
             Str('protocol', enum=PROTOCOLS),
             Str('tls_crypt_auth', null=True),
             Str('topology', null=True, enum=['NET30', 'P2P', 'SUBNET']),
             update=True))
    async def do_update(self, data):
        """
        Update OpenVPN Server configuration.

        When `tls_crypt_auth_enabled` is enabled and `tls_crypt_auth` is not provided, a static key is automatically
        generated to be used with the OpenVPN server.
        """
        old_config = await self.config()
        config = old_config.copy()

        config.update(data)

        # If tls_crypt_auth_enabled is set and we don't have a tls_crypt_auth
        # key, generate one.
        if config['tls_crypt_auth_enabled'] and not config['tls_crypt_auth']:
            config['tls_crypt_auth'] = await self.generate_static_key()

        config = await self.validate(config, 'openvpn_server_update')

        await self._update_service(old_config, config)

        return await self.config()
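
client_configuration_generation above builds the .ovpn text as a flat list in which optional directives are None, then drops the empty entries with filter(bool, ...) before joining. A minimal sketch of that assembly pattern, with hypothetical arguments in place of the service config:

# Sketch only: build_client_config is illustrative and not part of the
# middleware API.
def build_client_config(device_type, protocol, port, remote, cipher=None):
    lines = [
        'client',
        f'dev {device_type.lower()}',
        f'proto {protocol.lower()}',
        f'port {port}',
        f'remote "{remote}"',
        'persist-key',
        'persist-tun',
        f'cipher {cipher}' if cipher else None,  # optional directive
    ]
    return '\n'.join(filter(bool, lines)).strip()


if __name__ == '__main__':
    print(build_client_config('TUN', 'UDP', 1194, '192.0.2.10', cipher='AES-256-GCM'))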
Exemplo n.º 17
0
class PoolDatasetService(CRUDService):
    class Config:
        namespace = 'pool.dataset'

    @filterable
    def query(self, filters, options):
        # Optimization for cases in which filters can be applied directly at zfs.dataset.query
        zfsfilters = []
        for f in filters:
            if len(f) == 3:
                if f[0] in ('id', 'name', 'pool', 'type'):
                    zfsfilters.append(f)
        datasets = self.middleware.call_sync('zfs.dataset.query', zfsfilters,
                                             None)
        return filter_list(self.__transform(datasets), filters, options)

    def __transform(self, datasets):
        """
        We need to transform the data zfs gives us to make it consistent/user-friendly,
        making it match whatever pool.dataset.{create,update} uses as input.
        """
        def transform(dataset):
            for orig_name, new_name, method in (
                ('org.freenas:description', 'comments', None),
                ('dedup', 'deduplication', str.upper),
                ('atime', None, str.upper),
                ('casesensitivity', None, str.upper),
                ('exec', None, str.upper),
                ('sync', None, str.upper),
                ('compression', None, str.upper),
                ('compressratio', None, None),
                ('origin', None, None),
                ('quota', None, _null),
                ('refquota', None, _null),
                ('reservation', None, _null),
                ('refreservation', None, _null),
                ('copies', None, None),
                ('snapdir', None, str.upper),
                ('readonly', None, str.upper),
                ('recordsize', None, None),
                ('sparse', None, None),
                ('volsize', None, None),
                ('volblocksize', None, None),
            ):
                if orig_name not in dataset['properties']:
                    continue
                i = new_name or orig_name
                dataset[i] = dataset['properties'][orig_name]
                if method:
                    dataset[i]['value'] = method(dataset[i]['value'])
            del dataset['properties']

            if dataset['type'] == 'FILESYSTEM':
                dataset['share_type'] = self.middleware.call_sync(
                    'notifier.get_dataset_share_type',
                    dataset['name'],
                ).upper()
            else:
                dataset['share_type'] = None

            rv = []
            for child in dataset['children']:
                rv.append(transform(child))
            dataset['children'] = rv
            return dataset

        rv = []
        for dataset in datasets:
            rv.append(transform(dataset))
        return rv

    @accepts(
        Dict(
            'pool_dataset_create',
            Str('name', required=True),
            Str('type', enum=['FILESYSTEM', 'VOLUME'], default='FILESYSTEM'),
            Int('volsize'),
            Str('volblocksize',
                enum=[
                    '512',
                    '1K',
                    '2K',
                    '4K',
                    '8K',
                    '16K',
                    '32K',
                    '64K',
                    '128K',
                ]),
            Bool('sparse'),
            Str('comments'),
            Str('sync', enum=[
                'STANDARD',
                'ALWAYS',
                'DISABLED',
            ]),
            Str('compression',
                enum=[
                    'OFF',
                    'LZ4',
                    'GZIP-1',
                    'GZIP-6',
                    'GZIP-9',
                    'ZLE',
                    'LZJB',
                ]),
            Str('atime', enum=['ON', 'OFF']),
            Str('exec', enum=['ON', 'OFF']),
            Int('quota'),
            Int('refquota'),
            Int('reservation'),
            Int('refreservation'),
            Int('copies'),
            Str('snapdir', enum=['VISIBLE', 'HIDDEN']),
            Str('deduplication', enum=['ON', 'VERIFY', 'OFF']),
            Str('readonly', enum=['ON', 'OFF']),
            Str('recordsize',
                enum=[
                    '512',
                    '1K',
                    '2K',
                    '4K',
                    '8K',
                    '16K',
                    '32K',
                    '64K',
                    '128K',
                    '256K',
                    '512K',
                    '1024K',
                ]),
            Str('casesensitivity', enum=['SENSITIVE', 'INSENSITIVE', 'MIXED']),
            Str('share_type', enum=['UNIX', 'WINDOWS', 'MAC']),
            register=True,
        ))
    async def do_create(self, data):
        """
        Creates a dataset/zvol.

        `volsize` is required for type=VOLUME and is supposed to be a multiple of the block size.
        """

        verrors = ValidationErrors()
        await self.__common_validation(verrors, 'pool_dataset_create', data,
                                       'CREATE')
        if verrors:
            raise verrors

        props = {}
        for i, real_name, transform in (
            ('atime', None, str.lower),
            ('casesensitivity', None, str.lower),
            ('comments', 'org.freenas:description', None),
            ('compression', None, str.lower),
            ('copies', None, lambda x: str(x)),
            ('deduplication', 'dedup', str.lower),
            ('exec', None, str.lower),
            ('quota', None, _none),
            ('readonly', None, str.lower),
            ('recordsize', None, None),
            ('refquota', None, _none),
            ('refreservation', None, _none),
            ('reservation', None, _none),
            ('snapdir', None, str.lower),
            ('sparse', None, None),
            ('sync', None, str.lower),
            ('volblocksize', None, None),
            ('volsize', None, lambda x: str(x)),
        ):
            if i not in data:
                continue
            name = real_name or i
            props[name] = data[i] if not transform else transform(data[i])

        await self.middleware.call('zfs.dataset.create', {
            'name': data['name'],
            'type': data['type'],
            'properties': props,
        })

        data['id'] = data['name']

        await self.middleware.call('zfs.dataset.mount', data['name'])

        if data['type'] == 'FILESYSTEM':
            await self.middleware.call('notifier.change_dataset_share_type',
                                       data['name'],
                                       data.get('share_type', 'UNIX').lower())

        return await self._get_instance(data['id'])

    def _add_inherit(name):
        def add(attr):
            attr.enum.append('INHERIT')

        return {'name': name, 'method': add}

    @accepts(
        Str('id', required=True),
        Patch(
            'pool_dataset_create',
            'pool_dataset_update',
            ('rm', {
                'name': 'name'
            }),
            ('rm', {
                'name': 'type'
            }),
            ('rm', {
                'name': 'casesensitivity'
            }),  # It's a read-only attribute
            ('rm', {
                'name': 'sparse'
            }),  # Create time only attribute
            ('rm', {
                'name': 'volblocksize'
            }),  # Create time only attribute
            ('edit', _add_inherit('atime')),
            ('edit', _add_inherit('exec')),
            ('edit', _add_inherit('sync')),
            ('edit', _add_inherit('compression')),
            ('edit', _add_inherit('deduplication')),
            ('edit', _add_inherit('readonly')),
            ('edit', _add_inherit('recordsize')),
            ('edit', _add_inherit('snapdir')),
            ('attr', {
                'update': True
            }),
        ))
    async def do_update(self, id, data):
        """
        Updates a dataset/zvol `id`.
        """

        verrors = ValidationErrors()

        dataset = await self.middleware.call('pool.dataset.query',
                                             [('id', '=', id)])
        if not dataset:
            verrors.add('id', f'{id} does not exist', errno.ENOENT)
        else:
            data['type'] = dataset[0]['type']
            await self.__common_validation(verrors, 'pool_dataset_update',
                                           data, 'UPDATE')
        if verrors:
            raise verrors

        props = {}
        for i, real_name, transform, inheritable in (
            ('atime', None, str.lower, True),
            ('comments', 'org.freenas:description', None, False),
            ('sync', None, str.lower, True),
            ('compression', None, str.lower, True),
            ('deduplication', 'dedup', str.lower, True),
            ('exec', None, str.lower, True),
            ('quota', None, _none, False),
            ('refquota', None, _none, False),
            ('reservation', None, _none, False),
            ('refreservation', None, _none, False),
            ('copies', None, None, False),
            ('snapdir', None, str.lower, True),
            ('readonly', None, str.lower, True),
            ('recordsize', None, None, True),
            ('volsize', None, lambda x: str(x), False),
        ):
            if i not in data:
                continue
            name = real_name or i
            if inheritable and data[i] == 'INHERIT':
                props[name] = {'source': 'INHERIT'}
            else:
                props[name] = {
                    'value': data[i] if not transform else transform(data[i])
                }

        rv = await self.middleware.call('zfs.dataset.update', id,
                                        {'properties': props})

        if data['type'] == 'FILESYSTEM' and 'share_type' in data:
            await self.middleware.call('notifier.change_dataset_share_type',
                                       id, data['share_type'].lower())

        return rv

    async def __common_validation(self, verrors, schema, data, mode):
        assert mode in ('CREATE', 'UPDATE')

        if data['type'] == 'FILESYSTEM':
            for i in ('sparse', 'volsize', 'volblocksize'):
                if i in data:
                    verrors.add(f'{schema}.{i}',
                                'This field is not valid for FILESYSTEM')
        elif data['type'] == 'VOLUME':
            if mode == 'CREATE' and 'volsize' not in data:
                verrors.add(f'{schema}.volsize',
                            'This field is required for VOLUME')

            for i in (
                    'atime',
                    'casesensitivity',
                    'quota',
                    'refquota',
                    'recordsize',
                    'share_type',
            ):
                if i in data:
                    verrors.add(f'{schema}.{i}',
                                'This field is not valid for VOLUME')

    @accepts(Str('id'))
    async def do_delete(self, id):
        return await self.middleware.call('zfs.dataset.delete', id)

    @item_method
    @accepts(Str('id'))
    async def promote(self, id):
        """
        Promote the cloned dataset `id`
        """
        dataset = await self.middleware.call('zfs.dataset.query',
                                             [('id', '=', id)])
        if not dataset:
            raise CallError(f'Dataset "{id}" does not exist.', errno.ENOENT)
        if not dataset[0]['properties']['origin']['value']:
            raise CallError('Only cloned datasets can be promoted.',
                            errno.EBADMSG)
        return await self.middleware.call('zfs.dataset.promote', id)

    @accepts(Str('id', default=None, required=True),
             Dict('pool_dataset_permission', Str('user'), Str('group'),
                  UnixPerm('mode'),
                  Str('acl', enum=['UNIX', 'MAC', 'WINDOWS'], default='UNIX'),
                  Bool('recursive', default=False)))
    @item_method
    async def permission(self, id, data):

        path = (await self._get_instance(id))['mountpoint']
        user = data.get('user', None)
        group = data.get('group', None)
        mode = data.get('mode', None)
        recursive = data.get('recursive', False)
        acl = data['acl']
        verrors = ValidationErrors()

        if (acl == 'UNIX' or acl == 'MAC') and mode is None:
            verrors.add('pool_dataset_permission.mode',
                        'This field is required')

        if verrors:
            raise verrors

        await self.middleware.call('notifier.mp_change_permission', path, user,
                                   group, mode, recursive, acl.lower())
        return data
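
pool.dataset.do_create above converts its user-facing fields into ZFS properties through a (field, real_name, transform) table: the middleware name may map to a different ZFS property name, and the value may need lower-casing or stringification. A minimal sketch of that mapping loop with a trimmed table and a hypothetical input dict:

# Sketch only: to_zfs_props and the sample data are illustrative, not part of
# the middleware API.
def to_zfs_props(data):
    mapping = (
        ('comments', 'org.freenas:description', None),
        ('compression', None, str.lower),
        ('deduplication', 'dedup', str.lower),
        ('volsize', None, str),
    )
    props = {}
    for field, real_name, transform in mapping:
        if field not in data:
            continue
        name = real_name or field  # fall back to the middleware field name
        props[name] = transform(data[field]) if transform else data[field]
    return props


if __name__ == '__main__':
    print(to_zfs_props({'compression': 'LZ4', 'comments': 'media', 'volsize': 1048576}))
    # -> {'org.freenas:description': 'media', 'compression': 'lz4', 'volsize': '1048576'}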
Exemplo n.º 18
0
class OpenVPNClientService(SystemServiceService):
    class Config:
        namespace = 'openvpn.client'
        service = 'openvpn_client'
        service_model = 'openvpnclient'
        service_verb = 'restart'
        datastore_extend = 'openvpn.client.client_extend'

    @private
    async def client_extend(self, data):
        data['client_certificate'] = (
            data['client_certificate']['id'] if data['client_certificate'] else None
        )
        data['root_ca'] = data['root_ca']['id'] if data['root_ca'] else None
        data['tls_crypt_auth_enabled'] = bool(data['tls_crypt_auth'])
        return data

    @accepts()
    async def authentication_algorithm_choices(self):
        """
        Returns a dictionary of valid authentication algorithms which can be used with OpenVPN client.
        """
        return OpenVPN.digests()

    @accepts()
    async def cipher_choices(self):
        """
        Returns a dictionary of valid ciphers which can be used with OpenVPN client.
        """
        return OpenVPN.ciphers()

    @private
    async def validate(self, data, schema_name):
        verrors, data = await OpenVPN.common_validation(
            self.middleware, data, schema_name, 'client')

        if not data.get('remote'):
            verrors.add(f'{schema_name}.remote', 'This field is required.')

        if not await self.validate_nobind(data):
            verrors.add(
                f'{schema_name}.nobind',
                'Please enable this to concurrently run OpenVPN Server/Client on the same local port.'
            )

        verrors.check()

        return data

    @private
    async def validate_nobind(self, config):
        if (await self.middleware.call('service.started', 'openvpn_server')
                and config['port']
                == (await
                    self.middleware.call('openvpn.server.config'))['port']
                and not config['nobind']):
            return False
        else:
            return True

    @private
    async def config_valid(self):
        config = await self.config()
        if not config['root_ca']:
            raise CallError('Please configure root_ca first.')
        else:
            if not await self.middleware.call(
                    'certificateauthority.query',
                [['id', '=', config['root_ca']], ['revoked', '=', False]]):
                raise CallError(
                    'Root CA has been revoked. Please select another Root CA.')

        if not config['client_certificate']:
            raise CallError('Please configure client certificate first.')
        else:
            if not await self.middleware.call('certificate.query', [[
                    'id', '=', config['client_certificate']
            ], ['revoked', '=', False]]):
                raise CallError(
                    'Client certificate has been revoked. Please select another Client certificate.'
                )

        if not config['remote']:
            raise CallError('Please configure remote first.')

        if not await self.validate_nobind(config):
            raise CallError(
                'Please enable "nobind" to concurrently run OpenVPN Server/Client on the same local port.'
            )

    @accepts(
        Dict('openvpn_client_update',
             Bool('nobind'),
             Bool('tls_crypt_auth_enabled'),
             Int('client_certificate'),
             Int('root_ca'),
             Int('port', validators=[Port()]),
             Str('additional_parameters'),
             Str('authentication_algorithm', null=True),
             Str('cipher', null=True),
             Str('compression', null=True, enum=['LZO', 'LZ4']),
             Str('device_type', enum=['TUN', 'TAP']),
             Str('protocol', enum=PROTOCOLS),
             Str('remote'),
             Str('tls_crypt_auth', null=True),
             update=True))
    async def do_update(self, data):
        """
        Update OpenVPN Client configuration.

        `remote` can be a valid IP address or domain name which OpenVPN will try to connect to.

        `nobind` must be enabled if OpenVPN client / server are to run concurrently.
        """
        old_config = await self.config()
        config = old_config.copy()

        config.update(data)

        config = await self.validate(config, 'openvpn_client_update')

        await self._update_service(old_config, config)

        return await self.config()
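
Both OpenVPN services above enforce the same nobind rule: the client may share the server's local port only when "nobind" is enabled, so the two daemons do not fight over the bind. A minimal sketch of that check, with hypothetical inputs standing in for the service.started and config calls:

# Sketch only: nobind_ok is illustrative and not part of the middleware API.
def nobind_ok(server_running, server_port, client_config):
    if (server_running
            and client_config['port'] == server_port
            and not client_config['nobind']):
        return False
    return True


if __name__ == '__main__':
    print(nobind_ok(True, 1194, {'port': 1194, 'nobind': False}))   # -> False
    print(nobind_ok(True, 1194, {'port': 1194, 'nobind': True}))    # -> True
    print(nobind_ok(False, 1194, {'port': 1194, 'nobind': False}))  # -> True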
Exemplo n.º 19
0
class PoolScrubService(CRUDService):
    class Config:
        datastore = 'storage.scrub'
        datastore_extend = 'pool.scrub.pool_scrub_extend'
        datastore_prefix = 'scrub_'
        namespace = 'pool.scrub'

    @private
    async def pool_scrub_extend(self, data):
        data['pool'] = data.pop('volume')
        data['pool'] = data['pool']['id']
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        pool_pk = data.get('pool')
        if pool_pk:
            pool_obj = await self.middleware.call('datastore.query',
                                                  'storage.volume',
                                                  [('id', '=', pool_pk)])

            if len(pool_obj) == 0:
                verrors.add(f'{schema}.pool',
                            'The specified volume does not exist')
            elif ('id' not in data.keys() or
                  ('id' in data.keys() and 'original_pool_id' in data.keys()
                   and pool_pk != data['original_pool_id'])):
                scrub_obj = await self.query(filters=[('volume_id', '=',
                                                       pool_pk)])
                if len(scrub_obj) != 0:
                    verrors.add(f'{schema}.pool',
                                'A scrub with this pool already exists')

        return verrors, data

    @accepts(
        Dict('pool_scrub_create',
             Int('pool', validators=[Range(min=1)], required=True),
             Int('threshold', validators=[Range(min=0)]),
             Str('description'),
             Cron('schedule'),
             Bool('enabled'),
             register=True))
    async def do_create(self, data):
        verrors, data = await self.validate_data(data, 'pool_scrub_create')

        if verrors:
            raise verrors

        data['volume'] = data.pop('pool')
        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.middleware.call('service.restart', 'cron',
                                   {'onetime': False})

        return await self.query(filters=[('id', '=', data['id'])],
                                options={'get': True})

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('pool_scrub_create', 'pool_scrub_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        task_data = await self.query(filters=[('id', '=', id)],
                                     options={'get': True})
        original_data = task_data.copy()
        task_data['original_pool_id'] = original_data['pool']
        task_data.update(data)
        verrors, task_data = await self.validate_data(task_data,
                                                      'pool_scrub_update')

        if verrors:
            raise verrors

        task_data.pop('original_pool_id')
        Cron.convert_schedule_to_db_format(task_data)
        Cron.convert_schedule_to_db_format(original_data)

        if len(set(task_data.items()) ^ set(original_data.items())) > 0:

            task_data['volume'] = task_data.pop('pool')

            await self.middleware.call(
                'datastore.update', self._config.datastore, id, task_data,
                {'prefix': self._config.datastore_prefix})

            await self.middleware.call('service.restart', 'cron',
                                       {'onetime': False})

        return await self.query(filters=[('id', '=', id)],
                                options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        await self.middleware.call('service.restart', 'cron',
                                   {'onetime': False})
        return response
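
pool.scrub.do_update above only writes to the datastore and restarts cron when something actually changed, which it detects with the symmetric difference of the two dicts' item sets (this works because all scrub fields are hashable). A minimal sketch of that idiom with hypothetical task dicts:

# Sketch only: has_changes and the sample dicts are illustrative, not part of
# the middleware API.
def has_changes(old, new):
    return len(set(old.items()) ^ set(new.items())) > 0


if __name__ == '__main__':
    old = {'threshold': 35, 'enabled': True}
    print(has_changes(old, {'threshold': 35, 'enabled': True}))  # -> False
    print(has_changes(old, {'threshold': 10, 'enabled': True}))  # -> True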
Exemplo n.º 20
0
class PoolService(CRUDService):

    @filterable
    async def query(self, filters=None, options=None):
        filters = filters or []
        options = options or {}
        options['extend'] = 'pool.pool_extend'
        options['prefix'] = 'vol_'
        return await self.middleware.call('datastore.query', 'storage.volume', filters, options)

    @private
    async def pool_extend(self, pool):
        pool.pop('fstype', None)

        """
        If pool is encrypted we need to check if the pool is imported
        or if all geli providers exist.
        """
        try:
            zpool = libzfs.ZFS().get(pool['name'])
        except libzfs.ZFSException:
            zpool = None

        if zpool:
            pool['status'] = zpool.status
            pool['scan'] = zpool.scrub.__getstate__()
        else:
            pool.update({
                'status': 'OFFLINE',
                'scan': None,
            })

        if pool['encrypt'] > 0:
            if zpool:
                pool['is_decrypted'] = True
            else:
                decrypted = True
                for ed in await self.middleware.call('datastore.query', 'storage.encrypteddisk', [('encrypted_volume', '=', pool['id'])]):
                    if not os.path.exists(f'/dev/{ed["encrypted_provider"]}.eli'):
                        decrypted = False
                        break
                pool['is_decrypted'] = decrypted
        else:
            pool['is_decrypted'] = True
        return pool

    @item_method
    @accepts(Int('id'))
    async def get_disks(self, oid):
        """
        Get all disks from a given pool `id`.
        """
        pool = await self.query([('id', '=', oid)], {'get': True})
        if not pool['is_decrypted']:
            # Nothing to enumerate for a locked (not decrypted) pool.
            yield
            return
        async for i in await self.middleware.call('zfs.pool.get_disks', pool['name']):
            yield i

    @private
    def configure_resilver_priority(self):
        """
        Configure resilver priority based on user selected off-peak hours.
        """
        resilver = self.middleware.call_sync('datastore.config', 'storage.resilver')

        if not resilver['enabled'] or not resilver['weekday']:
            return

        higher_prio = False
        weekdays = [int(x) for x in resilver['weekday'].split(',')]
        now = datetime.now()
        now_t = now.time()
        # end overlaps the day
        if resilver['begin'] > resilver['end']:
            if now.isoweekday() in weekdays and now_t >= resilver['begin']:
                higher_prio = True
            else:
                lastweekday = now.isoweekday() - 1
                if lastweekday == 0:
                    lastweekday = 7
                if lastweekday in weekdays and now_t < resilver['end']:
                    higher_prio = True
        # end does not overlap the day
        else:
            if now.isoweekday() in weekdays and now_t >= resilver['begin'] and now_t < resilver['end']:
                higher_prio = True

        if higher_prio:
            resilver_delay = 0
            resilver_min_time_ms = 9000
            scan_idle = 0
        else:
            resilver_delay = 2
            resilver_min_time_ms = 3000
            scan_idle = 50

        sysctl.filter('vfs.zfs.resilver_delay')[0].value = resilver_delay
        sysctl.filter('vfs.zfs.resilver_min_time_ms')[0].value = resilver_min_time_ms
        sysctl.filter('vfs.zfs.scan_idle')[0].value = scan_idle
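
The resilver priority logic above hinges on an off-peak window check that must handle windows wrapping past midnight. A minimal, self-contained sketch of the same check (in_offpeak_window is a hypothetical helper, not part of the service):

from datetime import time


def in_offpeak_window(weekday, now_t, begin, end, weekdays):
    """Return True when `now_t` on ISO `weekday` falls inside the
    begin..end window, handling windows that wrap past midnight."""
    if begin > end:
        # Window wraps midnight: inside if we are past `begin` on an allowed
        # day, or before `end` on the day after an allowed day.
        if weekday in weekdays and now_t >= begin:
            return True
        lastweekday = weekday - 1 or 7
        return lastweekday in weekdays and now_t < end
    return weekday in weekdays and begin <= now_t < end


# A Mon-Fri 22:00-06:00 window: 02:30 on Saturday is still inside, because
# Friday (5) is an allowed weekday and the window wraps midnight.
assert in_offpeak_window(6, time(2, 30), time(22, 0), time(6, 0), {1, 2, 3, 4, 5})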
Exemplo n.º 21
0
class CoreService(Service):

    class Config:
        cli_namespace = 'system.core'

    @accepts(Str('id'), Int('cols'), Int('rows'))
    async def resize_shell(self, id, cols, rows):
        """
        Resize terminal session (/websocket/shell) to cols x rows
        """
        shell = middlewared.main.ShellApplication.shells.get(id)
        if shell is None:
            raise CallError('Shell does not exist', errno.ENOENT)

        shell.resize(cols, rows)

    @filterable
    def sessions(self, filters, options):
        """
        Get currently open websocket sessions.
        """
        return filter_list([
            {
                'id': i.session_id,
                'socket_family': socket.AddressFamily(
                    i.request.transport.get_extra_info('socket').family
                ).name,
                'address': (
                    (
                        i.request.headers.get('X-Real-Remote-Addr'),
                        i.request.headers.get('X-Real-Remote-Port')
                    ) if i.request.headers.get('X-Real-Remote-Addr') else (
                        i.request.transport.get_extra_info("peername")
                    )
                ),
                'authenticated': i.authenticated,
                'call_count': i._softhardsemaphore.counter,
            }
            for i in self.middleware.get_wsclients().values()
        ], filters, options)

    @private
    def get_tasks(self):
        for task in asyncio.all_tasks(loop=self.middleware.loop):
            formatted = None
            frame = None
            frames = []
            for frame in task.get_stack():
                cur_frame = get_frame_details(frame, self.logger)
                if cur_frame:
                    frames.append(cur_frame)

            if frame:
                formatted = traceback.format_stack(frame)
            yield {
                'stack': formatted,
                'frames': frames,
            }

    @filterable
    def get_jobs(self, filters, options):
        """Get the long running jobs."""
        jobs = filter_list([
            i.__encode__() for i in list(self.middleware.jobs.all().values())
        ], filters, options)
        return jobs

    @accepts(Int('id'))
    @job()
    def job_wait(self, job, id):
        target_job = self.middleware.jobs.get(id)
        target_job.wait_sync()
        if target_job.error:
            raise CallError(target_job.error)
        else:
            return target_job.result

    @accepts(Int('id'), Dict(
        'job-update',
        Dict('progress', additional_attrs=True),
    ))
    def job_update(self, id, data):
        job = self.middleware.jobs.all()[id]
        progress = data.get('progress')
        if progress:
            job.set_progress(
                progress['percent'],
                description=progress.get('description'),
                extra=progress.get('extra'),
            )

    @private
    def notify_postinit(self):
        self.middleware.call_sync('migration.run')

        # Sentinel file to tell us we have gone far enough in the boot process.
        # See #17508
        open('/tmp/.bootready', 'w').close()

        # Send event to middlewared saying we are late enough in the process to call it ready
        self.middleware.call_sync(
            'core.event_send',
            'system',
            'ADDED',
            {'id': 'ready'}
        )

        # Let's setup periodic tasks now
        self.middleware._setup_periodic_tasks()

    @accepts(Int('id'))
    def job_abort(self, id):
        job = self.middleware.jobs.all()[id]
        return job.abort()

    @accepts(Bool('cli', default=False))
    def get_services(self, cli):
        """Returns a list of all registered services."""
        services = {}
        for k, v in list(self.middleware.get_services().items()):
            if v._config.private is True:
                continue
            if cli and v._config.cli_private:
                continue

            if is_service_class(v, CRUDService):
                _typ = 'crud'
            elif is_service_class(v, ConfigService):
                _typ = 'config'
            else:
                _typ = 'service'

            config = {k: v for k, v in list(v._config.__dict__.items())
                      if not k.startswith(('_', 'process_pool', 'thread_pool'))}
            if config['cli_description'] is None:
                if v.__doc__:
                    config['cli_description'] = inspect.getdoc(v).split("\n")[0].strip()

            services[k] = {
                'config': config,
                'type': _typ,
            }

        return services

    @accepts(Str('service', default=None, null=True), Bool('cli', default=False))
    def get_methods(self, service, cli):
        """
        Return methods metadata of every available service.

        `service` parameter is optional and filters the result for a single service.
        """
        data = {}
        for name, svc in list(self.middleware.get_services().items()):
            if service is not None and name != service:
                continue

            # Skip private services
            if svc._config.private:
                continue
            if cli and svc._config.cli_private:
                continue

            for attr in dir(svc):

                if attr.startswith('_'):
                    continue

                method = None
                # CRUD do_{update,delete} methods need to be accounted for
                # as "item_method", since they are just wrapped.
                item_method = None
                filterable_schema = None
                if is_service_class(svc, CRUDService):
                    """
                    For CRUD services the create/update/delete methods are special.
                    The real implementation happens in do_create/do_update/do_delete,
                    so that's where we actually extract pertinent information.
                    """
                    if attr in ('create', 'update', 'delete'):
                        method = getattr(svc, 'do_{}'.format(attr), None)
                        if method is None:
                            continue
                        if attr in ('update', 'delete'):
                            item_method = True
                    elif attr in ('do_create', 'do_update', 'do_delete'):
                        continue
                elif is_service_class(svc, ConfigService):
                    """
                    For Config services the update method is special.
                    The real implementation happens in do_update,
                    so that's where we actually extract pertinent information.
                    """
                    if attr == 'update':
                        original_name = 'do_{}'.format(attr)
                        if hasattr(svc, original_name):
                            method = getattr(svc, original_name, None)
                        else:
                            method = getattr(svc, attr)
                        if method is None:
                            continue
                    elif attr == 'do_update':
                        continue

                if method is None:
                    method = getattr(svc, attr, None)

                if method is None or not callable(method):
                    continue

                # Skip private methods
                if hasattr(method, '_private'):
                    continue
                if cli and hasattr(method, '_cli_private'):
                    continue

                # terminate is a private method used to clean up a service on shutdown
                if attr == 'terminate':
                    continue

                examples = defaultdict(list)
                doc = inspect.getdoc(method)
                if doc:
                    """
                    Allow method docstring to have sections in the format of:

                      .. section_name::

                    Currently the following sections are available:

                      .. examples:: - goes into `__all__` list in examples
                      .. examples(rest):: - goes into `rest` list in examples
                      .. examples(websocket):: - goes into `websocket` list in examples
                    """
                    sections = re.split(r'^\.\. (.+?)::$', doc, flags=re.M)
                    doc = sections[0]
                    for i in range(int((len(sections) - 1) / 2)):
                        idx = (i + 1) * 2 - 1
                        reg = re.search(r'examples(?:\((.+)\))?', sections[idx])
                        if reg is None:
                            continue
                        exname = reg.groups()[0]
                        if exname is None:
                            exname = '__all__'
                        examples[exname].append(sections[idx + 1])

                accepts = getattr(method, 'accepts', None)
                if accepts:
                    accepts = [i.to_json_schema() for i in accepts if not getattr(i, 'hidden', False)]

                    names = set()
                    for i in accepts:
                        names.add(i['_name_'])

                        if i.get('type') == 'object':
                            for j in i['properties'].values():
                                names.add(j['_name_'])

                    args_descriptions_doc = doc
                    if attr == 'update':
                        if do_create := getattr(svc, 'do_create', None):
                            args_descriptions_doc += "\n" + inspect.getdoc(do_create)

                    args_descriptions = self._cli_args_descriptions(args_descriptions_doc, names)

                    for i in accepts:
                        if not i.get('description') and i['_name_'] in args_descriptions:
                            i['description'] = args_descriptions[i['_name_']]

                        if i.get('type') == 'object':
                            for j in i['properties'].values():
                                if not j.get('description') and j['_name_'] in args_descriptions:
                                    j['description'] = args_descriptions[j['_name_']]

                data['{0}.{1}'.format(name, attr)] = {
                    'description': doc,
                    'cli_description': (doc or '').split('.')[0].replace('\n', ' '),
                    'examples': examples,
                    'accepts': accepts,
                    'item_method': True if item_method else hasattr(method, '_item_method'),
                    'no_auth_required': hasattr(method, '_no_auth_required'),
                    'filterable': hasattr(method, '_filterable'),
                    'filterable_schema': None,
                    'pass_application': hasattr(method, '_pass_app'),
                    'require_websocket': hasattr(method, '_pass_app') and not method._pass_app['rest'],
                    'job': hasattr(method, '_job'),
                    'downloadable': hasattr(method, '_job') and 'output' in method._job['pipes'],
                    'uploadable': hasattr(method, '_job') and 'input' in method._job['pipes'],
                    'require_pipes': hasattr(method, '_job') and method._job['check_pipes'] and any(
                        i in method._job['pipes'] for i in ('input', 'output')
                    ),
                }

            if is_service_class(svc, CRUDService):
                # FIXME: Find a better solution
                if f'{name}.create' in data:
                    data[f'{name}.query']['filterable_schema'] = data[f'{name}.create']['accepts'][0]
                else:
                    data[f'{name}.query']['filterable_schema'] = []

        return data
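
The sections logic in get_methods expects docstrings carrying `.. examples::` markers. A small sketch of such a docstring and the same parsing applied to it (the method name and payload are made up):

import re
from collections import defaultdict

doc = '''Do something useful.

.. examples(websocket)::

    {"method": "some.service.method", "params": []}
'''

# Same split used above: section markers of the form ".. name::".
sections = re.split(r'^\.\. (.+?)::$', doc, flags=re.M)
examples = defaultdict(list)
for i in range(int((len(sections) - 1) / 2)):
    idx = (i + 1) * 2 - 1
    reg = re.search(r'examples(?:\((.+)\))?', sections[idx])
    if reg:
        examples[reg.groups()[0] or '__all__'].append(sections[idx + 1])

print(dict(examples))  # {'websocket': [' ...body of the section... ']}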
Exemplo n.º 22
0
class JailService(CRUDService):

    class Config:
        process_pool = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # We want debug for jails starting/stopping
        os.environ['IOCAGE_DEBUG'] = 'TRUE'

    @filterable
    def query(self, filters=None, options=None):
        """
        Query all jails with `query-filters` and `query-options`.
        """
        options = options or {}
        jail_identifier = None
        jails = []

        if filters and len(filters) == 1 and list(
                filters[0][:2]) == ['host_hostuuid', '=']:
            jail_identifier = filters[0][2]

        recursive = jail_identifier != 'default'

        try:
            jail_dicts = ioc.IOCage(
                jail=jail_identifier).get('all', recursive=recursive)

            if jail_identifier == 'default':
                jail_dicts['host_hostuuid'] = 'default'
                jails.append(jail_dicts)
            else:
                for jail in jail_dicts:
                    jail = list(jail.values())[0]
                    jail['id'] = jail['host_hostuuid']
                    if jail['dhcp'] == 'on':
                        uuid = jail['host_hostuuid']

                        if jail['state'] == 'up':
                            interface = jail['interfaces'].split(',')[0].split(
                                ':')[0]
                            if interface == 'vnet0':
                                # Inside jails they are epair0b
                                interface = 'epair0b'
                            ip4_cmd = ['jexec', f'ioc-{uuid}', 'ifconfig',
                                       interface, 'inet']
                            try:
                                out = su.check_output(ip4_cmd)
                                out = out.splitlines()[2].split()[1].decode()
                                jail['ip4_addr'] = f'{interface}|{out}'
                            except (su.CalledProcessError, IndexError):
                                jail['ip4_addr'] = f'{interface}|ERROR'
                        else:
                            jail['ip4_addr'] = 'DHCP (not running)'

                    if jail['state'] == 'up':
                        try:
                            jail['jid'] = su.check_output(
                                [
                                    'jls', '-j',
                                    f'ioc-{jail["host_hostuuid"]}',
                                    'jid'
                                ]
                            ).decode().strip()
                        except su.CalledProcessError:
                            jail['jid'] = 'ERROR'
                    else:
                        jail['jid'] = None

                    jails.append(jail)
        except ioc_exceptions.JailMisconfigured as e:
            self.logger.error(e, exc_info=True)
        except BaseException:
            # Brandon is working on fixing this generic except, till then I
            # am not going to make the perfect the enemy of the good enough!
            self.logger.debug('Failed to get list of jails', exc_info=True)

        return filter_list(jails, filters, options)

    @accepts(
        Dict(
            "options",
            Str("release", required=True),
            Str("template"),
            Str("pkglist"),
            Str("uuid", required=True),
            Bool("basejail", default=False),
            Bool("empty", default=False),
            Bool("short", default=False),
            List("props", default=[]),
            Bool('https', default=True)
        )
    )
    async def do_create(self, options):
        """Creates a jail."""
        # Typically one would return the created jail's id from this
        # create call, BUT jail creation may involve fetching a release,
        # which can be time consuming and block for a long time. That
        # dictates making this a job, yet CRUD methods are not jobs as of
        # yet, so we settle on a wrapper around the main job and return
        # that job's id instead of the created jail's id.

        return await self.middleware.call('jail.create_job', options)

    @private
    @job(lock=lambda args: f'jail_create:{args[-1]["uuid"]}')
    def create_job(self, job, options):
        verrors = ValidationErrors()
        uuid = options["uuid"]

        job.set_progress(0, f'Creating: {uuid}')

        try:
            self.check_jail_existence(uuid, skip=False)

            verrors.add(
                'uuid',
                f'A jail with name {uuid} already exists'
            )
            raise verrors
        except CallError:
            # No jail exists with the provided name, so we can create one
            # now.

            verrors = self.common_validation(verrors, options)

            if verrors:
                raise verrors

            job.set_progress(20, 'Initial validation complete')

        iocage = ioc.IOCage(skip_jails=True)

        release = options["release"]
        template = options.get("template", False)
        pkglist = options.get("pkglist", None)
        basejail = options["basejail"]
        empty = options["empty"]
        short = options["short"]
        props = options["props"]
        pool = IOCJson().json_get_value("pool")
        iocroot = IOCJson(pool).json_get_value("iocroot")
        https = options.get('https', True)

        if template:
            release = template

        if (
                not os.path.isdir(f'{iocroot}/releases/{release}') and
                not template and
                not empty
        ):
            job.set_progress(50, f'{release} missing, calling fetch')
            self.middleware.call_sync(
                'jail.fetch', {"release": release, "https": https}, job=True
            )

        err, msg = iocage.create(
            release,
            props,
            0,
            pkglist,
            template=template,
            short=short,
            _uuid=uuid,
            basejail=basejail,
            empty=empty
        )

        if err:
            raise CallError(msg)

        job.set_progress(100, f'Created: {uuid}')

        return True

    @private
    def validate_ips(self, verrors, options, schema='options.props', exclude=None):
        for item in options['props']:
            for f in ('ip4_addr', 'ip6_addr'):
                # valid ip values can be
                # 1) none
                # 2) interface|accept_rtadv
                # 3) interface|ip/netmask
                # 4) interface|ip
                # 5) ip/netmask
                # 6) ip
                # and any of the above can be mixed together using ","
                # we explicitly check these
                if f in item:
                    for ip in map(
                        lambda ip: ip.split('|', 1)[-1].split('/')[0],
                        filter(
                            lambda v: v != 'none' and v.split('|')[-1] != 'accept_rtadv',
                            item.split('=')[1].split(',')
                        )
                    ):
                        try:
                            IpInUse(self.middleware, exclude)(
                                ip
                            )
                        except ValueError as e:
                            verrors.add(
                                f'{schema}.{f}',
                                str(e)
                            )

    @accepts(Str("jail"), Dict(
             "options",
             Bool("plugin", default=False),
             additional_attrs=True,
             ))
    def do_update(self, jail, options):
        """Sets a jail property."""
        plugin = options.pop("plugin")
        _, _, iocage = self.check_jail_existence(jail)

        name = options.pop("name", None)

        verrors = ValidationErrors()

        jail = self.query([['id', '=', jail]], {'get': True})

        verrors = self.common_validation(verrors, options, True, jail)

        if name is not None and plugin:
            verrors.add('options.plugin',
                        'Cannot be true while trying to rename')

        if verrors:
            raise verrors

        for prop, val in options.items():
            p = f"{prop}={val}"

            try:
                iocage.set(p, plugin)
            except RuntimeError as err:
                raise CallError(err)

        if name:
            iocage.rename(name)

        return True

    @private
    def common_validation(self, verrors, options, update=False, jail=None):
        if not update:
            # Ensure that api call conforms to format set by iocage for props
            # Example 'key=value'

            for value in options['props']:
                if '=' not in value:
                    verrors.add(
                        'options.props',
                        'Please follow the format specified by iocage for api calls, '
                        'e.g. "key=value"'
                    )
                    break

            if verrors:
                raise verrors

            # normalise vnet mac address
            # expected format here is 'vnet0_mac=00-D0-56-F2-B5-12,00-D0-56-F2-B5-13'
            vnet_macs = {
                f.split('=')[0]: f.split('=')[1] for f in options['props']
                if any(f'vnet{i}_mac' in f.split('=')[0] for i in range(0, 4))
            }

            self.validate_ips(verrors, options)
        else:
            vnet_macs = {
                key: value for key, value in options.items()
                if any(f'vnet{i}_mac' in key for i in range(0, 4))
            }

            exclude_ips = [
                ip.split('|')[1].split('/')[0] if '|' in ip else ip.split('/')[0]
                for f in ('ip4_addr', 'ip6_addr') for ip in jail[f].split(',')
                if ip not in ('none', 'DHCP (not running)')
            ]

            self.validate_ips(
                verrors, {'props': [f'{k}={v}' for k, v in options.items()]},
                'options', exclude_ips
            )

        # validate vnetX_mac addresses
        for key, value in vnet_macs.items():
            if value and value != 'none':
                value = value.replace(',', ' ')
                try:
                    for mac in value.split():
                        MACAddr()(mac)

                    if (
                        len(value.split()) != 2 or
                        any(value.split().count(v) > 1 for v in value.split())
                    ):
                        raise ValueError('Exception')
                except ValueError:
                    verrors.add(
                        key,
                        'Please enter two valid and different '
                        f'space/comma-delimited MAC addresses for {key}.'
                    )

        return verrors

    @accepts(Str("jail"))
    def do_delete(self, jail):
        """Takes a jail and destroys it."""
        _, _, iocage = self.check_jail_existence(jail)

        # TODO: Port children checking, release destroying.
        iocage.destroy_jail()

        return True

    @private
    def check_dataset_existence(self):
        try:
            IOCCheck(migrate=True)
        except ioc_exceptions.PoolNotActivated as e:
            raise CallError(e, errno=errno.ENOENT)

    @private
    def check_jail_existence(self, jail, skip=True, callback=None):
        """Wrapper for iocage's API, as a few commands aren't ported to it"""
        try:
            if callback is not None:
                iocage = ioc.IOCage(callback=callback,
                                    skip_jails=skip, jail=jail)
            else:
                iocage = ioc.IOCage(skip_jails=skip, jail=jail)
            jail, path = iocage.__check_jail_existence__()
        except (SystemExit, RuntimeError):
            raise CallError(f"jail '{jail}' not found!")

        return jail, path, iocage

    @accepts()
    def get_activated_pool(self):
        """Returns the activated pool if there is one, or None"""
        try:
            pool = ioc.IOCage(skip_jails=True).get('', pool=True)
        except (RuntimeError, SystemExit) as e:
            raise CallError(f'Error occurred getting activated pool: {e}')
        except (ioc_exceptions.PoolNotActivated, FileNotFoundError):
            self.check_dataset_existence()

            try:
                pool = ioc.IOCage(skip_jails=True).get('', pool=True)
            except ioc_exceptions.PoolNotActivated:
                pool = None

        return pool

    @accepts()
    async def interface_choices(self):
        """
        Returns a dictionary of interface choices which can be used with creating/updating jails.
        """
        return await self.middleware.call(
            'interface.choices', {
                'exclude': ['lo', 'pflog', 'pfsync', 'tun', 'tap', 'epair', 'vnet', 'bridge']
            }
        )

    @accepts(
        Dict(
            'options',
            Str('release'),
            Str('server', default='download.freebsd.org'),
            Str('user', default='anonymous'),
            Str('password', default='anonymous@'),
            Str('name', default=None, null=True),
            Str('jail_name', default=None, null=True),
            Bool('accept', default=True),
            Bool('https', default=True),
            List('props', default=[]),
            List(
                'files',
                default=['MANIFEST', 'base.txz', 'lib32.txz', 'doc.txz']
            ),
            Str('branch', default=None, null=True)
        )
    )
    @job(lock=lambda args: f"jail_fetch:{args[-1]}")
    def fetch(self, job, options):
        """Fetches a release or plugin."""
        fetch_output = {'install_notes': []}
        release = options.get('release', None)
        https = options.pop('https', False)
        name = options.pop('name')
        jail_name = options.pop('jail_name')

        post_install = False

        verrors = ValidationErrors()

        self.validate_ips(verrors, options)

        if verrors:
            raise verrors

        def progress_callback(content, exception):
            msg = content['message'].strip('\r\n')
            rel_up = f'* Updating {release} to the latest patch level... '
            nonlocal post_install

            if name is None:
                if 'Downloading : base.txz' in msg and '100%' in msg:
                    job.set_progress(5, msg)
                elif 'Downloading : lib32.txz' in msg and '100%' in msg:
                    job.set_progress(10, msg)
                elif 'Downloading : doc.txz' in msg and '100%' in msg:
                    job.set_progress(15, msg)
                elif 'Downloading : src.txz' in msg and '100%' in msg:
                    job.set_progress(20, msg)
                if 'Extracting: base.txz' in msg:
                    job.set_progress(25, msg)
                elif 'Extracting: lib32.txz' in msg:
                    job.set_progress(50, msg)
                elif 'Extracting: doc.txz' in msg:
                    job.set_progress(75, msg)
                elif 'Extracting: src.txz' in msg:
                    job.set_progress(90, msg)
                elif rel_up in msg:
                    job.set_progress(95, msg)
                else:
                    job.set_progress(None, msg)
            else:
                if post_install:
                    for split_msg in msg.split('\n'):
                        fetch_output['install_notes'].append(split_msg)

                if '  These pkgs will be installed:' in msg:
                    job.set_progress(50, msg)
                elif 'Installing plugin packages:' in msg:
                    job.set_progress(75, msg)
                elif 'Running post_install.sh' in msg:
                    job.set_progress(90, msg)
                    # Sets each message going forward as important to the user
                    post_install = True
                else:
                    job.set_progress(None, msg)

        self.check_dataset_existence()  # Make sure our datasets exist.
        start_msg = f'{release} being fetched'
        final_msg = f'{release} fetched'

        iocage = ioc.IOCage(callback=progress_callback, silent=False)

        if name is not None:
            pool = IOCJson().json_get_value('pool')
            iocroot = IOCJson(pool).json_get_value('iocroot')

            options["plugin_name"] = name
            start_msg = 'Starting plugin install'
            final_msg = f"Plugin: {name} installed"
        elif name is None and https:
            if 'https' not in options['server']:
                options['server'] = f'https://{options["server"]}'

        options["accept"] = True
        options['name'] = jail_name

        job.set_progress(0, start_msg)
        iocage.fetch(**options)

        if post_install and name is not None:
            plugin_manifest = pathlib.Path(
                f'{iocroot}/.plugin_index/{name}.json'
            )
            plugin_json = json.loads(plugin_manifest.read_text())
            schema_version = plugin_json.get('plugin_schema', '1')

            if schema_version.isdigit() and int(schema_version) >= 2:
                plugin_output = pathlib.Path(
                    f'{iocroot}/jails/{name}/root/root/PLUGIN_INFO'
                )

                if plugin_output.is_file():
                    # Otherwise it will be the verbose output from the
                    # post_install script
                    fetch_output['install_notes'] = [
                        x for x in plugin_output.read_text().split('\n') if x
                    ]

                    # This is to get the admin URL and such
                    fetch_output['install_notes'] += job.progress[
                        'description'].split('\n')

        job.set_progress(100, final_msg)

        return fetch_output

    @accepts(
        Str('resource', enum=['RELEASE', 'TEMPLATE', 'PLUGIN', 'BRANCHES']),
        Bool('remote', default=False),
        Bool('want_cache', default=True),
        Str('branch', default=None)
    )
    @job(lock=lambda args: args[0])
    def list_resource(self, job, resource, remote, want_cache, branch):
        """Returns a JSON list of the supplied resource on the host"""
        self.check_dataset_existence()  # Make sure our datasets exist.
        iocage = ioc.IOCage(skip_jails=True)
        resource = "base" if resource == "RELEASE" else resource.lower()

        if resource == "plugin":
            if remote:
                if want_cache:
                    try:
                        resource_list = self.middleware.call_sync(
                            'cache.get', 'iocage_remote_plugins')

                        return resource_list
                    except KeyError:
                        pass

                resource_list = iocage.fetch(list=True, plugins=True, header=False, branch=branch)
                try:
                    plugins_versions_data = self.middleware.call_sync('cache.get', 'iocage_plugin_versions')
                except KeyError:
                    plugins_versions_data_job = self.middleware.call_sync(
                        'core.get_jobs',
                        [['method', '=', 'jail.retrieve_plugin_versions'], ['state', '=', 'RUNNING']]
                    )
                    error = None
                    plugins_versions_data = {}
                    if plugins_versions_data_job:
                        try:
                            plugins_versions_data = self.middleware.call_sync(
                                'core.job_wait', plugins_versions_data_job[0]['id'], job=True
                            )
                        except CallError as e:
                            error = str(e)
                    else:
                        try:
                            plugins_versions_data = self.middleware.call_sync(
                                'jail.retrieve_plugin_versions', job=True
                            )
                        except Exception as e:
                            error = e

                    if error:
                        # Let's not make the failure fatal
                        self.middleware.logger.error(f'Retrieving plugin versions failed: {error}')
            else:
                resource_list = iocage.list("all", plugin=True)
                pool = IOCJson().json_get_value("pool")
                iocroot = IOCJson(pool).json_get_value("iocroot")
                index_path = f'{iocroot}/.plugin_index/INDEX'
                plugin_jails = {
                    j['host_hostuuid']: j for j in self.middleware.call_sync(
                        'jail.query', [['type', 'in', ['plugin', 'pluginv2']]]
                    )
                }

                if not pathlib.Path(index_path).is_file():
                    index_json = None
                else:
                    # Use a context manager so the file is always closed.
                    with open(index_path, 'r') as index_fd:
                        index_json = json.load(index_fd)

            for index, plugin in enumerate(resource_list):

                if remote:
                    # In case of remote, "plugin" is going to be a dictionary
                    plugin.update({
                        k: plugins_versions_data.get(plugin['plugin'], {}).get(k, 'N/A')
                        for k in ('version', 'revision', 'epoch')
                    })
                else:
                    # "plugin" is a list which we will convert to a dictionary for readability
                    plugin_dict = {
                        k: v if v != '-' else None
                        for k, v in zip((
                            'jid', 'name', 'boot', 'state', 'type', 'release', 'ip4', 'ip6', 'template', 'admin_portal'
                        ), plugin)
                    }
                    plugin_output = pathlib.Path(
                        f'{iocroot}/jails/{plugin[1]}/root/root/PLUGIN_INFO'
                    )

                    if plugin_output.is_file():
                        plugin_info = [[
                            x for x in plugin_output.read_text().split(
                                '\n') if x
                        ]]
                    else:
                        plugin_info = None

                    plugin_name = plugin_jails[plugin_dict['name']]['plugin_name']
                    plugin_dict.update({
                        'plugin_info': plugin_info,
                        'plugin': plugin_name if plugin_name != 'none' else plugin_dict['name'],
                        **self.get_local_plugin_version(
                            plugin_name if plugin_name != 'none' else plugin_dict['name'],
                            index_json, iocroot, plugin_dict['name']
                        )
                    })

                    resource_list[index] = plugin_dict

            if remote:
                self.middleware.call_sync(
                    'cache.put', 'iocage_remote_plugins', resource_list,
                    86400
                )
        elif resource == "base":
            try:
                if remote:
                    resource_list = self.middleware.call_sync(
                        'cache.get', 'iocage_remote_releases')

                    return resource_list
            except KeyError:
                pass

            resource_list = iocage.fetch(list=True, remote=remote, http=True)

            if remote:
                self.middleware.call_sync(
                    'cache.put', 'iocage_remote_releases', resource_list,
                    86400
                )
        elif resource == 'branches':
            official_branches = requests.get(
                'https://api.github.com/repos/freenas/iocage-ix-plugins/'
                'branches'
            )
            official_branches.raise_for_status()
            resource_list = [
                {'name': b['name'], 'repo': 'official'}
                for b in official_branches.json()
            ]
        else:
            resource_list = [
                {k: v if v != '-' else None for k, v in zip(('jid', 'name', 'state', 'release', 'ip4'), jail_data)}
                for jail_data in iocage.list(resource)
            ]

        return resource_list

    @periodic(interval=86400)
    @private
    @accepts(Str('branch', null=True, default=None))
    @job(lock='retrieve_plugin_versions')
    def retrieve_plugin_versions(self, job, branch=None):
        branch = branch or self.get_version()
        try:
            pool = self.get_activated_pool()
        except CallError:
            pool = None

        if pool:
            plugins = IOCPlugin(branch=branch).fetch_plugin_versions()
        else:
            with tempfile.TemporaryDirectory() as td:
                github_repo = 'https://github.com/freenas/iocage-ix-plugins.git'
                try:
                    IOCPlugin._clone_repo(branch, github_repo, td, depth=1)
                except Exception:
                    self.middleware.logger.error('Failed to clone iocage-ix-plugins repository.', exc_info=True)
                    return {}
                else:
                    plugins_index_data = IOCPlugin.retrieve_plugin_index_data(td)
                    plugins = IOCPlugin.fetch_plugin_versions_from_plugin_index(plugins_index_data)

        self.middleware.call_sync('cache.put', 'iocage_plugin_versions', plugins)
        return plugins

    @accepts(Str("action", enum=["START", "STOP", "RESTART"]))
    def rc_action(self, action):
        """Does specified action on rc enabled (boot=on) jails"""
        iocage = ioc.IOCage(rc=True)

        try:
            if action == "START":
                iocage.start()
            elif action == "STOP":
                iocage.stop()
            else:
                iocage.restart()
        except BaseException as e:
            raise CallError(str(e))

        return True

    @accepts(Str('jail'))
    @job(lock=lambda args: f'jail_start:{args[-1]}')
    def start(self, job, jail):
        """Takes a jail and starts it."""
        uuid, _, iocage = self.check_jail_existence(jail)
        status, _ = IOCList.list_get_jid(uuid)

        if not status:
            try:
                iocage.start()
            except BaseException as e:
                raise CallError(str(e))

        return True

    @accepts(Str("jail"), Bool('force', default=False))
    @job(lock=lambda args: f'jail_stop:{args[-1]}')
    def stop(self, job, jail, force):
        """Takes a jail and stops it."""
        uuid, _, iocage = self.check_jail_existence(jail)
        status, _ = IOCList.list_get_jid(uuid)

        if status:
            try:
                iocage.stop(force=force)
            except BaseException as e:
                raise CallError(str(e))

            return True

    @accepts(Str('jail'))
    @job(lock=lambda args: f"jail_restart:{args[-1]}")
    def restart(self, job, jail):
        """Takes a jail and restarts it."""
        uuid, _, iocage = self.check_jail_existence(jail)
        status, _ = IOCList.list_get_jid(uuid)

        if status:
            try:
                iocage.stop()
            except BaseException as e:
                raise CallError(str(e))

        try:
            iocage.start()
        except BaseException as e:
            raise CallError(str(e))

        return True

    @private
    def get_iocroot(self):
        pool = IOCJson().json_get_value("pool")
        return IOCJson(pool).json_get_value("iocroot")

    @accepts(
        Str("jail"),
        Dict(
            "options",
            Str(
                "action", enum=["ADD", "REMOVE", "REPLACE", "LIST"],
                required=True
            ),
            Str("source"),
            Str("destination"),
            Str("fstype", default='nullfs'),
            Str("fsoptions", default='ro'),
            Str("dump", default='0'),
            Str("pass", default='0'),
            Int("index", default=None),
        ))
    def fstab(self, jail, options):
        """Manipulate a jails fstab"""
        uuid, _, iocage = self.check_jail_existence(jail, skip=False)
        status, jid = IOCList.list_get_jid(uuid)
        action = options['action'].lower()
        index = options.get('index')

        if status and action != 'list':
            raise CallError(
                f'{jail} should not be running when modifying its fstab')

        verrors = ValidationErrors()

        if action in ('add', 'replace', 'remove'):
            if action != 'remove' or index is None:
                # For remove we allow removing by index or by mount entry, so if index
                # is not specified we should validate that the rest of the fields exist.
                for f in ('source', 'destination', 'fstype', 'fsoptions', 'dump', 'pass'):
                    if not options.get(f):
                        verrors.add(
                            f'options.{f}',
                            f'This field is required with "{action}" action.'
                        )

            if action == 'replace' and index is None:
                verrors.add(
                    'options.index',
                    'Index cannot be "None" when replacing an fstab entry.'
                )

        verrors.check()

        source = options.get('source')
        if action in ('add', 'replace') and not os.path.exists(source):
            verrors.add(
                'options.source',
                'The provided path for the source does not exist.'
            )

        destination = options.get('destination')
        if destination:
            destination = f'/{destination}' if destination[0] != '/' else \
                destination
            dst = f'{self.get_iocroot()}/jails/{jail}/root'
            if dst not in destination:
                destination = f'{dst}{destination}'

            if os.path.exists(destination):
                if not os.path.isdir(destination):
                    verrors.add(
                        'options.destination',
                        'Destination is not a directory. Please provide an '
                        'empty directory for the destination.'
                    )
                elif os.listdir(destination):
                    verrors.add(
                        'options.destination',
                        'Destination directory must be empty.'
                    )
            else:
                os.makedirs(destination)

        # Setup defaults for library
        source = source or ''
        destination = destination or ''
        fstype = options.get('fstype')
        fsoptions = options.get('fsoptions')
        dump = options.get('dump')
        _pass = options.get('pass')

        if verrors:
            raise verrors

        try:
            _list = iocage.fstab(
                action, source, destination, fstype, fsoptions,
                dump, _pass, index=index
            )
        except ioc_exceptions.ValidationFailed as e:
            # CallError expects a string; the exception message may be an
            # iterable of messages, so join them into one.
            if not isinstance(e.message, str) and isinstance(
                e.message,
                Iterable
            ):
                e.message = '\n'.join(e.message)

            self.logger.error(f'{e!r}')
            raise CallError(e.message)

        if action == "list":
            split_list = {}
            system_mounts = (
                '/root/bin', '/root/boot', '/root/lib', '/root/libexec',
                '/root/rescue', '/root/sbin', '/root/usr/bin',
                '/root/usr/include', '/root/usr/lib', '/root/usr/libexec',
                '/root/usr/sbin', '/root/usr/share', '/root/usr/libdata',
                '/root/usr/lib32'
            )

            for i in _list:
                fstab_entry = i[1].split()
                _fstab_type = 'SYSTEM' if fstab_entry[0].endswith(
                    system_mounts) else 'USER'

                split_list[i[0]] = {'entry': fstab_entry, 'type': _fstab_type}

            return split_list

        return True

    @accepts(Str("pool"))
    def activate(self, pool):
        """Activates a pool for iocage usage, and deactivates the rest."""
        zfs = libzfs.ZFS(history=True, history_prefix="<iocage>")
        pools = zfs.pools
        prop = "org.freebsd.ioc:active"
        activated = False

        for _pool in pools:
            if _pool.name == pool:
                ds = zfs.get_dataset(_pool.name)
                ds.properties[prop] = libzfs.ZFSUserProperty("yes")
                activated = True
            else:
                ds = zfs.get_dataset(_pool.name)
                ds.properties[prop] = libzfs.ZFSUserProperty("no")

        return activated

    @accepts(Str("ds_type", enum=["ALL", "JAIL", "TEMPLATE", "RELEASE"]))
    def clean(self, ds_type):
        """Cleans all iocage datasets of ds_type"""

        if ds_type == "JAIL":
            IOCClean().clean_jails()
        elif ds_type == "ALL":
            IOCClean().clean_all()
        elif ds_type == "TEMPLATE":
            IOCClean().clean_templates()

        return True

    @accepts(
        Str("jail"),
        List("command", required=True),
        Dict("options", Str("host_user", default="root"), Str("jail_user")))
    @job(lock=lambda args: f"jail_exec:{args[-1]}")
    def exec(self, job, jail, command, options):
        """Issues a command inside a jail."""
        _, _, iocage = self.check_jail_existence(jail, skip=False)

        host_user = options["host_user"]
        jail_user = options.get("jail_user", None)

        if isinstance(command[0], list):
            # iocage wants a flat list, not a list inside a list
            command = list(itertools.chain.from_iterable(command))

        # We may be getting ';', '&&' and so forth. Adding the shell for
        # safety.
        if len(command) == 1:
            command = ["/bin/sh", "-c"] + command

        host_user = "" if jail_user and host_user == "root" else host_user
        try:
            msg = iocage.exec(
                command, host_user, jail_user, start_jail=True, msg_return=True
            )
        except BaseException as e:
            raise CallError(str(e))

        return '\n'.join(msg)

    @accepts(
        Str("jail"),
        Bool("update_pkgs", default=False)
    )
    @job(lock=lambda args: f"jail_update:{args[-2]}")
    def update_to_latest_patch(self, job, jail, update_pkgs=False):
        """Updates specified jail to latest patch level."""
        job.set_progress(0, f'Updating {jail}')
        msg_queue = deque(maxlen=10)

        def progress_callback(content, exception):
            msg = content['message'].strip('\n')
            msg_queue.append(msg)
            final_msg = '\n'.join(msg_queue)

            if 'Inspecting system... done' in msg:
                job.set_progress(20)
            elif 'Preparing to download files... done.' in msg:
                job.set_progress(50)
            elif 'Applying patches... done.' in msg:
                job.set_progress(75)
            elif 'Installing updates... done.' in msg:
                job.set_progress(90)
            elif f'{jail} has been updated successfully' in msg:
                job.set_progress(100)

            job.set_progress(None, description=final_msg)

        _, _, iocage = self.check_jail_existence(
            jail,
            callback=progress_callback
        )
        iocage.update(update_pkgs)

        return True

    @accepts(
        Str("jail"),
        Dict("options",
             Str("release", required=False),
             Bool("plugin", default=False))
    )
    @job(lock=lambda args: f"jail_upgrade:{args[-1]}")
    def upgrade(self, job, jail, options):
        """Upgrades specified jail to specified RELEASE."""
        verrors = ValidationErrors()
        release = options.get('release', None)
        plugin = options['plugin']

        if release is None and not plugin:
            verrors.add(
                'options.release',
                'Must not be None if options.plugin is False.'
            )
            raise verrors

        job.set_progress(0, f'Upgrading {jail}')
        msg_queue = deque(maxlen=10)

        def progress_callback(content, exception):
            msg = content['message'].strip('\n')
            msg_queue.append(msg)
            final_msg = '\n'.join(msg_queue)

            if plugin:
                plugin_progress(job, msg)
            else:
                jail_progress(job, msg)

            job.set_progress(None, description=final_msg)

        def plugin_progress(job, msg):
            if 'Snapshotting' in msg:
                job.set_progress(20)
            elif 'Updating plugin INDEX' in msg:
                job.set_progress(40)
            elif 'Running upgrade' in msg:
                job.set_progress(70)
            elif 'Installing plugin packages' in msg:
                job.set_progress(90)
            elif f'{jail} successfully upgraded' in msg:
                job.set_progress(100)

        def jail_progress(job, msg):
            if 'Inspecting system' in msg:
                job.set_progress(20)
            elif 'Preparing to download files' in msg:
                job.set_progress(50)
            elif 'Applying patches' in msg:
                job.set_progress(75)
            elif 'Installing updates' in msg:
                job.set_progress(90)
            elif f'{jail} successfully upgraded' in msg:
                job.set_progress(100)

        _, _, iocage = self.check_jail_existence(
            jail,
            callback=progress_callback
        )
        iocage.upgrade(release=release)

        return True

    @accepts(Str("jail"))
    @job(lock=lambda args: f"jail_export:{args[-1]}")
    def export(self, job, jail):
        """Exports jail to zip file"""
        uuid, path, _ = self.check_jail_existence(jail)
        status, jid = IOCList.list_get_jid(uuid)
        started = False

        if status:
            self.middleware.call_sync('jail.stop', jail, job=True)
            started = True

        IOCImage().export_jail(uuid, path)

        if started:
            self.middleware.call_sync('jail.start', jail, job=True)

        return True

    @accepts(Str("jail"))
    @job(lock=lambda args: f"jail_import:{args[-1]}")
    def _import(self, job, jail):
        """Imports jail from zip file"""

        IOCImage().import_jail(jail)

        return True

    @private
    def get_local_plugin_version(self, plugin, index_json, iocroot, jail_name):
        """
        Checks the primary_pkg key in the INDEX against the pkg version
        installed inside the jail.
        """
        version = {k: 'N/A' for k in ('version', 'revision', 'epoch')}

        if index_json is None:
            return version

        try:
            base_plugin = plugin.rsplit('_', 1)[0]  # May have multiple
            primary_pkg = index_json[base_plugin].get('primary_pkg') or plugin

            # Since these are plugins, we don't want to spin them up just to
            # check a pkg; directly accessing the db is best in this case.
            db_rows = self.read_plugin_pkg_db(
                f'{iocroot}/jails/{jail_name}/root/var/db/pkg/local.sqlite',
                primary_pkg)

            for row in db_rows:
                row = list(row)
                if '/' not in primary_pkg:
                    row[1] = row[1].split('/', 1)[-1]
                    row[2] = row[2].split('/', 1)[-1]

                if primary_pkg == row[1] or primary_pkg == row[2]:
                    version = ioc_common.parse_package_name(f'{plugin}-{row[3]}')
                    break
        except (KeyError, sqlite3.OperationalError):
            pass

        return version

    @private
    def read_plugin_pkg_db(self, db, pkg):
        try:
            conn = sqlite3.connect(db)
        except sqlite3.Error as e:
            raise CallError(e)

        with conn:
            cur = conn.cursor()
            # Parameterized query; avoids interpolating `pkg` into the SQL.
            cur.execute(
                'SELECT * FROM packages WHERE origin = ? OR name = ?',
                (pkg, pkg)
            )

            rows = cur.fetchall()

            return rows

    @private
    def start_on_boot(self):
        self.logger.debug('Starting jails on boot: PENDING')
        ioc.IOCage(rc=True).start()
        self.logger.debug('Starting jails on boot: SUCCESS')

        return True

    @private
    def stop_on_shutdown(self):
        self.logger.debug('Stopping jails on shutdown: PENDING')
        ioc.IOCage(rc=True).stop()
        self.logger.debug('Stopping jails on shutdown: SUCCESS')

        return True

    @private
    async def terminate(self):
        await SHUTDOWN_LOCK.acquire()

    @private
    def get_version(self):
        """
        Parses the host OS release for the RELEASE branch we need.
        """
        r = os.uname().release
        version = f'{round(float(r.split("-")[0]), 1)}-RELEASE'

        return version
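
The ip4_addr/ip6_addr props checked by validate_ips above pack several accepted formats into one comma-separated value. A stand-alone sketch of the same address extraction on a made-up prop string:

prop = 'ip4_addr=vnet0|192.168.1.50/24,em0|10.0.0.5,none'

# Mirror validate_ips: drop 'none' and accept_rtadv entries, then strip the
# optional 'interface|' prefix and '/netmask' suffix to get bare addresses.
value = prop.split('=')[1]
ips = [
    ip.split('|', 1)[-1].split('/')[0]
    for ip in value.split(',')
    if ip != 'none' and ip.split('|')[-1] != 'accept_rtadv'
]

print(ips)  # ['192.168.1.50', '10.0.0.5']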
Exemplo n.º 23
0
class BackupB2Service(Service):
    class Config:
        namespace = 'backup.b2'

    def __get_auth(self, id):
        credential = self.middleware.call_sync('datastore.query',
                                               'system.cloudcredentials',
                                               [('id', '=', id)],
                                               {'get': True})

        r = requests.get(
            'https://api.backblazeb2.com/b2api/v1/b2_authorize_account',
            auth=(credential['attributes'].get('account_id'),
                  credential['attributes'].get('app_key')),
        )
        if r.status_code != 200:
            raise ValueError(f'Invalid request: {r.text}')
        return r.json()

    @accepts(Int('id'))
    def get_buckets(self, id):
        """Returns buckets from a given B2 credential."""
        auth = self.__get_auth(id)
        r = requests.post(
            f'{auth["apiUrl"]}/b2api/v1/b2_list_buckets',
            headers={
                'Authorization': auth['authorizationToken'],
                'Content-Type': 'application/json',
            },
            data=json.dumps({'accountId': auth['accountId']}),
        )
        if r.status_code != 200:
            raise CallError(f'Invalid B2 request: [{r.status_code}] {r.text}')
        return r.json()['buckets']

    @private
    async def sync(self, job, backup, credential):
        # Use a temporary file to store rclone file
        with tempfile.NamedTemporaryFile(mode='w+') as f:
            # Make sure only root can read it as there is sensitive data
            os.chmod(f.name, 0o600)

            f.write(
                textwrap.dedent("""
                [remote]
                type = b2
                env_auth = false
                account = {account}
                key = {key}
                endpoint =
                """).format(
                    account=credential['attributes']['account_id'],
                    key=credential['attributes']['app_key'],
                ))
            f.flush()

            args = [
                '/usr/local/bin/rclone',
                '--config',
                f.name,
                '-v',
                '--stats',
                '1s',
                'sync',
            ]

            remote_path = 'remote:{}{}'.format(
                backup['attributes']['bucket'],
                '/{}'.format(backup['attributes']['folder'])
                if backup['attributes'].get('folder') else '',
            )

            if backup['direction'] == 'PUSH':
                args.extend([backup['path'], remote_path])
            else:
                args.extend([remote_path, backup['path']])

            proc = await Popen(
                args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            check_task = asyncio.ensure_future(rclone_check_progress(
                job, proc))
            await proc.wait()
            if proc.returncode != 0:
                await asyncio.wait_for(check_task, None)
                raise ValueError('rclone failed: {}'.format(
                    check_task.result()))
            return True
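
A small standalone sketch of how the rclone invocation above is assembled for PUSH and PULL tasks; the bucket, folder and local path below are placeholder values, and the helper name is invented for illustration.

def build_rclone_args(backup: dict, config_path: str) -> list:
    """Mirror the argument assembly in BackupB2Service.sync (illustrative only)."""
    remote_path = 'remote:{}{}'.format(
        backup['attributes']['bucket'],
        '/{}'.format(backup['attributes']['folder'])
        if backup['attributes'].get('folder') else '',
    )
    args = ['/usr/local/bin/rclone', '--config', config_path,
            '-v', '--stats', '1s', 'sync']
    if backup['direction'] == 'PUSH':
        args += [backup['path'], remote_path]
    else:
        args += [remote_path, backup['path']]
    return args


if __name__ == '__main__':
    task = {
        'direction': 'PUSH',
        'path': '/mnt/tank/data',
        'attributes': {'bucket': 'mybucket', 'folder': 'backups'},
    }
    print(build_rclone_args(task, '/tmp/rclone.conf'))
    # -> [..., 'sync', '/mnt/tank/data', 'remote:mybucket/backups']
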
Exemplo n.º 24
0
class LDAPService(ConfigService):
    class Config:
        service = "ldap"
        datastore = 'directoryservice.ldap'
        datastore_extend = "ldap.ldap_extend"
        datastore_prefix = "ldap_"
        cli_namespace = "directory_service.ldap"

    @private
    async def ldap_extend(self, data):
        data['hostname'] = data['hostname'].split(
            ',') if data['hostname'] else []
        for key in ["ssl", "schema"]:
            data[key] = data[key].upper()

        if data["certificate"] is not None:
            data["cert_name"] = data['certificate']['cert_name']
            data["certificate"] = data['certificate']['id']
        else:
            data["cert_name"] = None

        if data["kerberos_realm"] is not None:
            data["kerberos_realm"] = data["kerberos_realm"]["id"]

        data['uri_list'] = await self.hostnames_to_uris(data)

        return data

    @private
    async def ldap_compress(self, data):
        data['hostname'] = ','.join(data['hostname'])
        for key in ["ssl", "schema"]:
            data[key] = data[key].lower()

        if not data['bindpw']:
            data.pop('bindpw')

        data.pop('uri_list')
        data.pop('cert_name')

        return data
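
    # Round-trip sketch with made-up values: the datastore keeps hostname as a
    # comma-separated string and ssl/schema in lower case, while the API
    # representation (ldap_extend) uses a list and upper case:
    #
    #     stored:   {'hostname': 'ldap1.example.com,ldap2.example.com',
    #                'ssl': 'start_tls', 'schema': 'rfc2307', ...}
    #     extended: {'hostname': ['ldap1.example.com', 'ldap2.example.com'],
    #                'ssl': 'START_TLS', 'schema': 'RFC2307', ...}
    #
    # ldap_compress reverses the transformation before the row is written back.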

    @accepts()
    async def schema_choices(self):
        """
        Returns list of available LDAP schema choices.
        """
        return await self.middleware.call('directoryservices.nss_info_choices',
                                          'LDAP')

    @accepts()
    async def ssl_choices(self):
        """
        Returns list of SSL choices.
        """
        return await self.middleware.call('directoryservices.ssl_choices',
                                          'LDAP')

    @private
    async def hostnames_to_uris(self, data):
        ret = []
        for h in data['hostname']:
            proto = 'ldaps' if SSL(data['ssl']) == SSL.USESSL else 'ldap'
            parsed = urlparse(f"{proto}://{h}")
            try:
                port = parsed.port
                host = parsed.netloc if not parsed.port else parsed.netloc.rsplit(
                    ':', 1)[0]
            except ValueError:
                """
                ParseResult.port will raise a ValueError if the port is not an int
                Ignore for now. ValidationError will be raised in common_validate()
                """
                host, port = h.rsplit(':', 1)

            if port is None:
                port = 636 if SSL(data['ssl']) == SSL.USESSL else 389

            uri = f"{proto}://{host}:{port}"
            ret.append(uri)

        return ret
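
    # Illustrative input/output for hostnames_to_uris (hostnames are made up):
    #
    #     {'hostname': ['ldap1.example.com', 'ldap2.example.com:3389'],
    #      'ssl': 'OFF'}
    #     -> ['ldap://ldap1.example.com:389', 'ldap://ldap2.example.com:3389']
    #
    # With the USESSL setting ('ON' in the schema below) the scheme becomes
    # ldaps:// and the default port 636.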

    @private
    async def common_validate(self, new, old, verrors):
        if not new["enable"]:
            return

        if new["certificate"]:
            verrors.extend(await self.middleware.call(
                "certificate.cert_services_validation", new["certificate"],
                "ldap_update.certificate", False))

        if not new["bindpw"] and new["has_samba_schema"]:
            verrors.add(
                "ldap_update.bindpw",
                "Bind credentials are required in order to use samba schema.")

        if not new["bindpw"] and not new["kerberos_principal"] and not new[
                "anonbind"]:
            verrors.add(
                "ldap_update.binddn",
                "Bind credentials or kerberos keytab are required for an authenticated bind."
            )
        if new["bindpw"] and new["kerberos_principal"]:
            self.logger.warning(
                "Simultaneous keytab and password authentication "
                "are selected. Clearing LDAP bind password.")
            new["bindpw"] = ""

        if not new["basedn"]:
            verrors.add("ldap_update.basedn",
                        "The basedn parameter is required.")
        if not new["hostname"]:
            verrors.add("ldap_update.hostname",
                        "The LDAP hostname parameter is required.")
        for idx, uri in enumerate(new["uri_list"]):
            parsed = urlparse(uri)
            try:
                port = parsed.port
            except ValueError:
                verrors.add(f"ldap_update.hostname.{idx}",
                            f"Invalid port number in LDAP URI: [{uri}].")

    @private
    async def convert_ldap_err_to_verr(self, data, e, verrors):
        if e.extra == "INVALID_CREDENTIALS":
            verrors.add(
                'ldap_update.binddn',
                'Remote LDAP server returned response that '
                'credentials are invalid.')

        elif e.extra == "STRONG_AUTH_NOT_SUPPORTED" and data['certificate']:
            verrors.add(
                'ldap_update.certificate',
                'Certificate-based authentication is not '
                f'supported by remote LDAP server: {e.errmsg}.')

        elif e.extra == "NO_SUCH_OBJECT":
            verrors.add(
                'ldap_update.basedn',
                'Remote LDAP server returned "NO_SUCH_OBJECT". This may '
                'indicate that the base DN is syntactically correct, but does '
                'not exist on the server.')

        elif e.extra == "INVALID_DN_SYNTAX":
            verrors.add(
                'ldap_update.basedn',
                'Remote LDAP server returned that the base DN is '
                'syntactically invalid.')

        elif e.extra:
            verrors.add('ldap_update', f'[{e.extra.__name__}]: {e.errmsg}')

        else:
            verrors.add('ldap_update', e.errmsg)

    @private
    async def ldap_validate(self, data, verrors):
        ldap_has_samba_schema = False

        for idx, h in enumerate(data['uri_list']):
            host, port = urlparse(h).netloc.rsplit(':', 1)
            try:
                await self.middleware.call('ldap.port_is_listening', host,
                                           int(port), data['dns_timeout'])
            except Exception as e:
                verrors.add(
                    f'ldap_update.hostname.{idx}',
                    f'Failed to open socket to remote LDAP server: {e}')
                return

        try:
            await self.middleware.call('ldap.validate_credentials', data)
        except CallError as e:
            await self.convert_ldap_err_to_verr(data, e, verrors)
            return

        try:
            ldap_has_samba_schema = bool(await self.middleware.call(
                'ldap.get_workgroup', data))
        except CallError as e:
            await self.convert_ldap_err_to_verr(data, e, verrors)

        if data['has_samba_schema'] and not ldap_has_samba_schema:
            verrors.add(
                'ldap_update.has_samba_schema',
                'Remote LDAP server does not have Samba schema extensions.')

    @accepts(
        Dict('ldap_update',
             List('hostname', required=True),
             Str('basedn', required=True),
             Str('binddn'),
             Str('bindpw', private=True),
             Bool('anonbind', default=False),
             Str('ssl', default='OFF', enum=['OFF', 'ON', 'START_TLS']),
             Int('certificate', null=True),
             Bool('validate_certificates', default=True),
             Bool('disable_freenas_cache'),
             Int('timeout', default=30),
             Int('dns_timeout', default=5),
             Int('kerberos_realm', null=True),
             Str('kerberos_principal'),
             Bool('has_samba_schema', default=False),
             Str('auxiliary_parameters', default='', max_length=None),
             Str('schema', default='RFC2307', enum=['RFC2307', 'RFC2307BIS']),
             Bool('enable'),
             update=True))
    async def do_update(self, data):
        """
        `hostname` list of ip addresses or hostnames of LDAP servers with
        which to communicate in order of preference. Failover only occurs
        if the current LDAP server is unresponsive.

        `basedn` specifies the default base DN to use when performing ldap
        operations. The base must be specified as a Distinguished Name in LDAP
        format.

        `binddn` specifies the default bind DN to use when performing ldap
        operations. The bind DN must be specified as a Distinguished Name in
        LDAP format.

        `anonbind` use anonymous authentication.

        `ssl` establish SSL/TLS-protected connections to the LDAP server(s).
        GSSAPI signing is disabled on SSL/TLS-protected connections if
        kerberos authentication is used.

        `certificate` LDAPS client certificate to be used for certificate-
        based authentication.

        `validate_certificates` specifies whether to perform checks on server
        certificates in a TLS session. If enabled, TLS_REQCERT demand is set.
        The server certificate is requested. If no certificate is provided or
        if a bad certificate is provided, the session is immediately terminated.
        If disabled, TLS_REQCERT allow is set. The server certificate is
        requested, but all errors are ignored.

        `kerberos_realm` in which the server is located. This parameter is
        only required for SASL GSSAPI authentication to the remote LDAP server.

        `kerberos_principal` kerberos principal to use for SASL GSSAPI
        authentication to the remote server. If `kerberos_realm` is specified
        without a keytab, then the `binddn` and `bindpw` are used to obtain
        the ticket necessary for GSSAPI authentication.

        `timeout` specifies a timeout (in seconds) after which calls to
        synchronous LDAP APIs will abort if no response is received.

        `dns_timeout` specifies the timeout (in seconds) after which the
        poll(2)/select(2) following a connect(2) returns in case of no activity
        for openldap. For nslcd this specifies the time limit (in seconds) to
        use when connecting to the directory server. This directly impacts the
        length of time that the LDAP service tries before failing over to
        a secondary LDAP URI.

        `has_samba_schema` determines whether to configure samba to use the
        ldapsam passdb backend to provide SMB access to LDAP users. This feature
        requires the presence of Samba LDAP schema extensions on the remote
        LDAP server.
        """
        verrors = ValidationErrors()
        must_reload = False
        old = await self.config()
        new = old.copy()
        new.update(data)
        new['uri_list'] = await self.hostnames_to_uris(new)
        await self.common_validate(new, old, verrors)
        verrors.check()

        if data.get(
                'certificate') and data['certificate'] != old['certificate']:
            new_cert = await self.middleware.call(
                'certificate.query', [('id', '=', data['certificate'])],
                {'get': True})
            new['cert_name'] = new_cert['name']

        if old != new:
            must_reload = True
            if new['enable']:
                await self.middleware.call('ldap.ldap_validate', new, verrors)
                verrors.check()

        await self.ldap_compress(new)
        await self.middleware.call('datastore.update', 'directoryservice.ldap',
                                   old['id'], new, {'prefix': 'ldap_'})

        if must_reload:
            if new['enable']:
                await self.middleware.call('ldap.start')
            else:
                await self.middleware.call('ldap.stop')

        return await self.config()
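
    # Minimal illustrative ldap.update payload; every key comes from the
    # 'ldap_update' schema above and every value is a placeholder:
    #
    #     {
    #         "hostname": ["ldap1.example.com"],
    #         "basedn": "dc=example,dc=com",
    #         "binddn": "cn=admin,dc=example,dc=com",
    #         "bindpw": "secret",
    #         "ssl": "START_TLS",
    #         "enable": True
    #     }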

    @private
    def port_is_listening(self, host, port, timeout=1):
        ret = False

        try:
            ipaddress.IPv6Address(host)
            s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        except ipaddress.AddressValueError:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        if timeout:
            s.settimeout(timeout)

        try:
            s.connect((host, port))
            ret = True

        except Exception as e:
            raise CallError(e)

        finally:
            s.close()

        return ret

    @private
    @job(lock="ldapquery")
    def do_ldap_query(self, job, ldap_conf, action, args):
        supported_actions = [
            'get_samba_domains',
            'get_root_DSE',
            'get_dn',
            'validate_credentials',
        ]
        if action not in supported_actions:
            raise CallError(f"Unsuported LDAP query: {action}")

        if ldap_conf is None:
            ldap_conf = self.middleware.call_sync('ldap.config')

        with LDAPQuery(conf=ldap_conf,
                       logger=self.logger,
                       hosts=ldap_conf['uri_list']) as LDAP:
            if action == "get_samba_domains":
                ret = LDAP.get_samba_domains()

            elif action == "get_root_DSE":
                ret = LDAP.get_root_DSE()

            elif action == "get_dn":
                dn = ldap_conf['basedn'] if args is None else args
                ret = LDAP.get_dn(dn)

            elif action == "validate_credentials":
                ret = LDAP.validate_credentials()

        return ret

    @private
    def validate_credentials(self, ldap_config=None):
        ldap_job = self.middleware.call_sync("ldap.do_ldap_query", ldap_config,
                                             "validate_credentials", None)
        ret = ldap_job.wait_sync(raise_error=True)
        return ret

    @private
    def get_samba_domains(self, ldap_config=None):
        ldap_job = self.middleware.call_sync("ldap.do_ldap_query", ldap_config,
                                             "get_samba_domains", None)
        ret = ldap_job.wait_sync(raise_error=True)
        return ret

    @private
    def get_root_DSE(self, ldap_config=None):
        """
        root DSE is defined in RFC4512, and must include the following:

        `namingContexts` naming contexts held in the LDAP server

        `subschemaSubentry` subschema entries known by the LDAP server

        `altServer` alternative servers in case this one is unavailable

        `supportedExtension` list of supported extended operations

        `supportedControl` list of supported controls

        `supportedSASLMechanisms` recognized Simple Authentication and Security Layer
        (SASL) [RFC4422] mechanisms.

        `supportedLDAPVersion` LDAP versions implemented by the LDAP server

        In practice, many LDAP servers do not return all of this data.
        """
        ldap_job = self.middleware.call_sync("ldap.do_ldap_query", ldap_config,
                                             "get_root_DSE", None)
        ret = ldap_job.wait_sync(raise_error=True)
        return ret

    @private
    def get_dn(self, dn=None, ldap_config=None):
        """
        Outputs contents of specified DN in JSON. By default will target the basedn.
        """
        ldap_job = self.middleware.call_sync("ldap.do_ldap_query", ldap_config,
                                             "get_dn", dn)
        ret = ldap_job.wait_sync(raise_error=True)
        return ret

    @private
    async def started(self):
        """
        Returns False if disabled, True if healthy, raises exception if faulted.
        """
        verrors = ValidationErrors()
        ldap = await self.config()
        if not ldap['enable']:
            return False

        await self.common_validate(ldap, ldap, verrors)
        try:
            verrors.check()
        except Exception:
            await self.middleware.call('datastore.update',
                                       'directoryservice.ldap', ldap['id'],
                                       {'ldap_enable': False})
            raise CallError(
                'Automatically disabling LDAP service due to invalid configuration.',
                errno.EINVAL)
        """
        Initialize state to "JOINING" until after booted.
        """
        if not await self.middleware.call('system.ready'):
            await self.set_state(DSStatus['JOINING'])
            return True

        try:
            await asyncio.wait_for(self.middleware.call(
                'ldap.get_root_DSE', ldap),
                                   timeout=ldap['timeout'])
        except asyncio.TimeoutError:
            raise CallError(
                f'LDAP status check timed out after {ldap["timeout"]} seconds.',
                errno.ETIMEDOUT)

        except Exception as e:
            raise CallError(e)

        try:
            cached_state = await self.middleware.call('cache.get', 'DS_STATE')

            if cached_state['ldap'] != 'HEALTHY':
                await self.set_state(DSStatus['HEALTHY'])
        except KeyError:
            await self.set_state(DSStatus['HEALTHY'])

        return True

    @private
    async def get_workgroup(self, ldap=None):
        ret = None
        smb = await self.middleware.call('smb.config')
        if ldap is None:
            ldap = await self.config()

        try:
            ret = await asyncio.wait_for(self.middleware.call(
                'ldap.get_samba_domains', ldap),
                                         timeout=ldap['timeout'])
        except asyncio.TimeoutError:
            raise CallError(
                f'ldap.get_workgroup timed out after {ldap["timeout"]} seconds.',
                errno.ETIMEDOUT)

        if len(ret) > 1:
            self.logger.warning(
                'Multiple Samba domains detected in LDAP environment; '
                'auto-configuration of workgroup map failed: %s', ret)

        ret = ret[0]['data']['sambaDomainName'][0] if ret else []

        if ret and smb['workgroup'] != ret:
            self.logger.debug(
                f'Updating SMB workgroup to match the LDAP domain name [{ret}]'
            )
            await self.middleware.call('datastore.update', 'services.cifs',
                                       smb['id'], {'cifs_srv_workgroup': ret})

        return ret

    @private
    async def set_state(self, state):
        return await self.middleware.call('directoryservices.set_state',
                                          {'ldap': state.name})

    @accepts()
    async def get_state(self):
        """
        Wrapper function for 'directoryservices.get_state'. Returns only the state of the
        LDAP service.
        """
        return (await
                self.middleware.call('directoryservices.get_state'))['ldap']

    @private
    def get_nslcd_status(self):
        """
        Returns internal nslcd state. nslcd will preferentially use the first LDAP server,
        and only fail over if the current LDAP server is unreachable.
        """
        with NslcdClient(NlscdConst.NSLCD_ACTION_STATE_GET.value) as ctx:
            while ctx.get_response() == NlscdConst.NSLCD_RESULT_BEGIN.value:
                nslcd_status = ctx.read_string()

        return nslcd_status

    @private
    async def nslcd_cmd(self, cmd):
        nslcd = await run(['service', 'nslcd', cmd], check=False)
        if nslcd.returncode != 0:
            raise CallError(
                f'nslcd failed to {cmd} with error: {nslcd.stderr.decode()}',
                errno.EFAULT)

    @private
    async def nslcd_status(self):
        nslcd = await run(['service', 'nslcd', 'onestatus'], check=False)
        return nslcd.returncode == 0

    @private
    async def start(self):
        """
        Refuse to start the service if it is already in the process of starting or stopping.
        If the state is 'HEALTHY' or 'FAULTED', stop the service first before restarting it to ensure
        that the service begins in a clean state.
        """
        ldap = await self.config()

        ldap_state = await self.middleware.call('ldap.get_state')
        if ldap_state in ['LEAVING', 'JOINING']:
            raise CallError(
                f'LDAP state is [{ldap_state}]. Please wait until directory service operation completes.',
                errno.EBUSY)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   ldap['id'], {'ldap_enable': True})
        if ldap['kerberos_realm']:
            await self.middleware.call('kerberos.start')

        await self.middleware.call('etc.generate', 'rc')
        await self.middleware.call('etc.generate', 'nss')
        await self.middleware.call('etc.generate', 'ldap')
        await self.middleware.call('etc.generate', 'pam')

        if not await self.nslcd_status():
            await self.nslcd_cmd('start')
        else:
            await self.nslcd_cmd('restart')

        if ldap['has_samba_schema']:
            await self.middleware.call('etc.generate', 'smb')
            await self.middleware.call('smb.store_ldap_admin_password')
            await self.middleware.call('service.restart', 'cifs')
            await self.middleware.call('smb.set_passdb_backend', 'ldapsam')

        await self.set_state(DSStatus['HEALTHY'])
        await self.middleware.call('ldap.fill_cache')

    @private
    async def stop(self):
        ldap = await self.config()
        await self.middleware.call('datastore.update', self._config.datastore,
                                   ldap['id'], {'ldap_enable': False})
        await self.set_state(DSStatus['LEAVING'])
        await self.middleware.call('etc.generate', 'rc')
        await self.middleware.call('etc.generate', 'nss')
        await self.middleware.call('etc.generate', 'ldap')
        await self.middleware.call('etc.generate', 'pam')
        if ldap['has_samba_schema']:
            await self.middleware.call('etc.generate', 'smb')
            await self.middleware.call('service.restart', 'cifs')
            await self.middleware.call('smb.synchronize_passdb')
            await self.middleware.call('smb.synchronize_group_mappings')
            await self.middleware.call('smb.set_passdb_backend', 'tdbsam')
        await self.middleware.call('cache.pop', 'LDAP_cache')
        await self.nslcd_cmd('stop')
        await self.set_state(DSStatus['DISABLED'])

    @private
    @job(lock='fill_ldap_cache')
    def fill_cache(self, job, force=False):
        user_next_index = group_next_index = 100000000
        cache_data = {'users': {}, 'groups': {}}

        if self.middleware.call_sync('cache.has_key',
                                     'LDAP_cache') and not force:
            raise CallError(
                'LDAP cache already exists. Refusing to generate cache.')

        self.middleware.call_sync('cache.pop', 'LDAP_cache')

        if (self.middleware.call_sync('ldap.config'))['disable_freenas_cache']:
            self.middleware.call_sync('cache.put', 'LDAP_cache', cache_data)
            self.logger.debug('LDAP cache is disabled. Bypassing cache fill.')
            return

        pwd_list = pwd.getpwall()
        grp_list = grp.getgrall()

        local_uid_list = list(u['uid']
                              for u in self.middleware.call_sync('user.query'))
        local_gid_list = list(
            g['gid'] for g in self.middleware.call_sync('group.query'))

        for u in pwd_list:
            is_local_user = u.pw_uid in local_uid_list
            if is_local_user:
                continue

            cache_data['users'].update({
                u.pw_name: {
                    'id': user_next_index,
                    'uid': u.pw_uid,
                    'username': u.pw_name,
                    'unixhash': None,
                    'smbhash': None,
                    'group': {},
                    'home': '',
                    'shell': '',
                    'full_name': u.pw_gecos,
                    'builtin': False,
                    'email': '',
                    'password_disabled': False,
                    'locked': False,
                    'sudo': False,
                    'microsoft_account': False,
                    'attributes': {},
                    'groups': [],
                    'sshpubkey': None,
                    'local': False
                }
            })
            user_next_index += 1

        for g in grp_list:
            is_local_group = g.gr_gid in local_gid_list
            if is_local_group:
                continue

            cache_data['groups'].update({
                g.gr_name: {
                    'id': group_next_index,
                    'gid': g.gr_gid,
                    'group': g.gr_name,
                    'builtin': False,
                    'sudo': False,
                    'users': [],
                    'local': False
                }
            })
            group_next_index += 1

        self.middleware.call_sync('cache.put', 'LDAP_cache', cache_data)
        self.middleware.call_sync('dscache.backup')

    @private
    async def get_cache(self):
        if not await self.middleware.call('cache.has_key', 'LDAP_cache'):
            await self.middleware.call('ldap.fill_cache')
            self.logger.debug('cache fill is in progress.')
            return {'users': {}, 'groups': {}}
        return await self.middleware.call('cache.get', 'LDAP_cache')
Exemplo n.º 25
0
class BackupService(CRUDService):
    @filterable
    async def query(self, filters=None, options=None):
        return await self.middleware.call('datastore.query', 'tasks.cloudsync',
                                          filters, options)

    async def _clean_credential(self, data):
        credential = await self.middleware.call(
            'datastore.query', 'system.cloudcredentials',
            [('id', '=', data['credential'])], {'get': True})
        assert credential is not None

        if credential['provider'] == 'AMAZON':
            data['attributes']['region'] = await self.middleware.call(
                'backup.s3.get_bucket_location', credential['id'],
                data['attributes']['bucket'])
        elif credential['provider'] in ('BACKBLAZE', 'GCLOUD'):
            # BACKBLAZE|GCLOUD needs neither validation nor extra data at this stage
            pass
        else:
            raise NotImplementedError('Invalid provider: {}'.format(
                credential['provider']))

    @accepts(
        Dict(
            'backup',
            Str('description'),
            Str('direction', enum=['PUSH', 'PULL']),
            Str('path'),
            Int('credential'),
            Str('minute'),
            Str('hour'),
            Str('daymonth'),
            Str('dayweek'),
            Str('month'),
            Dict('attributes', additional_attrs=True),
            Bool('enabled'),
            register=True,
        ))
    async def do_create(self, data):
        """
        Creates a new backup entry.

        .. examples(websocket)::

          Create a new backup task using Amazon S3 attributes, scheduled to run every hour.

            :::javascript
            {
              "id": "6841f242-840a-11e6-a437-00e04d680384",
              "msg": "method",
              "method": "backup.create",
              "params": [{
                "description": "s3 sync",
                "path": "/mnt/tank",
                "credential": 1,
                "minute": "00",
                "hour": "*",
                "daymonth": "*",
                "month": "*",
                "attributes": {
                  "bucket": "mybucket",
                  "folder": ""
                },
                "enabled": true
              }]
            }
        """
        await self._clean_credential(data)
        pk = await self.middleware.call('datastore.insert', 'tasks.cloudsync',
                                        data)
        await self.middleware.call('notifier.restart', 'cron')
        return pk

    @accepts(Int('id'),
             Patch('backup', 'backup_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Updates the backup entry `id` with `data`.
        """
        backup = await self.middleware.call(
            'datastore.query',
            'tasks.cloudsync',
            [('id', '=', id)],
            {'get': True},
        )
        assert backup is not None
        # credential is a foreign key for now
        if backup['credential']:
            backup['credential'] = backup['credential']['id']

        backup.update(data)
        await self._clean_credential(backup)
        await self.middleware.call('datastore.update', 'tasks.cloudsync', id,
                                   backup)
        await self.middleware.call('notifier.restart', 'cron')

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Deletes backup entry `id`.
        """
        await self.middleware.call('datastore.delete', 'tasks.cloudsync', id)
        await self.middleware.call('notifier.restart', 'cron')

    @item_method
    @accepts(Int('id'))
    @job(lock=lambda args: 'backup:{}'.format(args[-1]))
    async def sync(self, job, id):
        """
        Run the backup job `id`, syncing the local data to remote.
        """

        backup = await self.middleware.call('datastore.query',
                                            'tasks.cloudsync',
                                            [('id', '=', id)], {'get': True})
        if not backup:
            raise ValueError("Unknown id")

        credential = await self.middleware.call(
            'datastore.query', 'system.cloudcredentials',
            [('id', '=', backup['credential']['id'])], {'get': True})
        if not credential:
            raise ValueError("Backup credential not found.")

        if credential['provider'] == 'AMAZON':
            return await self.middleware.call('backup.s3.sync', job, backup,
                                              credential)
        elif credential['provider'] == 'BACKBLAZE':
            return await self.middleware.call('backup.b2.sync', job, backup,
                                              credential)
        elif credential['provider'] == 'GCLOUD':
            return await self.middleware.call('backup.gcs.sync', job, backup,
                                              credential)
        else:
            raise NotImplementedError('Unsupported provider: {}'.format(
                credential['provider']))
Exemplo n.º 26
0
class SharingCIFSService(CRUDService):
    class Config:
        namespace = 'sharing.cifs'
        datastore = 'sharing.cifs_share'
        datastore_prefix = 'cifs_'
        datastore_extend = 'sharing.cifs.extend'

    @accepts(
        Dict('sharingcifs_create',
             Str('path'),
             Bool('home', default=False),
             Str('name'),
             Str('comment'),
             Bool('ro', default=False),
             Bool('browsable', default=True),
             Bool('recyclebin', default=False),
             Bool('showhiddenfiles', default=False),
             Bool('guestok', default=False),
             Bool('guestonly', default=False),
             Bool('abe', default=False),
             List('hostsallow', items=[IPAddr('ip', cidr=True)]),
             List('hostsdeny', items=[IPAddr('ip', cidr=True)]),
             List('vfsobjects',
                  default=['zfs_space', 'zfsacl', 'streams_xattr']),
             Int('storage_task'),
             Str('auxsmbconf'),
             Bool('default_permissions'),
             register=True))
    async def do_create(self, data):
        verrors = ValidationErrors()
        path = data['path']

        default_perms = data.pop('default_permissions', True)

        await self.clean(data, 'sharingcifs_create', verrors)
        await self.validate(data, 'sharingcifs_create', verrors)

        await check_path_resides_within_volume(verrors, self.middleware,
                                               "sharingcifs_create.path", path)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(data)
        await self.set_storage_tasks(data)
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.extend(data)

        await self.middleware.call('service.reload', 'cifs')
        await self.apply_default_perms(default_perms, path)

        return data

    @accepts(Int('id'),
             Patch('sharingcifs_create', 'sharingcifs_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        verrors = ValidationErrors()
        path = data['path']
        default_perms = data.pop('default_permissions', False)

        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })

        new = old.copy()
        new.update(data)

        await self.clean(new, 'sharingcifs_update', verrors, id=id)
        await self.validate(new, 'sharingcifs_update', verrors, old=old)

        await check_path_resides_within_volume(verrors, self.middleware,
                                               "sharingcifs_update.path", path)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.set_storage_tasks(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.extend(new)

        await self.middleware.call('service.reload', 'cifs')
        await self.apply_default_perms(default_perms, path)

        return new

    @accepts(Int('id'))
    async def do_delete(self, id):
        return await self.middleware.call('datastore.delete',
                                          self._config.datastore, id)

    @private
    async def clean(self, data, schema_name, verrors, id=None):
        data['name'] = await self.name_exists(data, schema_name, verrors, id)

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        home_result = await self.home_exists(data['home'], schema_name,
                                             verrors, old)

        if home_result:
            verrors.add(f'{schema_name}.home',
                        'Only one share is allowed to be a home share.')
        elif not home_result and not data['path']:
            verrors.add(f'{schema_name}.path', 'This field is required.')

    @private
    async def home_exists(self, home, schema_name, verrors, old=None):
        home_filters = [('home', '=', True)]
        home_result = None

        if home:
            if old and old['id'] is not None:
                id = old['id']

                if not old['home']:
                    home_filters.append(('id', '!=', id))
                    # The user already had this set as the home share
                    home_result = await self.middleware.call(
                        'datastore.query', self._config.datastore,
                        home_filters,
                        {'prefix': self._config.datastore_prefix})

        return home_result

    @private
    async def name_exists(self, data, schema_name, verrors, id=None):
        name = data['name']
        path = data['path']
        name_filters = [('name', '=', name)]
        path_filters = [('path', '=', path)]

        if path and not name:
            name = path.rsplit('/', 1)[-1]

        if id is not None:
            name_filters.append(('id', '!=', id))
            path_filters.append(('id', '!=', id))

        name_result = await self.middleware.call(
            'datastore.query', self._config.datastore, name_filters,
            {'prefix': self._config.datastore_prefix})
        path_result = await self.middleware.call(
            'datastore.query', self._config.datastore, path_filters,
            {'prefix': self._config.datastore_prefix})

        if name_result:
            verrors.add(f'{schema_name}.name',
                        'A share with this name already exists.')

        if path_result:
            verrors.add(f'{schema_name}.path',
                        'A share with this path already exists.')

        return name

    @private
    async def extend(self, data):
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()

        return data

    @private
    async def compress(self, data):
        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])

        return data

    @private
    async def apply_default_perms(self, default_perms, path):
        if default_perms:
            try:
                (owner,
                 group) = await self.middleware.call('notifier.mp_get_owner',
                                                     path)
            except Exception:
                (owner, group) = ('root', 'wheel')

            await self.middleware.call('notifier.winacl_reset', path, owner,
                                       group)

    async def get_storage_tasks(self, path=None, home=False):
        zfs_datasets = await self.middleware.call('zfs.dataset.query')
        task_list = []
        task_dict = {}

        if path:
            for ds in zfs_datasets:
                tasks = []
                name = ds['name']
                fs_type = ds['properties']['type']['parsed']

                if fs_type != "filesystem":
                    continue

                mountpoint = ds['properties']['mountpoint']['parsed']

                if path == mountpoint or path.startswith(f'{mountpoint}/'):
                    if mountpoint == path:
                        tasks = await self.middleware.call(
                            'datastore.query', 'storage.task',
                            [['task_filesystem', '=', name]])
                    else:
                        tasks = await self.middleware.call(
                            'datastore.query', 'storage.task',
                            [['task_filesystem', '=', name],
                             ['task_recursive', '=', 'True']])

                for t in tasks:
                    task_list.append(t)
        elif home:
            task_list = await self.middleware.call(
                'datastore.query', 'storage.task',
                [['task_recursive', '=', 'True']])

        if task_list:
            for task in task_list:
                task_id = task['id']
                fs = task['task_filesystem']
                retcount = task['task_ret_count']
                retunit = task['task_ret_unit']
                _interval = task['task_interval']
                interval = dict(await self.middleware.call(
                    'notifier.choices', 'TASK_INTERVAL'))[_interval]

                msg = f'{fs} - every {interval} - {retcount}{retunit}'

                task_dict[task_id] = msg

        return task_dict

    @private
    async def set_storage_tasks(self, data):
        task = data.get('storage_task', None)
        home = data['home']
        path = data['path']
        task_list = []

        if not task:
            if path:
                task_list = await self.get_storage_tasks(path=path)
            elif home:
                task_list = await self.get_storage_tasks(home=home)

        if task_list:
            data['storage_task'] = list(task_list.keys())[0]

        return data

    @accepts()
    def vfsobjects_choices(self):
        vfs_modules_path = '/usr/local/lib/shared-modules/vfs'
        vfs_modules = []
        vfs_exclude = {'shadow_copy2', 'recycle', 'aio_pthread'}

        if os.path.exists(vfs_modules_path):
            vfs_modules.extend(
                filter(
                    lambda m: m not in vfs_exclude,
                    map(lambda f: f.rpartition('.')[0],
                        os.listdir(vfs_modules_path))))
        else:
            vfs_modules.extend(['streams_xattr'])

        return vfs_modules
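
A self-contained sketch of the filtering done by vfsobjects_choices above, run against a hypothetical directory listing instead of the real module directory.

def available_vfs_objects(filenames, exclude=('shadow_copy2', 'recycle', 'aio_pthread')):
    """Strip the extension from each module file name and drop excluded modules."""
    modules = (name.rpartition('.')[0] for name in filenames)
    return [m for m in modules if m not in exclude]


if __name__ == '__main__':
    listing = ['zfs_space.so', 'zfsacl.so', 'streams_xattr.so', 'recycle.so']
    print(available_vfs_objects(listing))
    # -> ['zfs_space', 'zfsacl', 'streams_xattr']
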
Exemplo n.º 27
0
class RsyncModService(SharingService):

    share_task_type = 'Rsync Module'

    class Config:
        datastore = 'services.rsyncmod'
        datastore_prefix = 'rsyncmod_'
        datastore_extend = 'rsyncmod.rsync_mod_extend'
        cli_namespace = 'service.rsync_mod'

    ENTRY = Patch(
        'rsyncmod_create',
        'rsyncmod_entry',
        ('add', Bool('locked')),
        ('add', Int('id')),
    )

    @private
    async def rsync_mod_extend(self, data):
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()
        data['mode'] = data['mode'].upper()
        return data

    @private
    async def common_validation(self, data, schema_name):
        verrors = ValidationErrors()

        await self.validate_path_field(data, schema_name, verrors)

        for entity in ('user', 'group'):
            value = data.get(entity)
            try:
                await self.middleware.call(f'{entity}.get_{entity}_obj',
                                           {f'{entity}name': value})
            except Exception:
                verrors.add(f'{schema_name}.{entity}',
                            f'Please specify a valid {entity}')

        verrors.check()

        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])
        data['mode'] = data['mode'].lower()

        return data

    @accepts(
        Dict(
            'rsyncmod_create',
            Bool('enabled', default=True),
            Str('name', validators=[Match(r'[^/\]]')]),
            Str('comment'),
            Str('path', required=True, max_length=RSYNC_PATH_LIMIT),
            Str('mode', enum=['RO', 'RW', 'WO'], required=True),
            Int('maxconn'),
            Str('user', default='nobody'),
            Str('group', default='nobody'),
            List('hostsallow', items=[Str('hostsallow')]),
            List('hostsdeny', items=[Str('hostdeny')]),
            Str('auxiliary', max_length=None),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a Rsyncmod module.

        `path` represents the path to a dataset. Path length is limited to a maximum of 1023 characters, the limit
        enforced by FreeBSD. Paths encountered recursively while transferring data may reach this limit; in that case,
        the user must ensure the resulting paths stay within the limit or shorten the recursed path accordingly.

        `maxconn` is an integer value representing the maximum number of simultaneous connections. Zero represents
        unlimited.

        `hostsallow` is a list of patterns to match hostname/ip address of a connecting client. If list is empty,
        all hosts are allowed.

        `hostsdeny` is a list of patterns to match hostname/ip address of a connecting client. If the pattern is
        matched, access is denied to the client. If no client should be denied, this should be left empty.

        `auxiliary` attribute can be used to pass on any additional parameters from rsyncd.conf(5).
        """

        data = await self.common_validation(data, 'rsyncmod_create')

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self._service_change('rsync', 'reload')

        return await self.get_instance(data['id'])

    @accepts(Int('id'),
             Patch('rsyncmod_create', 'rsyncmod_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update Rsyncmod module of `id`.
        """
        module = await self.get_instance(id)
        module.update(data)

        module = await self.common_validation(module, 'rsyncmod_update')
        module.pop(self.locked_field)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, module,
                                   {'prefix': self._config.datastore_prefix})

        await self._service_change('rsync', 'reload')

        return await self.get_instance(id)

    async def do_delete(self, id):
        """
        Delete Rsyncmod module of `id`.
        """
        return await self.middleware.call('datastore.delete',
                                          self._config.datastore, id)
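
The rsyncd.conf(5) rendering itself happens elsewhere in the middleware and is not shown in this example; the standalone sketch below only illustrates, with placeholder values and an invented helper, how the stored module fields could map onto a module stanza. The exact directive set used by the real template is an assumption.

def render_module(mod: dict) -> str:
    """Rough, illustrative rsyncd.conf module stanza for a stored rsyncmod row."""
    lines = [
        f'[{mod["name"]}]',
        f'    path = {mod["path"]}',
        f'    comment = {mod["comment"]}',
        f'    max connections = {mod["maxconn"]}',
        f'    uid = {mod["user"]}',
        f'    gid = {mod["group"]}',
        f'    read only = {"yes" if mod["mode"] == "ro" else "no"}',
    ]
    if mod['hostsallow']:
        lines.append(f'    hosts allow = {mod["hostsallow"]}')
    if mod['hostsdeny']:
        lines.append(f'    hosts deny = {mod["hostsdeny"]}')
    return '\n'.join(lines)


if __name__ == '__main__':
    print(render_module({
        'name': 'backups', 'path': '/mnt/tank/backups', 'comment': 'nightly drop',
        'maxconn': 0, 'user': 'nobody', 'group': 'nobody', 'mode': 'ro',
        'hostsallow': '192.168.0.0/24', 'hostsdeny': '',
    }))
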
Exemplo n.º 28
0
class SystemService(Service):
    @no_auth_required
    @accepts()
    async def is_freenas(self):
        """
        Returns `true` if running system is a FreeNAS or `false` is Something Else.
        """
        # This is a stub calling notifier until we have all infrastructure
        # to implement in middlewared
        return await self.middleware.call('notifier.is_freenas')

    @accepts()
    def version(self):
        return sw_version()

    @accepts()
    def ready(self):
        """
        Returns whether the system completed boot and is ready to use
        """
        return SYSTEM_READY

    @accepts()
    async def info(self):
        """
        Returns basic system information.
        """
        uptime = (await (await Popen(
            "env -u TZ uptime | awk -F', load averages:' '{ print $1 }'",
            stdout=subprocess.PIPE,
            shell=True,
        )).communicate())[0].decode().strip()

        serial = (await (await Popen(
            ['dmidecode', '-s', 'system-serial-number'],
            stdout=subprocess.PIPE,
        )).communicate())[0].decode().strip() or None

        product = (await (await Popen(
            ['dmidecode', '-s', 'system-product-name'],
            stdout=subprocess.PIPE,
        )).communicate())[0].decode().strip() or None

        manufacturer = (await (await Popen(
            ['dmidecode', '-s', 'system-manufacturer'],
            stdout=subprocess.PIPE,
        )).communicate())[0].decode().strip() or None

        license = get_license()[0]
        if license:
            license = {
                "system_serial": license.system_serial,
                "system_serial_ha": license.system_serial_ha,
                "contract_type":
                ContractType(license.contract_type).name.upper(),
                "contract_end": license.contract_end,
            }

        return {
            'version':
            self.version(),
            'hostname':
            socket.gethostname(),
            'physmem':
            sysctl.filter('hw.physmem')[0].value,
            'model':
            sysctl.filter('hw.model')[0].value,
            'cores':
            sysctl.filter('hw.ncpu')[0].value,
            'loadavg':
            os.getloadavg(),
            'uptime':
            uptime,
            'system_serial':
            serial,
            'system_product':
            product,
            'license':
            license,
            'boottime':
            datetime.fromtimestamp(
                struct.unpack('l',
                              sysctl.filter('kern.boottime')[0].value[:8])[0]),
            'datetime':
            datetime.utcnow(),
            'timezone':
            (await self.middleware.call('datastore.config',
                                        'system.settings'))['stg_timezone'],
            'system_manufacturer':
            manufacturer,
        }

    @accepts(
        Dict('system-reboot', Int('delay', required=False), required=False))
    @job()
    async def reboot(self, job, options=None):
        """
        Reboots the operating system.

        Emits an "added" event of name "system" and id "reboot".
        """
        if options is None:
            options = {}

        self.middleware.send_event('system',
                                   'ADDED',
                                   id='reboot',
                                   fields={
                                       'description':
                                       'System is going to reboot',
                                   })

        delay = options.get('delay')
        if delay:
            time.sleep(delay)

        await Popen(["/sbin/reboot"])

    @accepts(
        Dict('system-shutdown', Int('delay', required=False), required=False))
    @job()
    async def shutdown(self, job, options=None):
        """
        Shuts down the operating system.

        Emits an "added" event of name "system" and id "shutdown".
        """
        if options is None:
            options = {}

        self.middleware.send_event('system',
                                   'ADDED',
                                   id='shutdown',
                                   fields={
                                       'description':
                                       'System is going to shutdown',
                                   })

        delay = options.get('delay')
        if delay:
            time.sleep(delay)

        await Popen(["/sbin/poweroff"])

    @accepts()
    @job(lock='systemdebug')
    def debug(self, job):
        # FIXME: move the implementation from freenasUI
        mntpt, direc, dump = debug_get_settings()
        debug_run(direc)
        return dump
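
A standalone sketch of the kern.boottime decoding used by system.info above; the raw bytes are fabricated with struct.pack purely so the example runs without sysctl access (it assumes a 64-bit Unix where the native long is 8 bytes, as the service code above does).

import struct
from datetime import datetime

# sysctl.filter('kern.boottime')[0].value holds a struct timeval; the service
# reads the first 8 bytes (tv_sec) as a native signed long.
fake_timeval = struct.pack('l', 1600000000) + struct.pack('l', 0)
boot_seconds = struct.unpack('l', fake_timeval[:8])[0]
print(datetime.fromtimestamp(boot_seconds))  # boot time in local time, e.g. 2020-09-13 ...
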
Exemplo n.º 29
0
class BootEnvService(CRUDService):
    class Config:
        datastore_primary_key_type = 'string'
        cli_namespace = 'system.bootenv'

    BE_TOOL = 'zectl' if osc.IS_LINUX else 'beadm'
    ENTRY = Dict('bootenv_entry',
                 Str('id'),
                 Str('realname'),
                 Str('name'),
                 Str('active'),
                 Bool('activated'),
                 Bool('can_activate'),
                 Str('mountpoint'),
                 Str('space'),
                 Datetime('created'),
                 Bool('keep'),
                 Int('rawspace'),
                 additional_attrs=True)

    @filterable
    def query(self, filters, options):
        """
        Query all Boot Environments with `query-filters` and `query-options`.
        """
        results = []

        cp = subprocess.run([self.BE_TOOL, 'list', '-H'],
                            capture_output=True,
                            text=True)
        datasets_origins = [
            d['properties']['origin']['parsed']
            for d in self.middleware.call_sync(
                'zfs.dataset.query', [], {'extra': {
                    'properties': ['origin']
                }})
        ]
        boot_pool = self.middleware.call_sync('boot.pool_name')
        for line in cp.stdout.strip().split('\n'):
            fields = line.split('\t')
            name = fields[0]
            if len(fields) > 5 and fields[5] != '-':
                name = fields[5]
            be = {
                'id':
                name,
                'realname':
                fields[0],
                'name':
                name,
                'active':
                fields[1],
                'activated':
                'n' in fields[1].lower(),
                'can_activate':
                False,
                'mountpoint':
                fields[2],
                'space':
                None if osc.IS_LINUX else fields[3],
                'created':
                datetime.strptime(fields[3 if osc.IS_LINUX else 4],
                                  '%Y-%m-%d %H:%M'),
                'keep':
                False,
                'rawspace':
                None
            }

            ds = self.middleware.call_sync('zfs.dataset.query', [
                ('id', '=', rf'{boot_pool}/ROOT/{fields[0]}'),
            ], {'extra': {
                'snapshots': True
            }})
            if ds:
                ds = ds[0]
                snapshot = None
                origin = ds['properties']['origin']['parsed']
                if '@' in origin:
                    snapshot = self.middleware.call_sync(
                        'zfs.snapshot.query', [('id', '=', origin)])
                    if snapshot:
                        snapshot = snapshot[0]
                if f'{self.BE_TOOL}:keep' in ds['properties']:
                    if ds['properties'][f'{self.BE_TOOL}:keep'][
                            'value'] == 'True':
                        be['keep'] = True
                    elif ds['properties'][f'{self.BE_TOOL}:keep'][
                            'value'] == 'False':
                        be['keep'] = False

                # When a BE is deleted, the following actions happen:
                # 1) Its descendants (if any) are promoted once
                # 2) The BE is deleted
                # 3) Filesystems dependent on the BE's origin are promoted
                # 4) The origin is deleted
                #
                # Now we would like to find out the space which will be freed when a BE is removed.
                # We classify a BE as being one of 2 types:
                # 1) BE without descendants
                # 2) BE with descendants
                #
                # For (1), the space freed is the "usedbydataset" property plus the space freed by its "origin".
                # For (2), the space freed is the "usedbydataset" property plus the space freed by its "origin", but the
                # latter cannot be actively determined because all the descendants are promoted once for this BE and the
                # origin of the current BE ends up being determined by the last descendant promoted. So we ignore this
                # for now and rely only on the space it is currently consuming as a best effort to predict.
                # There is also the "usedbysnapshots" property; for that we retrieve all snapshots of the dataset and,
                # for any of them that do not have a cloned dataset, that space will also be freed when we delete
                # this dataset. We also factor in the space consumed by children.

                be['rawspace'] = ds['properties']['usedbydataset'][
                    'parsed'] + ds['properties']['usedbychildren']['parsed']

                children = False
                for snap in ds['snapshots']:
                    if snap['name'] not in datasets_origins:
                        be['rawspace'] += self.middleware.call_sync(
                            'zfs.snapshot.query', [['id', '=', snap['name']]],
                            {'extra': {'properties': ['used']}},
                        )[0]['properties']['used']['parsed']
                    else:
                        children = True

                if snapshot and not children:
                    # This indicates the current BE is a leaf and it is safe to add the BE's origin
                    # space to the space freed when it is deleted.
                    be['rawspace'] += snapshot['properties']['used']['parsed']

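                # Convert the raw byte count into a human-readable string using binary (1024-based) units;
                # a standalone sketch of this conversion appears after this example.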
                if be['rawspace'] < 1024:
                    be['space'] = f'{be["rawspace"]}B'
                elif 1024 <= be['rawspace'] < 1048576:
                    be['space'] = f'{be["rawspace"] / 1024}K'
                elif 1048576 <= be['rawspace'] < 1073741824:
                    be['space'] = f'{be["rawspace"] / 1048576}M'
                elif 1073741824 <= be['rawspace'] < 1099511627776:
                    be['space'] = f'{be["rawspace"] / 1073741824}G'
                elif 1099511627776 <= be['rawspace'] < 1125899906842624:
                    be['space'] = f'{be["rawspace"] / 1099511627776}T'
                elif 1125899906842624 <= be['rawspace'] < 1152921504606846976:
                    be['space'] = f'{be["rawspace"] / 1125899906842624}P'
                elif 1152921504606846976 <= be['rawspace'] < 1180591620717411303424:
                    be['space'] = f'{be["rawspace"] / 1152921504606846976}E'
                else:
                    be['space'] = f'{be["rawspace"] / 1180591620717411303424}Z'

                be['space'] = f'{round(float(be["space"][:-1]), 2)}{be["space"][-1]}'
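                # Illustrative check (not from the source): rawspace = 1610612736 falls in the GiB bucket,
                # 1610612736 / 1073741824 = 1.5, so be['space'] becomes '1.5G'.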

                if osc.IS_FREEBSD:
                    be['can_activate'] = 'truenas:kernel_version' not in ds['properties']
                if osc.IS_LINUX:
                    be['can_activate'] = (
                        'truenas:kernel_version' in ds['properties'] or
                        'truenas:12' in ds['properties']
                    )

            results.append(be)
        return filter_list(results, filters, options)

    @item_method
    @accepts(Str('id'))
    @returns(Bool('successfully_activated'))
    def activate(self, oid):
        """
        Activates boot environment `id`.
        """
        be = self.middleware.call_sync('bootenv.query', [['id', '=', oid]],
                                       {'get': True})
        if not be['can_activate']:
            raise CallError('This BE cannot be activated')

        try:
            subprocess.run([self.BE_TOOL, 'activate', oid],
                           capture_output=True,
                           text=True,
                           check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to activate BE: {cpe.stdout.strip()}')
        else:
            return True

    @item_method
    @accepts(Str('id'), Dict(
        'attributes',
        Bool('keep', default=False),
    ))
    @returns(Bool('successfully_set_attribute'))
    async def set_attribute(self, oid, attrs):
        """
        Sets attributes for boot environment `id`.

        Currently only the `keep` attribute is allowed.
        """
        boot_pool = await self.middleware.call('boot.pool_name')
        boot_env = await self.get_instance(oid)
        dsname = f'{boot_pool}/ROOT/{boot_env["realname"]}'
        ds = await self.middleware.call('zfs.dataset.query',
                                        [('id', '=', dsname)])
        if not ds:
            raise CallError(f'BE {oid!r} does not exist.', errno.ENOENT)
        await self.middleware.call(
            'zfs.dataset.update', dsname, {
                'properties': {
                    f'{self.BE_TOOL}:keep': {
                        'value': str(attrs['keep'])
                    }
                },
            })
        return True

    @accepts(
        Dict(
            'bootenv_create',
            Str('name', required=True, validators=[Match(RE_BE_NAME)]),
            Str('source'),
        ))
    @returns(Str('bootenv_name'))
    async def do_create(self, data):
        """
        Create a new boot environment using `name`.

        To create a boot environment that is a clone of another boot environment, pass `source`.
        A new boot environment named `name` is then created by cloning boot environment `source`.

        Ensure that `name` and `source` are valid boot environment names.
        """
        verrors = ValidationErrors()
        await self._clean_be_name(verrors, 'bootenv_create', data['name'])
        verrors.check()

        args = [self.BE_TOOL, 'create']
        source = data.get('source')
        if source:
            args += [
                '-e',
                os.path.join(await self.middleware.call('boot.pool_name'),
                             'ROOT', source) if osc.IS_LINUX else source
            ]
        args.append(data['name'])
        try:
            await run(args, encoding='utf8', check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to create boot environment: {cpe.stdout}')
        return data['name']

    @accepts(Str('id'),
             Dict(
                 'bootenv_update',
                 Str('name', required=True, validators=[Match(RE_BE_NAME)]),
             ))
    @returns(Str('bootenv_name'))
    async def do_update(self, oid, data):
        """
        Update the name of boot environment `id` to the provided valid `name`.
        """
        await self._get_instance(oid)

        verrors = ValidationErrors()
        await self._clean_be_name(verrors, 'bootenv_update', data['name'])
        verrors.check()

        try:
            await run(self.BE_TOOL, 'rename', oid, data['name'], encoding='utf8', check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to update boot environment: {cpe.stdout}')
        return data['name']

    async def _clean_be_name(self, verrors, schema, name):
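        # Collect existing boot environment names from the BE tool's tabular output
        # (name is column 1 on Linux, column 7 on FreeBSD) so duplicate names can be rejected.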
        beadm_names = (await (await Popen(
            f"{self.BE_TOOL} list -H | awk '{{print ${1 if osc.IS_LINUX else 7}}}'",
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )).communicate())[0].decode().split('\n')
        if name in filter(None, beadm_names):
            verrors.add(f'{schema}.name', f'The name "{name}" already exists',
                        errno.EEXIST)

    @accepts(Str('id'))
    @job(lock=lambda args: f'bootenv_delete_{args[0]}')
    async def do_delete(self, job, oid):
        """
        Delete boot environment `id`. This removes the clone from the system.
        """
        be = await self._get_instance(oid)
        try:
            await run(self.BE_TOOL, 'destroy', '-F', be['id'], encoding='utf8', check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to delete boot environment: {cpe.stdout}')
        return True
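
# The size-reporting ladder in the boot environment query above maps a raw byte count onto
# binary (1024-based) units. Below is a minimal standalone sketch of the same conversion
# written as a loop; the function name and unit table are illustrative, not part of the service.
def human_readable_size(rawspace):
    units = ['B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
    value = float(rawspace)
    for unit in units:
        # Stop once the value fits in the current unit, or when no larger unit is left.
        if value < 1024 or unit == units[-1]:
            return f'{round(value, 2)}{unit}'
        value /= 1024


# Example: human_readable_size(1073741824) == '1.0G'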
Exemplo n.º 30
0
class VMService(Service):

    async def __next_clone_name(self, name):
        vm_names = [
            i['name']
            for i in await self.middleware.call('vm.query', [
                ('name', '~', rf'{name}{ZVOL_CLONE_SUFFIX}\d+')
            ])
        ]
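        # Find the lowest unused clone index; e.g. if 'vm1_clone0' and 'vm1_clone1' already exist,
        # this yields 'vm1_clone2' (illustrative, assuming ZVOL_CLONE_SUFFIX is the literal '_clone').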
        clone_index = 0
        while True:
            clone_name = f'{name}{ZVOL_CLONE_SUFFIX}{clone_index}'
            if clone_name not in vm_names:
                break
            clone_index += 1
        return clone_name

    async def __clone_zvol(self, name, zvol, created_snaps, created_clones):
        if not await self.middleware.call('zfs.dataset.query', [('id', '=', zvol)]):
            raise CallError(f'zvol {zvol} does not exist.', errno.ENOENT)

        snapshot_name = name
        i = 0
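        # Pick a snapshot name that does not collide with an existing snapshot of this zvol,
        # rewriting or appending the clone suffix until a free name is found.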
        while True:
            zvol_snapshot = f'{zvol}@{snapshot_name}'
            if await self.middleware.call('zfs.snapshot.query', [('id', '=', zvol_snapshot)]):
                if ZVOL_CLONE_RE.search(snapshot_name):
                    snapshot_name = ZVOL_CLONE_RE.sub(rf'\1{ZVOL_CLONE_SUFFIX}{i}', snapshot_name)
                else:
                    snapshot_name = f'{name}{ZVOL_CLONE_SUFFIX}{i}'
                i += 1
                continue
            break

        await self.middleware.call('zfs.snapshot.create', {'dataset': zvol, 'name': snapshot_name})
        created_snaps.append(zvol_snapshot)

        clone_suffix = name
        i = 0
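        # Likewise, pick a clone destination dataset name that does not collide with an existing dataset.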
        while True:
            clone_dst = f'{zvol}_{clone_suffix}'
            if await self.middleware.call('zfs.dataset.query', [('id', '=', clone_dst)]):
                if ZVOL_CLONE_RE.search(clone_suffix):
                    clone_suffix = ZVOL_CLONE_RE.sub(rf'\1{ZVOL_CLONE_SUFFIX}{i}', clone_suffix)
                else:
                    clone_suffix = f'{name}{ZVOL_CLONE_SUFFIX}{i}'
                i += 1
                continue
            break

        await self.middleware.call('zfs.snapshot.clone', {'snapshot': zvol_snapshot, 'dataset_dst': clone_dst})

        created_clones.append(clone_dst)

        return clone_dst

    @item_method
    @accepts(Int('id'), Str('name', default=None))
    @returns(Bool())
    async def clone(self, id, name):
        """
        Clone the VM `id`.

        `name` is an optional parameter for the cloned VM.
        If not provided, the next available clone number is appended to the VM name.
        """
        vm = await self.middleware.call('vm.get_instance', id)

        origin_name = vm['name']
        del vm['id']
        del vm['status']
        devices = vm.pop('devices')

        vm['name'] = await self.__next_clone_name(vm['name'])
        vm['uuid'] = str(uuid.uuid4())  # We want to use a new uuid here as it is supposed to be unique per VM

        if name is not None:
            vm['name'] = name

        # In case we need to rollback
        created_snaps = []
        created_clones = []
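        # Snapshots and clones created below are recorded so they can be destroyed in reverse
        # order if VM or device creation fails part-way through.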
        try:
            new_vm = await self.middleware.call('vm.do_create', vm)

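            # Re-create each device on the new VM: NICs drop their MAC, DISPLAY devices get a new
            # port, DISK zvols are cloned, and RAW paths are cleared (they must be copied manually).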
            for item in devices:
                item.pop('id', None)
                item['vm'] = new_vm['id']
                if item['dtype'] == 'NIC':
                    if 'mac' in item['attributes']:
                        del item['attributes']['mac']
                if item['dtype'] == 'DISPLAY':
                    if 'port' in item['attributes']:
                        dev_dict = await self.middleware.call('vm.port_wizard')
                        item['attributes']['port'] = dev_dict['port']
                if item['dtype'] == 'DISK':
                    zvol = zvol_path_to_name(item['attributes']['path'])
                    item['attributes']['path'] = zvol_name_to_path(
                        await self.__clone_zvol(vm['name'], zvol, created_snaps, created_clones)
                    )
                if item['dtype'] == 'RAW':
                    item['attributes']['path'] = ''
                    self.logger.warning('For a RAW disk, you need to copy it manually inside your NAS.')

                await self.middleware.call('vm.device.create', item)
        except Exception as e:
            for i in reversed(created_clones):
                try:
                    await self.middleware.call('zfs.dataset.delete', i)
                except Exception:
                    self.logger.warning('Rollback of VM clone left dangling zvol: %s', i)
            for i in reversed(created_snaps):
                try:
                    dataset, snap = i.split('@')
                    await self.middleware.call('zfs.snapshot.remove', {
                        'dataset': dataset,
                        'name': snap,
                        'defer_delete': True,
                    })
                except Exception:
                    self.logger.warning('Rollback of VM clone left dangling snapshot: %s', i)
            raise e
        self.logger.info('VM cloned from %s to %s', origin_name, vm['name'])

        return True