Example #1
class DatastoreService(Service):
    def _filters_to_queryset(self, filters, field_prefix=None):
        opmap = {
            '=': 'exact',
            '!=': 'exact',
            '>': 'gt',
            '>=': 'gte',
            '<': 'lt',
            '<=': 'lte',
            '~': 'regex',
            'in': 'in',
            'nin': 'in',
        }

        rv = []
        for f in filters:
            if not isinstance(f, (list, tuple)):
                raise ValueError('Filter must be a list: {0}'.format(f))
            if len(f) == 3:
                name, op, value = f
                # id is special
                if field_prefix and name != 'id':
                    name = field_prefix + name
                if op not in opmap:
                    raise Exception("Invalid operation: {0}".format(op))
                q = Q(**{'{0}__{1}'.format(name, opmap[op]): value})
                if op in ('!=', 'nin'):
                    q.negate()
                rv.append(q)
            elif len(f) == 2:
                op, value = f
                if op == 'OR':
                    or_value = None
                    for value in self._filters_to_queryset(
                            value, field_prefix=field_prefix):
                        if or_value is None:
                            or_value = value
                        else:
                            or_value |= value
                    rv.append(or_value)
                else:
                    raise ValueError('Invalid operation: {0}'.format(op))
            else:
                raise Exception("Invalid filter {0}".format(f))
        return rv

    def __get_model(self, name):
        """Helper method to get Model for given name
        e.g. network.interfaces -> Interfaces
        """
        app, model = name.split('.', 1)
        return apps.get_model(app, model)

    async def __queryset_serialize(self, qs, extend=None, field_prefix=None):
        result = await self.middleware.threaded(lambda: list(qs))
        for i in result:
            yield await django_modelobj_serialize(self.middleware,
                                                  i,
                                                  extend=extend,
                                                  field_prefix=field_prefix)

    @accepts(
        Str('name'),
        List('query-filters', register=True),
        Dict(
            'query-options',
            Str('extend'),
            Dict('extra', additional_attrs=True),
            List('order_by'),
            Bool('count'),
            Bool('get'),
            Str('prefix'),
            register=True,
        ),
    )
    async def query(self, name, filters=None, options=None):
        """Query for items in a given collection `name`.

        `filters` is a list in which each entry can be in one of the following formats:

            entry: simple_filter | conjunction
            simple_filter: '[' attribute_name, OPERATOR, value ']'
            conjunction: '[' CONJUNCTION, '[' simple_filter (',' simple_filter)* ']]'

            OPERATOR: ('=' | '!=' | '>' | '>=' | '<' | '<=' | '~' | 'in' | 'nin')
            CONJUNCTION: 'OR'

        e.g.

        `['OR', [ ['username', '=', 'root' ], ['uid', '=', 0] ] ]`

        `[ ['username', '=', 'root' ] ]`

        .. examples(websocket)::

          Querying for username "root" and returning a single item:

            :::javascript
            {
              "id": "d51da71b-bb48-4b8b-a8f7-6046fcc892b4",
              "msg": "method",
              "method": "datastore.query",
              "params": ["account.bsdusers", [ ["username", "=", "root" ] ], {"get": true}]
            }
        """
        model = self.__get_model(name)
        if options is None:
            options = {}
        else:
            # We do not want to make changes to original options
            # which might happen with "prefix"
            options = options.copy()

        qs = model.objects.all()

        extra = options.get('extra')
        if extra:
            qs = qs.extra(**extra)

        prefix = options.get('prefix')

        if filters:
            qs = qs.filter(*self._filters_to_queryset(filters, prefix))

        order_by = options.get('order_by')
        if order_by:
            if prefix:
                # Do not change original order_by
                order_by = order_by[:]
                for i, order in enumerate(order_by):
                    if order.startswith('-'):
                        order_by[i] = '-' + prefix + order[1:]
                    else:
                        order_by[i] = prefix + order
            qs = qs.order_by(*order_by)

        if options.get('count') is True:
            return qs.count()

        result = []
        async for i in self.__queryset_serialize(
                qs,
                extend=options.get('extend'),
                field_prefix=options.get('prefix')):
            result.append(i)

        if options.get('get') is True:
            return result[0]

        return result

    @accepts(Str('name'), Ref('query-options'))
    async def config(self, name, options=None):
        """
        Get configuration settings object for a given `name`.

        This is a shortcut for `query(name, {"get": true})`.
        """
        if options is None:
            options = {}
        options['get'] = True
        return await self.query(name, None, options)

    @accepts(Str('name'), Dict('data', additional_attrs=True),
             Dict('options', Str('prefix')))
    async def insert(self, name, data, options=None):
        """
        Insert a new entry to `name`.
        """
        data = data.copy()
        options = options or {}
        prefix = options.get('prefix')
        model = self.__get_model(name)
        for field in model._meta.fields:
            if prefix:
                name = field.name.replace(prefix, '')
            else:
                name = field.name
            if name not in data:
                continue
            if isinstance(field, ForeignKey):
                data[name] = field.rel.to.objects.get(pk=data[name])
        if prefix:
            for k, v in list(data.items()):
                k_new = f'{prefix}{k}'
                data[k_new] = data.pop(k)
        obj = model(**data)
        await self.middleware.threaded(obj.save)
        return obj.pk

    @accepts(Str('name'), Any('id'), Dict('data', additional_attrs=True),
             Dict('options', Str('prefix')))
    async def update(self, name, id, data, options=None):
        """
        Update an entry `id` in `name`.
        """
        data = data.copy()
        options = options or {}
        prefix = options.get('prefix')
        model = self.__get_model(name)
        obj = await self.middleware.threaded(
            lambda oid: model.objects.get(pk=oid), id)
        for field in model._meta.fields:
            if prefix:
                name = field.name.replace(prefix, '')
            else:
                name = field.name
            if name not in data:
                continue
            if isinstance(field, ForeignKey):
                data[name] = field.rel.to.objects.get(pk=data[name])
        for k, v in list(data.items()):
            if prefix:
                k = f'{prefix}{k}'
            setattr(obj, k, v)
        await self.middleware.threaded(obj.save)
        return obj.pk

    @accepts(Str('name'), Any('id'))
    async def delete(self, name, id):
        """
        Delete an entry `id` in `name`.
        """
        model = self.__get_model(name)
        await self.middleware.threaded(
            lambda oid: model.objects.get(pk=oid).delete(), id)
        return True

    @private
    def sql(self, query, params=None):
        cursor = connection.cursor()
        rv = None
        try:
            if params is None:
                cursor.executelocal(query)
            else:
                cursor.executelocal(query, params)
            rv = cursor.fetchall()
        finally:
            cursor.close()
        return rv

    @private
    @accepts(List('queries'))
    def restore(self, queries):
        """
        Receives a list of SQL queries (usually a database dump)
        and executes it within a transaction.
        """
        return connection.dump_recv(queries)

    @private
    @accepts()
    def dump(self):
        """
        Dumps the database, returning a list of SQL commands.
        """
        # FIXME: This could return a few hundred KB of data,
        # we need to investigate a way of doing that in chunks.
        return connection.dump()
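
A brief illustrative note (not part of the service above): the query-filters grammar documented in query() is translated into Django Q objects by _filters_to_queryset. The payload below is a hypothetical sketch of that mapping.

# Hypothetical query-filters payload. Each three-element entry becomes a Django Q
# lookup through opmap ('=' -> exact, '>' -> gt, ...), '!=' and 'nin' negate the Q,
# and an ['OR', [...]] entry OR-combines its children, so this list roughly yields:
#     [Q(username__exact='root') | Q(uid__exact=0), ~Q(builtin__exact=True)]
filters = [
    ['OR', [
        ['username', '=', 'root'],
        ['uid', '=', 0],
    ]],
    ['builtin', '!=', True],
]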
Example #2
class CloudSyncService(TaskPathService):

    local_fs_lock_manager = FsLockManager()
    remote_fs_lock_manager = FsLockManager()
    share_task_type = 'CloudSync'

    class Config:
        datastore = "tasks.cloudsync"
        datastore_extend = "cloudsync.extend"

    @filterable
    async def query(self, filters=None, options=None):
        """
        Query all Cloud Sync Tasks with `query-filters` and `query-options`.
        """
        tasks_or_task = await super().query(filters, options)

        jobs = {}
        for j in await self.middleware.call(
                "core.get_jobs",
            [('OR', [("method", "=", "cloudsync.sync"),
                     ("method", "=", "cloudsync.restore")])],
            {"order_by": ["id"]}):
            try:
                task_id = int(j["arguments"][0])
            except (IndexError, ValueError):
                continue

            if task_id in jobs and jobs[task_id]["state"] == "RUNNING":
                continue

            jobs[task_id] = j

        if isinstance(tasks_or_task, list):
            for task in tasks_or_task:
                task["job"] = jobs.get(task["id"])
        else:
            tasks_or_task["job"] = jobs.get(tasks_or_task["id"])

        return tasks_or_task

    @private
    async def extend(self, cloud_sync):
        cloud_sync["credentials"] = cloud_sync.pop("credential")

        Cron.convert_db_format_to_schedule(cloud_sync)

        return cloud_sync

    @private
    async def _compress(self, cloud_sync):
        cloud_sync["credential"] = cloud_sync.pop("credentials")

        Cron.convert_schedule_to_db_format(cloud_sync)

        cloud_sync.pop('job', None)
        cloud_sync.pop(self.locked_field, None)

        return cloud_sync

    @private
    async def _get_credentials(self, credentials_id):
        try:
            return await self.middleware.call("datastore.query",
                                              "system.cloudcredentials",
                                              [("id", "=", credentials_id)],
                                              {"get": True})
        except IndexError:
            return None

    @private
    async def _basic_validate(self, verrors, name, data):
        if data["encryption"]:
            if not data["encryption_password"]:
                verrors.add(
                    f"{name}.encryption_password",
                    "This field is required when encryption is enabled")

        credentials = await self._get_credentials(data["credentials"])
        if not credentials:
            verrors.add(f"{name}.credentials", "Invalid credentials")

        try:
            shlex.split(data["args"])
        except ValueError as e:
            verrors.add(f"{name}.args", f"Parse error: {e.args[0]}")

        if verrors:
            raise verrors

        provider = REMOTES[credentials["provider"]]

        schema = []

        if provider.buckets:
            schema.append(Str("bucket", required=True, empty=False))

        schema.append(Str("folder", required=True))

        schema.extend(provider.task_schema)

        schema.extend(self.common_task_schema(provider))

        attributes_verrors = validate_attributes(schema,
                                                 data,
                                                 additional_attrs=True)

        if not attributes_verrors:
            await provider.pre_save_task(data, credentials, verrors)

        verrors.add_child(f"{name}.attributes", attributes_verrors)

    @private
    async def _validate(self, verrors, name, data):
        await self._basic_validate(verrors, name, data)

        for i, (limit1,
                limit2) in enumerate(zip(data["bwlimit"],
                                         data["bwlimit"][1:])):
            if limit1["time"] >= limit2["time"]:
                verrors.add(
                    f"{name}.bwlimit.{i + 1}.time",
                    f"Invalid time order: {limit1['time']}, {limit2['time']}")

        await self.validate_path_field(data, name, verrors)

        if data["snapshot"]:
            if data["direction"] != "PUSH":
                verrors.add(f"{name}.snapshot",
                            "This option can only be enabled for PUSH tasks")
            if data["transfer_mode"] == "MOVE":
                verrors.add(
                    f"{name}.snapshot",
                    "This option can not be used for MOVE transfer mode")
            if await self.middleware.call(
                    "pool.dataset.query",
                [["name", "^",
                  os.path.relpath(data["path"], "/mnt") + "/"],
                 ["type", "=", "FILESYSTEM"]]):
                verrors.add(
                    f"{name}.snapshot",
                    "This option is only available for datasets that have no further "
                    "nesting")

    @private
    async def _validate_folder(self, verrors, name, data):
        if data["direction"] == "PULL":
            folder = data["attributes"]["folder"].rstrip("/")
            if folder:
                folder_parent = os.path.normpath(os.path.join(folder, ".."))
                if folder_parent == ".":
                    folder_parent = ""
                folder_basename = os.path.basename(folder)
                ls = await self.list_directory(
                    dict(
                        credentials=data["credentials"],
                        encryption=data["encryption"],
                        filename_encryption=data["filename_encryption"],
                        encryption_password=data["encryption_password"],
                        encryption_salt=data["encryption_salt"],
                        attributes=dict(data["attributes"],
                                        folder=folder_parent),
                        args=data["args"],
                    ))
                for item in ls:
                    if item["Name"] == folder_basename:
                        if not item["IsDir"]:
                            verrors.add(f"{name}.attributes.folder",
                                        "This is not a directory")
                        break
                else:
                    verrors.add(f"{name}.attributes.folder",
                                "Directory does not exist")

        if data["direction"] == "PUSH":
            credentials = await self._get_credentials(data["credentials"])

            provider = REMOTES[credentials["provider"]]

            if provider.readonly:
                verrors.add(f"{name}.direction", "This remote is read-only")

    @accepts(
        Dict(
            "cloud_sync_create",
            Str("description", default=""),
            Str("direction", enum=["PUSH", "PULL"], required=True),
            Str("transfer_mode", enum=["SYNC", "COPY", "MOVE"], required=True),
            Str("path", required=True),
            Int("credentials", required=True),
            Bool("encryption", default=False),
            Bool("filename_encryption", default=False),
            Str("encryption_password", default=""),
            Str("encryption_salt", default=""),
            Cron("schedule", defaults={"minute": "00"}, required=True),
            Bool("follow_symlinks", default=False),
            Int("transfers",
                null=True,
                default=None,
                validators=[Range(min=1)]),
            List("bwlimit",
                 default=[],
                 items=[
                     Dict(
                         "cloud_sync_bwlimit", Str("time",
                                                   validators=[Time()]),
                         Int("bandwidth", validators=[Range(min=1)],
                             null=True))
                 ]),
            List("exclude", default=[], items=[Str("path", empty=False)]),
            Dict("attributes", additional_attrs=True, required=True),
            Bool("snapshot", default=False),
            Str("pre_script", default="", max_length=None),
            Str("post_script", default="", max_length=None),
            Str("args", default="", max_length=None),
            Bool("enabled", default=True),
            register=True,
        ))
    async def do_create(self, cloud_sync):
        """
        Creates a new cloud_sync entry.

        .. examples(websocket)::

          Create a new cloud_sync task using Amazon S3 attributes, scheduled to run every hour.

            :::javascript
            {
              "id": "6841f242-840a-11e6-a437-00e04d680384",
              "msg": "method",
              "method": "cloudsync.create",
              "params": [{
                "description": "s3 sync",
                "path": "/mnt/tank",
                "credentials": 1,
                "minute": "00",
                "hour": "*",
                "daymonth": "*",
                "month": "*",
                "attributes": {
                  "bucket": "mybucket",
                  "folder": ""
                },
                "enabled": true
              }]
            }
        """

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        cloud_sync["id"] = await self.middleware.call("datastore.insert",
                                                      "tasks.cloudsync",
                                                      cloud_sync)
        await self.middleware.call("service.restart", "cron")

        cloud_sync = await self.extend(cloud_sync)
        return cloud_sync

    @accepts(Int("id"),
             Patch("cloud_sync_create", "cloud_sync_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Updates the cloud_sync entry `id` with `data`.
        """
        cloud_sync = await self.get_instance(id)

        # credentials is a foreign key for now
        if cloud_sync["credentials"]:
            cloud_sync["credentials"] = cloud_sync["credentials"]["id"]

        cloud_sync.update(data)

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        await self.middleware.call("datastore.update", "tasks.cloudsync", id,
                                   cloud_sync)
        await self.middleware.call("service.restart", "cron")

        return await self.get_instance(id)

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Deletes cloud_sync entry `id`.
        """
        await self.middleware.call("cloudsync.abort", id)
        await self.middleware.call("datastore.delete", "tasks.cloudsync", id)
        await self.middleware.call("alert.oneshot_delete",
                                   "CloudSyncTaskFailed", id)
        await self.middleware.call("service.restart", "cron")

    @accepts(Int("credentials_id"))
    async def list_buckets(self, credentials_id):
        credentials = await self._get_credentials(credentials_id)
        if not credentials:
            raise CallError("Invalid credentials")

        provider = REMOTES[credentials["provider"]]

        if not provider.buckets:
            raise CallError("This provider does not use buckets")

        return await self.ls({"credentials": credentials}, "")

    @accepts(
        Dict(
            "cloud_sync_ls",
            Int("credentials", required=True),
            Bool("encryption", default=False),
            Bool("filename_encryption", default=False),
            Str("encryption_password", default=""),
            Str("encryption_salt", default=""),
            Dict("attributes", required=True, additional_attrs=True),
            Str("args", default=""),
        ))
    async def list_directory(self, cloud_sync):
        """
        List contents of a remote bucket / directory.

        If the remote supports buckets, the path is constructed from the "bucket" and "folder" keys in `attributes`.
        If the remote does not support buckets, the path is constructed from the "folder" key only.
        "folder" is the directory name and "bucket" is the bucket name on the remote.

        Path examples:

        S3 Service
        `bucketname/directory/name`

        Dropbox Service
        `directory/name`

        `credentials` is a valid id of a Cloud Sync Credential which will be used to connect to the provider.
        """
        verrors = ValidationErrors()

        await self._basic_validate(verrors, "cloud_sync", dict(cloud_sync))

        if verrors:
            raise verrors

        credentials = await self._get_credentials(cloud_sync["credentials"])

        path = get_remote_path(REMOTES[credentials["provider"]],
                               cloud_sync["attributes"])

        return await self.ls(dict(cloud_sync, credentials=credentials), path)

    @private
    async def ls(self, config, path):
        decrypt_filenames = config.get("encryption") and config.get(
            "filename_encryption")
        async with RcloneConfig(config) as config:
            proc = await run([
                "rclone", "--config", config.config_path, "lsjson",
                "remote:" + path
            ],
                             check=False,
                             encoding="utf8",
                             errors="ignore")
            if proc.returncode == 0:
                result = json.loads(proc.stdout)

                if decrypt_filenames:
                    if result:
                        decrypted_names = {}
                        proc = await run(([
                            "rclone", "--config", config.config_path,
                            "cryptdecode", "encrypted:"
                        ] + [item["Name"] for item in result]),
                                         check=False,
                                         encoding="utf8",
                                         errors="ignore")
                        for line in proc.stdout.splitlines():
                            try:
                                encrypted, decrypted = line.rstrip(
                                    "\r\n").split(" \t ", 1)
                            except ValueError:
                                continue

                            if decrypted != "Failed to decrypt":
                                decrypted_names[encrypted] = decrypted

                        for item in result:
                            if item["Name"] in decrypted_names:
                                item["Decrypted"] = decrypted_names[
                                    item["Name"]]

                return result
            else:
                raise CallError(
                    proc.stderr,
                    extra={"excerpt": lsjson_error_excerpt(proc.stderr)})

    @item_method
    @accepts(Int("id"),
             Dict(
                 "cloud_sync_sync_options",
                 Bool("dry_run", default=False),
                 register=True,
             ))
    @job(lock=lambda args: "cloud_sync:{}".format(args[-1]),
         lock_queue_size=1,
         logs=True)
    async def sync(self, job, id, options):
        """
        Run the cloud_sync job `id`, syncing the local data to remote.
        """

        cloud_sync = await self.get_instance(id)
        if cloud_sync['locked']:
            await self.middleware.call('cloudsync.generate_locked_alert', id)
            return

        await self._sync(cloud_sync, options, job)

    @accepts(
        Patch("cloud_sync_create", "cloud_sync_sync_onetime"),
        Patch("cloud_sync_sync_options", "cloud_sync_sync_onetime_options"),
    )
    @job(logs=True)
    async def sync_onetime(self, job, cloud_sync, options):
        """
        Run cloud sync task without creating it.
        """
        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync_sync_onetime", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync_sync_onetime",
                                    cloud_sync)

        if verrors:
            raise verrors

        cloud_sync["credentials"] = await self._get_credentials(
            cloud_sync["credentials"])

        await self._sync(cloud_sync, options, job)

    async def _sync(self, cloud_sync, options, job):
        credentials = cloud_sync["credentials"]

        local_path = cloud_sync["path"]
        local_direction = FsLockDirection.READ if cloud_sync[
            "direction"] == "PUSH" else FsLockDirection.WRITE

        remote_path = get_remote_path(REMOTES[credentials["provider"]],
                                      cloud_sync["attributes"])
        remote_direction = FsLockDirection.READ if cloud_sync[
            "direction"] == "PULL" else FsLockDirection.WRITE

        directions = {
            FsLockDirection.READ: "reading",
            FsLockDirection.WRITE: "writing",
        }

        job.set_progress(
            0,
            f"Locking local path {local_path!r} for {directions[local_direction]}"
        )
        async with self.local_fs_lock_manager.lock(local_path,
                                                   local_direction):
            job.set_progress(
                0,
                f"Locking remote path {remote_path!r} for {directions[remote_direction]}"
            )
            async with self.remote_fs_lock_manager.lock(
                    f"{credentials['id']}/{remote_path}", remote_direction):
                job.set_progress(0, "Starting")
                try:
                    await rclone(self.middleware, job, cloud_sync,
                                 options["dry_run"])
                    if "id" in cloud_sync:
                        await self.middleware.call("alert.oneshot_delete",
                                                   "CloudSyncTaskFailed",
                                                   cloud_sync["id"])
                except Exception:
                    if "id" in cloud_sync:
                        await self.middleware.call(
                            "alert.oneshot_create", "CloudSyncTaskFailed", {
                                "id": cloud_sync["id"],
                                "name": cloud_sync["description"],
                            })
                    raise

    @item_method
    @accepts(Int("id"))
    async def abort(self, id):
        """
        Aborts cloud sync task.
        """

        cloud_sync = await self._get_instance(id)

        if cloud_sync["job"] is None:
            return False

        if cloud_sync["job"]["state"] not in ["WAITING", "RUNNING"]:
            return False

        await self.middleware.call("core.job_abort", cloud_sync["job"]["id"])
        return True

    @accepts()
    async def providers(self):
        """
        Returns a list of dictionaries of supported providers for Cloud Sync Tasks.

        `credentials_schema` is JSON schema for credentials attributes.

        `task_schema` is JSON schema for task attributes.

        `buckets` is a boolean value which is set to `true` if the provider supports buckets.

        Example of a single provider:

        [
            {
                "name": "AMAZON_CLOUD_DRIVE",
                "title": "Amazon Cloud Drive",
                "credentials_schema": [
                    {
                        "property": "client_id",
                        "schema": {
                            "title": "Amazon Application Client ID",
                            "_required_": true,
                            "type": "string"
                        }
                    },
                    {
                        "property": "client_secret",
                        "schema": {
                            "title": "Application Key",
                            "_required_": true,
                            "type": "string"
                        }
                    }
                ],
                "credentials_oauth": null,
                "buckets": false,
                "bucket_title": "Bucket",
                "task_schema": []
            }
        ]
        """
        return sorted([{
            "name":
            provider.name,
            "title":
            provider.title,
            "credentials_schema": [{
                "property": field.name,
                "schema": field.to_json_schema()
            } for field in provider.credentials_schema],
            "credentials_oauth":
            f"{OAUTH_URL}/{provider.name.lower()}"
            if provider.credentials_oauth else None,
            "buckets":
            provider.buckets,
            "bucket_title":
            provider.bucket_title,
            "task_schema": [{
                "property": field.name,
                "schema": field.to_json_schema()
            } for field in provider.task_schema +
                            self.common_task_schema(provider)],
        } for provider in REMOTES.values()],
                      key=lambda provider: provider["title"].lower())

    def common_task_schema(self, provider):
        schema = []

        if provider.fast_list:
            schema.append(
                Bool("fast_list",
                     default=False,
                     title="Use --fast-list",
                     description=textwrap.dedent("""\
                Use fewer transactions in exchange for more RAM. This may also speed up or slow down your
                transfer. See [rclone documentation](https://rclone.org/docs/#fast-list) for more details.
            """).rstrip()))

        return schema
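
An illustrative, standalone sketch (with hypothetical sample data, not part of the service above) of the pairwise bwlimit ordering check performed in _validate(): consecutive entries are compared and any pair whose times do not strictly increase is reported.

# Hypothetical bwlimit entries; the last one repeats the previous time on purpose.
bwlimit = [
    {"time": "08:00", "bandwidth": 1024},
    {"time": "12:00", "bandwidth": None},
    {"time": "12:00", "bandwidth": 512},
]
for i, (limit1, limit2) in enumerate(zip(bwlimit, bwlimit[1:])):
    if limit1["time"] >= limit2["time"]:
        print(f"bwlimit.{i + 1}.time: invalid time order: "
              f"{limit1['time']}, {limit2['time']}")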
Example #3
class IPMIService(CRUDService):

    @accepts()
    async def is_loaded(self):
        return os.path.exists('/dev/ipmi0')

    @accepts()
    async def channels(self):
        """
        Return a list with the IPMI channels available.
        """
        return channels

    @filterable
    async def query(self, filters=None, options=None):
        result = []
        for channel in await self.channels():
            try:
                cp = await run('ipmitool', 'lan', 'print', str(channel))
            except subprocess.CalledProcessError as e:
                raise CallError(f'Failed to get details from channel {channel}: {e}')

            output = cp.stdout.decode()
            data = {'channel': channel, 'id': channel}
            for line in output.split('\n'):
                if ':' not in line:
                    continue

                name, value = line.split(':', 1)
                if not name:
                    continue

                name = name.strip()
                value = value.strip()

                if name == 'IP Address':
                    data['ipaddress'] = value
                elif name == 'Subnet Mask':
                    data['netmask'] = value
                elif name == 'Default Gateway IP':
                    data['gateway'] = value
                elif name == '802.1q VLAN ID':
                    if value == 'Disabled':
                        data['vlan'] = None
                    else:
                        data['vlan'] = value
                elif name == 'IP Address Source':
                    data['dhcp'] = False if value == 'Static Address' else True
            result.append(data)
        return filter_list(result, filters, options)

    @accepts(Int('channel'), Dict(
        'ipmi',
        IPAddr('ipaddress'),
        Str('netmask'),
        IPAddr('gateway'),
        Str('password', password=True),
        Bool('dhcp'),
        Int('vlan'),
    ))
    async def do_update(self, id, data):

        if not await self.is_loaded():
            raise CallError('The ipmi device could not be found')

        verrors = ValidationErrors()

        if data.get('password') and len(data.get('password')) > 20:
            verrors.add(
                'ipmi_update.password',
                'A maximum of 20 characters are allowed'
            )

        if not data.get('dhcp'):
            for k in ['ipaddress', 'netmask', 'gateway']:
                if not data.get(k):
                    verrors.add(
                        f'ipmi_update.{k}',
                        'This field is required when dhcp is not given'
                    )

        if verrors:
            raise verrors

        args = ['ipmitool', 'lan', 'set', str(id)]
        rv = 0
        if data.get('dhcp'):
            rv |= (await run(*args, 'ipsrc', 'dhcp', check=False)).returncode
        else:
            rv |= (await run(*args, 'ipsrc', 'static', check=False)).returncode
            rv |= (await run(*args, 'ipaddr', data['ipaddress'], check=False)).returncode
            rv |= (await run(*args, 'netmask', data['netmask'], check=False)).returncode
            rv |= (await run(*args, 'defgw', 'ipaddr', data['gateway'], check=False)).returncode
        rv |= (await run(
            *args, 'vlan', 'id', str(data['vlan']) if data.get('vlan') else 'off'
        )).returncode

        rv |= (await run(*args, 'access', 'on', check=False)).returncode
        rv |= (await run(*args, 'auth', 'USER', 'MD2,MD5', check=False)).returncode
        rv |= (await run(*args, 'auth', 'OPERATOR', 'MD2,MD5', check=False)).returncode
        rv |= (await run(*args, 'auth', 'ADMIN', 'MD2,MD5', check=False)).returncode
        rv |= (await run(*args, 'auth', 'CALLBACK', 'MD2,MD5', check=False)).returncode
        # Setting ARP has issues on some hardware.
        # Do not fail if these two settings cannot be applied.
        # See #15578
        await run(*args, 'arp', 'respond', 'on', check=False)
        await run(*args, 'arp', 'generate', 'on', check=False)
        if data.get('password'):
            rv |= (await run(
                'ipmitool', 'user', 'set', 'password', '2', data.get('password'),
            )).returncode
        rv |= (await run('ipmitool', 'user', 'enable', '2')).returncode
        # XXX: according to dwhite, this needs to be executed off the box via
        # the lanplus interface.
        # rv |= (await run('ipmitool', 'sol', 'set', 'enabled', 'true', '1')).returncode
        # )
        return rv

    @accepts(Dict(
        'options',
        Int('seconds'),
        Bool('force'),
    ))
    async def identify(self, options=None):
        """
        Turn on IPMI chassis identify light.

        To turn off specify 0 as `seconds`.
        """
        options = options or {}
        if options.get('force') and options.get('seconds'):
            raise CallError('You have to use either "seconds" or "force" option, not both')

        if options.get('force'):
            cmd = 'force'
        else:
            cmd = str(options.get('seconds'))
        await run('ipmitool', 'chassis', 'identify', cmd)
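
A self-contained sketch (with abridged, hypothetical ipmitool output) of how IPMIService.query() above parses `ipmitool lan print` lines into the per-channel dictionary.

# Hypothetical, abridged `ipmitool lan print` output.
output = """\
IP Address              : 192.168.1.20
802.1q VLAN ID          : Disabled
IP Address Source       : Static Address
"""

data = {}
for line in output.split('\n'):
    if ':' not in line:
        continue
    name, value = line.split(':', 1)
    name, value = name.strip(), value.strip()
    if name == 'IP Address':
        data['ipaddress'] = value
    elif name == '802.1q VLAN ID':
        data['vlan'] = None if value == 'Disabled' else value
    elif name == 'IP Address Source':
        data['dhcp'] = value != 'Static Address'

print(data)  # {'ipaddress': '192.168.1.20', 'vlan': None, 'dhcp': False}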
Example #4
class ReplicationService(CRUDService):
    class Config:
        datastore = "storage.replication"
        datastore_prefix = "repl_"
        datastore_extend = "replication.extend"
        datastore_extend_context = "replication.extend_context"

    @private
    async def extend_context(self, extra):
        return {
            "state": await self.middleware.call("zettarepl.get_state"),
        }

    @private
    async def extend(self, data, context):
        data["periodic_snapshot_tasks"] = [{
            k.replace("task_", ""): v
            for k, v in task.items()
        } for task in data["periodic_snapshot_tasks"]]

        for task in data["periodic_snapshot_tasks"]:
            Cron.convert_db_format_to_schedule(task, begin_end=True)

        if data["direction"] == "PUSH":
            data["also_include_naming_schema"] = data["naming_schema"]
            data["naming_schema"] = []
        if data["direction"] == "PULL":
            data["also_include_naming_schema"] = []

        Cron.convert_db_format_to_schedule(data,
                                           "schedule",
                                           key_prefix="schedule_",
                                           begin_end=True)
        Cron.convert_db_format_to_schedule(data,
                                           "restrict_schedule",
                                           key_prefix="restrict_schedule_",
                                           begin_end=True)

        if "error" in context["state"]:
            data["state"] = context["state"]["error"]
        else:
            data["state"] = context["state"]["tasks"].get(
                f"replication_task_{data['id']}", {
                    "state": "PENDING",
                })

        data["job"] = data["state"].pop("job", None)

        return data

    @private
    async def compress(self, data):
        if data["direction"] == "PUSH":
            data["naming_schema"] = data["also_include_naming_schema"]
        del data["also_include_naming_schema"]

        Cron.convert_schedule_to_db_format(data,
                                           "schedule",
                                           key_prefix="schedule_",
                                           begin_end=True)
        Cron.convert_schedule_to_db_format(data,
                                           "restrict_schedule",
                                           key_prefix="restrict_schedule_",
                                           begin_end=True)

        del data["periodic_snapshot_tasks"]

        return data

    @accepts(
        Dict(
            "replication_create",
            Str("name", required=True),
            Str("direction", enum=["PUSH", "PULL"], required=True),
            Str("transport",
                enum=["SSH", "SSH+NETCAT", "LOCAL"],
                required=True),
            Int("ssh_credentials", null=True, default=None),
            Str("netcat_active_side",
                enum=["LOCAL", "REMOTE"],
                null=True,
                default=None),
            Str("netcat_active_side_listen_address", null=True, default=None),
            Int("netcat_active_side_port_min",
                null=True,
                default=None,
                validators=[Port()]),
            Int("netcat_active_side_port_max",
                null=True,
                default=None,
                validators=[Port()]),
            Str("netcat_passive_side_connect_address", null=True,
                default=None),
            List("source_datasets",
                 items=[Path("dataset", empty=False)],
                 required=True,
                 empty=False),
            Path("target_dataset", required=True, empty=False),
            Bool("recursive", required=True),
            List("exclude", items=[Path("dataset", empty=False)], default=[]),
            Bool("properties", default=True),
            Bool("replicate", default=False),
            List("periodic_snapshot_tasks",
                 items=[Int("periodic_snapshot_task")],
                 default=[],
                 validators=[Unique()]),
            List("naming_schema",
                 items=[
                     Str("naming_schema",
                         validators=[ReplicationSnapshotNamingSchema()])
                 ],
                 default=[]),
            List("also_include_naming_schema",
                 items=[
                     Str("naming_schema",
                         validators=[ReplicationSnapshotNamingSchema()])
                 ],
                 default=[]),
            Bool("auto", required=True),
            Cron("schedule",
                 defaults={"minute": "00"},
                 begin_end=True,
                 null=True,
                 default=None),
            Cron("restrict_schedule",
                 defaults={"minute": "00"},
                 begin_end=True,
                 null=True,
                 default=None),
            Bool("only_matching_schedule", default=False),
            Bool("allow_from_scratch", default=False),
            Str("readonly", enum=["SET", "REQUIRE", "IGNORE"], default="SET"),
            Bool("hold_pending_snapshots", default=False),
            Str("retention_policy",
                enum=["SOURCE", "CUSTOM", "NONE"],
                required=True),
            Int("lifetime_value",
                null=True,
                default=None,
                validators=[Range(min=1)]),
            Str("lifetime_unit",
                null=True,
                default=None,
                enum=["HOUR", "DAY", "WEEK", "MONTH", "YEAR"]),
            Str("compression",
                enum=["LZ4", "PIGZ", "PLZIP"],
                null=True,
                default=None),
            Int("speed_limit",
                null=True,
                default=None,
                validators=[Range(min=1)]),
            Bool("large_block", default=True),
            Bool("embed", default=False),
            Bool("compressed", default=True),
            Int("retries", default=5, validators=[Range(min=1)]),
            Str("logging_level",
                enum=["DEBUG", "INFO", "WARNING", "ERROR"],
                null=True,
                default=None),
            Bool("enabled", default=True),
            register=True,
            strict=True,
        ))
    async def do_create(self, data):
        """
        Create a Replication Task

        Create a Replication Task that will push or pull ZFS snapshots to or from a remote host.

        * `name` specifies a name for replication task
        * `direction` specifies whether task will `PUSH` or `PULL` snapshots
        * `transport` is a method of snapshots transfer:
          * `SSH` transfers snapshots via SSH connection. This method is supported everywhere but does not achieve
            great performance
            `ssh_credentials` is a required field for this transport (Keychain Credential ID of type `SSH_CREDENTIALS`)
          * `SSH+NETCAT` uses unencrypted connection for data transfer. This can only be used in trusted networks
            and requires a port (specified by range from `netcat_active_side_port_min` to `netcat_active_side_port_max`)
            to be open on `netcat_active_side`
            `ssh_credentials` is also required for control connection
          * `LOCAL` replicates to or from localhost
        * `source_datasets` is a non-empty list of datasets to replicate snapshots from
        * `target_dataset` is a dataset to put snapshots into. It must exist on target side
        * `recursive` and `exclude` have the same meaning as for Periodic Snapshot Task
        * `properties` control whether we should send dataset properties along with snapshots
        * `periodic_snapshot_tasks` is a list of periodic snapshot task IDs that are sources of snapshots for this
          replication task. Only push replication tasks can be bound to periodic snapshot tasks.
        * `naming_schema` is a list of naming schemas for pull replication
        * `also_include_naming_schema` is a list of naming schemas for push replication
        * `auto` allows replication to run automatically on schedule or after bound periodic snapshot task
        * `schedule` is a schedule to run replication task. Only `auto` replication tasks without bound periodic
          snapshot tasks can have a schedule
        * `restrict_schedule` restricts when replication task with bound periodic snapshot tasks runs. For example,
          you can have periodic snapshot tasks that run every 15 minutes, but only run replication task every hour.
        * Enabling `only_matching_schedule` will only replicate snapshots that match `schedule` or
          `restrict_schedule`
        * `allow_from_scratch` will destroy all snapshots on target side and replicate everything from scratch if none
          of the snapshots on target side matches source snapshots
        * `readonly` controls destination datasets readonly property:
          * `SET` will set all destination datasets to readonly=on after finishing the replication
          * `REQUIRE` will require all existing destination datasets to have readonly=on property
          * `IGNORE` will avoid this kind of behavior
        * `hold_pending_snapshots` will prevent source snapshots from being deleted by retention if replication fails
          for some reason
        * `retention_policy` specifies how to delete old snapshots on target side:
          * `SOURCE` deletes snapshots that are absent on source side
          * `CUSTOM` deletes snapshots that are older than `lifetime_value` and `lifetime_unit`
          * `NONE` does not delete any snapshots
        * `compression` compresses SSH stream. Available only for SSH transport
        * `speed_limit` limits speed of SSH stream. Available only for SSH transport
        * `large_block`, `embed` and `compressed` are various ZFS stream flags documented in `man zfs send`
        * `retries` specifies number of retries before considering replication failed

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.create",
                "params": [{
                    "name": "Work Backup",
                    "direction": "PUSH",
                    "transport": "SSH",
                    "ssh_credentials": [12],
                    "source_datasets", ["data/work"],
                    "target_dataset": "repl/work",
                    "recursive": true,
                    "periodic_snapshot_tasks": [5],
                    "auto": true,
                    "restrict_schedule": {
                        "minute": "0",
                        "hour": "*/2",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    },
                    "only_matching_schedule": true,
                    "retention_policy": "CUSTOM",
                    "lifetime_value": 1,
                    "lifetime_unit": "WEEK",
                }]
            }
        """

        verrors = ValidationErrors()
        verrors.add_child("replication_create", await self._validate(data))

        if verrors:
            raise verrors

        periodic_snapshot_tasks = data["periodic_snapshot_tasks"]
        await self.compress(data)

        id = await self.middleware.call(
            "datastore.insert", self._config.datastore, data,
            {"prefix": self._config.datastore_prefix})

        await self._set_periodic_snapshot_tasks(id, periodic_snapshot_tasks)

        await self.middleware.call("zettarepl.update_tasks")

        return await self._get_instance(id)

    @accepts(Int("id"),
             Patch(
                 "replication_create",
                 "replication_update",
                 ("attr", {
                     "update": True
                 }),
             ))
    async def do_update(self, id, data):
        """
        Update a Replication Task with specific `id`

        See the documentation for `create` method for information on payload contents

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.update",
                "params": [
                    7,
                    {
                        "name": "Work Backup",
                        "direction": "PUSH",
                        "transport": "SSH",
                        "ssh_credentials": [12],
                        "source_datasets", ["data/work"],
                        "target_dataset": "repl/work",
                        "recursive": true,
                        "periodic_snapshot_tasks": [5],
                        "auto": true,
                        "restrict_schedule": {
                            "minute": "0",
                            "hour": "*/2",
                            "dom": "*",
                            "month": "*",
                            "dow": "1,2,3,4,5",
                            "begin": "09:00",
                            "end": "18:00"
                        },
                        "only_matching_schedule": true,
                        "retention_policy": "CUSTOM",
                        "lifetime_value": 1,
                        "lifetime_unit": "WEEK",
                    }
                ]
            }
        """

        old = await self._get_instance(id)

        new = old.copy()
        if new["ssh_credentials"]:
            new["ssh_credentials"] = new["ssh_credentials"]["id"]
        new["periodic_snapshot_tasks"] = [
            task["id"] for task in new["periodic_snapshot_tasks"]
        ]
        new.update(data)

        verrors = ValidationErrors()
        verrors.add_child("replication_update", await self._validate(new, id))

        if verrors:
            raise verrors

        periodic_snapshot_tasks = new["periodic_snapshot_tasks"]
        await self.compress(new)

        new.pop("state", None)
        new.pop("job", None)

        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self._set_periodic_snapshot_tasks(id, periodic_snapshot_tasks)

        await self.middleware.call("zettarepl.update_tasks")

        return await self._get_instance(id)

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete a Replication Task with specific `id`

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.delete",
                "params": [
                    1
                ]
            }
        """

        response = await self.middleware.call("datastore.delete",
                                              self._config.datastore, id)

        await self.middleware.call("zettarepl.update_tasks")

        return response

    @item_method
    @accepts(Int("id"), Bool("really_run", default=True, hidden=True))
    @job(logs=True)
    async def run(self, job, id, really_run):
        """
        Run Replication Task of `id`.
        """
        if really_run:
            task = await self._get_instance(id)

            if not task["enabled"]:
                raise CallError("Task is not enabled")

            if task["state"]["state"] == "RUNNING":
                raise CallError("Task is already running")

            if task["state"]["state"] == "HOLD":
                raise CallError("Task is on hold")

        await self.middleware.call("zettarepl.run_replication_task", id,
                                   really_run, job)

    async def _validate(self, data, id=None):
        verrors = ValidationErrors()

        await self._ensure_unique(verrors, "", "name", data["name"], id)

        # Direction

        snapshot_tasks = []

        if data["direction"] == "PUSH":
            e, snapshot_tasks = await self._query_periodic_snapshot_tasks(
                data["periodic_snapshot_tasks"])
            verrors.add_child("periodic_snapshot_tasks", e)

            if data["naming_schema"]:
                verrors.add("naming_schema",
                            "This field has no sense for push replication")

            if not snapshot_tasks and not data["also_include_naming_schema"]:
                verrors.add(
                    "periodic_snapshot_tasks",
                    "You must at least either bind a periodic snapshot task or provide "
                    "\"Also Include Naming Schema\" for push replication task")

            if data["schedule"]:
                if data["periodic_snapshot_tasks"]:
                    verrors.add(
                        "schedule",
                        "Push replication can't be bound to periodic snapshot task and have "
                        "schedule at the same time")
            else:
                if data["auto"] and not data["periodic_snapshot_tasks"]:
                    verrors.add(
                        "auto",
                        "Push replication that runs automatically must be either "
                        "bound to periodic snapshot task or have schedule")

        if data["direction"] == "PULL":
            if data["schedule"]:
                pass
            else:
                if data["auto"]:
                    verrors.add(
                        "auto",
                        "Pull replication that runs automatically must have schedule"
                    )

            if data["periodic_snapshot_tasks"]:
                verrors.add(
                    "periodic_snapshot_tasks",
                    "Pull replication can't be bound to periodic snapshot task"
                )

            if not data["naming_schema"]:
                verrors.add("naming_schema",
                            "Naming schema is required for pull replication")

            if data["also_include_naming_schema"]:
                verrors.add("also_include_naming_schema",
                            "This field has no sense for pull replication")

            if data["hold_pending_snapshots"]:
                verrors.add(
                    "hold_pending_snapshots",
                    "Pull replication tasks can't hold pending snapshots because "
                    "they don't do source retention")

        # Transport

        if data["transport"] == "SSH+NETCAT":
            if data["netcat_active_side"] is None:
                verrors.add(
                    "netcat_active_side",
                    "You must choose active side for SSH+netcat replication")

            if data["netcat_active_side_port_min"] is not None and data[
                    "netcat_active_side_port_max"] is not None:
                if data["netcat_active_side_port_min"] > data[
                        "netcat_active_side_port_max"]:
                    verrors.add(
                        "netcat_active_side_port_max",
                        "Please specify value greater or equal than netcat_active_side_port_min"
                    )

            if data["compression"] is not None:
                verrors.add(
                    "compression",
                    "Compression is not supported for SSH+netcat replication")

            if data["speed_limit"] is not None:
                verrors.add(
                    "speed_limit",
                    "Speed limit is not supported for SSH+netcat replication")
        else:
            if data["netcat_active_side"] is not None:
                verrors.add(
                    "netcat_active_side",
                    "This field only has sense for SSH+netcat replication")

            for k in [
                    "netcat_active_side_listen_address",
                    "netcat_active_side_port_min",
                    "netcat_active_side_port_max",
                    "netcat_passive_side_connect_address"
            ]:
                if data[k] is not None:
                    verrors.add(
                        k,
                        "This field only has sense for SSH+netcat replication")

        if data["transport"] == "LOCAL":
            if data["ssh_credentials"] is not None:
                verrors.add(
                    "ssh_credentials",
                    "Remote credentials have no sense for local replication")

            if data["compression"] is not None:
                verrors.add("compression",
                            "Compression has no sense for local replication")

            if data["speed_limit"] is not None:
                verrors.add("speed_limit",
                            "Speed limit has no sense for local replication")
        else:
            if data["ssh_credentials"] is None:
                verrors.add(
                    "ssh_credentials",
                    "SSH Credentials are required for non-local replication")
            else:
                try:
                    await self.middleware.call(
                        "keychaincredential.get_of_type",
                        data["ssh_credentials"], "SSH_CREDENTIALS")
                except CallError as e:
                    verrors.add("ssh_credentials", str(e))

        # Common for all directions and transports
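        # A source dataset that falls under a bound periodic snapshot task must
        # mirror that task's exclude list when replicating recursively, and must
        # not itself be excluded by the task when replicating non-recursively.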

        for i, source_dataset in enumerate(data["source_datasets"]):
            for snapshot_task in snapshot_tasks:
                if is_child(source_dataset, snapshot_task["dataset"]):
                    if data["recursive"]:
                        for exclude in snapshot_task["exclude"]:
                            if is_child(exclude, source_dataset
                                        ) and exclude not in data["exclude"]:
                                verrors.add(
                                    "exclude",
                                    f"You should exclude {exclude!r} as bound periodic snapshot "
                                    f"task dataset {snapshot_task['dataset']!r} does"
                                )
                    else:
                        if source_dataset in snapshot_task["exclude"]:
                            verrors.add(
                                f"source_datasets.{i}",
                                f"Dataset {source_dataset!r} is excluded by bound "
                                f"periodic snapshot task for dataset "
                                f"{snapshot_task['dataset']!r}")

        if not data["recursive"] and data["exclude"]:
            verrors.add(
                "exclude",
                "Excluding child datasets is only supported for recursive replication"
            )

        for i, v in enumerate(data["exclude"]):
            if not any(
                    v.startswith(ds + "/") for ds in data["source_datasets"]):
                verrors.add(
                    f"exclude.{i}",
                    "This dataset is not a child of any of source datasets")

        if data["replicate"]:
            if not data["recursive"]:
                verrors.add(
                    "recursive",
                    "This option is required for full filesystem replication")

            if data["exclude"]:
                verrors.add(
                    "exclude",
                    "This option is not supported for full filesystem replication"
                )

            if not data["properties"]:
                verrors.add(
                    "properties",
                    "This option is required for full filesystem replication")

        if data["schedule"]:
            if not data["auto"]:
                verrors.add(
                    "schedule",
                    "You can't have schedule for replication that does not run automatically"
                )
        else:
            if data["only_matching_schedule"]:
                verrors.add(
                    "only_matching_schedule",
                    "You can't have only-matching-schedule without schedule")

        if data["retention_policy"] == "CUSTOM":
            if data["lifetime_value"] is None:
                verrors.add(
                    "lifetime_value",
                    "This field is required for custom retention policy")
            if data["lifetime_unit"] is None:
                verrors.add(
                    "lifetime_value",
                    "This field is required for custom retention policy")
        else:
            if data["lifetime_value"] is not None:
                verrors.add(
                    "lifetime_value",
                    "This field has no sense for specified retention policy")
            if data["lifetime_unit"] is not None:
                verrors.add(
                    "lifetime_unit",
                    "This field has no sense for specified retention policy")

        if data["enabled"]:
            for i, snapshot_task in enumerate(snapshot_tasks):
                if not snapshot_task["enabled"]:
                    verrors.add(
                        f"periodic_snapshot_tasks.{i}",
                        "You can't bind disabled periodic snapshot task to enabled replication task"
                    )

        return verrors

    async def _set_periodic_snapshot_tasks(self, replication_task_id,
                                           periodic_snapshot_tasks_ids):
        await self.middleware.call(
            "datastore.delete",
            "storage.replication_repl_periodic_snapshot_tasks",
            [["replication_id", "=", replication_task_id]])
        for periodic_snapshot_task_id in periodic_snapshot_tasks_ids:
            await self.middleware.call(
                "datastore.insert",
                "storage.replication_repl_periodic_snapshot_tasks",
                {
                    "replication_id": replication_task_id,
                    "task_id": periodic_snapshot_task_id,
                },
            )

    async def _query_periodic_snapshot_tasks(self, ids):
        verrors = ValidationErrors()

        query_result = await self.middleware.call("pool.snapshottask.query",
                                                  [["id", "in", ids]])

        snapshot_tasks = []
        for i, task_id in enumerate(ids):
            for task in query_result:
                if task["id"] == task_id:
                    snapshot_tasks.append(task)
                    break
            else:
                verrors.add(str(i), "This snapshot task does not exist")

        return verrors, snapshot_tasks

    @accepts(
        Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL"], required=True),
        Int("ssh_credentials", null=True, default=None))
    async def list_datasets(self, transport, ssh_credentials=None):
        """
        List datasets on remote side

        Accepts `transport` and SSH credentials ID (for non-local transport)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.list_datasets",
                "params": [
                    "SSH",
                    7
                ]
            }
        """

        return await self.middleware.call("zettarepl.list_datasets", transport,
                                          ssh_credentials)

    @accepts(Str("dataset", required=True),
             Str("transport",
                 enum=["SSH", "SSH+NETCAT", "LOCAL"],
                 required=True), Int("ssh_credentials",
                                     null=True,
                                     default=None))
    async def create_dataset(self, dataset, transport, ssh_credentials=None):
        """
        Creates dataset on remote side

        Accepts `dataset` name, `transport` and SSH credentials ID (for non-local transport)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.create_dataset",
                "params": [
                    "repl/work",
                    "SSH",
                    7
                ]
            }
        """

        return await self.middleware.call("zettarepl.create_dataset", dataset,
                                          transport, ssh_credentials)

    @accepts()
    async def list_naming_schemas(self):
        """
        List all naming schemas used in periodic snapshot and replication tasks.
        """
        naming_schemas = []
        for snapshottask in await self.middleware.call(
                "pool.snapshottask.query"):
            naming_schemas.append(snapshottask["naming_schema"])
        for replication in await self.middleware.call("replication.query"):
            naming_schemas.extend(replication["naming_schema"])
            naming_schemas.extend(replication["also_include_naming_schema"])
        return sorted(set(naming_schemas))

    @accepts(
        List("datasets", empty=False, items=[
            Path("dataset", empty=False),
        ]),
        List("naming_schema",
             empty=False,
             items=[
                 Str("naming_schema",
                     validators=[ReplicationSnapshotNamingSchema()])
             ]),
        Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL"], required=True),
        Int("ssh_credentials", null=True, default=None),
    )
    async def count_eligible_manual_snapshots(self, datasets, naming_schema,
                                              transport, ssh_credentials):
        """
        Count how many existing snapshots of `datasets` match `naming_schema`.

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.count_eligible_manual_snapshots",
                "params": [
                    "repl/work",
                    ["auto-%Y-%m-%d_%H-%M"],
                    "SSH",
                    4
                ]
            }
        """
        return await self.middleware.call(
            "zettarepl.count_eligible_manual_snapshots", datasets,
            naming_schema, transport, ssh_credentials)

    @accepts(
        Str("direction", enum=["PUSH", "PULL"], required=True),
        List("source_datasets",
             items=[Path("dataset", empty=False)],
             required=True,
             empty=False),
        Path("target_dataset", required=True, empty=False),
        Str("transport",
            enum=["SSH", "SSH+NETCAT", "LOCAL", "LEGACY"],
            required=True),
        Int("ssh_credentials", null=True, default=None),
    )
    async def target_unmatched_snapshots(self, direction, source_datasets,
                                         target_dataset, transport,
                                         ssh_credentials):
        """
        Check if target has any snapshots that do not exist on source.

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.target_unmatched_snapshots",
                "params": [
                    "PUSH",
                    ["repl/work", "repl/games"],
                    "backup",
                    "SSH",
                    4
                ]
            }

        Returns

            {
                "backup/work": ["auto-2019-10-15_13-00", "auto-2019-10-15_09-00"],
                "backup/games": ["auto-2019-10-15_13-00"],
            }
        """
        return await self.middleware.call(
            "zettarepl.target_unmatched_snapshots", direction, source_datasets,
            target_dataset, transport, ssh_credentials)

    @private
    def new_snapshot_name(self, naming_schema):
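        # e.g. naming_schema "auto-%Y-%m-%d_%H-%M" at 2019-10-15 13:00 yields
        # the snapshot name "auto-2019-10-15_13-00"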
        return datetime.now().strftime(naming_schema)

    # Legacy pair support
    @private
    @accepts(
        Dict(
            "replication-pair-data",
            Str("hostname", required=True),
            Str("public-key", required=True),
            Str("user", null=True),
        ))
    async def pair(self, data):
        result = await self.middleware.call(
            "keychaincredential.ssh_pair", {
                "remote_hostname": data["hostname"],
                "username": data["user"] or "root",
                "public_key": data["public-key"],
            })
        return {
            "ssh_port": result["port"],
            "ssh_hostkey": result["host_key"],
        }
Example #5
class GroupService(CRUDService):
    class Config:
        datastore = 'account.bsdgroups'
        datastore_prefix = 'bsdgrp_'
        datastore_extend = 'group.group_extend'

    @private
    async def group_extend(self, group):
        # Get group membership
        group['users'] = [
            gm['user']['id'] for gm in await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership', [(
                    'group', '=', group['id'])], {'prefix': 'bsdgrpmember_'})
        ]
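        # Also include users whose primary group is this group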
        group['users'] += [
            gmu['id'] for gmu in await self.middleware.call(
                'datastore.query', 'account.bsdusers', [('bsdusr_group_id',
                                                         '=', group['id'])])
        ]
        return group

    @private
    async def group_compress(self, group):
        if 'local' in group:
            group.pop('local')
        if 'id_type_both' in group:
            group.pop('id_type_both')
        return group

    @filterable
    async def query(self, filters=None, options=None):
        """
        Query groups with `query-filters` and `query-options`. As a performance optimization, only local groups
        will be queried by default.

        Groups from directory services such as NIS, LDAP, or Active Directory will be included in query results
        if the option `{'extra': {'search_dscache': True}}` is specified.
        """
        if not filters:
            filters = []

        options = options or {}
        options['extend'] = self._config.datastore_extend
        options['extend_context'] = self._config.datastore_extend_context
        options['prefix'] = self._config.datastore_prefix

        datastore_options = options.copy()
        datastore_options.pop('count', None)
        datastore_options.pop('get', None)

        extra = options.get('extra', {})
        dssearch = extra.pop('search_dscache', False)

        if dssearch:
            return await self.middleware.call('dscache.query', 'GROUPS',
                                              filters, options)

        result = await self.middleware.call('datastore.query',
                                            self._config.datastore, [],
                                            datastore_options)
        for entry in result:
            entry.update({'local': True, 'id_type_both': False})
        return await self.middleware.run_in_thread(filter_list, result,
                                                   filters, options)

    @accepts(
        Dict(
            'group_create',
            Int('gid'),
            Str('name', required=True),
            Bool('smb', default=True),
            Bool('sudo', default=False),
            Bool('allow_duplicate_gid', default=False),
            List('users', items=[Int('id')], required=False),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a new group.

        If `gid` is not provided it is automatically filled with the next one available.

        `allow_duplicate_gid` allows distinct group names to share the same gid.

        `users` is a list of user ids (`id` attribute from `user.query`).

        `smb` specifies whether the group should be mapped into an NT group.
        """
        allow_duplicate_gid = data['allow_duplicate_gid']
        verrors = ValidationErrors()
        await self.__common_validation(verrors, data, 'group_create')
        verrors.check()

        if not data.get('gid'):
            data['gid'] = await self.get_next_gid()

        group = data.copy()
        group['group'] = group.pop('name')

        users = group.pop('users', [])

        group = await self.group_compress(group)
        pk = await self.middleware.call('datastore.insert',
                                        'account.bsdgroups', group,
                                        {'prefix': 'bsdgrp_'})

        for user in users:
            await self.middleware.call('datastore.insert',
                                       'account.bsdgroupmembership', {
                                           'bsdgrpmember_group': pk,
                                           'bsdgrpmember_user': user
                                       })

        await self.middleware.call('service.reload', 'user')
        if data['smb']:
            try:
                await self.middleware.call('smb.groupmap_add', data['name'])
            except Exception:
                """
                Samba's group mapping database does not allow duplicate gids.
                Unfortunately, we don't get a useful error message at -d 0.
                """
                if not allow_duplicate_gid:
                    raise
                else:
                    self.logger.debug(
                        'Refusing to generate duplicate gid mapping in group_mapping.tdb: %s -> %s',
                        data['name'], data['gid'])

        return pk

    @accepts(
        Int('id'),
        Patch(
            'group_create',
            'group_update',
            ('attr', {
                'update': True
            }),
        ),
    )
    async def do_update(self, pk, data):
        """
        Update attributes of an existing group.
        """

        group = await self._get_instance(pk)

        verrors = ValidationErrors()
        await self.__common_validation(verrors, data, 'group_update', pk=pk)
        verrors.check()

        group.update(data)
        delete_groupmap = False
        group.pop('users', None)

        if 'name' in data and data['name'] != group['group']:
            delete_groupmap = group['group']
            group['group'] = group.pop('name')
        else:
            group.pop('name', None)

        group = await self.group_compress(group)
        await self.middleware.call('datastore.update', 'account.bsdgroups', pk,
                                   group, {'prefix': 'bsdgrp_'})

        if 'users' in data:
            existing = {
                i['bsdgrpmember_user']['id']: i
                for i in await self.middleware.call(
                    'datastore.query', 'account.bsdgroupmembership', [(
                        'bsdgrpmember_group', '=', pk)])
            }
            to_remove = set(existing.keys()) - set(data['users'])
            for i in to_remove:
                await self.middleware.call('datastore.delete',
                                           'account.bsdgroupmembership',
                                           existing[i]['id'])

            to_add = set(data['users']) - set(existing.keys())
            for i in to_add:
                await self.middleware.call('datastore.insert',
                                           'account.bsdgroupmembership', {
                                               'bsdgrpmember_group': pk,
                                               'bsdgrpmember_user': i
                                           })

        if delete_groupmap:
            await self.middleware.call('smb.groupmap_delete', delete_groupmap)

        await self.middleware.call('service.reload', 'user')

        if group['smb']:
            await self.middleware.call('smb.groupmap_add', group['group'])

        return pk

    @accepts(Int('id'), Dict('options', Bool('delete_users', default=False)))
    async def do_delete(self, pk, options=None):
        """
        Delete group `id`.

        The `delete_users` option deletes all users that have this group as their primary group.
        """

        group = await self._get_instance(pk)
        if group['smb']:
            await self.middleware.call('smb.groupmap_delete', group['group'])

        if group['builtin']:
            raise CallError('A built-in group cannot be deleted.',
                            errno.EACCES)

        nogroup = await self.middleware.call('datastore.query',
                                             'account.bsdgroups',
                                             [('group', '=', 'nogroup')], {
                                                 'prefix': 'bsdgrp_',
                                                 'get': True
                                             })
        for i in await self.middleware.call('datastore.query',
                                            'account.bsdusers',
                                            [('group', '=', group['id'])],
                                            {'prefix': 'bsdusr_'}):
            if options['delete_users']:
                await self.middleware.call('datastore.delete',
                                           'account.bsdusers', i['id'])
            else:
                await self.middleware.call('datastore.update',
                                           'account.bsdusers', i['id'],
                                           {'group': nogroup['id']},
                                           {'prefix': 'bsdusr_'})

        await self.middleware.call('datastore.delete', 'account.bsdgroups', pk)

        await self.middleware.call('service.reload', 'user')

        return pk

    async def get_next_gid(self):
        """
        Get the next available/free gid.
        """
        last_gid = 999
        for i in await self.middleware.call('datastore.query',
                                            'account.bsdgroups',
                                            [('builtin', '=', False)], {
                                                'order_by': ['gid'],
                                                'prefix': 'bsdgrp_'
                                            }):
            # If the difference between the last gid and the current one is
            # bigger than 1, it means we have a gap and can use it.
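            # e.g. existing gids [1000, 1001, 1005]: the gap after 1001 yields
            # 1002; with no gaps the next gid is simply last_gid + 1.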
            if i['gid'] - last_gid > 1:
                return last_gid + 1
            last_gid = i['gid']
        return last_gid + 1

    @accepts(
        Dict('get_group_obj', Str('groupname', default=None),
             Int('gid', default=None)))
    async def get_group_obj(self, data):
        """
        Returns dictionary containing information from struct grp for the group specified by either
        the groupname or gid. Bypasses group cache.
        """
        return await self.middleware.call('dscache.get_uncached_group',
                                          data['groupname'], data['gid'])

    async def __common_validation(self, verrors, data, schema, pk=None):

        exclude_filter = [('id', '!=', pk)] if pk else []

        if 'name' in data:
            existing = await self.middleware.call(
                'datastore.query', 'account.bsdgroups',
                [('group', '=', data['name'])] + exclude_filter,
                {'prefix': 'bsdgrp_'})
            if existing:
                verrors.add(
                    f'{schema}.name',
                    f'A Group with the name "{data["name"]}" already exists.',
                    errno.EEXIST,
                )

            pw_checkname(verrors, f'{schema}.name', data['name'])

        allow_duplicate_gid = data.pop('allow_duplicate_gid', False)
        if data.get('gid') and not allow_duplicate_gid:
            existing = await self.middleware.call(
                'datastore.query', 'account.bsdgroups',
                [('gid', '=', data['gid'])] + exclude_filter,
                {'prefix': 'bsdgrp_'})
            if existing:
                verrors.add(
                    f'{schema}.gid',
                    f'The Group ID "{data["gid"]}" already exists.',
                    errno.EEXIST,
                )

        if 'users' in data:
            existing = set([
                i['id'] for i in await self.middleware.call(
                    'datastore.query', 'account.bsdusers', [('id', 'in',
                                                             data['users'])])
            ])
            notfound = set(data['users']) - existing
            if notfound:
                verrors.add(
                    f'{schema}.users',
                    f'The following users do not exist: {", ".join(map(str, notfound))}',
                )
Example #6
class VMService(CRUDService):
    class Config:
        namespace = 'vm'

    def __init__(self, *args, **kwargs):
        super(VMService, self).__init__(*args, **kwargs)
        self._manager = VMManager(self)
        self.vmutils = VMUtils

    @accepts()
    def flags(self):
        """Returns a dictionary with CPU flags for bhyve."""
        data = {}

        vmx = sysctl.filter('hw.vmm.vmx.initialized')
        data['intel_vmx'] = True if vmx and vmx[0].value else False

        ug = sysctl.filter('hw.vmm.vmx.cap.unrestricted_guest')
        data['unrestricted_guest'] = True if ug and ug[0].value else False

        rvi = sysctl.filter('hw.vmm.svm.features')
        data['amd_rvi'] = True if rvi and rvi[0].value != 0 else False

        asids = sysctl.filter('hw.vmm.svm.num_asids')
        data['amd_asids'] = True if asids and asids[0].value != 0 else False

        return data

    @accepts()
    def identify_hypervisor(self):
        """
        Identify Hypervisors that might work nested with bhyve.

        Returns:
                bool: True if compatible, otherwise False.
        """
        compatible_hp = ('VMwareVMware', 'Microsoft Hv', 'KVMKVMKVM',
                         'bhyve bhyve')
        identify_hp = sysctl.filter('hw.hv_vendor')[0].value.strip()

        if identify_hp in compatible_hp:
            return True
        return False

    @filterable
    async def query(self, filters=None, options=None):
        options = options or {}
        options['extend'] = 'vm._extend_vm'
        return await self.middleware.call('datastore.query', 'vm.vm', filters,
                                          options)

    async def _extend_vm(self, vm):
        vm['devices'] = []
        for device in await self.middleware.call('datastore.query',
                                                 'vm.device',
                                                 [('vm__id', '=', vm['id'])]):
            device.pop('id', None)
            device.pop('vm', None)
            vm['devices'].append(device)
        return vm

    @accepts(Int('id'))
    async def get_vnc(self, id):
        """
        Get the vnc devices from a given guest.

        Returns:
            list(dict): all attributes of each VNC device, or an empty list.
        """
        vnc_devices = []
        for device in await self.middleware.call('datastore.query',
                                                 'vm.device',
                                                 [('vm__id', '=', id)]):
            if device['dtype'] == 'VNC':
                vnc = device['attributes']
                vnc_devices.append(vnc)
        return vnc_devices

    @accepts(Int('id'))
    async def get_attached_iface(self, id):
        """
        Get the attached physical interfaces from a given guest.

        Returns:
            list: a list of all attached physical interfaces, or False if there are none.
        """
        ifaces = []
        for device in await self.middleware.call('datastore.query',
                                                 'vm.device',
                                                 [('vm__id', '=', id)]):
            if device['dtype'] == 'NIC':
                if_attached = device['attributes'].get('nic_attach')
                if if_attached:
                    ifaces.append(if_attached)

        if ifaces:
            return ifaces
        else:
            return False

    @accepts(Int('id'))
    async def get_console(self, id):
        """
        Get the console device from a given guest.

        Returns:
            str: the console device path, or False.
        """
        try:
            guest_status = await self.status(id)
        except Exception:
            guest_status = None

        if guest_status and guest_status['state'] == 'RUNNING':
            device = "/dev/nmdm{0}B".format(id)
            if stat.S_ISCHR(os.stat(device).st_mode) is True:
                return device

        return False

    @private
    def __activate_sharefs(self, dataset):
        zfs = libzfs.ZFS()
        pool_exist = False
        params = {}
        fstype = getattr(libzfs.DatasetType, 'FILESYSTEM')
        images_fs = '/.bhyve_containers'
        new_fs = dataset + images_fs

        try:
            zfs.get_dataset(new_fs)
            pool_exist = True
        except libzfs.ZFSException:
            # dataset does not exist yet, we need to create it.
            pass

        if pool_exist is False:
            try:
                self.logger.debug("===> Trying to create: {0}".format(new_fs))
                pool = zfs.get(dataset)
                pool.create(new_fs, params, fstype, sparse_vol=False)
            except libzfs.ZFSException as e:
                self.logger.error("Failed to create dataset", exc_info=True)
                raise e
            new_volume = zfs.get_dataset(new_fs)
            new_volume.mount()
            self.vmutils.do_dirtree_container(new_volume.mountpoint)
            return True
        else:
            return False

    @accepts(Str('pool_name'))
    def activate_sharefs(self, pool_name=None):
        """
        Create a dataset for pre-built container images.
        """

        zfs = libzfs.ZFS()

        if pool_name:
            return self.__activate_sharefs(pool_name)
        else:
            # Only to keep compatibility with the OLD GUI
            blocked_pools = ['freenas-boot']
            pool_name = None

            # We get the first available pool.
            for pool in zfs.pools:
                if pool.name not in blocked_pools:
                    pool_name = pool.name
                    break
            return self.__activate_sharefs(pool_name)

    @accepts()
    async def get_sharefs(self):
        """
        Return the mountpoint of the shared dataset for container images.
        """
        zfs = libzfs.ZFS()
        for dataset in zfs.datasets:
            if '.bhyve_containers' in dataset.name:
                return dataset.mountpoint
        return False

    @accepts(Int('id'))
    async def rm_container_conf(self, id):
        vm_data = await self.middleware.call('datastore.query', 'vm.vm',
                                             [('id', '=', id)])
        if vm_data:
            sharefs = await self.middleware.call('vm.get_sharefs')
            if sharefs:
                cnt_conf_name = str(
                    vm_data[0].get('id')) + '_' + vm_data[0].get('name')
                full_path = sharefs + '/configs/' + cnt_conf_name
                if os.path.exists(full_path):
                    shutil.rmtree(full_path)
                    return True
        return False

    @accepts(Str('raw_path'), Str('size'))
    async def raw_resize(self, raw_path, size=0):
        unit_size = ('M', 'G', 'T')
        truncate_cmd = [
            'truncate',
            '-s',
        ]
        if size == 0:
            return False
        if os.path.exists(raw_path):
            expand_size = size if len(
                size) > 0 and size[-1:] in unit_size else size + 'G'
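            # e.g. "10G" is used as-is, while a bare "10" becomes "10G"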
            truncate_cmd += [expand_size, raw_path]
            self.logger.debug('===> DISK: {0} resize to: {1}'.format(
                raw_path, expand_size))
            error = await (await Popen(truncate_cmd,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)).wait()
            if error:
                self.logger.debug(
                    "===> Error to resize disk: {0} with size: {1}".format(
                        raw_path, expand_size))
                return False
            else:
                return True
        else:
            return False

    @accepts(
        Dict(
            'vm_create',
            Str('name'),
            Str('description'),
            Int('vcpus'),
            Int('memory'),
            Str('bootloader'),
            List("devices"),
            Str('vm_type'),
            Bool('autostart'),
            register=True,
        ))
    async def do_create(self, data):
        """Create a VM."""

        devices = data.pop('devices')
        pk = await self.middleware.call('datastore.insert', 'vm.vm', data)

        for device in devices:
            device['vm'] = pk
            await self.middleware.call('datastore.insert', 'vm.device', device)
        return pk

    async def __do_update_devices(self, id, devices):
        if devices and isinstance(devices, list):
            device_query = await self.middleware.call(
                'datastore.query', 'vm.device', [('vm__id', '=', int(id))])

            # Make sure both lists have the same size.
            if len(device_query) != len(devices):
                return False

            get_devices = []
            for q in device_query:
                q.pop('vm')
                get_devices.append(q)

            while len(devices) > 0:
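                # Pair old and new devices positionally; an entry is updated
                # only when its dtype matches the existing device at that index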
                update_item = devices.pop(0)
                old_item = get_devices.pop(0)
                if old_item['dtype'] == update_item['dtype']:
                    old_item['attributes'] = update_item['attributes']
                    device_id = old_item.pop('id')
                    await self.middleware.call('datastore.update', 'vm.device',
                                               device_id, old_item)
            return True

    @accepts(Int('id'),
             Patch(
                 'vm_create',
                 'vm_update',
                 ('attr', {
                     'update': True
                 }),
             ))
    async def do_update(self, id, data):
        """Update all information of a specific VM."""
        devices = data.pop('devices', None)
        if devices:
            update_devices = await self.__do_update_devices(id, devices)
        if data:
            return await self.middleware.call('datastore.update', 'vm.vm', id,
                                              data)
        else:
            return update_devices

    @accepts(
        Int('id'),
        Dict('devices', additional_attrs=True),
    )
    async def create_device(self, id, data):
        """Create a new device in an existing vm."""
        devices_type = ('NIC', 'DISK', 'CDROM', 'VNC', 'RAW')
        devices = data.get('devices', None)

        if devices:
            devices[0].update({"vm": id})
            dtype = devices[0].get('dtype', None)
            if dtype in devices_type and isinstance(devices, list):
                devices = devices[0]
                await self.middleware.call('datastore.insert', 'vm.device',
                                           devices)
                return True
            else:
                return False
        else:
            return False

    @accepts(Int('id'))
    async def do_delete(self, id):
        """Delete a VM."""
        status = await self.status(id)
        if isinstance(status, dict):
            if status.get('state') == "RUNNING":
                await self.stop(id)
        try:
            vm_data = await self.middleware.call('datastore.query', 'vm.vm',
                                                 [('id', '=', id)])
            if self.vmutils.is_container(vm_data[0]):
                await self.middleware.call('vm.rm_container_conf', id)
            return await self.middleware.call('datastore.delete', 'vm.vm', id)
        except Exception as err:
            self.logger.error("===> {0}".format(err))
            return False

    @item_method
    @accepts(Int('id'))
    async def start(self, id):
        """Start a VM."""
        try:
            return await self._manager.start(id)
        except Exception as err:
            self.logger.error("===> {0}".format(err))
            return False

    @item_method
    @accepts(Int('id'))
    async def stop(self, id):
        """Stop a VM."""
        try:
            return await self._manager.stop(id)
        except Exception as err:
            self.logger.error("===> {0}".format(err))
            return False

    @item_method
    @accepts(Int('id'))
    async def restart(self, id):
        """Restart a VM."""
        try:
            return await self._manager.restart(id)
        except Exception as err:
            self.logger.error("===> {0}".format(err))
            return False

    @item_method
    @accepts(Int('id'))
    async def status(self, id):
        """Get the status of a VM, if it is RUNNING or STOPPED."""
        try:
            return await self._manager.status(id)
        except Exception as err:
            self.logger.error("===> {0}".format(err))
            return False

    def fetch_hookreport(self, blocknum, blocksize, totalsize, job, file_name):
        """Hook to report the download progress."""
        readchunk = blocknum * blocksize
        if totalsize > 0:
            percent = readchunk * 1e2 / totalsize
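            # e.g. 50 blocks of 8192 bytes with a 819200-byte total -> 50%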
            job.set_progress(int(percent), 'Downloading', {
                'downloaded': readchunk,
                'total': totalsize
            })

    @accepts(Str('vmOS'), Bool('force'))
    @job(lock='container')
    async def fetch_image(self, job, vmOS, force=False):
        """Download a pre-built image for bhyve"""
        vm_os = CONTAINER_IMAGES.get(vmOS)
        url = vm_os['URL']

        self.logger.debug("==> IMAGE: {0}".format(vm_os))

        sharefs = await self.middleware.call('vm.get_sharefs')
        vm_os_file = vm_os['GZIPFILE']
        iso_path = sharefs + '/iso_files/'
        file_path = iso_path + vm_os_file

        if os.path.exists(file_path) is False and force is False:
            logger.debug("===> Downloading: %s" % (url))
            await self.middleware.threaded(lambda: urlretrieve(
                url,
                file_path,
                lambda nb, bs, fs, job=job: self.fetch_hookreport(
                    nb, bs, fs, job, file_path)))

    @accepts()
    async def list_images(self):
        return CONTAINER_IMAGES

    @accepts(Int('job_id'))
    async def get_download_status(self, job_id):
        """ Returns the status of the job, if job does not exists it returns False."""
        job_pool = await self.middleware.call(
            'core.get_jobs', [('method', '=', 'vm.fetch_image')])
        for __job in job_pool:
            if __job['id'] == job_id:
                return __job
        return False

    @accepts(Str('vmOS'))
    async def image_path(self, vmOS):
        """Return the prebuilt image path or false in case it is not supported."""
        vm_os = CONTAINER_IMAGES.get(vmOS, None)
        if vm_os:
            image_file = vm_os['GZIPFILE']
            sharefs = await self.middleware.call('vm.get_sharefs')
            file_path = sharefs + '/iso_files/' + image_file
            if os.path.exists(file_path):
                if self.vmutils.check_sha256(file_path, vmOS):
                    self.logger.debug("===> Checksum OK: {}".format(file_path))
                    return file_path
                else:
                    self.logger.debug(
                        "===> Checksum NOK: {}".format(file_path))
                    return False
            else:
                return False
        else:
            return False

    def decompress_hookreport(self, dst_file, job):
        totalsize = 4756340736  # XXX: It will be parsed from a sha256 file.
        fd = os.open(dst_file, os.O_RDONLY)
        try:
            size = os.lseek(fd, 0, os.SEEK_END)
        finally:
            os.close(fd)

        percent = (size / totalsize) * 100
        job.set_progress(int(percent), 'Decompress', {
            'decompressed': size,
            'total': totalsize
        })

    @accepts(Str('src'), Str('dst'))
    def decompress_gzip(self, src, dst):
        if os.path.exists(dst):
            self.logger.error("===> DST: {0} exist, we stop here.".format(dst))
            return False

        if self.vmutils.is_gzip(src):
            self.logger.debug("===> SRC: {0} DST: {1}".format(src, dst))
            with gzip.open(src, 'rb') as src_file, open(dst, 'wb') as dst_file:
                shutil.copyfileobj(src_file, dst_file)
            return True
        else:
            self.logger.error(
                "===> SRC: {0} does not exists or is broken.".format(src))
            return False

    async def __find_clone(self, name):
        data = await self.middleware.call('vm.query', [],
                                          {'order_by': ['name']})
        clone_index = 0
        next_name = ""
        for vm_name in data:
            if name in vm_name['name'] and '_clone' in vm_name['name']:
                name_index = int(vm_name['name'][-1])
                next_name = vm_name['name'][:-1]
                if name_index >= clone_index:
                    clone_index = int(name_index) + 1

        if next_name:
            next_name = next_name + str(clone_index)
        else:
            next_name = name + '_clone' + str(clone_index)

        return next_name

    @accepts(Int('id'))
    async def clone(self, id):
        vm = await self._manager.clone(id)

        if vm is None:
            raise CallError('Cannot clone a VM that does not exist.',
                            errno.EINVAL)

        origin_name = vm['name']
        del vm['id']

        vm['name'] = await self.__find_clone(vm['name'])

        for item in vm['devices']:
            if item['dtype'] == 'NIC':
                if 'mac' in item['attributes']:
                    del item['attributes']['mac']
            if item['dtype'] == 'VNC':
                if 'vnc_port' in item['attributes']:
                    del item['attributes']['vnc_port']
            if item['dtype'] == 'DISK':
                disk_src_path = '/'.join(
                    item['attributes']['path'].split('/dev/zvol/')[-1:])
                disk_snapshot_name = vm['name']
                disk_snapshot_path = disk_src_path + '@' + disk_snapshot_name
                clone_dst_path = disk_src_path + '_' + vm['name']

                data = {'dataset': disk_src_path, 'name': disk_snapshot_name}
                await self.middleware.call('zfs.snapshot.create', data)

                data = {
                    'snapshot': disk_snapshot_path,
                    'dataset_dst': clone_dst_path
                }
                await self.middleware.call('zfs.snapshot.clone', data)

                item['attributes']['path'] = '/dev/zvol/' + clone_dst_path
            if item['dtype'] == 'RAW':
                item['attributes']['path'] = ''
                self.logger.warn(
                    "For RAW disk you need copy it manually inside your NAS.")

        await self.create(vm)
        self.logger.info("VM cloned from {0} to {1}".format(
            origin_name, vm['name']))

        return True

    @accepts(Int('id'))
    async def get_vnc_web(self, id):
        """
            Get the VNC URL from a given VM.

            Returns:
                list: all available VNC web URLs.
        """
        vnc_web = []

        for vnc_device in await self.get_vnc(id):
            if vnc_device.get('vnc_web', None) is True:
                vnc_port = vnc_device.get('vnc_port', None)
                if vnc_port is None:
                    vnc_port = 5900 + id
                #  XXX: Create a method for web port.
                split_port = int(str(vnc_port)[:2]) - 1
                vnc_web_port = str(split_port) + str(vnc_port)[2:]
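                # e.g. vnc_port 5901 -> split_port 58 -> vnc_web_port "5801"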
                bind_ip = vnc_device.get('vnc_bind', None)
                vnc_web.append('http://{}:{}/vnc_auto.html'.format(
                    bind_ip, vnc_web_port))

        return vnc_web
Example #7
class SytemAdvancedService(ConfigService):
    class Config:
        datastore = 'system.advanced'
        datastore_prefix = 'adv_'
        datastore_extend = 'system.advanced.system_advanced_extend'
        namespace = 'system.advanced'

    @accepts()
    async def serial_port_choices(self):
        """
        Get available choices for `serialport` attribute in `system.advanced.update`.
        """
        if (not await self.middleware.call('system.is_freenas') and await
                self.middleware.call('failover.hardware') == 'ECHOSTREAM'):
            ports = {'0x3f8': '0x3f8'}
        else:
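            # Parse `devinfo -u` output for I/O port addresses that belong to
            # uart devices; each match becomes a selectable serial port choice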
            pipe = await Popen(
                "/usr/sbin/devinfo -u | grep -A 99999 '^I/O ports:' | "
                "sed -En 's/ *([0-9a-fA-Fx]+).*\(uart[0-9]+\)/\\1/p'",
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=True)
            ports = {
                y: y
                for y in (await pipe.communicate()
                          )[0].decode().strip().strip('\n').split('\n') if y
            }

        if not ports or (await self.config())['serialport'] == '0x2f8':
            # We should always add 0x2f8 if ports is false or current value is the default one in db
            # i.e 0x2f8
            ports['0x2f8'] = '0x2f8'

        return ports

    @private
    async def system_advanced_extend(self, data):

        if data.get('sed_user'):
            data['sed_user'] = data.get('sed_user').upper()

        return data

    async def __validate_fields(self, schema, data):
        verrors = ValidationErrors()

        user = data.get('periodic_notifyuser')
        if user:
            if not (await self.middleware.call('notifier.get_user_object',
                                               user)):
                verrors.add(f'{schema}.periodic_notifyuser',
                            'Specified user does not exist')

        serial_choice = data.get('serialport')
        if data.get('serialconsole'):

            if not serial_choice:
                verrors.add(
                    f'{schema}.serialport',
                    'Please specify a serial port when serial console option is checked'
                )
            elif serial_choice not in await self.serial_port_choices():
                verrors.add(
                    f'{schema}.serialport',
                    'Serial port specified has not been identified by the system'
                )

        elif not serial_choice:
            # TODO: THIS CHECK CAN BE REMOVED WHEN WE DISALLOW NONE VALUES IN THE SCHEMA LAYER

            verrors.add(f'{schema}.serialport',
                        'Empty serial port is not allowed')

        return verrors, data

    @accepts(
        Dict('system_advanced_update',
             Bool('advancedmode'),
             Bool('autotune'),
             Int('boot_scrub', validators=[Range(min=1)]),
             Bool('consolemenu'),
             Bool('consolemsg'),
             Bool('debugkernel'),
             Bool('fqdn_syslog'),
             Str('motd'),
             Str('periodic_notifyuser'),
             Bool('powerdaemon'),
             Bool('serialconsole'),
             Str('serialport'),
             Str('serialspeed',
                 enum=['9600', '19200', '38400', '57600', '115200']),
             Int('swapondrive', validators=[Range(min=0)]),
             Bool('traceback'),
             Bool('uploadcrash'),
             Bool('anonstats'),
             Str('sed_user', enum=['USER', 'MASTER']),
             Str('sed_passwd', private=True),
             update=True))
    async def do_update(self, data):
        config_data = await self.config()
        original_data = config_data.copy()
        config_data.update(data)

        verrors, config_data = await self.__validate_fields(
            'advanced_settings_update', config_data)
        if verrors:
            raise verrors

        if len(set(config_data.items()) ^ set(original_data.items())) > 0:
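            # The symmetric difference of the item sets is non-empty only when
            # at least one setting actually changed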
            if original_data.get('sed_user'):
                original_data['sed_user'] = original_data['sed_user'].lower()
            if config_data.get('sed_user'):
                config_data['sed_user'] = config_data['sed_user'].lower()

            # PASSWORD ENCRYPTION FOR SED IS BEING DONE IN THE MODEL ITSELF

            await self.middleware.call(
                'datastore.update', self._config.datastore, config_data['id'],
                config_data, {'prefix': self._config.datastore_prefix})

            if original_data['boot_scrub'] != config_data['boot_scrub']:
                await self.middleware.call('service.restart', 'cron')

            loader_reloaded = False
            if original_data['motd'] != config_data['motd']:
                await self.middleware.call('service.start', 'motd',
                                           {'onetime': False})

            if original_data['consolemenu'] != config_data['consolemenu']:
                await self.middleware.call('service.start', 'ttys',
                                           {'onetime': False})

            if original_data['powerdaemon'] != config_data['powerdaemon']:
                await self.middleware.call('service.restart', 'powerd',
                                           {'onetime': False})

            if original_data['serialconsole'] != config_data['serialconsole']:
                await self.middleware.call('service.start', 'ttys',
                                           {'onetime': False})
                if not loader_reloaded:
                    await self.middleware.call('service.reload', 'loader',
                                               {'onetime': False})
                    loader_reloaded = True
            elif (original_data['serialspeed'] != config_data['serialspeed']
                  or original_data['serialport'] != config_data['serialport']):
                if not loader_reloaded:
                    await self.middleware.call('service.reload', 'loader',
                                               {'onetime': False})
                    loader_reloaded = True

            if (original_data['autotune'] != config_data['autotune']
                    and not loader_reloaded):
                await self.middleware.call('service.reload', 'loader',
                                           {'onetime': False})
                loader_reloaded = True

            if (original_data['debugkernel'] != config_data['debugkernel']
                    and not loader_reloaded):
                await self.middleware.call('service.reload', 'loader',
                                           {'onetime': False})

            if original_data['periodic_notifyuser'] != config_data[
                    'periodic_notifyuser']:
                await self.middleware.call('service.start', 'ix-periodic',
                                           {'onetime': False})

            if original_data['fqdn_syslog'] != config_data['fqdn_syslog']:
                await self.middleware.call('service.restart', 'syslogd',
                                           {'onetime': False})

        return await self.config()
Example #8
class AFPService(SystemServiceService):
    class Config:
        service = 'afp'
        datastore_extend = 'afp.extend'
        datastore_prefix = 'afp_srv_'

    @private
    async def extend(self, afp):
        for i in ('map_acls', 'chmod_request'):
            afp[i] = afp[i].upper()
        return afp

    @private
    async def compress(self, afp):
        for i in ('map_acls', 'chmod_request'):
            value = afp.get(i)
            if value:
                afp[i] = value.lower()
        return afp

    @accepts(
        Dict('afp_update',
             Bool('guest'),
             Str('guest_user'),
             List('bindip', items=[Str('ip', validators=[IpAddress()])]),
             Int('connections_limit', validators=[Range(min=1, max=65535)]),
             Dir('dbpath'),
             Str('global_aux', max_length=None),
             Str('map_acls', enum=['RIGHTS', 'MODE', 'NONE']),
             Str('chmod_request', enum=['PRESERVE', 'SIMPLE', 'IGNORE']),
             update=True))
    async def do_update(self, data):
        """
        Update AFP service settings.

        `bindip` is a list of IPs to bind AFP to. Leave blank (empty list) to bind to all
        available IPs.

        `map_acls` defines how to map the effective permissions of authenticated users.
        RIGHTS - Unix-style permissions
        MODE - ACLs
        NONE - Do not map

        `chmod_request` defines advanced permission control that deals with ACLs.
        PRESERVE - Preserve ZFS ACEs for named users and groups or POSIX ACL group mask
        SIMPLE - Change permission as requested without any extra steps
        IGNORE - Permission change requests are ignored
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if new['dbpath']:
            await check_path_resides_within_volume(
                verrors,
                self.middleware,
                'afp_update.dbpath',
                new['dbpath'],
            )

        verrors.check()

        new = await self.compress(new)
        await self._update_service(old, new)

        return await self.config()

    @accepts()
    async def bindip_choices(self):
        """
        List of valid choices for IP addresses to which to bind the AFP service.
        """
        return {
            d['address']: d['address']
            for d in await self.middleware.call('interface.ip_in_use')
        }
Example #9
class KubernetesService(ConfigService):

    class Config:
        datastore = 'services.kubernetes'
        datastore_extend = 'kubernetes.k8s_extend'
        cli_namespace = 'app.kubernetes'

    @private
    async def k8s_extend(self, data):
        data['dataset'] = applications_ds_name(data['pool']) if data['pool'] else None
        data.pop('cni_config')
        return data

    @private
    async def unused_cidrs(self, network_cidrs):
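        # Carve candidate subnets out of the RFC 1918 private ranges and keep
        # only those that do not overlap any network already in use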
        return [
            str(network) for network in itertools.chain(
                ipaddress.ip_network('172.16.0.0/12', False).subnets(4),
                ipaddress.ip_network('10.0.0.0/8', False).subnets(8),
                ipaddress.ip_network('192.168.0.0/16', False).subnets(1),
            ) if not any(network.overlaps(used_network) for used_network in network_cidrs)
        ]

    @private
    async def validate_data(self, data, schema, old_data):
        verrors = ValidationErrors()

        if data.pop('migrate_applications', False):
            if data['pool'] == old_data['pool']:
                verrors.add(
                    f'{schema}.migrate_applications',
                    'Migration of applications dataset only happens when a new pool is configured.'
                )
            elif not data['pool']:
                verrors.add(
                    f'{schema}.migrate_applications',
                    'Pool must be specified when migration of ix-application dataset is desired.'
                )
            elif not old_data['pool']:
                verrors.add(
                    f'{schema}.migrate_applications',
                    'A pool must have been configured previously for ix-application dataset migration.'
                )
            else:
                if await self.middleware.call('zfs.dataset.query', [['id', '=', applications_ds_name(data['pool'])]]):
                    verrors.add(
                        f'{schema}.migrate_applications',
                        f'Migration of {applications_ds_name(old_data["pool"])!r} to {data["pool"]!r} not '
                        f'possible as {applications_ds_name(data["pool"])} already exists.'
                    )

                if not await self.middleware.call(
                    'zfs.dataset.query', [['id', '=', applications_ds_name(old_data['pool'])]]
                ):
                    # Edge case but handled just to be sure
                    verrors.add(
                        f'{schema}.migrate_applications',
                        f'{applications_ds_name(old_data["pool"])!r} does not exist, migration not possible.'
                    )

        network_cidrs = set([
            ipaddress.ip_network(f'{ip_config["address"]}/{ip_config["netmask"]}', False)
            for interface in await self.middleware.call('interface.query')
            for ip_config in itertools.chain(interface['aliases'], interface['state']['aliases'])
            if ip_config['type'] != 'LINK'
        ])

        unused_cidrs = []
        if not data['cluster_cidr'] or not data['service_cidr']:
            unused_cidrs = await self.unused_cidrs(network_cidrs)
            # If the first two candidates come from different private ranges, drop the first so both CIDRs share a range
            if len(unused_cidrs) > 2 and unused_cidrs[0].split('.')[0] != unused_cidrs[1].split('.')[0]:
                unused_cidrs.pop(0)

        if unused_cidrs and not data['cluster_cidr']:
            data['cluster_cidr'] = unused_cidrs.pop(0)

        if unused_cidrs and not data['service_cidr']:
            data['service_cidr'] = unused_cidrs.pop(0)

        if not data['cluster_dns_ip']:
            if data['service_cidr']:
                # Pick the 10th IP (the usual default) from the service CIDR
                data['cluster_dns_ip'] = str(list(ipaddress.ip_network(data['service_cidr'], False).hosts())[9])
            else:
                verrors.add(f'{schema}.cluster_dns_ip', 'Please specify cluster_dns_ip.')

        if data['pool'] and not await self.middleware.call('pool.query', [['name', '=', data['pool']]]):
            verrors.add(f'{schema}.pool', 'Please provide a valid pool configured in the system.')

        for k in ('cluster_cidr', 'service_cidr'):
            if not data[k]:
                verrors.add(f'{schema}.{k}', f'Please specify a {k.split("_")[0]} CIDR.')
            elif any(ipaddress.ip_network(data[k], False).overlaps(cidr) for cidr in network_cidrs):
                verrors.add(f'{schema}.{k}', 'Requested CIDR is already in use.')

        if data['cluster_cidr'] and data['service_cidr'] and ipaddress.ip_network(data['cluster_cidr'], False).overlaps(
            ipaddress.ip_network(data['service_cidr'], False)
        ):
            verrors.add(f'{schema}.cluster_cidr', 'Must not overlap with service CIDR.')

        if data['service_cidr'] and data['cluster_dns_ip'] and ipaddress.ip_address(
            data['cluster_dns_ip']
        ) not in ipaddress.ip_network(data['service_cidr']):
            verrors.add(f'{schema}.cluster_dns_ip', 'Must be in range of "service_cidr".')

        if data['node_ip'] not in await self.bindip_choices():
            verrors.add(f'{schema}.node_ip', 'Please provide a valid IP address.')

        if not await self.middleware.call('route.configured_default_ipv4_route'):
            if not data['route_v4_gateway']:
                verrors.add(f'{schema}.route_v4_gateway', 'Please set a default route for system or for kubernetes.')
            if not data['route_v4_interface']:
                verrors.add(
                    f'{schema}.route_v4_interface',
                    'Please set a default route for system or specify default interface to be used for kubernetes.'
                )

        for k, _ in await self.validate_interfaces(data):
            verrors.add(f'{schema}.{k}', 'Please specify a valid interface.')

        for k in ('route_v4', 'route_v6'):
            gateway = data[f'{k}_gateway']
            interface = data[f'{k}_interface']
            if (not gateway and not interface) or (gateway and interface):
                continue
            for k2 in ('gateway', 'interface'):
                verrors.add(f'{schema}.{k}_{k2}', f'{k}_gateway and {k}_interface must be specified together.')

        verrors.check()

    @private
    async def validate_interfaces(self, data):
        errors = []
        interfaces = {i['name']: i for i in await self.middleware.call('interface.query')}
        for k in filter(
            lambda k: data[k] and data[k] not in interfaces, ('route_v4_interface', 'route_v6_interface')
        ):
            errors.append((k, data[k]))
        return errors

    @accepts(
        Dict(
            'kubernetes_update',
            Bool('migrate_applications'),
            Str('pool', empty=False, null=True),
            IPAddr('cluster_cidr', cidr=True, empty=True),
            IPAddr('service_cidr', cidr=True, empty=True),
            IPAddr('cluster_dns_ip', empty=True),
            IPAddr('node_ip'),
            Str('route_v4_interface', null=True),
            IPAddr('route_v4_gateway', null=True, v6=False),
            Str('route_v6_interface', null=True),
            IPAddr('route_v6_gateway', null=True, v4=False),
            update=True,
        )
    )
    @job(lock='kubernetes_update')
    async def do_update(self, job, data):
        """
        `pool` must be a valid ZFS pool configured in the system. Kubernetes service will initialise the pool by
        creating datasets under `pool_name/ix-applications`.

        `cluster_cidr` is the CIDR to be used for the default NAT network between workloads.

        `service_cidr` is the CIDR to be used for kubernetes services, which are an abstraction referring to a
        logical set of kubernetes pods.

        `cluster_dns_ip` is the IP of the DNS server running for the kubernetes cluster. It must be in the range
        of `service_cidr`.

        Values specified for `cluster_cidr`, `service_cidr` and `cluster_dns_ip` are permanent, and a subsequent
        change requires re-initialisation of the applications. To clarify, the system will destroy the old
        `ix-applications` dataset and any data within it when any of the above configuration values change.

        `node_ip` is the IP address which the kubernetes cluster will assign to the TrueNAS node. It defaults to
        0.0.0.0, in which case the cluster automatically manages which IP address to use for traffic on the
        default NAT network.

        By default kubernetes pods use the system's default gateway for outbound traffic. This might not be
        desirable for users who want to route NAT traffic over a specific interface / route. The system creates
        an L3 network which routes the NAT traffic towards the default gateway.

        If users want to restrict traffic to a certain gateway / interface, they can specify a default route
        for the NAT traffic. `route_v4_interface` and `route_v4_gateway` set a default route for the kubernetes
        cluster IPv4 traffic. Similarly, `route_v6_interface` and `route_v6_gateway` can be used to specify a
        default route for IPv6 traffic.

        If the user is switching pools and the new pool has not been configured for kubernetes before, data can
        be replicated from the old pool to the new pool by setting the `migrate_applications` attribute. This
        replicates the contents of the old pool's ix-applications dataset to the new pool.
        """
        old_config = await self.config()
        old_config.pop('dataset')
        config = old_config.copy()
        config.update(data)
        migrate = config.get('migrate_applications')

        await self.validate_data(config, 'kubernetes_update', old_config)

        if migrate and config['pool'] != old_config['pool']:
            job.set_progress(
                25, f'Migrating {applications_ds_name(old_config["pool"])} to {applications_ds_name(config["pool"])}'
            )
            await self.migrate_ix_applications_dataset(config['pool'], old_config['pool'])
            job.set_progress(40, 'Migration complete for ix-applications dataset')

        if len(set(old_config.items()) ^ set(config.items())) > 0:
            await self.middleware.call('chart.release.clear_update_alerts_for_all_chart_releases')
            config['cni_config'] = {}
            await self.middleware.call('datastore.update', self._config.datastore, old_config['id'], config)
            await self.middleware.call('kubernetes.status_change')
            if not config['pool'] and config['pool'] != old_config['pool']:
                # We only want to do this when we don't have any pool configured and would like to use
                # host catalog repos temporarily. Otherwise, we should call this after k8s datasets have
                # been initialised
                await self.middleware.call('catalog.sync_all')

        return await self.config()

    @private
    async def migrate_ix_applications_dataset(self, new_pool, old_pool):
        snap_details = await self.middleware.call(
            'zfs.snapshot.create', {
                'dataset': applications_ds_name(old_pool),
                'naming_schema': MIGRATION_NAMING_SCHEMA,
                'recursive': True,
            }
        )

        try:
            old_ds = applications_ds_name(old_pool)
            new_ds = applications_ds_name(new_pool)
            migrate_job = await self.middleware.call(
                'replication.run_onetime', {
                    'direction': 'PUSH',
                    'transport': 'LOCAL',
                    'source_datasets': [old_ds],
                    'target_dataset': new_ds,
                    'recursive': True,
                    'also_include_naming_schema': [MIGRATION_NAMING_SCHEMA],
                    'retention_policy': 'NONE',
                    'replicate': True,
                    'readonly': 'IGNORE',
                }
            )
            await migrate_job.wait()
            if migrate_job.error:
                raise CallError(f'Failed to migrate {old_ds} to {new_ds}: {migrate_job.error}')
        finally:
            await self.middleware.call('zfs.snapshot.delete', snap_details['id'], {'recursive': True})
            snap_name = f'{applications_ds_name(new_pool)}@{snap_details["snapshot_name"]}'
            if await self.middleware.call('zfs.snapshot.query', [['id', '=', snap_name]]):
                await self.middleware.call('zfs.snapshot.delete', snap_name, {'recursive': True})

    @accepts()
    async def bindip_choices(self):
        """
        Returns IP choices for the Kubernetes service to use.
        """
        return {
            d['address']: d['address'] for d in await self.middleware.call(
                'interface.ip_in_use', {'static': True, 'any': True}
            )
        }

    @private
    async def validate_k8s_setup(self):
        k8s_config = await self.middleware.call('kubernetes.config')
        if not k8s_config['dataset']:
            raise CallError('Please configure kubernetes pool.')
        if not await self.middleware.call('service.started', 'kubernetes'):
            raise CallError('Kubernetes service is not running.')

    @accepts()
    async def node_ip(self):
        """
        Returns the IP address kubernetes uses to accept incoming connections.
        """
        k8s_node_config = await self.middleware.call('k8s.node.config')
        node_ip = None
        if k8s_node_config['node_configured']:
            node_ip = next(
                (addr['address'] for addr in k8s_node_config['status']['addresses'] if addr['type'] == 'InternalIP'),
                None
            )
        if not node_ip:
            node_ip = (await self.middleware.call('kubernetes.config'))['node_ip']

        return node_ip
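
A minimal standalone sketch of the CIDR auto-selection performed by unused_cidrs()/validate_data() above, assuming `used_networks` stands in for the CIDRs already configured on the system interfaces (illustrative only, not part of the service):

import ipaddress
import itertools


def pick_unused_cidrs(used_networks, count=2):
    """Walk candidate subnets of the private ranges and keep those that do
    not overlap any CIDR already in use, mirroring unused_cidrs()."""
    used = [ipaddress.ip_network(n, strict=False) for n in used_networks]
    candidates = itertools.chain(
        ipaddress.ip_network('172.16.0.0/12').subnets(4),   # /16 blocks
        ipaddress.ip_network('10.0.0.0/8').subnets(8),      # /16 blocks
        ipaddress.ip_network('192.168.0.0/16').subnets(1),  # /17 blocks
    )
    free = [str(c) for c in candidates if not any(c.overlaps(u) for u in used)]
    return free[:count]


# With only 192.168.0.0/24 in use, cluster/service CIDRs come from 172.16.0.0/12:
print(pick_unused_cidrs(['192.168.0.0/24']))  # ['172.16.0.0/16', '172.17.0.0/16']
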
Example #10
class UpdateService(Service):
    @accepts()
    def get_trains(self):
        """
        Returns a dict of available trains, the currently configured train, and the train of the currently
        booted environment.
        """
        data = self.middleware.call_sync('datastore.config', 'system.update')
        conf = Configuration.Configuration()
        conf.LoadTrainsConfig()

        selected = None
        trains = {}
        for name, descr in (conf.AvailableTrains() or {}).items():
            train = conf._trains.get(name)
            if train is None:
                train = Train.Train(name, descr)
            if not selected and data['upd_train'] == train.Name():
                selected = data['upd_train']
            trains[train.Name()] = {
                'description': train.Description(),
                'sequence': train.LastSequence(),
            }
        if not data['upd_train'] or not selected:
            selected = conf.CurrentTrain()
        return {
            'trains': trains,
            'current': conf.CurrentTrain(),
            'selected': selected,
        }

    @accepts(
        Dict(
            'update-check-available',
            Str('train', required=False),
            required=False,
        ))
    def check_available(self, attrs=None):
        """
        Checks if there is an update available from update server.

        status:
          - REBOOT_REQUIRED: an update has already been applied
          - AVAILABLE: an update is available
          - UNAVAILABLE: no update available

        .. examples(websocket)::

          Check available update using default train:

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "update.check_available"
            }
        """

        try:
            applied = self.middleware.call_sync('cache.get', 'update.applied')
        except Exception:
            applied = False
        if applied is True:
            return {'status': 'REBOOT_REQUIRED'}

        train = (attrs or {}).get('train') or self.middleware.call_sync(
            'update.get_trains')['selected']

        handler = CheckUpdateHandler()
        manifest = CheckForUpdates(
            diff_handler=handler.diff_call,
            handler=handler.call,
            train=train,
        )

        if not manifest:
            return {'status': 'UNAVAILABLE'}

        data = {
            'status': 'AVAILABLE',
            'changes': handler.changes,
            'notice': manifest.Notice(),
            'notes': manifest.Notes(),
        }

        conf = Configuration.Configuration()
        sys_mani = conf.SystemManifest()
        if sys_mani:
            sequence = sys_mani.Sequence()
        else:
            sequence = ''
        data['changelog'] = get_changelog(train,
                                          start=sequence,
                                          end=manifest.Sequence())

        data['version'] = manifest.Version()
        return data

    @accepts(Str('path'))
    async def get_pending(self, path=None):
        """
        Gets a list of packages already downloaded and ready to be applied.
        Each entry in the list consists of the type of operation and its name, e.g.

          {
            "operation": "upgrade",
            "name": "baseos-11.0 -> baseos-11.1"
          }
        """
        if path is None:
            path = await self.middleware.call('notifier.get_update_location')
        data = []
        try:
            changes = await self.middleware.run_in_thread(
                Update.PendingUpdatesChanges, path)
        except (
                UpdateIncompleteCacheException,
                UpdateInvalidCacheException,
                UpdateBusyCacheException,
        ):
            changes = []
        if changes:
            if changes.get("Reboot", True) is False:
                for svc in changes.get("Restart", []):
                    data.append({
                        'operation': svc,
                        'name': Update.GetServiceDescription(svc),
                    })
            for new, op, old in changes['Packages']:
                if op == 'upgrade':
                    name = '%s-%s -> %s-%s' % (
                        old.Name(),
                        old.Version(),
                        new.Name(),
                        new.Version(),
                    )
                elif op == 'install':
                    name = '%s-%s' % (new.Name(), new.Version())
                else:
                    # It's unclear why "delete" would fill out new
                    # instead of old; sounds like a pkgtools bug?
                    if old:
                        name = '%s-%s' % (old.Name(), old.Version())
                    else:
                        name = '%s-%s' % (new.Name(), new.Version())

                data.append({
                    'operation': op,
                    'name': name,
                })
        return data

    @accepts(
        Dict(
            'update',
            Str('train', required=False),
            Bool('reboot', default=False),
            required=False,
        ))
    @job(lock='update', process=True)
    async def update(self, job, attrs=None):
        """
        Downloads (if not already in cache) and applies an update.
        """
        attrs = attrs or {}
        train = attrs.get('train') or (
            await self.middleware.call('update.get_trains'))['selected']
        location = await self.middleware.call('notifier.get_update_location')

        job.set_progress(0, 'Retrieving update manifest')

        handler = UpdateHandler(self, job)

        update = Update.DownloadUpdate(
            train,
            location,
            check_handler=handler.check_handler,
            get_handler=handler.get_handler,
        )
        if update is False:
            raise ValueError('No update available')

        new_manifest = Manifest.Manifest(require_signature=True)
        new_manifest.LoadPath('{}/MANIFEST'.format(location))

        Update.ApplyUpdate(
            location,
            install_handler=handler.install_handler,
        )
        await self.middleware.call('cache.put', 'update.applied', True)

        if attrs.get('reboot'):
            await self.middleware.call('system.reboot', {'delay': 10})
        return True

    @accepts()
    @job(lock='updatedownload')
    def download(self, job):
        train = self.middleware.call_sync('update.get_trains')['selected']
        location = self.middleware.call_sync('notifier.get_update_location')

        job.set_progress(0, 'Retrieving update manifest')

        handler = UpdateHandler(self, job, 100)

        Update.DownloadUpdate(
            train,
            location,
            check_handler=handler.check_handler,
            get_handler=handler.get_handler,
        )
        update = Update.CheckForUpdates(train=train, cache_dir=location)

        if not update:
            return False

        notified = False
        try:
            if self.middleware.call_sync('cache.has_key', 'update.notified'):
                notified = self.middleware.call_sync('cache.get',
                                                     'update.notified')
        except Exception:
            pass

        if not notified:
            self.middleware.call_sync('cache.put', 'update.notified', True)
            conf = Configuration.Configuration()
            sys_mani = conf.SystemManifest()
            if sys_mani:
                sequence = sys_mani.Sequence()
            else:
                sequence = ''

            changelog = get_changelog(train,
                                      start=sequence,
                                      end=update.Sequence())
            hostname = socket.gethostname()

            try:
                # FIXME: Translation
                self.middleware.call_sync(
                    'mail.send', {
                        'subject': '{}: {}'.format(hostname,
                                                   'Update Available'),
                        'text':
                        '''A new update is available for the %(train)s train.
Version: %(version)s
Changelog:
%(changelog)s
''' % {
                            'train': train,
                            'version': update.Version(),
                            'changelog': changelog,
                        },
                    }).wait_sync()
            except Exception:
                self.logger.warn('Failed to send email about new update',
                                 exc_info=True)
        return True

    @accepts(Str('path'))
    @job(lock='updatemanual', process=True)
    async def manual(self, job, path):
        """
        Apply manual update of file `path`.
        """
        rv = await self.middleware.call('notifier.validate_update', path)
        if not rv:
            raise CallError('Invalid update file', errno.EINVAL)
        await self.middleware.call('notifier.apply_update', path, timeout=None)
        try:
            await self.middleware.call('notifier.destroy_upload_location')
        except Exception:
            self.logger.warn('Failed to destroy upload location',
                             exc_info=True)

    @accepts(Dict(
        'updatefile',
        Str('destination'),
    ))
    @job(lock='updatemanual', pipes=['input'])
    async def file(self, job, options):
        """
        Updates the system using the uploaded .tar file.

        Use null `destination` to create a temporary location.
        """

        dest = options.get('destination')

        if not dest:
            try:
                await self.middleware.call('notifier.create_upload_location')
                dest = '/var/tmp/firmware'
            except Exception as e:
                raise CallError(str(e))
        elif not dest.startswith('/mnt/'):
            raise CallError('Destination must reside within a pool')

        if not os.path.isdir(dest):
            raise CallError('Destination is not a directory')

        destfile = os.path.join(dest, 'manualupdate.tar')
        dest_extracted = os.path.join(dest, '.update')

        try:
            job.set_progress(10, 'Writing uploaded file to disk')
            with open(destfile, 'wb') as f:
                await self.middleware.run_in_thread(
                    shutil.copyfileobj,
                    job.pipes.input.r,
                    f,
                    1048576,
                )

            def do_update():
                try:
                    job.set_progress(30, 'Extracting uploaded file')
                    ExtractFrozenUpdate(destfile, dest_extracted, verbose=True)
                    job.set_progress(50, 'Applying update')
                    ApplyUpdate(dest_extracted)
                except Exception as e:
                    raise CallError(str(e))

            await self.middleware.run_in_thread(do_update)

            job.set_progress(95, 'Cleaning up')

        finally:
            if os.path.exists(destfile):
                os.unlink(destfile)

            if os.path.exists(dest_extracted):
                shutil.rmtree(dest_extracted, ignore_errors=True)

        if dest == '/var/tmp/firmware':
            await self.middleware.call('notifier.destroy_upload_location')

        job.set_progress(100, 'Update completed')
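
The package-change rendering in get_pending() above can be sketched in isolation. `Pkg` below is a hypothetical stand-in for the package objects returned by Update.PendingUpdatesChanges(), exposing only the Name()/Version() accessors the method relies on:

class Pkg:
    """Hypothetical stand-in for a freenasOS package object."""
    def __init__(self, name, version):
        self._name, self._version = name, version

    def Name(self):
        return self._name

    def Version(self):
        return self._version


def render_changes(changes):
    # Same transformation as get_pending(): (new, op, old) tuples become
    # human-readable operation/name entries.
    data = []
    for new, op, old in changes.get('Packages', []):
        if op == 'upgrade':
            name = f'{old.Name()}-{old.Version()} -> {new.Name()}-{new.Version()}'
        elif op == 'install':
            name = f'{new.Name()}-{new.Version()}'
        else:  # delete falls back to whichever package object is present
            pkg = old or new
            name = f'{pkg.Name()}-{pkg.Version()}'
        data.append({'operation': op, 'name': name})
    return data


print(render_changes({'Packages': [(Pkg('baseos', '11.1'), 'upgrade', Pkg('baseos', '11.0'))]}))
# [{'operation': 'upgrade', 'name': 'baseos-11.0 -> baseos-11.1'}]
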
Example #11
class SharingAFPService(CRUDService):
    class Config:
        namespace = 'sharing.afp'
        datastore = 'sharing.afp_share'
        datastore_prefix = 'afp_'
        datastore_extend = 'sharing.afp.extend'

    @accepts(
        Dict('sharingafp_create',
             Str('path', required=True),
             Bool('home', default=False),
             Str('name'),
             Str('comment'),
             List('allow', default=[]),
             List('deny', default=[]),
             List('ro', default=[]),
             List('rw', default=[]),
             Bool('timemachine', default=False),
             Int('timemachine_quota', default=0),
             Bool('nodev', default=False),
             Bool('nostat', default=False),
             Bool('upriv', default=True),
             UnixPerm('fperm', default='644'),
             UnixPerm('dperm', default='755'),
             UnixPerm('umask', default='000'),
             List('hostsallow', items=[], default=[]),
             List('hostsdeny', items=[], default=[]),
             Str('vuid', null=True, default=''),
             Str('auxparams', max_length=None),
             Bool('enabled', default=True),
             register=True))
    async def do_create(self, data):
        """
        Create AFP share.

        `allow`, `deny`, `ro`, and `rw` are lists of users and groups. Groups are designated by
        an @ prefix.

        `hostsallow` and `hostsdeny` are lists of hosts and/or networks.
        """
        verrors = ValidationErrors()
        path = data['path']

        await self.clean(data, 'sharingafp_create', verrors)
        await self.validate(data, 'sharingafp_create', verrors)

        await check_path_resides_within_volume(verrors, self.middleware,
                                               'sharingafp_create.path', path)

        verrors.check()

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(data)
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.extend(data)

        await self._service_change('afp', 'reload')

        return data

    @accepts(Int('id'),
             Patch('sharingafp_create', 'sharingafp_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update AFP share `id`.
        """
        verrors = ValidationErrors()
        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })
        path = data.get('path')

        new = old.copy()
        new.update(data)

        await self.clean(new, 'sharingafp_update', verrors, id=id)
        await self.validate(new, 'sharingafp_update', verrors, old=old)

        if path:
            await check_path_resides_within_volume(
                verrors, self.middleware, 'sharingafp_update.path', path)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.extend(new)

        await self._service_change('afp', 'reload')

        return new

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete AFP share `id`.
        """
        result = await self.middleware.call('datastore.delete',
                                            self._config.datastore, id)
        await self._service_change('afp', 'reload')
        return result

    @private
    async def clean(self, data, schema_name, verrors, id=None):
        data['name'] = await self.name_exists(data, schema_name, verrors, id)

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        await self.home_exists(data['home'], schema_name, verrors, old)
        if data['vuid']:
            try:
                uuid.UUID(data['vuid'], version=4)
            except ValueError:
                verrors.add(f'{schema_name}.vuid',
                            'vuid must be a valid UUID.')

    @private
    async def home_exists(self, home, schema_name, verrors, old=None):
        home_filters = [('home', '=', True)]
        home_result = None

        if home:
            if old and old['id'] is not None:
                id = old['id']

                if not old['home']:
                    home_filters.append(('id', '!=', id))
                    # Check whether another share is already configured as the home share
                    home_result = await self.middleware.call(
                        'datastore.query', self._config.datastore,
                        home_filters,
                        {'prefix': self._config.datastore_prefix})

        if home_result:
            verrors.add(f'{schema_name}.home',
                        'Only one share is allowed to be a home share.')

    @private
    async def name_exists(self, data, schema_name, verrors, id=None):
        name = data['name']
        path = data['path']
        home = data['home']
        name_filters = [('name', '=', name)]
        path_filters = [('path', '=', path)]

        if not name:
            if home:
                name = 'Homes'
            else:
                name = path.rsplit('/', 1)[-1]

        if id is not None:
            name_filters.append(('id', '!=', id))
            path_filters.append(('id', '!=', id))

        name_result = await self.middleware.call(
            'datastore.query', self._config.datastore, name_filters,
            {'prefix': self._config.datastore_prefix})
        path_result = await self.middleware.call(
            'datastore.query', self._config.datastore, path_filters,
            {'prefix': self._config.datastore_prefix})

        if name_result:
            verrors.add(f'{schema_name}.name',
                        'A share with this name already exists.')

        if path_result:
            verrors.add(f'{schema_name}.path',
                        'A share with this path already exists.')

        return name

    @private
    async def extend(self, data):
        data['allow'] = data['allow'].split()
        data['deny'] = data['deny'].split()
        data['ro'] = data['ro'].split()
        data['rw'] = data['rw'].split()
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()

        return data

    @private
    async def compress(self, data):
        data['allow'] = ' '.join(data['allow'])
        data['deny'] = ' '.join(data['deny'])
        data['ro'] = ' '.join(data['ro'])
        data['rw'] = ' '.join(data['rw'])
        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])
        if not data['vuid'] and data['timemachine']:
            data['vuid'] = str(uuid.uuid4())
        return data
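
The compress()/extend() pair above is a simple round-trip between the list fields exposed by the API and the space-separated strings stored in the datastore row; a minimal sketch with a hypothetical subset of fields:

LIST_FIELDS = ('allow', 'deny', 'ro', 'rw', 'hostsallow', 'hostsdeny')


def compress(share):
    # API lists -> space-separated strings for the datastore row
    return {k: ' '.join(v) if k in LIST_FIELDS else v for k, v in share.items()}


def extend(row):
    # datastore strings -> API lists
    return {k: v.split() if k in LIST_FIELDS else v for k, v in row.items()}


share = {'name': 'media', 'allow': ['@staff', 'alice'], 'hostsdeny': []}
assert extend(compress(share)) == share
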
Example #12
class ACMERegistrationService(CRUDService):
    class Config:
        datastore = 'system.acmeregistration'
        datastore_extend = 'acme.registration.register_extend'
        namespace = 'acme.registration'
        private = True

    @private
    async def register_extend(self, data):
        data['body'] = {
            key: value
            for key, value in (await self.middleware.call(
                'datastore.query', 'system.acmeregistrationbody',
                [['acme', '=', data['id']]], {'get': True})).items()
            if key != 'acme'
        }
        return data

    @private
    def get_directory(self, acme_directory_uri):
        self.middleware.call_sync('network.general.will_perform_activity',
                                  'acme')

        try:
            acme_directory_uri = acme_directory_uri.rstrip('/')
            response = requests.get(acme_directory_uri).json()
            return messages.Directory({
                key: response[key]
                for key in
                ['newAccount', 'newNonce', 'newOrder', 'revokeCert']
            })
        except (requests.ConnectionError, requests.Timeout,
                json.JSONDecodeError, KeyError) as e:
            raise CallError(f'Unable to retrieve directory: {e}')

    @accepts(
        Dict(
            'acme_registration_create',
            Bool('tos', default=False),
            Dict('JWK_create', Int('key_size', default=2048),
                 Int('public_exponent', default=65537)),
            Str('acme_directory_uri', required=True),
        ))
    def do_create(self, data):
        """
        Register with ACME Server

        Create a registration for a specific ACME Server, registering the root user with it.

        `acme_directory_uri` is a directory endpoint for any ACME Server

        .. examples(websocket)::

          Register with ACME Server

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "acme.registration.create",
                "params": [{
                    "tos": true,
                    "acme_directory_uri": "https://acme-staging-v02.api.letsencrypt.org/directory"
                    "JWK_create": {
                        "key_size": 2048,
                        "public_exponent": 65537
                    }
                }]
            }
        """
        # STEPS FOR CREATION
        # 1) CREATE KEY
        # 2) REGISTER CLIENT
        # 3) SAVE REGISTRATION OBJECT
        # 4) SAVE REGISTRATION BODY

        self.middleware.call_sync('network.general.will_perform_activity',
                                  'acme')

        verrors = ValidationErrors()

        directory = self.get_directory(data['acme_directory_uri'])
        if not isinstance(directory, messages.Directory):
            verrors.add(
                'acme_registration_create.acme_directory_uri',
                f'System was unable to retrieve the directory with the specified acme_directory_uri: {directory}'
            )

        # Normalizing uri after directory call as let's encrypt staging api
        # does not accept a trailing slash right now
        data['acme_directory_uri'] += '/' if data['acme_directory_uri'][
            -1] != '/' else ''

        if not data['tos']:
            verrors.add('acme_registration_create.tos',
                        'Please agree to the terms of service')

        # For now we assume that only root is responsible for certs issued under ACME protocol
        email = (self.middleware.call_sync('user.query',
                                           [['uid', '=', 0]]))[0]['email']
        if not email:
            raise CallError(
                'Please configure an email address for "root" user which will be used with the ACME server'
            )

        if self.middleware.call_sync(
                'acme.registration.query',
            [['directory', '=', data['acme_directory_uri']]]):
            verrors.add(
                'acme_registration_create.acme_directory_uri',
                'A registration with the specified directory uri already exists'
            )

        if verrors:
            raise verrors

        key = jose.JWKRSA(key=rsa.generate_private_key(
            public_exponent=data['JWK_create']['public_exponent'],
            key_size=data['JWK_create']['key_size'],
            backend=default_backend()))
        acme_client = client.ClientV2(directory, client.ClientNetwork(key))
        register = acme_client.new_account(
            messages.NewRegistration.from_data(email=email,
                                               terms_of_service_agreed=True))
        # We have registered with the acme server

        # Save registration object
        registration_id = self.middleware.call_sync(
            'datastore.insert', self._config.datastore, {
                'uri': register.uri,
                'tos': register.terms_of_service,
                'new_account_uri': directory.newAccount,
                'new_nonce_uri': directory.newNonce,
                'new_order_uri': directory.newOrder,
                'revoke_cert_uri': directory.revokeCert,
                'directory': data['acme_directory_uri']
            })

        # Save registration body
        self.middleware.call_sync(
            'datastore.insert', 'system.acmeregistrationbody', {
                'contact': register.body.contact[0],
                'status': register.body.status,
                'key': key.json_dumps(),
                'acme': registration_id
            })

        return self.middleware.call_sync(
            f'{self._config.namespace}.get_instance', registration_id)
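
A minimal sketch of the directory-fetch step used by get_directory() above, limited to the calls that already appear in the original (requests plus acme.messages.Directory); the staging URL is the one from the docstring example and the fetch itself is left commented out since it needs network access:

import requests
from acme import messages


def fetch_directory(acme_directory_uri):
    # Same idea as get_directory(): strip the trailing slash, fetch the JSON
    # directory document and keep only the endpoints the service needs.
    response = requests.get(acme_directory_uri.rstrip('/')).json()
    return messages.Directory({
        key: response[key]
        for key in ('newAccount', 'newNonce', 'newOrder', 'revokeCert')
    })


# directory = fetch_directory('https://acme-staging-v02.api.letsencrypt.org/directory')
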
Example #13
class VMDeviceService(CRUDService):

    DEVICE_ATTRS = {
        'CDROM': CDROM.schema,
        'RAW': RAW.schema,
        'DISK': DISK.schema,
        'NIC': NIC.schema,
        'PCI': PCI.schema,
        'DISPLAY': DISPLAY.schema,
    }

    class Config:
        namespace = 'vm.device'
        datastore = 'vm.device'
        datastore_extend = 'vm.device.extend_device'
        cli_namespace = 'service.vm.device'

    @private
    async def create_resource(self, device, old=None):
        return (
            (device['dtype'] == 'DISK' and device['attributes'].get('create_zvol')) or (
                device['dtype'] == 'RAW' and (not device['attributes'].get('exists', True) or (
                    old and old['attributes'].get('size') != device['attributes'].get('size')
                ))
            )
        )

    @private
    async def extend_device(self, device):
        if device['vm']:
            device['vm'] = device['vm']['id']
        if not device['order']:
            if device['dtype'] == 'CDROM':
                device['order'] = 1000
            elif device['dtype'] in ('DISK', 'RAW'):
                device['order'] = 1001
            else:
                device['order'] = 1002
        return device

    @accepts()
    def nic_attach_choices(self):
        """
        Available choices for NIC Attach attribute.
        """
        return self.middleware.call_sync('interface.choices', {'exclude': ['epair', 'tap', 'vnet']})

    @accepts()
    async def bind_choices(self):
        """
        Available choices for Bind attribute.
        """
        return {
            d['address']: d['address'] for d in await self.middleware.call(
                'interface.ip_in_use', {'static': True, 'any': True}
            )
        }

    @private
    async def update_device(self, data, old=None):
        if data['dtype'] == 'DISK':
            create_zvol = data['attributes'].pop('create_zvol', False)

            if create_zvol:
                ds_options = {
                    'name': data['attributes'].pop('zvol_name'),
                    'type': 'VOLUME',
                    'volsize': data['attributes'].pop('zvol_volsize'),
                }

                self.logger.debug(f'Creating ZVOL {ds_options["name"]} with volsize {ds_options["volsize"]}')

                zvol_blocksize = await self.middleware.call(
                    'pool.dataset.recommended_zvol_blocksize', ds_options['name'].split('/', 1)[0]
                )
                ds_options['volblocksize'] = zvol_blocksize

                new_zvol = (await self.middleware.call('pool.dataset.create', ds_options))['id']
                data['attributes']['path'] = f'/dev/zvol/{new_zvol}'
        elif data['dtype'] == 'RAW' and (
            not data['attributes'].pop('exists', True) or (
                old and old['attributes']['size'] != data['attributes']['size']
            )
        ):
            path = data['attributes']['path']
            cp = await run(['truncate', '-s', str(data['attributes']['size']), path], check=False)
            if cp.returncode:
                raise CallError(f'Failed to create or update raw file {path}: {cp.stderr}')

        return data

    @accepts(
        Dict(
            'vmdevice_create',
            Str('dtype', enum=['NIC', 'DISK', 'CDROM', 'PCI', 'DISPLAY', 'RAW'],
                required=True),
            Int('vm', required=True),
            Dict('attributes', additional_attrs=True, default=None),
            Int('order', default=None, null=True),
            register=True,
        ),
    )
    async def do_create(self, data):
        """
        Create a new device for the VM of id `vm`.

        If `dtype` is the `RAW` type and a new raw file is to be created, `attributes.exists` will be passed as false.
        This means the API handles creating the raw file and raises the appropriate exception if file creation fails.

        If `dtype` is of `DISK` type and a new Zvol is to be created, `attributes.create_zvol` will be passed as
        true with valid `attributes.zvol_name` and `attributes.zvol_volsize` values.
        """
        data = await self.validate_device(data, update=False)
        data = await self.update_device(data)

        id = await self.middleware.call(
            'datastore.insert', self._config.datastore, data
        )
        await self.__reorder_devices(id, data['vm'], data['order'])

        return await self.get_instance(id)

    @accepts(Int('id'), Patch(
        'vmdevice_create',
        'vmdevice_update',
        ('attr', {'update': True}),
    ))
    async def do_update(self, id, data):
        """
        Update a VM device of `id`.

        Pass `attributes.size` to resize a `dtype` `RAW` device. The raw file will be resized.
        """
        device = await self.get_instance(id)
        new = device.copy()
        new.update(data)

        new = await self.validate_device(new, device)
        new = await self.update_device(new, device)

        await self.middleware.call('datastore.update', self._config.datastore, id, new)
        await self.__reorder_devices(id, device['vm'], new['order'])

        return await self.get_instance(id)

    @private
    async def delete_resource(self, options, device):
        if options['zvol']:
            if device['dtype'] != 'DISK':
                raise CallError('The device is not a disk and has no zvol to destroy.')
            zvol_id = device['attributes'].get('path', '').rsplit('/dev/zvol/')[-1]
            if await self.middleware.call('pool.dataset.query', [['id', '=', zvol_id]]):
                # FIXME: We should use pool.dataset.delete but right now FS attachments will consider
                # the current device as a valid reference. Also, should we stop the VM only when deleting an
                # attachment?
                await self.middleware.call('zfs.dataset.delete', zvol_id)
        if options['raw_file']:
            if device['dtype'] != 'RAW':
                raise CallError('Device is not of RAW type.')
            try:
                os.unlink(device['attributes']['path'])
            except OSError:
                raise CallError(f'Failed to destroy {device["attributes"]["path"]}')

    @accepts(
        Int('id'),
        Dict(
            'vm_device_delete',
            Bool('zvol', default=False),
            Bool('raw_file', default=False),
            Bool('force', default=False),
        )
    )
    async def do_delete(self, id, options):
        """
        Delete a VM device of `id`.
        """
        device = await self.get_instance(id)
        status = await self.middleware.call('vm.status', device['vm'])
        if status.get('state') == 'RUNNING':
            raise CallError('Please stop associated VM before deleting VM device.')

        try:
            await self.delete_resource(options, device)
        except CallError:
            if not options['force']:
                raise

        if device['dtype'] == 'PCI':
            device_obj = PCI(device, middleware=self.middleware)
            if await self.middleware.run_in_thread(device_obj.safe_to_reattach):
                try:
                    await self.middleware.run_in_thread(device_obj.reattach_device)
                except CallError:
                    if not options['force']:
                        raise

        return await self.middleware.call('datastore.delete', self._config.datastore, id)

    async def __reorder_devices(self, id, vm_id, order):
        if order is None:
            return
        filters = [('vm', '=', vm_id), ('id', '!=', id)]
        if await self.middleware.call('vm.device.query', filters + [('order', '=', order)]):
            used_order = [order]
            for device in await self.middleware.call('vm.device.query', filters, {'order_by': ['order']}):
                if not device['order']:
                    continue

                if device['order'] not in used_order:
                    used_order.append(device['order'])
                    continue

                device['order'] = min(used_order) + 1
                while device['order'] in used_order:
                    device['order'] += 1
                used_order.append(device['order'])
                await self.middleware.call('datastore.update', self._config.datastore, device['id'], device)

    @private
    async def disk_uniqueness_integrity_check(self, device, vm):
        # This ensures that the disk is not already present for `vm`
        def translate_device(dev):
            # A disk should have a path configured at all times; when that is not the case, it means `dtype` is DISK
            # and the end user wants to create a new zvol.
            return dev['attributes'].get('path') or f'/dev/zvol/{dev["attributes"]["zvol_name"]}'

        disks = [
            d for d in vm['devices']
            if d['dtype'] in ('DISK', 'RAW', 'CDROM') and translate_device(d) == translate_device(device)
        ]
        if not disks:
            # We don't have that disk path in vm devices, we are good to go
            return True
        elif len(disks) > 1:
            # VM is mis-configured
            return False
        elif not device.get('id') and disks:
            # A new device is being created, however it already exists in vm. This can also happen when VM instance
            # is being created, in that case it's okay. Key here is that we won't have the id field present
            return not bool(disks[0].get('id'))
        elif device.get('id'):
            # The device is being updated, if the device is same as we have in db, we are okay
            return device['id'] == disks[0].get('id')
        else:
            return False

    @private
    async def validate_device(self, device, old=None, vm_instance=None, update=True):
        # We allow vm_instance to be passed for cases where VM devices are being updated via VM and
        # the device checks should be performed with the modified vm_instance object not the one db holds
        # vm_instance should be provided at all times when handled by VMService, if VMDeviceService is interacting,
        # then it means the device is configured with a VM and we can retrieve the VM's data from db
        if not vm_instance:
            vm_instance = await self.middleware.call('vm.get_instance', device['vm'])

        verrors = ValidationErrors()
        schema = self.DEVICE_ATTRS.get(device['dtype'])
        if schema:
            try:
                device['attributes'] = schema.clean(device['attributes'])
            except Error as e:
                verrors.add(f'attributes.{e.attribute}', e.errmsg, e.errno)

            try:
                schema.validate(device['attributes'])
            except ValidationErrors as e:
                verrors.extend(e)

            if verrors:
                raise verrors

        # vm_instance usages SHOULD NOT rely on device `id` field to uniquely identify objects as it's possible
        # VMService is creating a new VM with devices and the id's don't exist yet

        if device.get('dtype') == 'DISK':
            create_zvol = device['attributes'].get('create_zvol')
            path = device['attributes'].get('path')
            if create_zvol:
                for attr in ('zvol_name', 'zvol_volsize'):
                    if not device['attributes'].get(attr):
                        verrors.add(f'attributes.{attr}', 'This field is required.')
                parentzvol = (device['attributes'].get('zvol_name') or '').rsplit('/', 1)[0]
                if parentzvol and not await self.middleware.call('pool.dataset.query', [('id', '=', parentzvol)]):
                    verrors.add(
                        'attributes.zvol_name',
                        f'Parent dataset {parentzvol} does not exist.', errno.ENOENT
                    )
                zvol = await self.middleware.call(
                    'pool.dataset.query', [['id', '=', device['attributes'].get('zvol_name')]]
                )
                if not verrors and create_zvol and zvol:
                    verrors.add(
                        'attributes.zvol_name', f'{device["attributes"]["zvol_name"]} already exists.'
                    )
                elif zvol and zvol[0]['locked']:
                    verrors.add('attributes.zvol_name', f'{zvol[0]["id"]} is locked.')
            elif not path:
                verrors.add('attributes.path', 'Disk path is required.')
            elif path and not os.path.exists(path):
                verrors.add('attributes.path', f'Disk path {path} does not exist.', errno.ENOENT)

            if path and len(path) > 63:
                # SPECNAMELEN is only 63 in FreeBSD 12; it will be 255 in 13
                verrors.add(
                    'attributes.path',
                    f'Disk path {path} is too long, reduce to less than 63 characters', errno.ENAMETOOLONG
                )
            if not await self.disk_uniqueness_integrity_check(device, vm_instance):
                verrors.add(
                    'attributes.path',
                    f'{vm_instance["name"]} has "{path}" already configured'
                )
        elif device.get('dtype') == 'RAW':
            path = device['attributes'].get('path')
            exists = device['attributes'].get('exists', True)
            if not path:
                verrors.add('attributes.path', 'Path is required.')
            else:
                if exists and not os.path.exists(path):
                    verrors.add('attributes.path', 'Path must exist.')
                if not exists:
                    if os.path.exists(path):
                        verrors.add('attributes.path', 'Path must not exist.')
                    elif not device['attributes'].get('size'):
                        verrors.add('attributes.size', 'Please provide a valid size for the raw file.')
                if (
                    old and old['attributes'].get('size') != device['attributes'].get('size') and
                    not device['attributes'].get('size')
                ):
                    verrors.add('attributes.size', 'Please provide a valid size for the raw file.')
                await check_path_resides_within_volume(
                    verrors, self.middleware, 'attributes.path', path,
                )
                if not await self.disk_uniqueness_integrity_check(device, vm_instance):
                    verrors.add(
                        'attributes.path',
                        f'{vm_instance["name"]} has "{path}" already configured'
                    )
        elif device.get('dtype') == 'CDROM':
            path = device['attributes'].get('path')
            if not path:
                verrors.add('attributes.path', 'Path is required.')
            elif not os.path.exists(path):
                verrors.add('attributes.path', f'Unable to locate CDROM device at {path}')
            elif not await self.disk_uniqueness_integrity_check(device, vm_instance):
                verrors.add('attributes.path', f'{vm_instance["name"]} has "{path}" already configured')
        elif device.get('dtype') == 'NIC':
            nic = device['attributes'].get('nic_attach')
            if nic:
                nic_choices = await self.middleware.call('vm.device.nic_attach_choices')
                if nic not in nic_choices:
                    verrors.add('attributes.nic_attach', 'Not a valid choice.')
            await self.failover_nic_check(device, verrors, 'attributes')
        elif device.get('dtype') == 'PCI':
            pptdev = device['attributes'].get('pptdev')
            if osc.IS_FREEBSD and not RE_PPTDEV_NAME.findall(pptdev):
                verrors.add('attribute.pptdev', 'Please specify correct PCI device for passthru.')
            device_details = await self.middleware.call('vm.device.passthrough_device', pptdev)
            if device_details.get('error'):
                verrors.add(
                    'attribute.pptdev',
                    f'Not a valid choice. The PCI device is not available for passthru: {device_details["error"]}'
                )
            if not await self.middleware.call('vm.device.iommu_enabled'):
                verrors.add('attribute.pptdev', 'IOMMU support is required.')
        elif device.get('dtype') == 'DISPLAY':
            if vm_instance:
                if osc.IS_FREEBSD and vm_instance['bootloader'] != 'UEFI':
                    verrors.add('dtype', 'Display only works with UEFI bootloader.')

                if not update:
                    vm_instance['devices'].append(device)

                await self.validate_display_devices(verrors, vm_instance)

            if osc.IS_FREEBSD and device['attributes']['type'] != 'VNC':
                verrors.add('attributes.type', 'Only VNC Display device is supported for this platform.')

            all_ports = [
                d['attributes'].get('port')
                for d in (await self.middleware.call('vm.device.query', [['dtype', '=', 'DISPLAY']]))
                if d['id'] != device.get('id')
            ]
            if device['attributes'].get('port'):
                if device['attributes']['port'] in all_ports:
                    verrors.add('attributes.port', 'Specified display port is already in use')
            else:
                device['attributes']['port'] = (await self.middleware.call('vm.port_wizard'))['port']

        if device['dtype'] in ('RAW', 'DISK') and device['attributes'].get('physical_sectorsize')\
                and not device['attributes'].get('logical_sectorsize'):
            verrors.add(
                'attributes.logical_sectorsize',
                'This field must be provided when physical_sectorsize is specified.'
            )

        if verrors:
            raise verrors

        return device

    @private
    async def validate_display_devices(self, verrors, vm_instance):
        devs = await self.get_display_devices(vm_instance)
        if len(devs['vnc']) > 1:
            verrors.add('attributes.type', 'Only one VNC Display device is supported')
        if len(devs['spice']) > 1:
            verrors.add('attributes.type', 'Only one SPICE Display device is supported')

    @private
    async def get_display_devices(self, vm_instance):
        devs = {'vnc': [], 'spice': []}
        for dev in filter(lambda d: d['dtype'] == 'DISPLAY', vm_instance['devices']):
            if dev['attributes']['type'] == 'SPICE':
                devs['spice'].append(dev)
            else:
                devs['vnc'].append(dev)
        return devs

    @private
    async def failover_nic_check(self, vm_device, verrors, schema):
        if await self.middleware.call('system.is_enterprise') and await self.middleware.call('failover.licensed'):
            nics = await self.middleware.call('vm.device.nic_capability_checks', [vm_device])
            if nics:
                verrors.add(
                    f'{schema}.nic_attach',
                    f'Capabilities must be disabled for {",".join(nics)} interface '
                    'in Network->Interfaces section before using this device with VM.'
                )
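
The collision handling in __reorder_devices() can be illustrated as a pure function over plain dicts (hypothetical data, no datastore calls), assuming a collision with the new device's order has already been detected:

def reorder(devices, new_id, new_order):
    """Keep the new device at new_order and bump any other colliding device
    to the next free slot, mirroring __reorder_devices()."""
    used_order = [new_order]
    updated = []
    for device in sorted(
        (d for d in devices if d['id'] != new_id and d['order']),
        key=lambda d: d['order'],
    ):
        if device['order'] not in used_order:
            used_order.append(device['order'])
            continue
        device['order'] = min(used_order) + 1
        while device['order'] in used_order:
            device['order'] += 1
        used_order.append(device['order'])
        updated.append(device)
    return updated


devices = [{'id': 1, 'order': 1001}, {'id': 2, 'order': 1001}, {'id': 3, 'order': 1002}]
print(reorder(devices, new_id=1, new_order=1001))
# devices 2 and 3 are bumped to order 1002 and 1003 respectively
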
Example #14
    async def validate_attrs(self, data):
        verrors = ValidationErrors()

        additional_params = data.get('additional_params')
        if additional_params:
            # Let's be very generic here and introduce very basic validation
            # Expected format is as following
            # [ipv6.icmpneighbor]
            #   history = 86400
            #   enabled = yes
            #
            # While we are here, we will also introduce basic formatting to the file to ensure
            # that we can make it as compliant as possible

            param_str = ''
            for i in additional_params.split('\n'):
                i = i.strip()
                if not i:
                    continue
                if i.startswith('#'):
                    # Let's not validate this
                    if i.replace('#', '').startswith('['):
                        param_str += f'\n\n{i}'
                    else:
                        param_str += f'\n\t{i}'

                    continue

                if i.startswith('[') and not i.endswith(']'):
                    verrors.add(
                        'netdata_update.additional_params',
                        f'Please correct the format for {i}, e.g. [system.intr]'
                    )
                elif not i.startswith('[') and '=' not in i:
                    verrors.add(
                        'netdata_update.additional_params',
                        f'Please correct the format for {i}, e.g. enabled = yes'
                    )

                if i.startswith('['):
                    param_str += f'\n\n{i}'
                else:
                    param_str += f'\n\t{i}'

            data['additional_params'] = param_str + '\n'

        bind_to_ips = data.get('bind')
        if bind_to_ips:
            valid_ips = [ip['address'] for ip in await self.middleware.call('interface.ip_in_use')]
            valid_ips.extend(['127.0.0.1', '::1', '0.0.0.0', '::'])

            for bind_ip in bind_to_ips:
                if bind_ip not in valid_ips:
                    verrors.add(
                        'netdata_update.bind',
                        f'Invalid {bind_ip} bind IP'
                    )
        else:
            verrors.add(
                'netdata_update.bind',
                'This field is required'
            )

        update_alarms = data.pop('update_alarms', {})
        valid_alarms = self._alarms
        if update_alarms:
            for alarm in update_alarms:
                if alarm not in valid_alarms:
                    verrors.add(
                        'netdata_update.alarms',
                        f'{alarm} is not a valid alarm'
                    )

            verrors.extend(
                validate_attributes(
                    [Dict(key, Bool('enabled', required=True)) for key in update_alarms],
                    {'attributes': update_alarms}
                )
            )

        # Validating streaming metrics now
        stream_mode = data.get('stream_mode')
        if stream_mode == 'SLAVE':
            for key in ('api_key', 'destination'):
                if not data.get(key):
                    verrors.add(
                        f'netdata_update.{key}',
                        f'{key} is required when stream mode is SLAVE'
                    )

            destinations = data.get('destination')
            if destinations:
                ip_addr = IpAddress()
                port = Port()
                for dest in destinations:
                    ip = dest.split(':')[0]
                    try:
                        ip_addr(ip)
                    except ValueError as e:
                        verrors.add(
                            'netdata_update.destination',
                            str(e)
                        )
                    else:
                        if ':' in dest:
                            try:
                                port(int(dest.split(':')[1]))
                            except ValueError as e:
                                verrors.add(
                                    'netdata_update.destination',
                                    f'Not a valid port: {e}'
                                )
        elif stream_mode == 'MASTER':
            for key in ('allow_from', 'api_key'):
                if not data.get(key):
                    verrors.add(
                        f'netdata_update.{key}',
                        f'{key} is required when stream mode is MASTER'
                    )

        verrors.check()

        data['alarms'].update(update_alarms)

        return data
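
    # A minimal sketch (hypothetical values) of a payload accepted by the
    # validation above; only keys referenced in validate_attrs are shown:
    #
    #     data = {
    #         'additional_params': '[ipv6.icmpneighbor]\nhistory = 86400\nenabled = yes',
    #         'bind': ['127.0.0.1'],
    #         'stream_mode': 'SLAVE',
    #         'api_key': 'hypothetical-key',
    #         'destination': ['192.168.0.10:19999'],
    #         'update_alarms': {},
    #         'alarms': {},
    #     }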
Example #15
class IdmapDomainService(TDBWrapCRUDService):

    tdb_defaults = [
        {
            "id": 1,
            "name": "DS_TYPE_ACTIVEDIRECTORY",
            "dns_domain_name": None,
            "range_low": 90000001,
            "range_high": 200000001,
            "idmap_backend": "AUTORID",
            "options": {
                "rangesize": 10000000
            },
            "certificate": None
        },
        {
            "id": 2,
            "name": "DS_TYPE_LDAP",
            "dns_domain_name": None,
            "range_low": 10000,
            "range_high": 90000000,
            "idmap_backend": "LDAP",
            "options": {
                "ldap_base_dn": "",
                "ldap_user_dn": "",
                "ldap_url": "",
                "ssl": "OFF"
            },
            "certificate": None
        },
        {
            "id": 5,
            "name": "DS_TYPE_DEFAULT_DOMAIN",
            "dns_domain_name": None,
            "range_low": 90000001,
            "range_high": 100000000,
            "idmap_backend": "TDB",
            "options": {},
            "certificate": None
        }
    ]

    ENTRY = Patch(
        'idmap_domain_create', 'idmap_domain_entry',
        ('add', Int('id')),
    )

    class Config:
        datastore = 'directoryservice.idmap_domain'
        datastore_prefix = 'idmap_domain_'
        namespace = 'idmap'
        datastore_extend = 'idmap.idmap_extend'
        cli_namespace = 'directory_service.idmap'

    @private
    async def idmap_extend(self, data):
        if data.get('idmap_backend'):
            data['idmap_backend'] = data['idmap_backend'].upper()

        opt_enums = ['ssl', 'linked_service']
        if data.get('options'):
            for i in opt_enums:
                if data['options'].get(i):
                    data['options'][i] = data['options'][i].upper()

        return data

    @private
    async def idmap_compress(self, data):
        opt_enums = ['ssl', 'linked_service']
        if data.get('options'):
            for i in opt_enums:
                if data['options'].get(i):
                    data['options'][i] = data['options'][i].lower()

        data['idmap_backend'] = data['idmap_backend'].lower()

        return data

    @private
    async def get_next_idmap_range(self):
        """
        Increment the next high range by 100,000,000 ids. This number has
        to accommodate the highest available rid value for a domain.
        Configured idmap ranges _must_ not overlap.
        """
        domains = await self.query()
        sorted_idmaps = sorted(domains, key=lambda domain: domain['range_high'])
        low_range = sorted_idmaps[-1]['range_high'] + 1
        high_range = sorted_idmaps[-1]['range_high'] + 100000000
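        # e.g. if the highest configured range_high is 200000001, the next
        # domain is allocated the range (200000002, 300000001).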
        return (low_range, high_range)

    @private
    async def snapshot_samba4_dataset(self):
        sysdataset = (await self.middleware.call('systemdataset.config'))['basename']
        ts = str(datetime.datetime.now(datetime.timezone.utc).timestamp())[:10]
        await self.middleware.call('zfs.snapshot.create', {'dataset': f'{sysdataset}/samba4',
                                                           'name': f'wbc-{ts}'})

    @private
    async def remove_winbind_idmap_tdb(self):
        await self.snapshot_samba4_dataset()
        try:
            os.remove('/var/db/system/samba4/winbindd_idmap.tdb')

        except FileNotFoundError:
            self.logger.trace("winbindd_idmap.tdb does not exist. Skipping removal.")

        except Exception:
            self.logger.debug("Failed to remove winbindd_idmap.tdb.", exc_info=True)

    @private
    async def domain_info(self, domain):
        ret = {}

        if domain == 'DS_TYPE_ACTIVEDIRECTORY':
            domain = (await self.middleware.call('smb.config'))['workgroup']

        wbinfo = await run(['wbinfo', '-D', domain], check=False)
        if wbinfo.returncode != 0:
            if 'WBC_ERR_DOMAIN_NOT_FOUND' in wbinfo.stderr.decode():
                err = errno.ENOENT
            else:
                err = errno.EFAULT

            raise CallError(f'Failed to get domain info for {domain}: '
                            f'{wbinfo.stderr.decode().strip()}', err)

        for entry in wbinfo.stdout.splitlines():
            kv = entry.decode().split(':')
            val = kv[1].strip()
            ret.update({kv[0].strip().lower(): val if val not in ('Yes', 'No') else val == 'Yes'})

        return ret

    @private
    async def get_sssd_low_range(self, domain, sssd_config=None, seed=0xdeadbeef):
        """
        This is a best-effort attempt at SSSD compatibility. It will allocate the low
        range for the initial slice in the SSSD environment. The SSSD allocation algorithm
        is non-deterministic. The domain SID string is converted to a 32-bit hashed value
        using the murmurhash3 algorithm.

        The modulus of this value with the total number of available slices is used to
        pick the slice. This slice number is then used to calculate the low range for
        RID 0. With the default settings in SSSD this will be deterministic as long as
        the domain has less than 200,000 RIDs.
        """
        sid = (await self.domain_info(domain))['sid']
        sssd_config = {} if not sssd_config else sssd_config
        range_size = sssd_config.get('range_size', 200000)
        range_low = sssd_config.get('range_low', 10001)
        range_max = sssd_config.get('range_max', 2000200000)
        max_slices = int((range_max - range_low) / range_size)
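        # With the defaults above (ids 10001-2000200000 in slices of 200000)
        # this works out to 10000 available slices.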

        data = bytearray(sid.encode())
        datalen = len(data)
        hash = seed
        data_bytes = data
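        # The block below is an inline 32-bit MurmurHash3 of the SID string,
        # as described in the docstring above.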

        c1 = 0xcc9e2d51
        c2 = 0x1b873593
        r1 = 15
        r2 = 13
        n = 0xe6546b64

        while datalen >= 4:
            k = int.from_bytes(data_bytes[:4], byteorder='little') & 0xFFFFFFFF
            data_bytes = data_bytes[4:]
            datalen = datalen - 4
            k = (k * c1) & 0xFFFFFFFF
            k = (k << r1 | k >> 32 - r1) & 0xFFFFFFFF
            k = (k * c2) & 0xFFFFFFFF
            hash ^= k
            hash = (hash << r2 | hash >> 32 - r2) & 0xFFFFFFFF
            hash = (hash * 5 + n) & 0xFFFFFFFF

        if datalen > 0:
            k = 0
            if datalen >= 3:
                k = k | data_bytes[2] << 16
            if datalen >= 2:
                k = k | data_bytes[1] << 8
            if datalen >= 1:
                k = k | data_bytes[0]
                k = (k * c1) & 0xFFFFFFFF
                k = (k << r1 | k >> 32 - r1) & 0xFFFFFFFF
                k = (k * c2) & 0xFFFFFFFF
                hash ^= k

        hash = (hash ^ len(data)) & 0xFFFFFFFF
        hash ^= hash >> 16
        hash = (hash * 0x85ebca6b) & 0xFFFFFFFF
        hash ^= hash >> 13
        hash = (hash * 0xc2b2ae35) & 0xFFFFFFFF
        hash ^= hash >> 16

        return (hash % max_slices) * range_size + range_size

    @accepts()
    @job(lock='clear_idmap_cache')
    async def clear_idmap_cache(self, job):
        """
        Stop samba, remove the winbindd_cache.tdb file, start samba, flush samba's cache.
        This should be performed after finalizing idmap changes.
        """
        ha_mode = await self.middleware.call('smb.get_smb_ha_mode')
        if ha_mode == 'CLUSTERED':
            self.logger.warning("clear_idmap_cache is unsafe on clustered smb servers.")
            return

        smb_started = await self.middleware.call('service.started', 'cifs')
        await self.middleware.call('service.stop', 'idmap')

        try:
            os.remove('/var/db/system/samba4/winbindd_cache.tdb')

        except FileNotFoundError:
            self.logger.debug("Failed to remove winbindd_cache.tdb. File not found.")

        except Exception:
            self.logger.debug("Failed to remove winbindd_cache.tdb.", exc_info=True)

        gencache_flush = await run(['net', 'cache', 'flush'], check=False)
        if gencache_flush.returncode != 0:
            raise CallError(f'Attempt to flush gencache failed with error: {gencache_flush.stderr.decode().strip()}')

        await self.middleware.call('service.start', 'idmap')
        if smb_started:
            await self.middleware.call('service.start', 'cifs')

    @private
    async def autodiscover_trusted_domains(self):
        smb = await self.middleware.call('smb.config')

        ad_idmap_backend = (await self.query([('name', '=', 'DS_TYPE_ACTIVEDIRECTORY')], {'get': True}))['idmap_backend']
        if ad_idmap_backend == IdmapBackend.AUTORID.name:
            self.logger.trace('Skipping auto-generation of trusted domains due to AutoRID being enabled.')
            return

        wbinfo = await run(['wbinfo', '-m', '--verbose'], check=False)
        if wbinfo.returncode != 0:
            raise CallError(f'wbinfo -m failed with error: {wbinfo.stderr.decode().strip()}')

        for entry in wbinfo.stdout.decode().splitlines():
            c = entry.split()
            range_low, range_high = await self.get_next_idmap_range()
            if len(c) == 6 and c[0] != smb['workgroup']:
                await self.middleware.call('idmap.create', {
                    'name': c[0],
                    'dns_domain_name': c[1],
                    'range_low': range_low,
                    'range_high': range_high,
                    'idmap_backend': 'RID'
                })

    @accepts()
    async def backend_options(self):
        """
        This returns full information about idmap backend options. Not all
        `options` are valid for every backend.
        """
        return {x.name: x.value for x in IdmapBackend}

    @accepts(
        Str('idmap_backend', enum=[x.name for x in IdmapBackend]),
    )
    async def options_choices(self, backend):
        """
        Returns a list of supported keys for the specified idmap backend.
        """
        return IdmapBackend[backend].supported_keys()

    @accepts()
    async def backend_choices(self):
        """
        Returns array of valid idmap backend choices per directory service.
        """
        return IdmapBackend.ds_choices()

    @private
    async def validate(self, schema_name, data, verrors):
        if data['name'] == DSType.DS_TYPE_LDAP.name:
            if data['idmap_backend'] not in (await self.backend_choices())['LDAP']:
                verrors.add(f'{schema_name}.idmap_backend',
                            f'idmap backend [{data["idmap_backend"]}] is not appropriate '
                            f'for the system domain type {data["name"]}')

        elif data['name'] == DSType.DS_TYPE_DEFAULT_DOMAIN.name:
            if data['idmap_backend'] != 'TDB':
                verrors.add(f'{schema_name}.idmap_backend',
                            'TDB is the only supported idmap backend for DS_TYPE_DEFAULT_DOMAIN.')

        if data['range_high'] < data['range_low']:
            """
            If we don't exit at this point further range() operations will raise an IndexError.
            """
            verrors.add(f'{schema_name}.range_low', 'Idmap high range must be greater than idmap low range')
            return

        if data.get('certificate') and not await self.middleware.call(
            'certificate.query', [['id', '=', data['certificate']]]
        ):
            verrors.add(f'{schema_name}.certificate', 'Please specify a valid certificate.')

        configured_domains = await self.query()
        ds_state = await self.middleware.call("directoryservices.get_state")
        ldap_enabled = ds_state['ldap'] in ('HEALTHY', 'JOINING')
        ad_enabled = ds_state['activedirectory'] in ('HEALTHY', 'JOINING')
        new_range = range(data['range_low'], data['range_high'])
        idmap_backend = data.get('idmap_backend')
        for i in configured_domains:
            # Do not generate validation error comparing to oneself.
            if i['name'] == data['name']:
                continue

            # Do not generate validation errors for overlapping with a disabled DS.
            if not ldap_enabled and i['name'] == 'DS_TYPE_LDAP':
                continue

            if not ad_enabled and i['name'] == 'DS_TYPE_ACTIVEDIRECTORY':
                continue

            # Idmap settings under Services->SMB are ignored when autorid is enabled.
            if idmap_backend == IdmapBackend.AUTORID.name and i['name'] == 'DS_TYPE_DEFAULT_DOMAIN':
                continue

            # Overlap between ranges defined for 'ad' backend are permitted.
            if idmap_backend == IdmapBackend.AD.name and i['idmap_backend'] == IdmapBackend.AD.name:
                continue

            existing_range = range(i['range_low'], i['range_high'])
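            # Treating both as half-open intervals, they overlap when the larger
            # low bound is strictly below the smaller high bound; a non-empty
            # range() below is truthy and marks the conflict.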
            if range(max(existing_range[0], new_range[0]), min(existing_range[-1], new_range[-1]) + 1):
                verrors.add(f'{schema_name}.range_low',
                            f'new idmap range [{data["range_low"]}-{data["range_high"]}] '
                            'conflicts with existing range for domain '
                            f'[{i["name"]}], range: [{i["range_low"]}-{i["range_high"]}].')

    @private
    async def validate_options(self, schema_name, data, verrors, check=['MISSING', 'EXTRA']):
        supported_keys = set(IdmapBackend[data['idmap_backend']].supported_keys())
        required_keys = set(IdmapBackend[data['idmap_backend']].required_keys())
        provided_keys = set([str(x) for x in data['options'].keys()])

        missing_keys = required_keys - provided_keys
        extra_keys = provided_keys - supported_keys

        if 'MISSING' in check:
            for k in missing_keys:
                verrors.add(f'{schema_name}.options.{k}',
                            f'[{k}] is a required parameter for the [{data["idmap_backend"]}] idmap backend.')

        if 'EXTRA' in check:
            for k in extra_keys:
                verrors.add(f'{schema_name}.options.{k}',
                            f'[{k}] is not a valid parameter for the [{data["idmap_backend"]}] idmap backend.')

    @private
    async def prune_keys(self, data):
        supported_keys = set(IdmapBackend[data['idmap_backend']].supported_keys())
        provided_keys = set([str(x) for x in data['options'].keys()])

        for k in (provided_keys - supported_keys):
            data['options'].pop(k)

    @private
    async def idmap_conf_to_client_config(self, data):
        options = data['options'].copy()
        if data['idmap_backend'] not in ['LDAP', 'RFC2307']:
            raise CallError(f'{data["idmap_backend"]}: invalid idmap backend')

        if data['idmap_backend'] == 'LDAP':
            uri = options["ldap_url"]
            basedn = options["ldap_base_dn"]
        else:
            if data['options']['ldap_server'] == 'AD':
                uri = options["ldap_domain"]
            else:
                uri = options["ldap_url"]

            basedn = options["bind_path_user"]

        credentials = {
            "binddn": options["ldap_user_dn"],
            "bindpw": options["ldap_user_dn_password"],
        }

        security = {
            "ssl": options["ssl"],
            "sasl": "SEAL",
            "validate_certificates": options["validate_certificates"],
        }

        return {
            "uri_list": [f'{"ldaps://" if security["ssl"] == "ON" else "ldap://"}{uri}'],
            "basedn": basedn,
            "bind_type": "PLAIN",
            "credentials": credentials,
            "security": security,
        }

    @filterable
    async def query(self, filters, options):
        extra = options.get("extra", {})
        more_info = extra.get("additional_information", [])
        ret = await super().query(filters, options)
        if 'DOMAIN_INFO' in more_info:
            for entry in ret:
                try:
                    domain_info = await self.middleware.call('idmap.domain_info', entry['name'])
                except CallError as e:
                    if e.errno != errno.ENOENT:
                        self.logger.debug("Failed to retrieve domain info: %s", e)
                    domain_info = None

                entry.update({'domain_info': domain_info})

        return ret

    @accepts(Dict(
        'idmap_domain_create',
        Str('name', required=True),
        Str('dns_domain_name'),
        Int('range_low', required=True, validators=[Range(min=1000, max=2147483647)]),
        Int('range_high', required=True, validators=[Range(min=1000, max=2147483647)]),
        Str('idmap_backend', required=True, enum=[x.name for x in IdmapBackend]),
        Int('certificate', null=True),
        OROperator(
            Dict(
                'idmap_ad_options',
                Ref('nss_info_ad', 'schema_mode'),
                Bool('unix_primary_group', default=False),
                Bool('unix_nss_info', default=False),
            ),
            Dict(
                'idmap_autorid_options',
                Int('rangesize', default=100000, validators=[Range(min=10000, max=1000000000)]),
                Bool('readonly', default=False),
                Bool('ignore_builtin', default=False),
            ),
            Dict(
                'idmap_ldap_options',
                LDAP_DN('ldap_base_dn'),
                LDAP_DN('ldap_user_dn'),
                Str('ldap_user_dn_password', private=True),
                Str('ldap_url'),
                Bool('readonly', default=False),
                Ref('ldap_ssl_choice', 'ssl'),
                Bool('validate_certificates', default=True),
            ),
            Dict(
                'idmap_nss_options',
                Str('linked_service', default='LOCAL_ACCOUNT', enum=['LOCAL_ACCOUNT', 'LDAP']),
            ),
            Dict(
                'idmap_rfc2307_options',
                Str('ldap_server', required=True, enum=['AD', 'STANDALONE']),
                Bool('ldap_realm', default=False),
                LDAP_DN('bind_path_user'),
                LDAP_DN('bind_path_group'),
                Bool('user_cn', default=False),
                Str('cn_realm'),
                Str('ldap_domain'),
                Str('ldap_url'),
                LDAP_DN('ldap_user_dn'),
                Str('ldap_user_dn_password', private=True),
                Ref('ldap_ssl_choice', 'ssl'),
                Bool('validate_certificates', default=True),
            ),
            Dict(
                'idmap_rid_options',
                Bool('sssd_compat', default=False),
            ),
            Dict(
                'idmap_tdb_options',
            ),
            default={},
            name='options',
            title='idmap_options',
        ),
        register=True
    ))
    async def do_create(self, data):
        """
        Create a new IDMAP domain. These domains must be unique. This table
        will be automatically populated after joining an Active Directory domain
        if "allow trusted domains" is set to True in the AD service configuration.
        There are three default system domains: DS_TYPE_ACTIVEDIRECTORY, DS_TYPE_LDAP, DS_TYPE_DEFAULT_DOMAIN.
        The system domains correspond with the idmap settings under Active Directory, LDAP, and SMB
        respectively.

        `name` is the pre-Windows 2000 domain name.

        `dns_domain_name` is the DNS name of the domain.

        `idmap_backend` provides a plugin interface for Winbind to use varying
        backends to store SID/uid/gid mapping tables. The correct setting
        depends on the environment in which the NAS is deployed.

        `range_low` and `range_high` specify the UID and GID range for which this backend is authoritative.

        `certificate` references the certificate ID of the SSL certificate to use for certificate-based
        authentication to a remote LDAP server. This parameter is not supported for all idmap backends as some
        backends will generate SID to ID mappings algorithmically without causing network traffic.

        `options` are additional parameters that are backend-dependent:

        `AD` idmap backend options:
        `unix_primary_group` If True, the primary group membership is fetched from the LDAP attributes (gidNumber).
        If False, the primary group membership is calculated via the "primaryGroupID" LDAP attribute.

        `unix_nss_info` if True winbind will retrieve the login shell and home directory from the LDAP attributes.
        If False, or if the AD LDAP entry lacks the SFU attributes, the smb4.conf parameters `template shell` and `template homedir` are used.

        `schema_mode` Defines the schema that idmap_ad should use when querying Active Directory regarding user and group information.
        This can be either the RFC2307 schema support included in Windows 2003 R2 or the Service for Unix (SFU) schema.
        For SFU 3.0 or 3.5 please choose "SFU", for SFU 2.0 please choose "SFU20". The behavior of primary group membership is
        controlled by the unix_primary_group option.

        `AUTORID` idmap backend options:
        `readonly` sets the module to read-only mode. No new ranges will be allocated and new mappings
        will not be created in the idmap pool.

        `ignore_builtin` ignores mapping requests for the BUILTIN domain.

        `LDAP` idmap backend options:
        `ldap_base_dn` defines the directory base suffix to use for SID/uid/gid mapping entries.

        `ldap_user_dn` defines the user DN to be used for authentication.

        `ldap_url` specifies the LDAP server to use for SID/uid/gid map entries.

        `ssl` specifies whether to encrypt the LDAP transport for the idmap backend.

        `NSS` idmap backend options:
        `linked_service` specifies the auxiliary directory service ID provider.

        `RFC2307` idmap backend options:
        `domain` specifies the domain for which the idmap backend is being created. Numeric id, short-form
        domain name, or long-form DNS domain name of the domain may be specified. Entry must be entered as
        it appears in `idmap.domain`.

        `range_low` and `range_high` specify the UID and GID range for which this backend is authoritative.

        `ldap_server` defines the type of LDAP server to use. This can either be an LDAP server provided
        by the Active Directory Domain (ad) or a stand-alone LDAP server.

        `bind_path_user` specifies the search base where user objects can be found in the LDAP server.

        `bind_path_group` specifies the search base where group objects can be found in the LDAP server.

        `user_cn` queries the cn attribute instead of the uid attribute for the user name in LDAP.

        `realm` append @realm to cn for groups (and users if user_cn is set) in LDAP queries.

        `ldap_domain` when using the LDAP server in the Active Directory server, this allows one to
        specify the domain in which to access the Active Directory server. This allows using trust relationships
        while keeping all RFC 2307 records in one place. This parameter is optional; the default is to access
        the AD server in the current domain to query LDAP records.

        `ldap_url` when using a stand-alone LDAP server, this parameter specifies the LDAP URL for accessing the LDAP server.

        `ldap_user_dn` defines the user DN to be used for authentication.

        `ldap_user_dn_password` is the password to be used for LDAP authentication.

        `realm` defines the realm to use in the user and group names. This is only required when using cn_realm together with
         a stand-alone ldap server.

        `RID` backend options:
        `sssd_compat` generates the idmap low range based on the same algorithm that SSSD uses by default.
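
        .. examples(websocket)::

          A minimal sketch (hypothetical request id and domain name) of creating a
          RID-backed trusted domain using the next free range after the defaults:

            :::javascript
            {
              "id": "0d7b2c9e-4f1a-4c2b-9c1d-3a5e6f7a8b9c",
              "msg": "method",
              "method": "idmap.create",
              "params": [{
                "name": "EXAMPLE",
                "range_low": 200000002,
                "range_high": 300000001,
                "idmap_backend": "RID",
                "options": {}
              }]
            }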
        """
        verrors = ValidationErrors()

        if 'options' not in data:
            data['options'] = {}

        old = await self.query()
        if data['name'] in [x['name'] for x in old]:
            verrors.add('idmap_domain_create.name', 'Domain names must be unique.')

        if data['options'].get('sssd_compat'):
            if await self.middleware.call('activedirectory.get_state') != 'HEALTHY':
                verrors.add('idmap_domain_create.options',
                            'AD service must be enabled and started to '
                            'generate an SSSD-compatible id range')
                verrors.check()

            data['range_low'] = await self.get_sssd_low_range(data['name'])
            data['range_high'] = data['range_low'] + 100000000

        await self.validate('idmap_domain_create', data, verrors)
        await self.validate_options('idmap_domain_create', data, verrors)
        if data.get('certificate') and not data['options'].get('ssl'):
            verrors.add('idmap_domain_create.certificate',
                        f'The {data["idmap_backend"]} idmap backend does not '
                        'generate LDAP traffic. Certificates do not apply.')
        verrors.check()

        if data['options'].get('ldap_user_dn_password'):
            try:
                DSType[data["name"]]
                domain = (await self.middleware.call("smb.config"))['workgroup']
            except KeyError:
                domain = data["name"]

            client_conf = await self.idmap_conf_to_client_config(data)
            await self.middleware.call(
                'ldapclient.validate_credentials',
                client_conf
            )

            secret = data['options'].pop('ldap_user_dn_password')

            await self.middleware.call("directoryservices.set_ldap_secret",
                                       domain, secret)
            await self.middleware.call("directoryservices.backup_secrets")

        final_options = IdmapBackend[data['idmap_backend']].defaults()
        final_options.update(data['options'])
        data['options'] = final_options

        id = await super().do_create(data)
        out = await self.query([('id', '=', id)], {'get': True})
        await self.synchronize()
        return out

    async def do_update(self, id, data):
        """
        Update a domain by id.
        """

        old = await self.query([('id', '=', id)], {'get': True})
        new = old.copy()
        new.update(data)
        if data.get('idmap_backend') and data['idmap_backend'] != old['idmap_backend']:
            """
            Remove options from previous backend because they are almost certainly
            not valid for the new backend.
            """
            new['options'] = data.get('options', {})
        else:
            new['options'] = old['options'].copy() | data.get('options', {})

        tmp = data.copy()
        verrors = ValidationErrors()
        if old['name'] in [x.name for x in DSType] and old['name'] != new['name']:
            verrors.add('idmap_domain_update.name',
                        f'Changing name of default domain {old["name"]} is not permitted')

        if new['options'].get('sssd_compat') and not old['options'].get('sssd_compat'):
            if await self.middleware.call('activedirectory.get_state') != 'HEALTHY':
                verrors.add('idmap_domain_update.options',
                            'AD service must be enabled and started to '
                            'generate an SSSD-compatible id range')
                verrors.check()

            new['range_low'] = await self.get_sssd_low_range(new['name'])
            new['range_high'] = new['range_low'] + 100000000

        if new['idmap_backend'] == 'AUTORID' and new['name'] != 'DS_TYPE_ACTIVEDIRECTORY':
            verrors.add("idmap_domain_update.idmap_backend",
                        "AUTORID is only permitted for the default idmap backend for "
                        "the active directory directory service (DS_TYPE_ACTIVEDIRECTORY).")

        await self.validate('idmap_domain_update', new, verrors)
        await self.validate_options('idmap_domain_update', new, verrors, ['MISSING'])
        tmp['idmap_backend'] = new['idmap_backend']
        if data.get('options'):
            await self.validate_options('idmap_domain_update', tmp, verrors, ['EXTRA'])

        if data.get('certificate') and not data['options'].get('ssl'):
            verrors.add('idmap_domain_update.certificate',
                        f'The {new["idmap_backend"]} idmap backend does not '
                        'generate LDAP traffic. Certificates do not apply.')
        verrors.check()
        await self.prune_keys(new)
        final_options = IdmapBackend[new['idmap_backend']].defaults() | new['options'].copy()
        new['options'] = final_options

        if new['options'].get('ldap_user_dn_password'):
            try:
                DSType[new["name"]]
                domain = (await self.middleware.call("smb.config"))['workgroup']
            except KeyError:
                domain = new["name"]

            client_conf = await self.idmap_conf_to_client_config(new)
            await self.middleware.call(
                'ldapclient.validate_credentials',
                client_conf
            )

            secret = new['options'].pop('ldap_user_dn_password')
            await self.middleware.call("directoryservices.set_ldap_secret",
                                       domain, secret)
            await self.middleware.call("directoryservices.backup_secrets")

        await super().do_update(id, new)

        out = await self.query([('id', '=', id)], {'get': True})
        await self.synchronize()
        cache_job = await self.middleware.call('idmap.clear_idmap_cache')
        await cache_job.wait()
        return out

    async def do_delete(self, id):
        """
        Delete a domain by id. Deletion of default system domains is not permitted.
        In the case of a registry config for a clustered server, this will remove all smb4.conf
        entries for the domain associated with the id.
        """
        if id <= 5:
            entry = await self.get_instance(id)
            raise CallError(f'Deleting system idmap domain [{entry["name"]}] is not permitted.', errno.EPERM)

        ret = await self.direct_delete(id)
        await self.synchronize()
        return ret

    @private
    async def name_to_sid(self, name):
        wb = await run([SMBCmd.WBINFO.value, '--name-to-sid', name], check=False)
        if wb.returncode != 0:
            self.logger.debug("wbinfo failed with error: %s",
                              wb.stderr.decode().strip())

        return wb.stdout.decode().strip()

    @private
    async def sid_to_name(self, sid):
        """
        Last two characters of name string encode the account type.
        """
        wb = await run([SMBCmd.WBINFO.value, '--sid-to-name', sid], check=False)
        if wb.returncode != 0:
            raise CallError(f'wbinfo failed with error: {wb.stderr.decode().strip()}')

        out = wb.stdout.decode().strip()
        return {"name": out[:-2], "type": int(out[-2:])}

    @private
    async def sid_to_unixid(self, sid_str):
        rv = None
        gid = None
        uid = None

        if sid_str.startswith(SID_LOCAL_USER_PREFIX):
            # Slice off the prefix rather than using str.strip(), which removes
            # characters (including digits) and would mangle the numeric id.
            return {"id_type": "USER", "id": int(sid_str[len(SID_LOCAL_USER_PREFIX):])}

        elif sid_str.startswith(SID_LOCAL_GROUP_PREFIX):
            return {"id_type": "GROUP", "id": int(sid_str[len(SID_LOCAL_GROUP_PREFIX):])}

        wb = await run([SMBCmd.WBINFO.value, '--sid-to-gid', sid_str], check=False)
        if wb.returncode == 0:
            gid = int(wb.stdout.decode().strip())

        wb = await run([SMBCmd.WBINFO.value, '--sid-to-uid', sid_str], check=False)
        if wb.returncode == 0:
            uid = int(wb.stdout.decode().strip())

        if gid is not None and gid == uid:
            rv = {"id_type": "BOTH", "id": gid}
        elif gid is not None:
            rv = {"id_type": "GROUP", "id": gid}
        elif uid is not None:
            rv = {"id_type": "USER", "id": uid}

        return rv

    @private
    async def id_to_name(self, id, id_type):
        idtype = IDType[id_type]
        idmap_timeout = 5.0

        if idtype == IDType.GROUP or idtype == IDType.BOTH:
            method = "group.get_group_obj"
            to_check = {"gid": id}
            key = 'gr_name'
        elif idtype == IDType.USER:
            method = "user.get_user_obj"
            to_check = {"uid": id}
            key = 'pw_name'
        else:
            raise CallError(f"Unsupported id_type: [{idtype.name}]")

        try:
            ret = await asyncio.wait_for(
                self.middleware.call(method, to_check),
                timeout=idmap_timeout
            )
            name = ret[key]
        except asyncio.TimeoutError:
            self.logger.debug(
                "timeout encountered while trying to convert %s id %s "
                "to name. This may indicate significant networking issue.",
                id_type.lower(), id
            )
            name = None
        except KeyError:
            name = None

        return name

    @private
    async def unixid_to_sid(self, data):
        """
        Samba generates SIDs for local accounts that lack explicit mapping in
        passdb.tdb or group_mapping.tdb with a prefix of S-1-22-1 (users) and
        S-1-22-2 (groups). This is not returned by wbinfo, but for consistency
        with what appears when viewed over SMB protocol we'll do the same here.
        """
        unixid = data.get("id")
        id = IDType[data.get("id_type", "GROUP")]

        if id == IDType.USER:
            wb = await run([SMBCmd.WBINFO.value, '--uid-to-sid', str(unixid)], check=False)
        else:
            wb = await run([SMBCmd.WBINFO.value, '--gid-to-sid', str(unixid)], check=False)

        if wb.returncode != 0:
            self.logger.warning("Could not convert [%d] to SID: %s",
                                unixid, wb.stderr.decode().strip())
            if WBCErr.DOMAIN_NOT_FOUND.err() in wb.stderr.decode():
                is_local = await self.middleware.call(
                    f'{"user" if id == IDType.USER else "group"}.query',
                    [("uid" if id == IDType.USER else "gid", '=', unixid)],
                    {"count": True}
                )
                if is_local:
                    return f'S-1-22-{1 if id == IDType.USER else 2}-{unixid}'

            return None

        return wb.stdout.decode().strip()

    @private
    async def get_idmap_info(self, ds, id):
        low_range = None
        id_type_both = False
        domains = await self.query()

        for d in domains:
            if ds == 'activedirectory' and d['name'] == 'DS_TYPE_LDAP':
                continue

            if ds == 'ldap' and d['name'] != 'DS_TYPE_LDAP':
                continue

            if id in range(d['range_low'], d['range_high']):
                low_range = d['range_low']
                id_type_both = d['idmap_backend'] in ['AUTORID', 'RID']
                break

        return (low_range, id_type_both)

    @private
    async def synthetic_user(self, ds, passwd):
        idmap_info = await self.get_idmap_info(ds, passwd['pw_uid'])
        sid = await self.unixid_to_sid({"id": passwd['pw_uid'], "id_type": "USER"})
        rid = int(sid.rsplit('-', 1)[1])
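        # The synthetic database id combines the domain's low range with the
        # account RID, offset by 100000 (presumably to keep it clear of ids
        # used for local accounts).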
        return {
            'id': 100000 + idmap_info[0] + rid,
            'uid': passwd['pw_uid'],
            'username': passwd['pw_name'],
            'unixhash': None,
            'smbhash': None,
            'group': {},
            'home': '',
            'shell': '',
            'full_name': passwd['pw_gecos'],
            'builtin': False,
            'email': '',
            'password_disabled': False,
            'locked': False,
            'sudo': False,
            'sudo_nopasswd': False,
            'sudo_commands': [],
            'microsoft_account': False,
            'attributes': {},
            'groups': [],
            'sshpubkey': None,
            'local': False,
            'id_type_both': idmap_info[1],
        }

    @private
    async def synthetic_group(self, ds, grp):
        idmap_info = await self.get_idmap_info(ds, grp['gr_gid'])
        sid = await self.unixid_to_sid({"id": grp['gr_gid'], "id_type": "GROUP"})
        rid = int(sid.rsplit('-', 1)[1])
        return {
            'id': 100000 + idmap_info[0] + rid,
            'gid': grp['gr_gid'],
            'name': grp['gr_name'],
            'group': grp['gr_name'],
            'builtin': False,
            'sudo': False,
            'sudo_nopasswd': False,
            'sudo_commands': [],
            'users': [],
            'local': False,
            'id_type_both': idmap_info[1],
        }

    @private
    async def idmap_to_smbconf(self, data=None):
        rv = {}
        if data is None:
            idmap = await self.query()
        else:
            idmap = data

        ds_state = await self.middleware.call('directoryservices.get_state')
        workgroup = await self.middleware.call('smb.getparm', 'workgroup', 'global')
        ad_enabled = ds_state['activedirectory'] in ['HEALTHY', 'JOINING', 'FAULTED']
        ldap_enabled = ds_state['ldap'] in ['HEALTHY', 'JOINING', 'FAULTED']
        ad_idmap = filter_list(idmap, [('name', '=', DSType.DS_TYPE_ACTIVEDIRECTORY.name)], {'get': True}) if ad_enabled else None
        disable_ldap_starttls = False

        for i in idmap:
            if i['name'] == DSType.DS_TYPE_DEFAULT_DOMAIN.name:
                if ad_idmap and ad_idmap['idmap_backend'] == 'AUTORID':
                    continue
                domain = "*"
            elif i['name'] == DSType.DS_TYPE_ACTIVEDIRECTORY.name:
                if not ad_enabled:
                    continue
                if i['idmap_backend'] == 'AUTORID':
                    domain = "*"
                else:
                    domain = workgroup
            elif i['name'] == DSType.DS_TYPE_LDAP.name:
                if not ldap_enabled:
                    continue
                domain = workgroup
                # This will need to be re-implemented once LDAP directory service is clustered
                if i['idmap_backend'] == 'LDAP':
                    """
                    In case of default LDAP backend, populate values from ldap form.
                    """
                    idmap_prefix = f"idmap config {domain} :"
                    ldap = await self.middleware.call('ldap.config')
                    rv.update({
                        f"{idmap_prefix} backend": {"raw": i['idmap_backend'].lower()},
                        f"{idmap_prefix} range": {"raw": f"{i['range_low']} - {i['range_high']}"},
                        f"{idmap_prefix} ldap_base_dn": {"raw": ldap['basedn']},
                        f"{idmap_prefix} ldap_url": {"raw": ' '.join(ldap['uri_list'])},
                    })
                    continue
            else:
                domain = i['name']

            idmap_prefix = f"idmap config {domain} :"
            rv.update({
                f"{idmap_prefix} backend": {"raw": i['idmap_backend'].lower()},
                f"{idmap_prefix} range": {"raw": f"{i['range_low']} - {i['range_high']}"}
            })
            for k, v in i['options'].items():
                backend_parameter = "realm" if k == "cn_realm" else k
                if k == 'ldap_server':
                    v = 'ad' if v == 'AD' else 'stand-alone'
                elif k == 'ldap_url':
                    v = f'{"ldaps://" if i["options"]["ssl"]  == "ON" else "ldap://"}{v}'
                elif k == 'ssl':
                    if v != 'STARTTLS':
                        disable_ldap_starttls = True

                    continue

                rv.update({
                    f"{idmap_prefix} {backend_parameter}": {"parsed": v},
                })

        if ad_enabled:
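            # 'ldap ssl' ends up as 'start tls' only when every backend that
            # sets an ssl option requested STARTTLS; any other value disables
            # StartTLS in the generated global config.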
            rv['ldap ssl'] = {'parsed': 'off' if disable_ldap_starttls else 'start tls'}

        return rv

    @private
    async def diff_conf_and_registry(self, data, idmaps):
        r = idmaps
        s_keys = set(data.keys())
        r_keys = set(r.keys())
        intersect = s_keys.intersection(r_keys)
        return {
            'added': {x: data[x] for x in s_keys - r_keys},
            'removed': {x: r[x] for x in r_keys - s_keys},
            'modified': {x: data[x] for x in intersect if data[x] != r[x]},
        }

    @private
    async def synchronize(self):
        config_idmap = await self.query()
        idmaps = await self.idmap_to_smbconf(config_idmap)
        to_check = (await self.middleware.call('smb.reg_globals'))['idmap']
        diff = await self.diff_conf_and_registry(idmaps, to_check)
        await self.middleware.call('sharing.smb.apply_conf_diff', 'GLOBAL', diff)
        await self.middleware.call('service.restart', 'idmap')
Example #16
class NTPServerService(CRUDService):
    class Config:
        namespace = 'system.ntpserver'
        datastore = 'system.ntpserver'
        datastore_prefix = 'ntp_'
        cli_namespace = 'system.ntp_server'

    ENTRY = Patch(
        'ntp_create',
        'ntp_entry',
        ('rm', {
            'name': 'force'
        }),
        ('add', Int('id')),
    )

    @accepts(
        Dict('ntp_create',
             Str('address'),
             Bool('burst', default=False),
             Bool('iburst', default=True),
             Bool('prefer', default=False),
             Int('minpoll', default=6),
             Int('maxpoll', default=10),
             Bool('force'),
             register=True))
    async def do_create(self, data):
        """
        Add an NTP Server.

        `address` specifies the hostname/IP address of the NTP server.

        `burst` when enabled sends a burst of eight packets instead of one if the server is reachable.
        This is designed to improve timekeeping quality with the server command.

        `iburst` when enabled speeds up the initial synchronization, taking seconds rather than minutes.

        `prefer` marks the specified server as preferred. When all other things are equal, this host is chosen
        for synchronization acquisition with the server command. It is recommended for servers with
        time monitoring hardware.

        `minpoll` is minimum polling time in seconds. It must be a power of 2 and less than `maxpoll`.

        `maxpoll` is maximum polling time in seconds. It must be a power of 2 and greater than `minpoll`.

        `force` when enabled forces the addition of NTP server even if it is currently unreachable.
        """
        await self.clean(data, 'ntpserver_create')

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.middleware.call('service.restart', 'ntpd')

        return await self.get_instance(data['id'])

    @accepts(Int('id'),
             Patch('ntp_create', 'ntp_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update NTP server of `id`.
        """
        old = await self._get_instance(id)

        new = old.copy()
        new.update(data)

        await self.clean(new, 'ntpserver_update')

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self.middleware.call('service.restart', 'ntpd')

        return await self.get_instance(id)

    async def do_delete(self, id):
        """
        Delete NTP server of `id`.
        """
        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        await self.middleware.call('service.restart', 'ntpd')

        return response

    @staticmethod
    @private
    def test_ntp_server(addr):
        client = ntplib.NTPClient()
        server_alive = False
        try:
            response = client.request(addr)
            if response.version:
                server_alive = True
        except Exception:
            pass

        return server_alive
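    # e.g. test_ntp_server('pool.ntp.org') returns True only when an NTP
    # response is received from that address; any failure yields False.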

    @private
    async def clean(self, data, schema_name):
        verrors = ValidationErrors()
        maxpoll = data['maxpoll']
        minpoll = data['minpoll']
        force = data.pop('force', False)
        usable = bool(await self.middleware.run_in_thread(
            self.test_ntp_server, data['address']))

        if not force and not usable:
            verrors.add(
                f'{schema_name}.address',
                'Server could not be reached. Check "Force" to '
                'continue regardless.')

        if not maxpoll > minpoll:
            verrors.add(f'{schema_name}.maxpoll',
                        'Max Poll should be higher than Min Poll')

        if verrors:
            raise verrors

        return data
Example #17
class SupportService(Service):

    @accepts(
        Str('username'),
        Str('password'),
    )
    def fetch_categories(self, username, password):
        """
        Fetch all the categories available for `username` using `password`.
        Returns a dict with the category name as a key and id as value.
        """

        sw_name = 'freenas' if self.middleware.call_sync('system.is_freenas') else 'truenas'
        try:
            r = requests.post(
                f'https://{ADDRESS}/{sw_name}/api/v1.0/categories',
                data=json.dumps({
                    'user': username,
                    'password': password,
                }),
                headers={'Content-Type': 'application/json'},
                timeout=10,
            )
            data = r.json()
        except simplejson.JSONDecodeError:
            self.logger.debug(f'Failed to decode categories response: {r.text}')
            raise CallError('Invalid proxy server response', errno.EBADMSG)
        except requests.ConnectionError as e:
            raise CallError(f'Connection error {e}', errno.EBADF)
        except requests.Timeout:
            raise CallError('Connection time out', errno.ETIMEDOUT)

        if 'error' in data:
            raise CallError(data['message'], errno.EINVAL)

        return data

    @accepts(Dict(
        'new_ticket',
        Str('title', required=True),
        Str('body', required=True),
        Str('category', required=True),
        Bool('attach_debug', default=False),
        Str('username', private=True),
        Str('password', private=True),
        Str('type', enum=['BUG', 'FEATURE']),
        Str('criticality'),
        Str('environment'),
        Str('phone'),
        Str('name'),
        Str('email'),
    ))
    @job()
    async def new_ticket(self, job, data):
        """
        Creates a new ticket for support.
        This is done using the support proxy API.
        For FreeNAS it will be created on Redmine and for TrueNAS on SupportSuite.

        For FreeNAS `criticality`, `environment`, `phone`, `name` and `email` attributes are not required.
        For TrueNAS `username`, `password` and `type` attributes are not required.
        """

        job.set_progress(1, 'Gathering data')

        sw_name = 'freenas' if await self.middleware.call('system.is_freenas') else 'truenas'

        if sw_name == 'freenas':
            required_attrs = ('type', 'username', 'password')
        else:
            required_attrs = ('phone', 'name', 'email', 'criticality', 'environment')
            data['serial'] = (await (await Popen(['/usr/local/sbin/dmidecode', '-s', 'system-serial-number'], stdout=subprocess.PIPE)).communicate())[0].decode().split('\n')[0].upper()
            license = get_license()[0]
            if license:
                data['company'] = license.customer_name
            else:
                data['company'] = 'Unknown'

        for i in required_attrs:
            if i not in data:
                raise CallError(f'{i} is required', errno.EINVAL)

        data['version'] = (await self.middleware.call('system.version')).split('-', 1)[-1]
        if 'username' in data:
            data['user'] = data.pop('username')
        debug = data.pop('attach_debug')

        type_ = data.get('type')
        if type_:
            data['type'] = type_.lower()

        job.set_progress(20, 'Submitting ticket')

        try:
            r = await self.middleware.run_in_thread(lambda: requests.post(
                f'https://{ADDRESS}/{sw_name}/api/v1.0/ticket',
                data=json.dumps(data),
                headers={'Content-Type': 'application/json'},
                timeout=10,
            ))
            result = r.json()
        except simplejson.JSONDecodeError:
            self.logger.debug(f'Failed to decode ticket creation response: {r.text}')
            raise CallError('Invalid proxy server response', errno.EBADMSG)
        except requests.ConnectionError as e:
            raise CallError(f'Connection error {e}', errno.EBADF)
        except requests.Timeout:
            raise CallError('Connection time out', errno.ETIMEDOUT)

        if r.status_code != 200:
            self.logger.debug('Support Ticket failed (%s): %s', r.status_code, r.text)
            raise CallError('Ticket creation failed, try again later.', errno.EINVAL)

        if result['error']:
            raise CallError(result['message'], errno.EINVAL)

        ticket = result.get('ticketnum')
        url = result.get('message')
        if not ticket:
            raise CallError('New ticket number was not returned', errno.EINVAL)
        job.set_progress(50, f'Ticket created: {ticket}', extra={'ticket': ticket})

        if debug:
            # FIXME: generate debug from middleware
            mntpt, direc, dump = await self.middleware.run_in_thread(debug_get_settings)

            job.set_progress(60, 'Generating debug file')
            await self.middleware.run_in_thread(debug_generate)

            not_freenas = not (await self.middleware.call('system.is_freenas'))
            if not_freenas:
                not_freenas &= await self.middleware.call('notifier.failover_licensed')
            if not_freenas:
                debug_file = f'{direc}/debug.tar'
                debug_name = 'debug-{}.tar'.format(time.strftime('%Y%m%d%H%M%S'))
            else:
                debug_file = dump
                debug_name = 'debug-{}-{}.txz'.format(
                    socket.gethostname().split('.')[0],
                    time.strftime('%Y%m%d%H%M%S'),
                )

            job.set_progress(80, 'Attaching debug file')

            # 20M filesize limit
            if os.path.getsize(debug_file) > 20971520:
                raise CallError('Debug too large to attach', errno.EFBIG)

            t = {
                'ticket': ticket,
                'filename': debug_name,
            }
            if 'user' in data:
                t['username'] = data['user']
            if 'password' in data:
                t['password'] = data['password']
            tjob = await self.middleware.call('support.attach_ticket', t, pipes=Pipes(input=self.middleware.pipe()))
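            # Stream the debug archive into the attach job's input pipe and
            # close the writer so the attachment upload can complete.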

            with open(debug_file, 'rb') as f:
                await self.middleware.run_in_io_thread(shutil.copyfileobj, f, tjob.pipes.input.w)
                await self.middleware.run_in_io_thread(tjob.pipes.input.w.close)

            await tjob.wait()
        else:
            job.set_progress(100)

        return {
            'ticket': ticket,
            'url': url,
        }

    @accepts(Dict(
        'attach_ticket',
        Int('ticket', required=True),
        Str('filename', required=True),
        Str('username'),
        Str('password'),
    ))
    @job(pipes=["input"])
    async def attach_ticket(self, job, data):
        """
        Method to attach a file to an existing ticket.
        """

        sw_name = 'freenas' if await self.middleware.call('system.is_freenas') else 'truenas'

        if 'username' in data:
            data['user'] = data.pop('username')
        data['ticketnum'] = data.pop('ticket')
        filename = data.pop('filename')

        try:
            r = await self.middleware.run_in_io_thread(lambda: requests.post(
                f'https://{ADDRESS}/{sw_name}/api/v1.0/ticket/attachment',
                data=data,
                timeout=10,
                files={'file': (filename, job.pipes.input.r)},
            ))
            data = r.json()
        except simplejson.JSONDecodeError:
            self.logger.debug(f'Failed to decode ticket attachment response: {r.text}')
            raise CallError('Invalid proxy server response', errno.EBADMSG)
        except requests.ConnectionError as e:
            raise CallError(f'Connection error {e}', errno.EBADF)
        except requests.Timeout:
            raise CallError('Connection time out', errno.ETIMEDOUT)

        if data['error']:
            raise CallError(data['message'], errno.EINVAL)
Example #18
class BackupService(CRUDService):
    @filterable
    async def query(self, filters=None, options=None):
        return await self.middleware.call('datastore.query', 'tasks.cloudsync',
                                          filters, options)

    async def _clean_credential(self, verrors, name, data):

        credential = await self.middleware.call(
            'datastore.query', 'system.cloudcredentials',
            [('id', '=', data['credential'])], {'get': True})
        if credential is None:
            verrors.add(f'{name}.credential',
                        f'Credential {data["credential"]} not found',
                        errno.ENOENT)
            return

        if credential['provider'] == 'AMAZON':
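            # For S3, resolve the bucket's region once and store it with the
            # task attributes.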
            data['attributes']['region'] = await self.middleware.call(
                'backup.s3.get_bucket_location', credential['id'],
                data['attributes']['bucket'])
        elif credential['provider'] in ('AZURE', 'BACKBLAZE', 'GCLOUD'):
            # AZURE|BACKBLAZE|GCLOUD do not need validation or new data at this stage
            pass
        else:
            verrors.add(f'{name}.provider',
                        f'Invalid provider: {credential["provider"]}')

        if credential['provider'] == 'AMAZON':
            if data['attributes'].get('encryption') not in (None, 'AES256'):
                verrors.add(f'{name}.attributes.encryption',
                            'Encryption should be null or "AES256"')

    @accepts(
        Dict(
            'backup',
            Str('description'),
            Str('direction', enum=['PUSH', 'PULL']),
            Str('transfer_mode', enum=['SYNC', 'COPY', 'MOVE']),
            Str('path'),
            Int('credential'),
            Str('minute'),
            Str('hour'),
            Str('daymonth'),
            Str('dayweek'),
            Str('month'),
            Dict('attributes', additional_attrs=True),
            Bool('enabled'),
            register=True,
        ))
    async def do_create(self, data):
        """
        Creates a new backup entry.

        .. examples(websocket)::

          Create a new backup task using Amazon S3 attributes, scheduled to run every hour.

            :::javascript
            {
              "id": "6841f242-840a-11e6-a437-00e04d680384",
              "msg": "method",
              "method": "backup.create",
              "params": [{
                "description": "s3 sync",
                "path": "/mnt/tank",
                "credential": 1,
                "minute": "00",
                "hour": "*",
                "daymonth": "*",
                "month": "*",
                "attributes": {
                  "bucket": "mybucket",
                  "folder": ""
                },
                "enabled": true
              }]
            }
        """

        verrors = ValidationErrors()

        await self._clean_credential(verrors, 'backup', data)

        if verrors:
            raise verrors

        pk = await self.middleware.call('datastore.insert', 'tasks.cloudsync',
                                        data)
        await self.middleware.call('notifier.restart', 'cron')
        return pk

    @accepts(Int('id'),
             Patch('backup', 'backup_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Updates the backup entry `id` with `data`.
        """
        backup = await self.middleware.call(
            'datastore.query',
            'tasks.cloudsync',
            [('id', '=', id)],
            {'get': True},
        )
        assert backup is not None
        # credential is a foreign key for now
        if backup['credential']:
            backup['credential'] = backup['credential']['id']

        backup.update(data)

        verrors = ValidationErrors()

        await self._clean_credential(verrors, 'backup_update', backup)

        if verrors:
            raise verrors

        await self.middleware.call('datastore.update', 'tasks.cloudsync', id,
                                   backup)
        await self.middleware.call('notifier.restart', 'cron')
        return id

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Deletes backup entry `id`.
        """
        await self.middleware.call('datastore.delete', 'tasks.cloudsync', id)
        await self.middleware.call('notifier.restart', 'cron')

    @item_method
    @accepts(Int('id'))
    @job(lock=lambda args: 'backup:{}'.format(args[-1]))
    async def sync(self, job, id):
        """
        Run the backup job `id`, syncing the local data to remote.
        """

        backup = await self.middleware.call('datastore.query',
                                            'tasks.cloudsync',
                                            [('id', '=', id)], {'get': True})
        if not backup:
            raise ValueError("Unknown id")

        credential = await self.middleware.call(
            'datastore.query', 'system.cloudcredentials',
            [('id', '=', backup['credential']['id'])], {'get': True})
        if not credential:
            raise ValueError("Backup credential not found.")

        return await self._call_provider_method(credential['provider'], 'sync',
                                                job, backup, credential)

    @accepts(Int('credential_id'), Str('bucket'), Str('path'))
    async def is_dir(self, credential_id, bucket, path):
        credential = await self.middleware.call('datastore.query',
                                                'system.cloudcredentials',
                                                [('id', '=', credential_id)],
                                                {'get': True})
        if not credential:
            raise ValueError("Backup credential not found.")

        return await self._call_provider_method(credential['provider'],
                                                'is_dir', credential_id,
                                                bucket, path)

    @private
    async def _call_provider_method(self, provider, method, *args, **kwargs):
        try:
            plugin = {
                'AMAZON': 's3',
                'AZURE': 'azure',
                'BACKBLAZE': 'b2',
                'GCLOUD': 'gcs',
            }[provider]
        except KeyError:
            raise NotImplementedError(f'Unsupported provider: {provider}')

        return await self.middleware.call(f'backup.{plugin}.{method}', *args,
                                          **kwargs)
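
A hedged usage sketch of the service above, assuming a running middlewared instance and its Python client (middlewared.client.Client); the field values are illustrative only and mirror the websocket example in the do_create docstring:

from middlewared.client import Client

with Client() as c:
    # Create an hourly S3 push task and then start a manual sync of it.
    task_id = c.call('backup.create', {
        'description': 's3 sync',
        'direction': 'PUSH',
        'transfer_mode': 'SYNC',
        'path': '/mnt/tank',
        'credential': 1,
        'minute': '00',
        'hour': '*',
        'daymonth': '*',
        'dayweek': '*',
        'month': '*',
        'attributes': {'bucket': 'mybucket', 'folder': ''},
        'enabled': True,
    })
    c.call('backup.sync', task_id)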
Example #19
class VMService(Service, VMSupervisorMixin):

    ZFS_ARC_MAX_INITIAL = None

    @private
    async def get_initial_arc_max(self):
        if osc.IS_FREEBSD:
            tunable = await self.middleware.call(
                'tunable.query',
                [['type', '=', 'SYSCTL'], ['var', '=', 'vfs.zfs.arc.max']])
            if tunable and str(tunable[0]['value']).isdigit():
                return int(tunable[0]['value'])
        return self.ZFS_ARC_MAX_INITIAL

    @private
    async def wait_for_libvirtd(self, timeout):
        async def libvirtd_started(middleware):
            await middleware.call('service.start', 'libvirtd')
            while not await middleware.call('service.started', 'libvirtd'):
                await asyncio.sleep(2)

        try:
            if not await self.middleware.call('service.started', 'libvirtd'):
                await asyncio.wait_for(libvirtd_started(self.middleware),
                                       timeout=timeout)
            # We want to do this before initializing libvirt connection
            self._open()
            await self.middleware.call('vm.setup_libvirt_events')
        except (asyncio.TimeoutError, CallError):
            self.middleware.logger.error('Failed to connect to libvirtd')

    @private
    def setup_libvirt_connection(self, timeout=30):
        self.middleware.call_sync(f'vm.initialize_{osc.SYSTEM.lower()}')
        self.middleware.call_sync('vm.wait_for_libvirtd', timeout)

    @private
    def initialize_vms(self, timeout=30):
        if self.middleware.call_sync('vm.query'):
            self.setup_libvirt_connection(timeout)
        else:
            return

        # We use datastore.query here specifically to avoid a recursive case where vm.datastore_extend calls
        # the status method, which in turn needs a vm object to retrieve the libvirt status for the specified VM
        if self._is_connection_alive():
            for vm_data in self.middleware.call_sync('datastore.query',
                                                     'vm.vm'):
                vm_data['devices'] = self.middleware.call_sync(
                    'vm.device.query', [['vm', '=', vm_data['id']]])
                try:
                    self._add_with_vm_data(vm_data)
                except Exception as e:
                    # Whatever happens, we must not prevent middlewared from booting
                    self.middleware.logger.error(
                        'Unable to setup %r VM object: %s', vm_data['name'],
                        str(e))
        else:
            self.middleware.logger.error(
                'Failed to establish libvirt connection')

    @private
    async def initialize_linux(self):
        pass

    @private
    async def initialize_freebsd(self):
        cp = await run(['/sbin/kldstat'], check=False)
        if cp.returncode:
            self.middleware.logger.error(
                'Failed to retrieve kernel modules: %s', cp.stderr.decode())
            return
        else:
            kldstat = cp.stdout.decode()

        for kmod in ('vmm.ko', 'nmdm.ko'):
            if kmod not in kldstat:
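                # kldload expects the module name without the '.ko' suffix.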
                cp = await run(['/sbin/kldload', kmod[:-3]], check=False)
                if cp.returncode:
                    self.middleware.logger.error('Failed to load %r : %s',
                                                 kmod, cp.stderr.decode())

    @private
    async def start_on_boot(self):
        for vm in await self.middleware.call('vm.query',
                                             [('autostart', '=', True)]):
            try:
                await self.middleware.call('vm.start', vm['id'])
            except Exception as e:
                self.middleware.logger.debug(
                    f'Failed to start VM {vm["name"]}: {e}')

    @private
    @accepts(
        Dict(
            'deinitialize_vms_options',
            Bool('stop_libvirt', default=True),
        ))
    async def deinitialize_vms(self, options):
        await self.middleware.call('vm.close_libvirt_connection')
        if options['stop_libvirt']:
            await self.middleware.call('service.stop', 'libvirtd')

    @private
    def close_libvirt_connection(self):
        if self.LIBVIRT_CONNECTION:
            with contextlib.suppress(CallError):
                self._close()

    @private
    def setup_details(self):
        return {
            'connected': self._is_connection_alive(),
            'connection_initialised': bool(self.LIBVIRT_CONNECTION),
            'domains': list(self.vms.keys()),
        }

    @private
    async def terminate(self):
        async with SHUTDOWN_LOCK:
            await self.middleware.call('vm.deinitialize_vms',
                                       {'stop_libvirt': False})

    @private
    async def terminate_timeout(self):
        return max(map(lambda v: v['shutdown_timeout'], await
                       self.middleware.call('vm.query')),
                   default=10)

    @private
    async def update_zfs_arc_max_initial(self):
        self.ZFS_ARC_MAX_INITIAL = await self.middleware.call(
            'sysctl.get_arc_max')
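
wait_for_libvirtd above wraps a poll loop in asyncio.wait_for. A minimal standalone sketch of that pattern, with a hypothetical async predicate standing in for the service plugin:

import asyncio

async def wait_until(predicate, timeout, interval=2):
    # Poll the async predicate until it returns True or the timeout expires.
    async def poll():
        while not await predicate():
            await asyncio.sleep(interval)
    try:
        await asyncio.wait_for(poll(), timeout=timeout)
        return True
    except asyncio.TimeoutError:
        return False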
Example #20
class OpenVPNServerService(SystemServiceService):

    class Config:
        namespace = 'openvpn.server'
        service = 'openvpn_server'
        service_model = 'openvpnserver'
        service_verb = 'restart'
        datastore_extend = 'openvpn.server.server_extend'

    @private
    async def server_extend(self, data):
        data['server_certificate'] = None if not data['server_certificate'] else data['server_certificate']['id']
        data['root_ca'] = None if not data['root_ca'] else data['root_ca']['id']
        data['tls_crypt_auth_enabled'] = bool(data['tls_crypt_auth'])
        return data

    @private
    async def config_valid(self):
        config = await self.config()
        if not config['root_ca']:
            raise CallError('Please configure root_ca first.')
        else:
            if not await self.middleware.call(
                'certificateauthority.query', [
                    ['id', '=', config['root_ca']],
                    ['revoked', '=', False]
                ]
            ):
                raise CallError('Root CA has been revoked. Please select another Root CA.')

        if not config['server_certificate']:
            raise CallError('Please configure server certificate first.')
        else:
            if not await self.middleware.call(
                'certificate.query', [
                    ['id', '=', config['server_certificate']],
                    ['revoked', '=', False]
                ]
            ):
                raise CallError('Server certificate has been revoked. Please select another Server certificate.')

        if not await self.validate_nobind(config):
            raise CallError(
                'Please enable "nobind" on OpenVPN Client to concurrently run OpenVPN Server/Client '
                'on the same local port without any issues.'
            )

    @accepts()
    async def authentication_algorithm_choices(self):
        """
        Returns a dictionary of valid authentication algorithms which can be used with OpenVPN server.
        """
        return OpenVPN.digests()

    @accepts()
    async def cipher_choices(self):
        """
        Returns a dictionary of valid ciphers which can be used with OpenVPN server.
        """
        return OpenVPN.ciphers()

    @private
    async def validate(self, data, schema_name):
        verrors, data = await OpenVPN.common_validation(
            self.middleware, data, schema_name, 'server'
        )

        if not await self.validate_nobind(data):
            verrors.add(
                f'{schema_name}.nobind',
                'Please enable "nobind" on OpenVPN Client to concurrently run OpenVPN Server/Client '
                'on the same local port without any issues.'
            )

        if ipaddress.ip_address(data['server']).version == 4 and data['netmask'] > 32:
            verrors.add(
                f'{schema_name}.netmask',
                'For IPv4 server addresses please provide a netmask value from 0-32.'
            )

        verrors.check()

        return data

    @private
    async def validate_nobind(self, config):
        client_config = await self.middleware.call('openvpn.client.config')
        if (
            await self.middleware.call(
                'service.started',
                'openvpn_client'
            ) and config['port'] == client_config['port'] and not client_config['nobind']
        ):
            return False
        else:
            return True

    @private
    async def generate_static_key(self):
        keyfile = tempfile.NamedTemporaryFile(mode='w+', dir='/tmp/')
        await run(
            ['openvpn', '--genkey', '--secret', keyfile.name]
        )
        keyfile.seek(0)
        key = keyfile.read()
        keyfile.close()
        return key.strip()

    @accepts()
    async def renew_static_key(self):
        """
        Reset OpenVPN server's TLS static key which will be used to encrypt/authenticate control channel packets.
        """
        return await self.update({
            'tls_crypt_auth': (await self.generate_static_key()),
            'tls_crypt_auth_enabled': True
        })

    @accepts(
        Int('client_certificate_id'),
        Str('server_address', null=True)
    )
    async def client_configuration_generation(self, client_certificate_id, server_address=None):
        """
        Returns a configuration for OpenVPN client which can be used with any client to connect to FN/TN OpenVPN
        server.

        `client_certificate_id` should be a valid certificate issued for use with OpenVPN client service.

        `server_address`, if specified, auto-fills the remote directive in the OpenVPN configuration so the end
        user can use the file to connect to the OpenVPN server without making any edits.
        """
        await self.config_valid()
        config = await self.config()
        root_ca = await self.middleware.call(
            'certificateauthority.query', [
                ['id', '=', config['root_ca']]
            ], {
                'get': True
            }
        )
        client_cert = await self.middleware.call(
            'certificate.query', [
                ['id', '=', client_certificate_id],
                ['revoked', '=', False]
            ]
        )
        if not client_cert:
            raise CallError(
                'Please provide a client certificate id for a certificate which exists on '
                'the system and hasn\'t been marked as revoked.'
            )
        else:
            client_cert = client_cert[0]
            if (
                await OpenVPN.common_validation(
                    self.middleware, {
                        **config,
                        'client_certificate': client_certificate_id
                    }, '', 'client'
                )
            )[0]:
                raise CallError(
                    'Please ensure provided client certificate exists in Root CA chain '
                    'and has necessary extensions set.'
                )

        client_config = [
            'client',
            f'dev {config["device_type"].lower()}',
            f'proto {config["protocol"].lower()}',
            f'port {config["port"]}',
            f'remote "{server_address or "PLEASE FILL OUT SERVER DOMAIN/IP HERE"}"',
            'user nobody',
            'group nobody',
            'persist-key',
            'persist-tun',
            '<ca>',
            f'{root_ca["certificate"]}',
            '</ca>',
            '<cert>',
            client_cert['certificate'],
            '</cert>',
            '<key>',
            client_cert['privatekey'],
            '</key>',
            'verb 3',
            'remote-cert-tls server',
            f'compress {config["compression"].lower()}' if config['compression'] else None,
            f'auth {config["authentication_algorithm"]}' if config['authentication_algorithm'] else None,
            f'cipher {config["cipher"]}' if config['cipher'] else None,
        ]

        if config['tls_crypt_auth_enabled']:
            client_config.extend([
                '<tls-crypt>',
                config['tls_crypt_auth'],
                '</tls-crypt>'
            ])

        return '\n'.join(filter(bool, client_config)).strip()

    @accepts(
        Dict(
            'openvpn_server_update',
            Bool('tls_crypt_auth_enabled'),
            Int('netmask', validators=[Range(min=0, max=128)]),
            Int('server_certificate'),
            Int('port', validators=[Port()]),
            Int('root_ca'),
            IPAddr('server'),
            Str('additional_parameters'),
            Str('authentication_algorithm', null=True),
            Str('cipher', null=True),
            Str('compression', null=True, enum=['LZO', 'LZ4']),
            Str('device_type', enum=['TUN', 'TAP']),
            Str('protocol', enum=['UDP', 'TCP']),
            Str('tls_crypt_auth', null=True),
            Str('topology', null=True, enum=['NET30', 'P2P', 'SUBNET']),
            update=True
        )
    )
    async def do_update(self, data):
        """
        Update OpenVPN Server configuration.

        When `tls_crypt_auth_enabled` is enabled and `tls_crypt_auth` not provided, a static key is automatically
        generated to be used with OpenVPN server.
        """
        old_config = await self.config()
        config = old_config.copy()

        config.update(data)

        # If tls_crypt_auth_enabled is set and we don't have a tls_crypt_auth key,
        # generate one automatically
        if config['tls_crypt_auth_enabled'] and not config['tls_crypt_auth']:
            config['tls_crypt_auth'] = await self.generate_static_key()

        config = await self.validate(config, 'openvpn_server_update')

        await self._update_service(old_config, config)

        return await self.config()
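
A hedged usage sketch for client_configuration_generation, again assuming the middlewared Python client is available; the certificate id and server address are placeholders:

from middlewared.client import Client

with Client() as c:
    # Generate a ready-to-use OpenVPN client config for certificate id 5.
    ovpn = c.call('openvpn.server.client_configuration_generation', 5, '192.0.2.10')

with open('client.ovpn', 'w') as f:
    f.write(ovpn)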
Example #21
class SystemGeneralService(ConfigService):
    class Config:
        namespace = 'system.general'
        datastore = 'system.settings'
        datastore_prefix = 'stg_'
        datastore_extend = 'system.general.general_system_extend'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._language_choices = self._initialize_languages()
        self._timezone_choices = None
        self._kbdmap_choices = None
        self._country_choices = {}

    @private
    async def general_system_extend(self, data):
        keys = data.keys()
        for key in keys:
            if key.startswith('gui'):
                data['ui_' + key[3:]] = data.pop(key)

        data['sysloglevel'] = data['sysloglevel'].upper()
        if data['ui_certificate']:
            data['ui_certificate'] = await self.middleware.call(
                'certificate.query',
                [['id', '=', data['ui_certificate']['id']]], {'get': True})
        return data

    @accepts()
    def language_choices(self):
        return self._language_choices

    @private
    def _initialize_languages(self):
        languages = [
            ('af', 'Afrikaans'),
            ('ar', 'Arabic'),
            ('ast', 'Asturian'),
            ('az', 'Azerbaijani'),
            ('bg', 'Bulgarian'),
            ('be', 'Belarusian'),
            ('bn', 'Bengali'),
            ('br', 'Breton'),
            ('bs', 'Bosnian'),
            ('ca', 'Catalan'),
            ('cs', 'Czech'),
            ('cy', 'Welsh'),
            ('da', 'Danish'),
            ('de', 'German'),
            ('dsb', 'Lower Sorbian'),
            ('el', 'Greek'),
            ('en', 'English'),
            ('en-au', 'Australian English'),
            ('en-gb', 'British English'),
            ('eo', 'Esperanto'),
            ('es', 'Spanish'),
            ('es-ar', 'Argentinian Spanish'),
            ('es-co', 'Colombian Spanish'),
            ('es-mx', 'Mexican Spanish'),
            ('es-ni', 'Nicaraguan Spanish'),
            ('es-ve', 'Venezuelan Spanish'),
            ('et', 'Estonian'),
            ('eu', 'Basque'),
            ('fa', 'Persian'),
            ('fi', 'Finnish'),
            ('fr', 'French'),
            ('fy', 'Frisian'),
            ('ga', 'Irish'),
            ('gd', 'Scottish Gaelic'),
            ('gl', 'Galician'),
            ('he', 'Hebrew'),
            ('hi', 'Hindi'),
            ('hr', 'Croatian'),
            ('hsb', 'Upper Sorbian'),
            ('hu', 'Hungarian'),
            ('ia', 'Interlingua'),
            ('id', 'Indonesian'),
            ('io', 'Ido'),
            ('is', 'Icelandic'),
            ('it', 'Italian'),
            ('ja', 'Japanese'),
            ('ka', 'Georgian'),
            ('kab', 'Kabyle'),
            ('kk', 'Kazakh'),
            ('km', 'Khmer'),
            ('kn', 'Kannada'),
            ('ko', 'Korean'),
            ('lb', 'Luxembourgish'),
            ('lt', 'Lithuanian'),
            ('lv', 'Latvian'),
            ('mk', 'Macedonian'),
            ('ml', 'Malayalam'),
            ('mn', 'Mongolian'),
            ('mr', 'Marathi'),
            ('my', 'Burmese'),
            ('nb', 'Norwegian Bokmål'),
            ('ne', 'Nepali'),
            ('nl', 'Dutch'),
            ('nn', 'Norwegian Nynorsk'),
            ('os', 'Ossetic'),
            ('pa', 'Punjabi'),
            ('pl', 'Polish'),
            ('pt', 'Portuguese'),
            ('pt-br', 'Brazilian Portuguese'),
            ('ro', 'Romanian'),
            ('ru', 'Russian'),
            ('sk', 'Slovak'),
            ('sl', 'Slovenian'),
            ('sq', 'Albanian'),
            ('sr', 'Serbian'),
            ('sr-latn', 'Serbian Latin'),
            ('sv', 'Swedish'),
            ('sw', 'Swahili'),
            ('ta', 'Tamil'),
            ('te', 'Telugu'),
            ('th', 'Thai'),
            ('tr', 'Turkish'),
            ('tt', 'Tatar'),
            ('udm', 'Udmurt'),
            ('uk', 'Ukrainian'),
            ('ur', 'Urdu'),
            ('vi', 'Vietnamese'),
            ('zh-hans', 'Simplified Chinese'),
            ('zh-hant', 'Traditional Chinese'),
        ]
        return dict(languages)

    @private
    async def _initialize_timezone_choices(self):
        pipe = await Popen(
            'find /usr/share/zoneinfo/ -type f -not -name zone.tab -not -regex \'.*/Etc/GMT.*\'',
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=True)
        self._timezone_choices = (
            await pipe.communicate())[0].decode().strip().split('\n')
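        # x[20:] strips the '/usr/share/zoneinfo/' prefix (20 characters),
        # leaving just the timezone name, e.g. 'America/New_York'.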
        self._timezone_choices = {
            x[20:]: x[20:]
            for x in self._timezone_choices
        }

    @accepts()
    async def timezone_choices(self):
        if not self._timezone_choices:
            await self._initialize_timezone_choices()
        return self._timezone_choices

    @accepts()
    async def country_choices(self):
        if not self._country_choices:
            await self._initialize_country_choices()
        return self._country_choices

    @private
    async def _initialize_country_choices(self):
        def _get_index(country_columns, column):
            index = -1

            i = 0
            for c in country_columns:
                if c.lower() == column.lower():
                    index = i
                    break

                i += 1

            return index

        country_file = '/etc/iso_3166_2_countries.csv'
        cni, two_li = None, None
        with open(country_file, 'r', encoding='utf-8') as csvfile:
            reader = csv.reader(csvfile)

            for index, row in enumerate(reader):
                if index != 0:
                    if row[cni] and row[two_li]:
                        if row[two_li] in self._country_choices:
                            # If two countries in the iso file have the same key, we concatenate their names
                            self._country_choices[
                                row[two_li]] += f' + {row[cni]}'
                        else:
                            self._country_choices[row[two_li]] = row[cni]
                else:
                    # Only the Common Name (cni) and 2-letter code (two_li) columns are used from the CSV
                    cni = _get_index(row, 'Common Name')
                    two_li = _get_index(row, 'ISO 3166-1 2 Letter Code')

    @private
    async def _initialize_kbdmap_choices(self):
        """Populate choices from /usr/share/vt/keymaps/INDEX.keymaps"""
        index = "/usr/share/vt/keymaps/INDEX.keymaps"

        if not os.path.exists(index):
            return []
        with open(index, 'rb') as f:
            d = f.read().decode('utf8', 'ignore')
        _all = re.findall(r'^(?P<name>[^#\s]+?)\.kbd:en:(?P<desc>.+)$', d,
                          re.M)
        self._kbdmap_choices = {name: desc for name, desc in _all}

    @accepts()
    async def kbdmap_choices(self):
        if not self._kbdmap_choices:
            await self._initialize_kbdmap_choices()
        return self._kbdmap_choices

    @private
    async def validate_general_settings(self, data, schema):
        verrors = ValidationErrors()

        language = data.get('language')
        if language:
            system_languages = self.language_choices()
            if language not in system_languages.keys():
                verrors.add(
                    f'{schema}.language',
                    f'Specified language "{language}" not found, please correct it'
                )

        # TODO: kbdmap validation is not implemented yet

        timezone = data.get('timezone')
        if timezone:
            timezones = await self.timezone_choices()
            if timezone not in timezones:
                verrors.add(f'{schema}.timezone',
                            'Please select a correct timezone')

        ip_addresses = await self.middleware.call('interface.ip_in_use')
        ip4_addresses_list = [
            alias_dict['address'] for alias_dict in ip_addresses
            if alias_dict['type'] == 'INET'
        ]
        ip6_addresses_list = [
            alias_dict['address'] for alias_dict in ip_addresses
            if alias_dict['type'] == 'INET6'
        ]

        ip4_addresses = data.get('ui_address')
        for ip4_address in ip4_addresses:
            if (ip4_address and ip4_address != '0.0.0.0'
                    and ip4_address not in ip4_addresses_list):
                verrors.add(
                    f'{schema}.ui_address',
                    f'{ip4_address} ipv4 address is not associated with this machine'
                )

        ip6_addresses = data.get('ui_v6address')
        for ip6_address in ip6_addresses:
            if (ip6_address and ip6_address != '::'
                    and ip6_address not in ip6_addresses_list):
                verrors.add(
                    f'{schema}.ui_v6address',
                    f'{ip6_address} ipv6 address is not associated with this machine'
                )

        for key, wildcard, ips in [('ui_address', '0.0.0.0', ip4_addresses),
                                   ('ui_v6address', '::', ip6_addresses)]:
            if wildcard in ips and len(ips) > 1:
                verrors.add(
                    f'{schema}.{key}',
                    f'When "{wildcard}" has been selected, selection of other addresses is not allowed'
                )

        syslog_server = data.get('syslogserver')
        if syslog_server:
            match = re.match(r"^[\w\.\-]+(\:\d+)?$", syslog_server)
            if not match:
                verrors.add(f'{schema}.syslogserver',
                            'Invalid syslog server format')
            elif ':' in syslog_server:
                port = int(syslog_server.split(':')[-1])
                if port < 0 or port > 65535:
                    verrors.add(f'{schema}.syslogserver',
                                'Port specified should be between 0 and 65535')

        certificate_id = data.get('ui_certificate')
        cert = await self.middleware.call('certificate.query',
                                          [["id", "=", certificate_id]])
        if not cert:
            verrors.add(
                f'{schema}.ui_certificate',
                'Please specify a valid certificate which exists in the system'
            )
        else:
            cert = cert[0]
            verrors.extend(await self.middleware.call(
                'certificate.cert_services_validation', certificate_id,
                f'{schema}.ui_certificate', False))

            if cert['fingerprint']:
                syslog.openlog(logoption=syslog.LOG_PID,
                               facility=syslog.LOG_USER)
                syslog.syslog(
                    syslog.LOG_ERR,
                    'Fingerprint of the certificate used in UI : ' +
                    cert['fingerprint'])
                syslog.closelog()

        return verrors

    @accepts(
        Dict(
            'general_settings',
            Int('ui_certificate', null=True),
            Int('ui_httpsport', validators=[Range(min=1, max=65535)]),
            Bool('ui_httpsredirect'),
            Int('ui_port', validators=[Range(min=1, max=65535)]),
            List('ui_address', items=[IPAddr('addr')], empty=False),
            List('ui_v6address', items=[IPAddr('addr')], empty=False),
            Str('kbdmap'),
            Str('language'),
            Str('sysloglevel',
                enum=[
                    'F_EMERG', 'F_ALERT', 'F_CRIT', 'F_ERR', 'F_WARNING',
                    'F_NOTICE', 'F_INFO', 'F_DEBUG', 'F_IS_DEBUG'
                ]),
            Str('syslogserver'),
            Str('timezone'),
            update=True,
        ))
    async def do_update(self, data):
        config = await self.config()
        config['ui_certificate'] = config['ui_certificate']['id'] if config[
            'ui_certificate'] else None
        new_config = config.copy()
        new_config.update(data)

        verrors = await self.validate_general_settings(
            new_config, 'general_settings_update')
        if verrors:
            raise verrors

        # Converting new_config to map the database table fields
        new_config['sysloglevel'] = new_config['sysloglevel'].lower()
        keys = new_config.keys()
        for key in list(keys):
            if key.startswith('ui_'):
                new_config['gui' + key[3:]] = new_config.pop(key)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   config['id'], new_config,
                                   {'prefix': 'stg_'})

        # case insensitive comparison should be performed for sysloglevel
        if (config['sysloglevel'].lower() != new_config['sysloglevel'].lower()
                or config['syslogserver'] != new_config['syslogserver']):
            await self.middleware.call('service.restart', 'syslogd')

        if config['timezone'] != new_config['timezone']:
            await self.middleware.call('zettarepl.update_timezone',
                                       new_config['timezone'])
            await self.middleware.call('service.reload', 'timeservices')
            await self.middleware.call('service.restart', 'cron')

        await self.middleware.call('service.start', 'ssl')

        return await self.config()

    @accepts()
    async def local_url(self):
        config = await self.middleware.call('system.general.config')

        if config['ui_certificate']:
            protocol = 'https'
            port = config['ui_httpsport']
        else:
            protocol = 'http'
            port = config['ui_port']

        if '0.0.0.0' in config['ui_address'] or '127.0.0.1' in config[
                'ui_address']:
            hosts = ['127.0.0.1']
        else:
            hosts = config['ui_address']

        errors = []
        for host in hosts:
            try:
                reader, writer = await asyncio.wait_for(
                    asyncio.open_connection(
                        host,
                        port=port,
                    ), timeout=5)
                writer.close()

                return f'{protocol}://{host}:{port}'

            except Exception as e:
                errors.append(f'{host}: {e}')

        raise CallError(
            'Unable to connect to any of the specified UI addresses:\n' +
            '\n'.join(errors))
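
local_url above probes each configured UI address with a short TCP connect and returns the first one that answers. A standalone sketch of that probe, independent of the service:

import asyncio

async def first_reachable(hosts, port, protocol='http', timeout=5):
    # Return the first URL whose host accepts a TCP connection on the port.
    for host in hosts:
        try:
            _, writer = await asyncio.wait_for(
                asyncio.open_connection(host, port=port), timeout=timeout)
            writer.close()
            return f'{protocol}://{host}:{port}'
        except Exception:
            continue
    return None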
Example #22
class OpenVPNClientService(SystemServiceService):

    class Config:
        namespace = 'openvpn.client'
        service = 'openvpn_client'
        service_model = 'openvpnclient'
        service_verb = 'restart'
        datastore_extend = 'openvpn.client.client_extend'

    @private
    async def client_extend(self, data):
        data['client_certificate'] = None if not data['client_certificate'] else data['client_certificate']['id']
        data['root_ca'] = None if not data['root_ca'] else data['root_ca']['id']
        data['tls_crypt_auth_enabled'] = bool(data['tls_crypt_auth'])
        return data

    @accepts()
    async def authentication_algorithm_choices(self):
        """
        Returns a dictionary of valid authentication algorithms which can be used with OpenVPN client.
        """
        return OpenVPN.digests()

    @accepts()
    async def cipher_choices(self):
        """
        Returns a dictionary of valid ciphers which can be used with OpenVPN client.
        """
        return OpenVPN.ciphers()

    @private
    async def validate(self, data, schema_name):
        verrors, data = await OpenVPN.common_validation(
            self.middleware, data, schema_name, 'client'
        )

        if not data.get('remote'):
            verrors.add(
                f'{schema_name}.remote',
                'This field is required.'
            )

        if not await self.validate_nobind(data):
            verrors.add(
                f'{schema_name}.nobind',
                'Please enable this to concurrently run OpenVPN Server/Client on the same local port.'
            )

        verrors.check()

        return data

    @private
    async def validate_nobind(self, config):
        if (
            await self.middleware.call(
                'service.started',
                'openvpn_server'
            ) and config['port'] == (
                await self.middleware.call('openvpn.server.config')
            )['port'] and not config['nobind']
        ):
            return False
        else:
            return True

    @private
    async def config_valid(self):
        config = await self.config()
        if not config['root_ca']:
            raise CallError('Please configure root_ca first.')
        else:
            if not await self.middleware.call(
                'certificateauthority.query', [
                    ['id', '=', config['root_ca']],
                    ['revoked', '=', False]
                ]
            ):
                raise CallError('Root CA has been revoked. Please select another Root CA.')

        if not config['client_certificate']:
            raise CallError('Please configure client certificate first.')
        else:
            if not await self.middleware.call(
                'certificate.query', [
                    ['id', '=', config['client_certificate']],
                    ['revoked', '=', False]
                ]
            ):
                raise CallError('Client certificate has been revoked. Please select another Client certificate.')

        if not config['remote']:
            raise CallError('Please configure remote first.')

        if not await self.validate_nobind(config):
            raise CallError(
                'Please enable "nobind" to concurrently run OpenVPN Server/Client on the same local port.'
            )

    @accepts(
        Dict(
            'openvpn_client_update',
            Bool('nobind'),
            Bool('tls_crypt_auth_enabled'),
            Int('client_certificate'),
            Int('root_ca'),
            Int('port', validators=[Port()]),
            Str('additional_parameters'),
            Str('authentication_algorithm', null=True),
            Str('cipher', null=True),
            Str('compression', null=True, enum=['LZO', 'LZ4']),
            Str('device_type', enum=['TUN', 'TAP']),
            Str('protocol', enum=['UDP', 'TCP']),
            Str('remote'),
            Str('tls_crypt_auth', null=True),
            update=True
        )
    )
    async def do_update(self, data):
        """
        Update OpenVPN Client configuration.

        `remote` can be a valid ip address / domain which openvpn will try to connect to.

        `nobind` must be enabled if OpenVPN client / server are to run concurrently.
        """
        old_config = await self.config()
        config = old_config.copy()

        config.update(data)

        config = await self.validate(config, 'openvpn_client_update')

        await self._update_service(old_config, config)

        return await self.config()
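
validate_nobind in both OpenVPN services implements the same rule; a minimal sketch of that predicate in isolation:

def nobind_ok(peer_running, local_port, peer_port, nobind):
    # Sharing a local port with a running OpenVPN peer is only allowed when
    # this endpoint does not bind to it (nobind enabled).
    if peer_running and local_port == peer_port and not nobind:
        return False
    return True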
Example #23
class UserService(CRUDService):
    class Config:
        datastore = 'account.bsdusers'
        datastore_extend = 'user.user_extend'
        datastore_prefix = 'bsdusr_'

    @private
    async def user_extend(self, user):

        # Normalize email, empty is really null
        if user['email'] == '':
            user['email'] = None

        # Get group membership
        user['groups'] = [
            gm['group']['id'] for gm in await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership', [(
                    'user', '=', user['id'])], {'prefix': 'bsdgrpmember_'})
        ]

        # Get authorized keys
        keysfile = f'{user["home"]}/.ssh/authorized_keys'
        user['sshpubkey'] = None
        if os.path.exists(keysfile):
            try:
                with open(keysfile, 'r') as f:
                    user['sshpubkey'] = f.read()
            except Exception:
                pass
        return user

    @private
    async def user_compress(self, user):
        if 'local' in user:
            user.pop('local')
        if 'id_type_both' in user:
            user.pop('id_type_both')
        return user

    @filterable
    async def query(self, filters=None, options=None):
        """
        Query users with `query-filters` and `query-options`. As a performance optimization, only local users
        will be queried by default.

        Users from directory services such as NIS, LDAP, or Active Directory will be included in query results
        if the option `{'extra': {'search_dscache': True}}` is specified.
        """
        if not filters:
            filters = []

        options = options or {}
        options['extend'] = self._config.datastore_extend
        options['extend_context'] = self._config.datastore_extend_context
        options['prefix'] = self._config.datastore_prefix

        datastore_options = options.copy()
        datastore_options.pop('count', None)
        datastore_options.pop('get', None)

        extra = options.get('extra', {})
        dssearch = extra.pop('search_dscache', False)

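        # When search_dscache is set, query the directory services cache
        # instead of the local datastore.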
        if dssearch:
            return await self.middleware.call('dscache.query', 'USERS',
                                              filters, options)

        result = await self.middleware.call('datastore.query',
                                            self._config.datastore, [],
                                            datastore_options)
        for entry in result:
            entry.update({'local': True, 'id_type_both': False})
        return await self.middleware.run_in_thread(filter_list, result,
                                                   filters, options)

    @accepts(
        Dict(
            'user_create',
            Int('uid'),
            Str('username', required=True, max_length=16),
            Int('group'),
            Bool('group_create', default=False),
            Str('home', default='/nonexistent'),
            Str('home_mode', default='755'),
            Str('shell', default='/bin/csh' if IS_FREEBSD else '/usr/bin/zsh'),
            Str('full_name', required=True),
            Str('email', validators=[Email()], null=True, default=None),
            Str('password', private=True),
            Bool('password_disabled', default=False),
            Bool('locked', default=False),
            Bool('microsoft_account', default=False),
            Bool('smb', default=True),
            Bool('sudo', default=False),
            Str('sshpubkey', null=True, max_length=None),
            List('groups', default=[]),
            Dict('attributes', additional_attrs=True),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a new user.

        If `uid` is not provided it is automatically filled with the next one available.

        `group` is required if `group_create` is false.

        `password` is required if `password_disabled` is false.

        Available choices for `shell` can be retrieved with `user.shell_choices`.

        `attributes` is a general-purpose object for storing arbitrary user information.

        `smb` specifies whether the user should be allowed access to SMB shares. User
        will also automatically be added to the `builtin_users` group.
        """
        verrors = ValidationErrors()

        if (not data.get('group') and not data.get('group_create')) or (
                data.get('group') is not None and data.get('group_create')):
            verrors.add(
                'user_create.group',
                'Either provide a group or enable group_create, '
                'but not both.', errno.EINVAL)

        await self.__common_validation(verrors, data, 'user_create')

        if data.get('sshpubkey') and not data['home'].startswith('/mnt'):
            verrors.add(
                'user_create.sshpubkey',
                'The home directory is not writable. Leave this field blank.')

        verrors.check()

        groups = data.pop('groups')
        create = data.pop('group_create')

        if create:
            group = await self.middleware.call(
                'group.query', [('group', '=', data['username'])])
            if group:
                group = group[0]
            else:
                group = await self.middleware.call('group.create', {
                    'name': data['username'],
                    'smb': False
                })
                group = (await self.middleware.call('group.query',
                                                    [('id', '=', group)]))[0]
            data['group'] = group['id']
        else:
            group = await self.middleware.call('group.query',
                                               [('id', '=', data['group'])])
            if not group:
                raise CallError(f'Group {data["group"]} not found')
            group = group[0]

        if data['smb']:
            groups.append(
                (await self.middleware.call('group.query',
                                            [('group', '=', 'builtin_users')],
                                            {'get': True}))['id'])

        # Track whether we created the home directory so that we never remove
        # pre-existing directories, e.g. /, /root, /mnt/tank/my-dataset.
        new_homedir = False
        home_mode = data.pop('home_mode')
        if data['home'] and data['home'] != '/nonexistent':
            try:
                try:
                    os.makedirs(data['home'], mode=int(home_mode, 8))
                    new_homedir = True
                    await self.middleware.call(
                        'filesystem.setperm', {
                            'path': data['home'],
                            'mode': home_mode,
                            'uid': data['uid'],
                            'gid': group['gid'],
                            'options': {
                                'stripacl': True
                            }
                        })
                except FileExistsError:
                    if not os.path.isdir(data['home']):
                        raise CallError(
                            'Path for home directory already '
                            'exists and is not a directory', errno.EEXIST)

                    # If it exists, ensure the user is owner.
                    await self.middleware.call(
                        'filesystem.chown', {
                            'path': data['home'],
                            'uid': data['uid'],
                            'gid': group['gid'],
                        })
                except OSError as oe:
                    raise CallError('Failed to create the home directory '
                                    f'({data["home"]}) for user: {oe}')
            except Exception:
                if new_homedir:
                    shutil.rmtree(data['home'])
                raise

        if not data.get('uid'):
            data['uid'] = await self.get_next_uid()

        pk = None  # Make sure pk exists to rollback in case of an error
        data = await self.user_compress(data)
        try:
            await self.__set_password(data)
            sshpubkey = data.pop('sshpubkey',
                                 None)  # datastore does not have sshpubkey

            pk = await self.middleware.call('datastore.insert',
                                            'account.bsdusers', data,
                                            {'prefix': 'bsdusr_'})

            await self.__set_groups(pk, groups)

        except Exception:
            if pk is not None:
                await self.middleware.call('datastore.delete',
                                           'account.bsdusers', pk)
            if new_homedir:
                # Roll back the home directory we created so that user
                # creation remains as atomic as possible on failure.
                shutil.rmtree(data['home'])
            raise

        await self.middleware.call('service.reload', 'user')

        if data['smb']:
            await self.__set_smbpasswd(data['username'])

        if os.path.exists(data['home']):
            for f in os.listdir(SKEL_PATH):
                if f.startswith('dot'):
                    dest_file = os.path.join(data['home'], f[3:])
                else:
                    dest_file = os.path.join(data['home'], f)
                if not os.path.exists(dest_file):
                    shutil.copyfile(os.path.join(SKEL_PATH, f), dest_file)
                    await self.middleware.call(
                        'filesystem.chown', {
                            'path': dest_file,
                            'uid': data['uid'],
                            'gid': group['gid'],
                            'options': {
                                'recursive': True
                            }
                        })

            data['sshpubkey'] = sshpubkey
            try:
                await self.update_sshpubkey(data['home'], data, group['group'])
            except PermissionError as e:
                self.logger.warn('Failed to update authorized keys',
                                 exc_info=True)
                raise CallError(f'Failed to update authorized keys: {e}')

        return pk

    @accepts(
        Int('id'),
        Patch(
            'user_create',
            'user_update',
            ('attr', {
                'update': True
            }),
            ('rm', {
                'name': 'group_create'
            }),
        ),
    )
    async def do_update(self, pk, data):
        """
        Update attributes of an existing user.
        """

        user = await self._get_instance(pk)

        verrors = ValidationErrors()

        if 'group' in data:
            group = await self.middleware.call('datastore.query',
                                               'account.bsdgroups',
                                               [('id', '=', data['group'])])
            if not group:
                verrors.add('user_update.group',
                            f'Group {data["group"]} not found', errno.ENOENT)
            group = group[0]
        else:
            group = user['group']
            user['group'] = group['id']

        await self.__common_validation(verrors, data, 'user_update', pk=pk)

        home = data.get('home') or user['home']
        has_home = home != '/nonexistent'
        # root user (uid 0) is an exception to the rule
        if data.get('sshpubkey'
                    ) and not home.startswith('/mnt') and user['uid'] != 0:
            verrors.add('user_update.sshpubkey',
                        'Home directory is not writable, leave this blank')

        # Do not allow attributes to be changed for builtin user
        if user['builtin']:
            for i in ('group', 'home', 'home_mode', 'uid', 'username'):
                if i in data:
                    verrors.add(f'user_update.{i}',
                                'This attribute cannot be changed')

        verrors.check()

        # Copy the home directory if it changed
        if (has_home and 'home' in data and data['home'] != user['home']
                and not data['home'].startswith(f'{user["home"]}/')):
            home_copy = True
            home_old = user['home']
        else:
            home_copy = False

        # After this point user dict has values from data
        user.update(data)

        if home_copy and not os.path.isdir(user['home']):
            try:
                os.makedirs(user['home'])
                await self.middleware.call(
                    'filesystem.chown', {
                        'path': user['home'],
                        'uid': user['uid'],
                        'gid': group['bsdgrp_gid'],
                    })
            except OSError:
                self.logger.warn('Failed to chown homedir', exc_info=True)
            if not os.path.isdir(user['home']):
                raise CallError(f'{user["home"]} is not a directory')

        home_mode = user.pop('home_mode', None)
        if user['builtin']:
            home_mode = None

        def set_home_mode():
            if home_mode is not None:
                try:
                    # Strip ACL before chmod. This is required when aclmode = restricted
                    setfacl = subprocess.run(
                        ['/bin/setfacl', '-b', user['home']], check=False)
                    if setfacl.returncode != 0 and setfacl.stderr:
                        self.logger.debug('Failed to strip ACL: %s',
                                          setfacl.stderr.decode())
                    os.chmod(user['home'], int(home_mode, 8))
                except OSError:
                    self.logger.warn('Failed to set homedir mode',
                                     exc_info=True)

        try:
            update_sshpubkey_args = [
                home_old if home_copy else user['home'],
                user,
                group['bsdgrp_group'],
            ]
            await self.update_sshpubkey(*update_sshpubkey_args)
        except PermissionError as e:
            self.logger.warn('Failed to update authorized keys', exc_info=True)
            raise CallError(f'Failed to update authorized keys: {e}')
        else:
            if user['uid'] == 0:
                if await self.middleware.call('failover.licensed'):
                    try:
                        await self.middleware.call('failover.call_remote',
                                                   'user.update_sshpubkey',
                                                   update_sshpubkey_args)
                    except Exception:
                        self.logger.error(
                            'Failed to sync root ssh pubkey to standby node',
                            exc_info=True)

        if home_copy:

            def do_home_copy():
                try:
                    command = f"/bin/cp -a {shlex.quote(home_old) + '/'} {shlex.quote(user['home'] + '/')}"
                    subprocess.run(
                        ["/usr/bin/su", "-", user["username"], "-c", command],
                        check=True)
                except subprocess.CalledProcessError as e:
                    self.logger.warn(f"Failed to copy homedir: {e}")
                set_home_mode()

            asyncio.ensure_future(self.middleware.run_in_thread(do_home_copy))
        elif has_home:
            asyncio.ensure_future(self.middleware.run_in_thread(set_home_mode))

        user.pop('sshpubkey', None)
        await self.__set_password(user)

        if 'groups' in user:
            groups = user.pop('groups')
            await self.__set_groups(pk, groups)

        user = await self.user_compress(user)
        await self.middleware.call('datastore.update', 'account.bsdusers', pk,
                                   user, {'prefix': 'bsdusr_'})

        await self.middleware.call('service.reload', 'user')
        if user['smb']:
            await self.__set_smbpasswd(user['username'])

        return pk

    @accepts(Int('id'), Dict('options', Bool('delete_group', default=True)))
    async def do_delete(self, pk, options=None):
        """
        Delete user `id`.

        The `delete_group` option deletes the user primary group if it is not being used by
        any other user.
        """

        user = await self._get_instance(pk)

        if user['builtin']:
            raise CallError('Cannot delete a built-in user', errno.EINVAL)

        if options['delete_group'] and not user['group']['bsdgrp_builtin']:
            count = await self.middleware.call(
                'datastore.query', 'account.bsdgroupmembership',
                [('group', '=', user['group']['id'])], {
                    'prefix': 'bsdgrpmember_',
                    'count': True
                })
            count2 = await self.middleware.call(
                'datastore.query', 'account.bsdusers',
                [('group', '=', user['group']['id']), ('id', '!=', pk)], {
                    'prefix': 'bsdusr_',
                    'count': True
                })
            if count == 0 and count2 == 0:
                try:
                    await self.middleware.call('group.delete',
                                               user['group']['id'])
                except Exception:
                    self.logger.warn(
                        f'Failed to delete primary group of {user["username"]}',
                        exc_info=True)

        if user['smb']:
            await run('smbpasswd', '-x', user['username'], check=False)

        # TODO: add a hook in CIFS service
        cifs = await self.middleware.call('datastore.query', 'services.cifs',
                                          [], {'prefix': 'cifs_srv_'})
        if cifs:
            cifs = cifs[0]
            if cifs['guest'] == user['username']:
                await self.middleware.call('datastore.update', 'services.cifs',
                                           cifs['id'], {'guest': 'nobody'},
                                           {'prefix': 'cifs_srv_'})

        await self.middleware.call('datastore.delete', 'account.bsdusers', pk)
        await self.middleware.call('service.reload', 'user')

        return pk

    @accepts(Int('user_id', default=None, null=True))
    def shell_choices(self, user_id=None):
        """
        Return the available shell choices to be used in `user.create` and `user.update`.

        If `user_id` is provided, the choices are filtered to ensure the user can access the shells returned.
        """
        user = self.middleware.call_sync('user.get_instance',
                                         user_id) if user_id else None
        with open('/etc/shells', 'r') as f:
            shells = [x.rstrip() for x in f.readlines() if x.startswith('/')]
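        # Shells containing 'netcli' (the console setup menu) are only offered
        # when the requesting user is root.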
        return {
            shell: os.path.basename(shell)
            for shell in (shells + ['/usr/sbin/nologin'])
            if 'netcli' not in shell or (user and user['username'] == 'root')
        }

    @accepts(
        Dict('get_user_obj', Str('username', default=None),
             Int('uid', default=None)))
    async def get_user_obj(self, data):
        """
        Returns a dictionary containing information from struct passwd for the user specified by either
        the username or uid. Bypasses the user cache.
        """
        return await self.middleware.call('dscache.get_uncached_user',
                                          data['username'], data['uid'])

    @item_method
    @accepts(
        Int('id'),
        Str('key'),
        Any('value'),
    )
    async def set_attribute(self, pk, key, value):
        """
        Set user general purpose `attributes` dictionary `key` to `value`.

        e.g. Setting key="foo" value="bar" will result in {"attributes": {"foo": "bar"}}
        """
        user = await self._get_instance(pk)

        user['attributes'][key] = value

        await self.middleware.call('datastore.update', 'account.bsdusers', pk,
                                   {'attributes': user['attributes']},
                                   {'prefix': 'bsdusr_'})

        return True

    @item_method
    @accepts(
        Int('id'),
        Str('key'),
    )
    async def pop_attribute(self, pk, key):
        """
        Remove user general purpose `attributes` dictionary `key`.
        """
        user = await self._get_instance(pk)

        if key in user['attributes']:
            user['attributes'].pop(key)

            await self.middleware.call('datastore.update', 'account.bsdusers',
                                       pk, {'attributes': user['attributes']},
                                       {'prefix': 'bsdusr_'})
            return True
        else:
            return False

    @accepts()
    async def get_next_uid(self):
        """
        Get the next available/free uid.
        """
        last_uid = 999
        for i in await self.middleware.call('datastore.query',
                                            'account.bsdusers',
                                            [('builtin', '=', False)], {
                                                'order_by': ['uid'],
                                                'prefix': 'bsdusr_'
                                            }):
            # If the difference between the last uid and the current one is
            # bigger than 1, it means we have a gap and can use it.
            if i['uid'] - last_uid > 1:
                return last_uid + 1
            last_uid = i['uid']
        return last_uid + 1
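
    # Worked example: with existing non-builtin uids [1000, 1001, 1005] the
    # first gap follows 1001, so 1002 is returned; with no gaps the result is
    # the highest uid + 1, and with no non-builtin users at all it is 1000.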

    @no_auth_required
    @accepts()
    async def has_root_password(self):
        """
        Return whether the root user has a valid password set.

        This is used when the system is installed without a password and must be set on
        first use/login.
        """
        return (await self.middleware.call(
            'datastore.query', 'account.bsdusers', [
                ('bsdusr_username', '=', 'root')
            ], {'get': True}))['bsdusr_unixhash'] != '*'

    @no_auth_required
    @accepts(Str('password'),
             Dict(
                 'options',
                 Dict(
                     'ec2',
                     Str('instance_id', required=True),
                 ),
                 update=True,
             ))
    @pass_app()
    async def set_root_password(self, app, password, options):
        """
        Set password for root user if it is not already set.
        """
        if not app.authenticated:
            if await self.middleware.call('user.has_root_password'):
                raise CallError(
                    'You cannot call this method anonymously if root already has a password',
                    errno.EACCES)

            if await self.middleware.call('system.environment') == 'EC2':
                if 'ec2' not in options:
                    raise CallError(
                        'You need to specify instance ID when setting initial root password on EC2 instance',
                        errno.EACCES,
                    )

                if options['ec2']['instance_id'] != await self.middleware.call(
                        'ec2.instance_id'):
                    raise CallError('Incorrect EC2 instance ID', errno.EACCES)

        root = await self.middleware.call('user.query',
                                          [('username', '=', 'root')],
                                          {'get': True})
        await self.middleware.call('user.update', root['id'],
                                   {'password': password})

    async def __common_validation(self, verrors, data, schema, pk=None):

        exclude_filter = [('id', '!=', pk)] if pk else []

        if 'username' in data:
            pw_checkname(verrors, f'{schema}.username', data['username'])

            if await self.middleware.call(
                    'datastore.query', 'account.bsdusers',
                [('username', '=', data['username'])] + exclude_filter,
                {'prefix': 'bsdusr_'}):
                verrors.add(
                    f'{schema}.username',
                    f'The username "{data["username"]}" already exists.',
                    errno.EEXIST)

        password = data.get('password')
        if password and '?' in password:
            # See bug #4098
            verrors.add(
                f'{schema}.password',
                'An SMB issue prevents creating passwords containing a '
                'question mark (?).', errno.EINVAL)
        elif not pk and not password and not data.get('password_disabled'):
            verrors.add(f'{schema}.password', 'Password is required')
        elif data.get('password_disabled') and password:
            verrors.add(
                f'{schema}.password_disabled',
                'Leave "Password" blank when "Disable password login" is checked.'
            )

        if 'home' in data:
            if ':' in data['home']:
                verrors.add(f'{schema}.home',
                            '"Home Directory" cannot contain colons (:).')
            if data['home'] != '/nonexistent':
                if not data['home'].startswith('/mnt/'):
                    verrors.add(
                        f'{schema}.home',
                        '"Home Directory" must begin with /mnt/ or set to '
                        '/nonexistent.')
                elif not any(
                        data['home'] == i['path']
                        or data['home'].startswith(i['path'] + '/')
                        for i in await self.middleware.call('pool.query')):
                    verrors.add(
                        f'{schema}.home',
                        f'The path for the home directory "({data["home"]})" '
                        'must include a volume or dataset.')
                elif await self.middleware.call('filesystem.path_is_encrypted',
                                                data['home']):
                    verrors.add(
                        f'{schema}.home',
                        'Path component for "Home Directory" is currently encrypted and locked'
                    )

        if 'home_mode' in data:
            try:
                o = int(data['home_mode'], 8)
                assert o & 0o777 == o
            except (AssertionError, ValueError, TypeError):
                verrors.add(
                    f'{schema}.home_mode',
                    'Please provide a valid value for home_mode attribute')

        if 'groups' in data:
            groups = data.get('groups') or []
            if groups and len(groups) > 64:
                verrors.add(
                    f'{schema}.groups',
                    'A user cannot belong to more than 64 auxiliary groups.')

        if 'full_name' in data and ':' in data['full_name']:
            verrors.add(f'{schema}.full_name',
                        'The ":" character is not allowed in a "Full Name".')

        if 'shell' in data and data['shell'] not in await self.middleware.call(
                'user.shell_choices', pk):
            verrors.add(f'{schema}.shell', 'Please select a valid shell.')

    async def __set_password(self, data):
        if 'password' not in data:
            return
        password = data.pop('password')
        if password:
            data['unixhash'] = crypted_password(password)
            # See http://samba.org.ru/samba/docs/man/manpages/smbpasswd.5.html
            data['smbhash'] = (
                f'{data["username"]}:{data["uid"]}:{"X" * 32}:'
                f'{nt_password(password)}:[U         ]:LCT-{int(time.time()):X}:'
            )
        else:
            data['unixhash'] = '*'
            data['smbhash'] = '*'
        return password
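
    # Illustrative smbhash for hypothetical values username='alice', uid=1001:
    #   alice:1001:XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:<NT hash>:[U         ]:LCT-<hex epoch>: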

    async def __set_smbpasswd(self, username):
        """
        This method will update or create an entry in samba's passdb.tdb file.
        Update will only happen if the account's nt_password has changed or
        if the account's 'locked' state has changed. Samba's passdb python
        library will raise an exception if a corresponding Unix user does not
        exist. That is the reason we have two methods/steps to set password.
        """
        await self.middleware.call('smb.update_passdb_user', username)

    async def __set_groups(self, pk, groups):

        groups = set(groups)
        existing_ids = set()
        for gm in await self.middleware.call('datastore.query',
                                             'account.bsdgroupmembership',
                                             [('user', '=', pk)],
                                             {'prefix': 'bsdgrpmember_'}):
            if gm['id'] not in groups:
                await self.middleware.call('datastore.delete',
                                           'account.bsdgroupmembership',
                                           gm['id'])
            else:
                existing_ids.add(gm['id'])

        for _id in groups - existing_ids:
            group = await self.middleware.call('datastore.query',
                                               'account.bsdgroups',
                                               [('id', '=', _id)],
                                               {'prefix': 'bsdgrp_'})
            if not group:
                raise CallError(f'Group {_id} not found', errno.ENOENT)
            await self.middleware.call('datastore.insert',
                                       'account.bsdgroupmembership', {
                                           'group': _id,
                                           'user': pk
                                       }, {'prefix': 'bsdgrpmember_'})

    @private
    async def update_sshpubkey(self, homedir, user, group):
        if 'sshpubkey' not in user:
            return
        if not os.path.isdir(homedir):
            return

        sshpath = f'{homedir}/.ssh'
        keysfile = f'{sshpath}/authorized_keys'
        gid = -1

        pubkey = user.get('sshpubkey') or ''
        pubkey = pubkey.strip()
        if pubkey == '':
            try:
                os.unlink(keysfile)
            except OSError:
                pass
            return

        oldpubkey = ''
        try:
            with open(keysfile, 'r') as f:
                oldpubkey = f.read().strip()
        except Exception:
            pass

        if pubkey == oldpubkey:
            return

        if not os.path.isdir(sshpath):
            os.mkdir(sshpath, mode=0o700)
        if not os.path.isdir(sshpath):
            raise CallError(f'{sshpath} is not a directory')

        # Make extra sure to enforce the correct mode on the .ssh directory.
        # Stripping the ACL allows subsequent chmod calls to succeed even if
        # the dataset aclmode is restricted.
        try:
            gid = (await self.middleware.call('group.get_group_obj',
                                              {'groupname': group}))['gr_gid']
        except Exception:
            # leaving gid at -1 avoids altering the GID value.
            self.logger.debug("Failed to convert %s to gid",
                              group,
                              exc_info=True)

        await self.middleware.call(
            'filesystem.setperm', {
                'path': sshpath,
                'mode': str(700),
                'uid': user['uid'],
                'gid': gid,
                'options': {
                    'recursive': True,
                    'stripacl': True
                }
            })

        with open(keysfile, 'w') as f:
            f.write(pubkey)
            f.write('\n')
        await self.middleware.call('filesystem.setperm', {
            'path': keysfile,
            'mode': str(600)
        })
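
A minimal usage sketch for the user service above, assuming the standard middlewared websocket client (`middlewared.client.Client`); account values are illustrative:

from middlewared.client import Client

with Client() as c:
    # Generic query-filters / query-options syntax, as accepted by user.query.
    root = c.call('user.query', [['username', '=', 'root']], {'get': True})

    next_uid = c.call('user.get_next_uid')             # next free uid (>= 1000)
    shells = c.call('user.shell_choices', root['id'])  # shells this account may be assigned
    pwd_info = c.call('user.get_user_obj', {'username': 'root'})  # struct passwd info, cache bypassed
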
Example #24
class ChartReleaseService(Service):
    class Config:
        namespace = 'chart.release'

    @private
    async def scale_down_workloads_before_snapshot(self, job, release):
        resources = []
        pod_mapping = await self.middleware.call(
            'chart.release.get_workload_to_pod_mapping', release['namespace'])
        pods_to_watch_for = []
        for resource in SCALEABLE_RESOURCES:
            for workload in await self.middleware.call(
                    f'k8s.{resource.name.lower()}.query',
                [
                    [
                        f'metadata.annotations.{SCALE_DOWN_ANNOTATION["key"]}',
                        'in', SCALE_DOWN_ANNOTATION['value']
                    ],
                    ['metadata.namespace', '=', release['namespace']],
                ]):
                resources.append({
                    'replica_count': 0,
                    'type': resource.name,
                    'name': workload['metadata']['name'],
                })
                pods_to_watch_for.extend(
                    pod_mapping[workload['metadata']['uid']])

        if not resources:
            return

        job.set_progress(
            35,
            f'Scaling down {", ".join([r["name"] for r in resources])} workload(s)'
        )
        await self.middleware.call('chart.release.scale_workloads',
                                   release['id'], resources)
        await self.middleware.call(
            'chart.release.wait_for_pods_to_terminate', release['namespace'], [
                ['metadata.name', 'in', pods_to_watch_for],
            ])
        job.set_progress(40, 'Successfully scaled down workload(s)')

    @accepts(Str('release_name'),
             Dict(
                 'upgrade_options',
                 Dict('values', additional_attrs=True),
                 Str('item_version', default='latest'),
             ))
    @returns(Ref('chart_release_entry'))
    @job(lock=lambda args: f'chart_release_upgrade_{args[0]}')
    async def upgrade(self, job, release_name, options):
        """
        Upgrade `release_name` chart release.

        `upgrade_options.item_version` specifies the item version to which the chart release should be upgraded.

        The system will also update the container images used by the `release_name` chart release, as an upgrade
        is not considered complete until the images in use have been updated to their latest versions as well.

        During upgrade, `upgrade_options.values` can be specified to apply configuration changes to the chart
        release in question.

        When the chart version is upgraded, the system automatically takes a snapshot of the `ix_volumes` in
        question, which can be used for a rollback later on.
        """
        await self.middleware.call('kubernetes.validate_k8s_setup')
        release = await self.middleware.call('chart.release.get_instance',
                                             release_name)
        if not release['update_available'] and not release[
                'container_images_update_available']:
            raise CallError('No update is available for chart release')

        # We need to update container images before upgrading chart version as it's possible that the chart version
        # in question needs newer image hashes.
        job.set_progress(10, 'Updating container images')
        images_job = await self.middleware.call(
            'chart.release.pull_container_images', release_name, {'redeploy': False})
        await images_job.wait(raise_error=True)
        job.set_progress(30, 'Updated container images')

        await self.scale_down_workloads_before_snapshot(job, release)

        # If a snapshot of the volumes already exists with the same name (left over from a failed upgrade),
        # remove it so that the snapshot reflects the current point in time.
        # TODO: Remove volumes/ix_volumes check in next release as we are going to do a recursive snapshot
        #  from parent volumes ds moving on
        for filesystem in ('volumes', 'volumes/ix_volumes'):
            volumes_ds = os.path.join(release['dataset'], filesystem)
            snap_name = f'{volumes_ds}@{release["version"]}'
            if await self.middleware.call('zfs.snapshot.query',
                                          [['id', '=', snap_name]]):
                await self.middleware.call('zfs.snapshot.delete', snap_name,
                                           {'recursive': True})

        await self.middleware.call(
            'zfs.snapshot.create', {
                'dataset': os.path.join(release['dataset'], 'volumes'),
                'name': release['version'],
                'recursive': True
            })
        job.set_progress(50, 'Created snapshot for upgrade')

        if release['update_available']:
            await self.upgrade_chart_release(job, release, options)
        else:
            await (await
                   self.middleware.call('chart.release.redeploy',
                                        release_name)).wait(raise_error=True)

        chart_release = await self.middleware.call(
            'chart.release.get_instance', release_name)
        self.middleware.send_event('chart.release.query',
                                   'CHANGED',
                                   id=release_name,
                                   fields=chart_release)

        await self.chart_releases_update_checks_internal(
            [['id', '=', release_name]])

        job.set_progress(100, 'Upgrade complete for chart release')

        return chart_release

    @accepts(Str('release_name'),
             Dict('options', Str('item_version', default='latest',
                                 empty=False)))
    @returns(
        Dict(
            Bool('image_update_available', required=True),
            Bool('item_update_available', required=True),
            Dict(
                'container_images_to_update',
                additional_attrs=True,
                description=
                'Dictionary of container image(s) which have an update available against the same tag',
            ), Str('latest_version'), Str('latest_human_version'),
            Str('upgrade_version'), Str('upgrade_human_version'),
            Str('changelog', max_length=None, null=True),
            List('available_versions_for_upgrade',
                 items=[
                     Dict(
                         'version_info',
                         Str('version', required=True),
                         Str('human_version', required=True),
                     )
                 ])))
    async def upgrade_summary(self, release_name, options):
        """
        Retrieve an upgrade summary for `release_name`, including which container images will be updated
        and the changelog for the specified `options.item_version` chart version, if applicable. If only
        container images need to be updated, `changelog` will be `null`.

        If chart release `release_name` does not require an upgrade, an error will be raised.
        """
        release = await self.middleware.call('chart.release.query',
                                             [['id', '=', release_name]], {
                                                 'extra': {
                                                     'retrieve_resources': True
                                                 },
                                                 'get': True
                                             })
        if not release['update_available'] and not release[
                'container_images_update_available']:
            raise CallError('No update is available for chart release',
                            errno=errno.ENOENT)

        version_info = {
            'latest_version': release['chart_metadata']['version'],
            'upgrade_version': release['chart_metadata']['version'],
            'latest_human_version': release['human_version'],
            'upgrade_human_version': release['human_version'],
        }
        changelog = None
        all_newer_versions = []
        if release['update_available']:
            available_items = await self.get_versions(release, options)
            latest_item = available_items['latest_version']
            upgrade_version = available_items['specified_version']
            version_info.update({
                'latest_version': latest_item['version'],
                'latest_human_version': latest_item['human_version'],
                'upgrade_version': upgrade_version['version'],
                'upgrade_human_version': upgrade_version['human_version'],
            })
            changelog = upgrade_version['changelog']
            all_newer_versions = [
                {
                    'version': v['version'],
                    'human_version': v['human_version'],
                } for v in available_items['versions'].values()
                if parse_version(v['version']) > parse_version(
                    release['chart_metadata']['version'])
            ]

        return {
            'container_images_to_update': {
                k: v
                for k, v in release['resources']['container_images'].items()
                if v['update_available']
            },
            'changelog': changelog,
            'available_versions_for_upgrade': all_newer_versions,
            'item_update_available': release['update_available'],
            'image_update_available': release['container_images_update_available'],
            **version_info,
        }

    @private
    async def get_version(self, release, options):
        return (await self.get_versions(release, options))['specified_version']

    @private
    async def get_versions(self, release, options):
        current_chart = release['chart_metadata']
        chart = current_chart['name']
        item_details = await self.middleware.call(
            'catalog.get_item_details', chart, {
                'catalog': release['catalog'],
                'train': release['catalog_train'],
            })

        new_version = options['item_version']
        if new_version == 'latest':
            new_version = await self.middleware.call(
                'chart.release.get_latest_version_from_item_versions',
                item_details['versions'])

        if new_version not in item_details['versions']:
            raise CallError(
                f'Unable to locate specified {new_version!r} item version.')

        verrors = ValidationErrors()
        if parse_version(new_version) <= parse_version(
                current_chart['version']):
            verrors.add(
                'upgrade_options.item_version',
                f'Upgrade version must be greater than the current version {current_chart["version"]!r}.'
            )

        verrors.check()

        return {
            'specified_version': item_details['versions'][new_version],
            'versions': item_details['versions'],
            'latest_version': item_details['versions'][await self.middleware.call(
                'chart.release.get_latest_version_from_item_versions',
                item_details['versions'])],
        }

    @private
    async def upgrade_chart_release(self, job, release, options):
        release_orig = copy.deepcopy(release)
        release_name = release['name']

        catalog_item = await self.get_version(release, options)
        await self.middleware.call('catalog.version_supported_error_check',
                                   catalog_item)

        config = await self.middleware.call('chart.release.upgrade_values',
                                            release, catalog_item['location'])
        release_orig['config'] = config

        # We validate the values specified here. Allowing the user to specify values is important because the
        # upgraded catalog item version might have a different schema, which potentially means that the upgrade
        # won't work, or that even if the new k8s resources are created/deployed, they won't function as they
        # should because of changed or newly expected params.
        # One tricky bit to account for first is removing any key from the currently configured values which the
        # upgraded release will potentially not support. We can safely remove those, as otherwise validation
        # would fail because the new schema does not expect those keys.
        config = clean_values_for_upgrade(config,
                                          catalog_item['schema']['questions'])
        config.update(options['values'])

        config, context = await self.middleware.call(
            'chart.release.normalise_and_validate_values',
            catalog_item,
            config,
            False,
            release['dataset'],
            release_orig,
        )
        job.set_progress(
            50, 'Initial validation complete for upgrading chart version')

        # We have validated configuration now

        chart_path = os.path.join(release['path'], 'charts',
                                  catalog_item['version'])
        await self.middleware.run_in_thread(shutil.rmtree,
                                            chart_path,
                                            ignore_errors=True)
        await self.middleware.run_in_thread(shutil.copytree,
                                            catalog_item['location'],
                                            chart_path)

        await self.middleware.call('chart.release.perform_actions', context)

        # Let's update context options to reflect that an upgrade is taking place and from which version to which
        # version it's happening.
        # Helm considers simple config change as an upgrade as well, and we have no way of determining the old/new
        # chart versions during helm upgrade in the helm template, hence the requirement for a context object.
        config[CONTEXT_KEY_NAME].update({
            **get_action_context(release_name),
            'operation': 'UPGRADE',
            'isUpgrade': True,
            'upgradeMetadata': {
                'oldChartVersion': release['chart_metadata']['version'],
                'newChartVersion': catalog_item['version'],
                'preUpgradeRevision': release['version'],
            }
        })

        job.set_progress(60, 'Upgrading chart release version')

        await self.middleware.call('chart.release.helm_action', release_name,
                                   chart_path, config, 'upgrade')
        await self.middleware.call('chart.release.refresh_events_state',
                                   release_name)

    @private
    def upgrade_values(self, release, new_version_path):
        config = copy.deepcopy(release['config'])
        chart_version = release['chart_metadata']['version']
        migration_path = os.path.join(new_version_path, 'migrations')
        migration_files = [
            os.path.join(migration_path, k)
            for k in (f'migrate_from_{chart_version}', 'migrate')
        ]
        if not os.path.exists(migration_path) or all(not os.access(p, os.X_OK)
                                                     for p in migration_files):
            return config

        # This is guaranteed to exist based on above check
        file_path = next(f for f in migration_files if os.access(f, os.X_OK))

        with tempfile.NamedTemporaryFile(mode='w+') as f:
            f.write(json.dumps(config))
            f.flush()
            cp = subprocess.Popen([file_path, f.name],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
            stdout, stderr = cp.communicate()

        if cp.returncode:
            raise CallError(f'Failed to apply migration: {stderr.decode()}')

        if stdout:
            # We add this as a safety net in case something went wrong with the migration and we get a null response
            # or the chart dev mishandled something - although we don't suppress any exceptions which might be raised
            config = json.loads(stdout.decode())

        return config
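
    # The migration file invoked above receives the path of a JSON dump of the
    # current config as its only argument and must print the migrated config as
    # JSON on stdout. A hypothetical minimal 'migrate' script could therefore be:
    #   #!/usr/bin/env python3
    #   import json, sys
    #   config = json.load(open(sys.argv[1]))
    #   config.setdefault('newKey', 'default')  # example transformation
    #   print(json.dumps(config))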

    @periodic(interval=86400)
    @private
    async def periodic_chart_releases_update_checks(self):
        # We only want to sync catalogs in the following cases:
        # 1) User explicitly visits apps section
        # 2) User requests it
        # 3) Has configured Apps
        # 4) Has catalogs configured
        if not await self.middleware.call(
                'catalog.query', [['builtin', '=', False]]
        ) and not (await self.middleware.call('kubernetes.config'))['dataset']:
            return

        sync_job = await self.middleware.call('catalog.sync_all')
        await sync_job.wait()
        if not await self.middleware.call('service.started', 'kubernetes'):
            return

        await self.chart_releases_update_checks_internal()

    @private
    async def chart_releases_update_checks_internal(self, chart_releases_filters=None):
        chart_releases_filters = chart_releases_filters or []
        # Chart release wrt alerts will be considered valid for upgrade/update if either there's a newer
        # catalog item version available or any of the images it's using is outdated

        catalog_items = {
            f'{c["id"]}_{train}_{item}': c['trains'][train][item]
            for c in await self.middleware.call(
                'catalog.query', [], {'extra': {
                    'item_details': True
                }}) for train in c['trains'] for item in c['trains'][train]
        }
        for application in await self.middleware.call('chart.release.query',
                                                      chart_releases_filters):
            if application['container_images_update_available']:
                await self.middleware.call('alert.oneshot_create',
                                           'ChartReleaseUpdate', application)
                continue

            app_id = f'{application["catalog"]}_{application["catalog_train"]}_{application["chart_metadata"]["name"]}'
            catalog_item = catalog_items.get(app_id)
            if not catalog_item:
                continue

            await self.chart_release_update_check(catalog_item, application)

        container_config = await self.middleware.call('container.config')
        if container_config['enable_image_updates']:
            asyncio.ensure_future(
                self.middleware.call('container.image.check_update'))

    @private
    async def chart_release_update_check(self, catalog_item, application):
        latest_version = catalog_item['latest_version']
        if not latest_version:
            return

        if parse_version(latest_version) > parse_version(
                application['chart_metadata']['version']):
            await self.middleware.call('alert.oneshot_create',
                                       'ChartReleaseUpdate', application)
        else:
            await self.middleware.call('alert.oneshot_delete',
                                       'ChartReleaseUpdate', application['id'])

    @accepts(Str('release_name'),
             Dict(
                 'pull_container_images_options',
                 Bool('redeploy', default=True),
             ))
    @returns(
        Dict(
            'container_images',
            additional_attrs=True,
            description=
            'Dictionary of container image(s) with container image tag as key and update status as value',
            example={
                'plexinc/pms-docker:1.23.2.4656-85f0adf5b': 'Updated image',
            }))
    @job(lock=lambda args: f'pull_container_images{args[0]}')
    async def pull_container_images(self, job, release_name, options):
        """
        Update container images being used by `release_name` chart release.

        When `redeploy` is set, pods are redeployed so that the chart release uses the newly updated versions of
        the container images.
        """
        await self.middleware.call('kubernetes.validate_k8s_setup')
        images = [{
            'orig_tag': tag,
            'from_image': tag.rsplit(':', 1)[0],
            'tag': tag.rsplit(':', 1)[-1]
        }
                  for tag in (await self.middleware.call(
                      'chart.release.query', [['id', '=', release_name]], {
                          'extra': {
                              'retrieve_resources': True
                          },
                          'get': True
                      }))['resources']['container_images']]
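        # e.g. a hypothetical tag 'plexinc/pms-docker:1.23.2' would split into
        # from_image='plexinc/pms-docker' and tag='1.23.2'.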
        results = {}

        bulk_job = await self.middleware.call(
            'core.bulk', 'container.image.pull',
            [[{
                'from_image': image['from_image'],
                'tag': image['tag']
            }] for image in images])
        await bulk_job.wait()
        if bulk_job.error:
            raise CallError(
                f'Failed to update container images for {release_name!r} chart release: {bulk_job.error}'
            )

        for tag, status in zip(images, bulk_job.result):
            if status['error']:
                results[tag['orig_tag']] = f'Failed to pull image: {status["error"]}'
            else:
                results[tag['orig_tag']] = 'Updated image'

        if options['redeploy']:
            await job.wrap(await self.middleware.call('chart.release.redeploy',
                                                      release_name))

        return results

    @private
    async def clear_update_alerts_for_all_chart_releases(self):
        for chart_release in await self.middleware.call('chart.release.query'):
            await self.middleware.call('alert.oneshot_delete',
                                       'ChartReleaseUpdate',
                                       chart_release['id'])
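
A rough usage sketch for the upgrade flow above, assuming the standard middlewared websocket client and a hypothetical release named 'plex':

from middlewared.client import Client

with Client() as c:
    summary = c.call('chart.release.upgrade_summary', 'plex', {'item_version': 'latest'})
    if summary['item_update_available'] or summary['image_update_available']:
        # upgrade() is a job; job=True is assumed to block until the job completes.
        c.call('chart.release.upgrade', 'plex', {'item_version': 'latest'}, job=True)
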
Example #25
class MailService(ConfigService):
    class Config:
        datastore = 'system.email'
        datastore_prefix = 'em_'
        datastore_extend = 'mail.mail_extend'

    @private
    async def mail_extend(self, cfg):
        if cfg['security']:
            cfg['security'] = cfg['security'].upper()
        return cfg

    @accepts(
        Dict(
            'mail_update',
            Str('fromemail'),
            Str('outgoingserver'),
            Int('port'),
            Str('security', enum=['PLAIN', 'SSL', 'TLS']),
            Bool('smtp'),
            Str('user'),
            Str('pass'),
        ))
    async def do_update(self, data):
        config = await self.config()

        new = config.copy()
        new.update(data)
        new['security'] = new['security'].lower()  # Django Model compatibility

        verrors = ValidationErrors()

        if new['smtp'] and new['user'] == '':
            verrors.add(
                'mail_update.user',
                'This field is required when SMTP authentication is enabled')

        if verrors:
            raise verrors

        await self.middleware.call('datastore.update', 'system.email',
                                   config['id'], new, {'prefix': 'em_'})
        return config

    @accepts(
        Dict(
            'mail-message',
            Str('subject'),
            Str('text', required=True),
            List('to', items=[Str('email')]),
            Int('interval'),
            Str('channel'),
            Int('timeout', default=300),
            Bool('attachments', default=False),
            Bool('queue', default=True),
            Dict('extra_headers', additional_attrs=True),
        ), Dict('mailconfig', additional_attrs=True))
    @job(pipe=True)
    def send(self, job, message, config=None):
        """
        Sends mail using configured mail settings.

        If `attachments` is true, a list composed of dicts in the following format is required
        via HTTP upload:
          - headers(list)
            - name(str)
            - value(str)
            - params(dict)
          - content (str)

        [
         {
          "headers": [
           {
            "name": "Content-Transfer-Encoding",
            "value": "base64"
           },
           {
            "name": "Content-Type",
            "value": "application/octet-stream",
            "params": {
             "name": "test.txt"
            }
           }
          ],
          "content": "dGVzdAo="
         }
        ]
        """

        syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_MAIL)
        interval = message.get('interval')
        if interval is None:
            interval = timedelta()
        else:
            interval = timedelta(seconds=interval)

        sw_name = self.middleware.call_sync('system.info')['version'].split(
            '-', 1)[0]

        channel = message.get('channel')
        if not channel:
            channel = sw_name.lower()
        if interval > timedelta():
            channelfile = '/tmp/.msg.%s' % (channel)
            last_update = datetime.now() - interval
            try:
                last_update = datetime.fromtimestamp(
                    os.stat(channelfile).st_mtime)
            except OSError:
                pass
            timediff = datetime.now() - last_update
            if (timediff >= interval) or (timediff < timedelta()):
                # Make sure mtime is modified
                # We could use os.utime but this is simpler!
                with open(channelfile, 'w') as f:
                    f.write('!')
            else:
                raise CallError(
                    'This message was already sent in the given interval')

        if not config:
            config = self.middleware.call_sync('mail.config')
        to = message.get('to')
        if not to:
            to = [
                self.middleware.call_sync('user.query',
                                          [('username', '=', 'root')],
                                          {'get': True})['email']
            ]
            if not to[0]:
                raise CallError('Email address for root is not configured')

        def read_json():
            f = os.fdopen(job.read_fd, 'rb')
            data = b''
            i = 0
            while True:
                read = f.read(1048576)  # 1MiB
                if read == b'':
                    break
                data += read
                i += 1
                if i > 50:
                    raise ValueError(
                        'Attachments bigger than 50MB not allowed yet')
            if data == b'':
                return None
            return json.loads(data)

        attachments = read_json() if message.get('attachments') else None
        if attachments:
            msg = MIMEMultipart()
            msg.preamble = message['text']
            for attachment in attachments:
                m = Message()
                m.set_payload(attachment['content'])
                for header in attachment.get('headers'):
                    m.add_header(header['name'], header['value'],
                                 **(header.get('params') or {}))
                msg.attach(m)
        else:
            msg = MIMEText(message['text'], _charset='utf-8')

        subject = message.get('subject')
        if subject:
            msg['Subject'] = subject

        msg['From'] = config['fromemail']
        msg['To'] = ', '.join(to)
        msg['Date'] = formatdate()

        local_hostname = socket.gethostname()

        msg['Message-ID'] = "<%s-%s.%s@%s>" % (
            sw_name.lower(), datetime.utcnow().strftime("%Y%m%d.%H%M%S.%f"),
            base64.urlsafe_b64encode(os.urandom(3)), local_hostname)

        extra_headers = message.get('extra_headers') or {}
        for key, val in list(extra_headers.items()):
            if key in msg:
                msg.replace_header(key, val)
            else:
                msg[key] = val

        try:
            server = self._get_smtp_server(config,
                                           message['timeout'],
                                           local_hostname=local_hostname)
            # NOTE: Don't do this.
            #
            # If smtplib.SMTP* tells you to run connect() first, it's because the
            # mailserver it tried connecting to via the outgoing server argument
            # was unreachable and it tried to connect to 'localhost' and barfed.
            # This is because FreeNAS doesn't run a full MTA.
            # else:
            #    server.connect()
            syslog.syslog("sending mail to " + ','.join(to) +
                          msg.as_string()[0:140])
            server.sendmail(config['fromemail'], to, msg.as_string())
            server.quit()
        except ValueError as ve:
            # Don't spam syslog with these messages. They should only end up in the
            # test-email pane.
            raise CallError(str(ve))
        except smtplib.SMTPAuthenticationError as e:
            raise CallError(
                f'Authentication error ({e.smtp_code}): {e.smtp_error}',
                errno.EAUTH)
        except Exception as e:
            self.logger.warn('Failed to send email: %s', str(e), exc_info=True)
            if message['queue']:
                with MailQueue() as mq:
                    mq.append(msg)
            raise CallError(f'Failed to send email: {e}')
        return True

    def _get_smtp_server(self, config, timeout=300, local_hostname=None):
        if local_hostname is None:
            local_hostname = socket.gethostname()

        if not config['outgoingserver'] or not config['port']:
            # See NOTE below.
            raise ValueError('you must provide an outgoing mailserver and mail'
                             ' server port when sending mail')
        if config['security'] == 'SSL':
            server = smtplib.SMTP_SSL(config['outgoingserver'],
                                      config['port'],
                                      timeout=timeout,
                                      local_hostname=local_hostname)
        else:
            server = smtplib.SMTP(config['outgoingserver'],
                                  config['port'],
                                  timeout=timeout,
                                  local_hostname=local_hostname)
            if config['security'] == 'TLS':
                server.starttls()
        if config['smtp']:
            server.login(config['user'], config['pass'])
        return server

    @periodic(600, run_on_start=False)
    @private
    def send_mail_queue(self):

        with MailQueue() as mq:
            for queue in list(mq.queue):
                try:
                    config = self.middleware.call_sync('mail.config')
                    server = self._get_smtp_server(config)
                    server.sendmail(queue.message['From'],
                                    queue.message['To'].split(', '),
                                    queue.message.as_string())
                    server.quit()
                except Exception:
                    self.logger.debug('Sending message from queue failed',
                                      exc_info=True)
                    queue.attempts += 1
                    if queue.attempts >= mq.MAX_ATTEMPTS:
                        mq.queue.remove(queue)
                else:
                    mq.queue.remove(queue)
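
A minimal, illustrative `mail-message` payload matching the accepts() schema of send() above; addresses and text are placeholders, and attachments are omitted so no upload pipe is involved:

message = {
    'subject': 'Test message',
    'text': 'Plain-text body',
    'to': ['admin@example.com'],  # when omitted, the root account's email address is used
    'queue': True,                # failed sends are retried later by send_mail_queue()
}
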
Example #26
class AlertServiceService(CRUDService):
    class Config:
        datastore = "system.alertservice"
        datastore_extend = "alertservice._extend"
        datastore_order_by = ["name"]

    @accepts()
    async def list_types(self):
        return [{
            "name": name,
            "title": factory.title,
        } for name, factory in sorted(ALERT_SERVICES_FACTORIES.items(),
                                      key=lambda i: i[1].title.lower())]

    @private
    async def _extend(self, service):
        try:
            service["type__title"] = ALERT_SERVICES_FACTORIES[
                service["type"]].title
        except KeyError:
            service["type__title"] = "<Unknown>"

        return service

    @private
    async def _compress(self, service):
        return service

    @private
    async def _validate(self, service, schema_name):
        verrors = ValidationErrors()

        factory = ALERT_SERVICES_FACTORIES.get(service["type"])
        if factory is None:
            verrors.add(f"{schema_name}.type", "This field has invalid value")

        try:
            factory.validate(service["attributes"])
        except ValidationErrors as e:
            verrors.add_child(f"{schema_name}.attributes", e)

        validate_settings(verrors, f"{schema_name}.settings",
                          service["settings"])

        if verrors:
            raise verrors

    @accepts(
        Dict(
            "alert_service_create",
            Str("name"),
            Str("type"),
            Dict("attributes", additional_attrs=True),
            Bool("enabled"),
            Dict("settings", additional_attrs=True),
            register=True,
        ))
    async def do_create(self, data):
        await self._validate(data, "alert_service_create")

        data["id"] = await self.middleware.call("datastore.insert",
                                                self._config.datastore, data)

        await self._extend(data)

        return data

    @accepts(Int("id"),
             Patch(
                 "alert_service_create",
                 "alert_service_update",
                 ("attr", {
                     "update": True
                 }),
             ))
    async def do_update(self, id, data):
        old = await self.middleware.call(
            "datastore.query", self._config.datastore, [("id", "=", id)], {
                "extend": self._config.datastore_extend,
                "get": True
            })

        new = old.copy()
        new.update(data)

        await self._validate(data, "alert_service_update")

        await self._compress(data)

        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, data)

        await self._extend(new)

        return new

    @accepts(Int("id"))
    async def do_delete(self, id):
        return await self.middleware.call("datastore.delete",
                                          self._config.datastore, id)

    @accepts(
        Patch(
            "alert_service_create",
            "alert_service_test",
            ("attr", {
                "update": True
            }),
        ))
    async def test(self, data):
        await self._validate(data, "alert_service_test")

        factory = ALERT_SERVICES_FACTORIES.get(data["type"])
        if factory is None:
            self.logger.error("Alert service %r does not exist", data["type"])
            return False

        try:
            alert_service = factory(self.middleware, data["attributes"])
        except Exception:
            self.logger.error(
                "Error creating alert service %r with parameters=%r",
                data["type"],
                data["attributes"],
                exc_info=True)
            return False

        test_alert = Alert(
            title="Test alert",
            node="A",
            datetime=datetime.utcnow(),
            level=AlertLevel.INFO,
        )

        try:
            await alert_service.send([test_alert], [], [test_alert])
        except Exception:
            self.logger.error("Error in alert service %r",
                              data["type"],
                              exc_info=True)
            return False

        return True
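
An illustrative `alert_service_create` payload for the service above; the type name and attribute keys are hypothetical and must match a factory registered in ALERT_SERVICES_FACTORIES:

service = {
    'name': 'Ops email',
    'type': 'Mail',                              # must be a key of ALERT_SERVICES_FACTORIES
    'attributes': {'email': 'ops@example.com'},  # validated by the factory's validate()
    'enabled': True,
    'settings': {},                              # checked by validate_settings()
}
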
Example #27
class SystemDatasetService(ConfigService):
    class Config:
        datastore = 'system.systemdataset'
        datastore_extend = 'systemdataset.config_extend'
        datastore_prefix = 'sys_'

    @private
    async def config_extend(self, config):

        # Add `is_decrypted` dynamic attribute
        if config['pool'] == 'freenas-boot':
            config['is_decrypted'] = True
        else:
            pool = await self.middleware.call('pool.query',
                                              [('name', '=', config['pool'])])
            if pool:
                config['is_decrypted'] = pool[0]['is_decrypted']
            else:
                config['is_decrypted'] = False

        if config['is_decrypted']:
            config['basename'] = f'{config["pool"]}/.system'
        else:
            config['basename'] = None

        # Make `uuid` point to the uuid of current node
        config['uuid_a'] = config['uuid']
        if not await self.middleware.call('system.is_freenas'):
            if await self.middleware.call('failover.node') == 'B':
                config['uuid'] = config['uuid_b']

        if not config['uuid']:
            config['uuid'] = uuid.uuid4().hex
            if (not await self.middleware.call('system.is_freenas')
                    and await self.middleware.call('failover.node') == 'B'):
                attr = 'uuid_b'
                config[attr] = config['uuid']
            else:
                attr = 'uuid'
            await self.middleware.call('datastore.update',
                                       'system.systemdataset', config['id'],
                                       {f'sys_{attr}': config['uuid']})

        config['syslog'] = config.pop('syslog_usedataset')

        if not os.path.exists(SYSDATASET_PATH) or not os.path.ismount(
                SYSDATASET_PATH):
            config['path'] = None
        else:
            config['path'] = SYSDATASET_PATH

        return config

    @accepts(
        Dict('sysdataset_update',
             Str('pool', null=True),
             Str('pool_exclude', null=True),
             Bool('syslog'),
             update=True))
    @job(lock='sysdataset_update')
    async def do_update(self, job, data):
        """
        Update System Dataset Service Configuration.

        `pool` is the name of a valid pool configured in the system which will be used to host the system dataset.

        `pool_exclude` can be specified to make sure that we don't place the system dataset on that pool if `pool`
        is not provided.
        """
        config = await self.config()

        new = config.copy()
        new.update(data)

        verrors = ValidationErrors()
        if new['pool'] and new['pool'] != 'freenas-boot':
            pool = await self.middleware.call('pool.query',
                                              [['name', '=', new['pool']]])
            if not pool:
                verrors.add('sysdataset_update.pool',
                            f'Pool "{new["pool"]}" not found', errno.ENOENT)
            elif pool[0]['encrypt'] == 2:
                # This will cover two cases - passphrase being set for a pool and that it might be locked as well
                verrors.add(
                    'sysdataset_update.pool',
                    f'Pool "{new["pool"]}" has an encryption passphrase set. '
                    'The system dataset cannot be placed on this pool.')
        elif not new['pool']:
            for pool in await self.middleware.call('pool.query',
                                                   [['encrypt', '!=', 2]]):
                if data.get('pool_exclude') == pool['name']:
                    continue
                new['pool'] = pool['name']
                break
            else:
                new['pool'] = 'freenas-boot'
        verrors.check()

        new['syslog_usedataset'] = new['syslog']

        update_dict = new.copy()
        for key in ('is_decrypted', 'basename', 'uuid_a', 'syslog', 'path',
                    'pool_exclude'):
            update_dict.pop(key, None)

        await self.middleware.call('datastore.update', 'system.systemdataset',
                                   config['id'], update_dict,
                                   {'prefix': 'sys_'})

        if config['pool'] != new['pool']:
            await self.migrate(config['pool'], new['pool'])

        await self.setup()

        if config['syslog'] != new['syslog']:
            await self.middleware.call('service.restart', 'syslogd')

        return await self.config()

    @accepts(Bool('mount', default=True),
             Str('exclude_pool', default=None, null=True))
    @private
    async def setup(self, mount, exclude_pool=None):
        # Set the default kern.corefile value
        await run('sysctl', "kern.corefile='/var/tmp/%N.core'")

        config = await self.config()

        if not await self.middleware.call('system.is_freenas'):
            if await self.middleware.call('failover.status') == 'BACKUP' and \
                    ('basename' in config and config['basename'] and config['basename'] != 'freenas-boot/.system'):
                try:
                    os.unlink(SYSDATASET_PATH)
                except OSError:
                    pass
                return

        if config['pool'] and config['pool'] != 'freenas-boot':
            if not await self.middleware.call('pool.query',
                                              [('name', '=', config['pool'])]):
                job = await self.middleware.call('systemdataset.update', {
                    'pool': None,
                    'pool_exclude': exclude_pool,
                })
                await job.wait()
                if job.error:
                    raise CallError(job.error)
                config = await self.config()

        if not config['pool'] and not await self.middleware.call(
                'system.is_freenas'):
            job = await self.middleware.call('systemdataset.update',
                                             {'pool': 'freenas-boot'})
            await job.wait()
            if job.error:
                raise CallError(job.error)
            config = await self.config()
        elif not config['pool']:
            pool = None
            for p in await self.middleware.call('pool.query', [],
                                                {'order_by': ['encrypt']}):
                if exclude_pool and p['name'] == exclude_pool:
                    continue
                if p['is_decrypted']:
                    pool = p
                    break
            if pool:
                job = await self.middleware.call('systemdataset.update',
                                                 {'pool': pool['name']})
                await job.wait()
                if job.error:
                    raise CallError(job.error)
                config = await self.config()

        if not config['basename']:
            if os.path.exists(SYSDATASET_PATH):
                try:
                    os.rmdir(SYSDATASET_PATH)
                except Exception:
                    self.logger.debug('Failed to remove system dataset dir',
                                      exc_info=True)
            return config

        if not config['is_decrypted']:
            return

        if await self.__setup_datasets(config['pool'], config['uuid']):
            # There is no need to wait for this to finish
            # Restarting rrdcached will ensure that we start/restart collectd as well
            asyncio.ensure_future(
                self.middleware.call('service.restart', 'rrdcached'))

        if not os.path.isdir(SYSDATASET_PATH):
            if os.path.exists(SYSDATASET_PATH):
                os.unlink(SYSDATASET_PATH)
            os.mkdir(SYSDATASET_PATH)

        aclmode = await self.middleware.call('zfs.dataset.query',
                                             [('id', '=', config['basename'])])
        if aclmode and aclmode[0]['properties']['aclmode']['value'] == 'restricted':
            await self.middleware.call(
                'zfs.dataset.update',
                config['basename'],
                {'properties': {
                    'aclmode': {
                        'value': 'passthrough'
                    }
                }},
            )

        if mount:

            await self.__mount(config['pool'], config['uuid'])

            corepath = f'{SYSDATASET_PATH}/cores'
            if os.path.exists(corepath):
                # FIXME: sysctl module not working
                await run('sysctl', f"kern.corefile='{corepath}/%N.core'")
                os.chmod(corepath, 0o775)

            await self.__nfsv4link(config)

        return config

    async def __setup_datasets(self, pool, uuid):
        """
        Make sure system datasets for `pool` exist and have the right mountpoint property
        """
        createdds = False
        datasets = [i[0] for i in self.__get_datasets(pool, uuid)]
        datasets_prop = {
            i['id']: i['properties'].get('mountpoint')
            for i in await self.middleware.call('zfs.dataset.query', [(
                'id', 'in', datasets)])
        }
        for dataset in datasets:
            mountpoint = datasets_prop.get(dataset)
            if mountpoint and mountpoint['value'] != 'legacy':
                await self.middleware.call(
                    'zfs.dataset.update',
                    dataset,
                    {'properties': {
                        'mountpoint': {
                            'value': 'legacy'
                        }
                    }},
                )
            elif not mountpoint:
                await self.middleware.call('zfs.dataset.create', {
                    'name': dataset,
                    'properties': {
                        'mountpoint': 'legacy'
                    },
                })
                createdds = True
        return createdds

    async def __mount(self, pool, uuid, path=SYSDATASET_PATH):
        for dataset, name in self.__get_datasets(pool, uuid):
            if name:
                mountpoint = f'{path}/{name}'
            else:
                mountpoint = path
            if os.path.ismount(mountpoint):
                continue
            if not os.path.isdir(mountpoint):
                os.mkdir(mountpoint)
            await run('mount', '-t', 'zfs', dataset, mountpoint, check=True)

    async def __umount(self, pool, uuid):
        for dataset, name in reversed(self.__get_datasets(pool, uuid)):
            await run('umount', '-f', dataset, check=False)

    def __get_datasets(self, pool, uuid):
        return [(f'{pool}/.system', '')] + [(f'{pool}/.system/{i}', i)
                                            for i in [
                                                'cores',
                                                'samba4',
                                                f'syslog-{uuid}',
                                                f'rrd-{uuid}',
                                                f'configs-{uuid}',
                                                'webui',
                                            ]]

    async def __nfsv4link(self, config):
        syspath = config['path']
        if not syspath:
            return None

        restartfiles = [
            "/var/db/nfs-stablerestart", "/var/db/nfs-stablerestart.bak"
        ]
        if not await self.middleware.call('system.is_freenas'
                                          ) and await self.middleware.call(
                                              'failover.status') == 'BACKUP':
            return None

        for item in restartfiles:
            if os.path.exists(item):
                if os.path.isfile(item) and not os.path.islink(item):
                    # It's an honest-to-goodness file; this shouldn't ever happen... but
                    path = os.path.join(syspath, os.path.basename(item))
                    if not os.path.isfile(path):
                        # there's no file in the system dataset, so copy over what we have
                        # being careful to nuke anything that is there that happens to
                        # have the same name.
                        if os.path.exists(path):
                            shutil.rmtree(path)
                        shutil.copy(item, path)
                    # Nuke the original file and create a symlink to it
                    # We don't need to worry about creating the file on the system dataset
                    # because it's either been copied over, or was already there.
                    os.unlink(item)
                    os.symlink(path, item)
                elif os.path.isdir(item):
                    # Pathological case that should never happen
                    shutil.rmtree(item)
                    self.__createlink(syspath, item)
                else:
                    if not os.path.exists(os.readlink(item)):
                        # Dead symlink or some other nastiness.
                        shutil.rmtree(item)
                        self.__createlink(syspath, item)
            else:
                # We can get here if item is a dead symlink
                if os.path.islink(item):
                    os.unlink(item)
                self.__createlink(syspath, item)

    def __createlink(self, syspath, item):
        path = os.path.join(syspath, os.path.basename(item))
        if not os.path.isfile(path):
            if os.path.exists(path):
                # There's something here but it's not a file.
                shutil.rmtree(path)
            open(path, 'w').close()
        os.symlink(path, item)

    @private
    async def migrate(self, _from, _to):

        config = await self.config()

        await self.__setup_datasets(_to, config['uuid'])

        if _from:
            path = '/tmp/system.new'
            if not os.path.exists('/tmp/system.new'):
                os.mkdir('/tmp/system.new')
        else:
            path = SYSDATASET_PATH
        await self.__mount(_to, config['uuid'], path=path)

        restart = ['collectd', 'rrdcached', 'syslogd']

        if await self.middleware.call('service.started', 'cifs'):
            restart.insert(0, 'cifs')

        try:
            for i in restart:
                await self.middleware.call('service.stop', i)

            if _from:
                cp = await run('rsync',
                               '-az',
                               f'{SYSDATASET_PATH}/',
                               '/tmp/system.new',
                               check=False)
                if cp.returncode == 0:
                    await self.__umount(_from, config['uuid'])
                    await self.__umount(_to, config['uuid'])
                    await self.__mount(_to, config['uuid'], SYSDATASET_PATH)
                    proc = await Popen(
                        f'zfs list -H -o name {_from}/.system|xargs zfs destroy -r',
                        shell=True)
                    await proc.communicate()

                os.rmdir('/tmp/system.new')
        finally:

            restart.reverse()
            for i in restart:
                await self.middleware.call('service.start', i)

        await self.__nfsv4link(config)
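
    # Usage sketch (assuming the service is registered under the
    # 'systemdataset' namespace, as the middleware calls above suggest):
    # moving the system dataset from one pool to another could look like:
    #
    #     await self.middleware.call('systemdataset.migrate', 'oldpool', 'newpool')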
Example #28
0
class JailService(CRUDService):
    class Config:
        process_pool = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # We want debug for jails starting/stopping
        os.environ['IOCAGE_DEBUG'] = 'TRUE'

    # FIXME: foreign schemas cannot be referenced when
    # using `process_pool`
    # @filterable
    @accepts(
        List('query-filters', default=[]),
        Dict('query-options', additional_attrs=True),
    )
    def query(self, filters=None, options=None):
        """
        Query all jails with `query-filters` and `query-options`.
        """
        options = options or {}
        jail_identifier = None
        jails = []

        if filters and len(filters) == 1 and list(
                filters[0][:2]) == ['host_hostuuid', '=']:
            jail_identifier = filters[0][2]

        recursive = jail_identifier != 'default'

        try:
            jail_dicts = ioc.IOCage(jail=jail_identifier).get(
                'all', recursive=recursive)

            if jail_identifier == 'default':
                jail_dicts['host_hostuuid'] = 'default'
                jails.append(jail_dicts)
            else:
                for jail in jail_dicts:
                    jail = list(jail.values())[0]
                    jail['id'] = jail['host_hostuuid']
                    if jail['dhcp'] == 'on':
                        uuid = jail['host_hostuuid']

                        if jail['state'] == 'up':
                            interface = jail['interfaces'].split(',')[0].split(
                                ':')[0]
                            if interface == 'vnet0':
                                # Inside jails they are epair0b
                                interface = 'epair0b'
                            ip4_cmd = [
                                'jexec', f'ioc-{uuid}', 'ifconfig', interface,
                                'inet'
                            ]
                            try:
                                out = su.check_output(ip4_cmd)
                                out = out.splitlines()[2].split()[1].decode()
                                jail['ip4_addr'] = f'{interface}|{out}'
                            except (su.CalledProcessError, IndexError):
                                jail['ip4_addr'] = f'{interface}|ERROR'
                        else:
                            jail['ip4_addr'] = 'DHCP (not running)'
                    jails.append(jail)
        except ioc_exceptions.JailMisconfigured as e:
            self.logger.error(e, exc_info=True)
        except BaseException:
            # Brandon is working on fixing this generic except, till then I
            # am not going to make the perfect the enemy of the good enough!
            self.logger.debug('Failed to get list of jails', exc_info=True)

        return filter_list(jails, filters, options)

    query._filterable = True
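
    # Usage sketch (hypothetical jail name): the documented filter format from
    # filter_list() applies, so a single jail can be fetched with:
    #
    #     jail = self.middleware.call_sync(
    #         'jail.query', [['id', '=', 'myjail']], {'get': True})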

    @accepts(
        Dict("options", Str("release", required=True), Str("template"),
             Str("pkglist"), Str("uuid", required=True),
             Bool("basejail", default=False), Bool("empty", default=False),
             Bool("short", default=False), List("props", default=[])))
    async def do_create(self, options):
        """Creates a jail."""
        # Typically one would return the created jail's id from this create
        # call, BUT jail creation may involve fetching a release, which can be
        # time consuming and block for a long time. That dictates making this
        # a job; however, CRUD methods are not jobs yet, so this method is a
        # thin wrapper around the main job and returns that job's id instead
        # of the created jail's id.

        return await self.middleware.call('jail.create_job', options)

    @private
    @job(lock=lambda args: f'jail_create:{args[-1]["uuid"]}')
    def create_job(self, job, options):
        verrors = ValidationErrors()
        uuid = options["uuid"]

        job.set_progress(0, f'Creating: {uuid}')

        try:
            self.check_jail_existence(uuid, skip=False)

            verrors.add('uuid', f'A jail with name {uuid} already exists')
            raise verrors
        except CallError:
            # A jail does not exist with the provided name, we can create one
            # now

            verrors = self.common_validation(verrors, options)

            if verrors:
                raise verrors

            job.set_progress(20, 'Initial validation complete')

        iocage = ioc.IOCage(skip_jails=True)

        release = options["release"]
        template = options.get("template", False)
        pkglist = options.get("pkglist", None)
        basejail = options["basejail"]
        empty = options["empty"]
        short = options["short"]
        props = options["props"]
        pool = IOCJson().json_get_value("pool")
        iocroot = IOCJson(pool).json_get_value("iocroot")

        if template:
            release = template

        if (not os.path.isdir(f'{iocroot}/releases/{release}') and not template
                and not empty):
            job.set_progress(50, f'{release} missing, calling fetch')
            self.middleware.call_sync('jail.fetch', {"release": release},
                                      job=True)

        err, msg = iocage.create(release,
                                 props,
                                 0,
                                 pkglist,
                                 template=template,
                                 short=short,
                                 _uuid=uuid,
                                 basejail=basejail,
                                 empty=empty)

        if err:
            raise CallError(msg)

        job.set_progress(100, f'Created: {uuid}')

        return True

    @private
    def validate_ips(self,
                     verrors,
                     options,
                     schema='options.props',
                     exclude=None):
        for item in options['props']:
            for f in ('ip4_addr', 'ip6_addr'):
                # valid ip values can be
                # "none" "interface|accept_rtadv" "interface|ip/subnet" "interface|ip"
                # we explicitly check these
                if f in item:
                    for ip in [
                            ip.split('|')[1].split('/')[0]
                            if '|' in ip else ip.split('/')[0]
                            for ip in item.split('=')[1].split(',')
                            if ip != 'none' and
                        (ip.count('|') and ip.split('|')[1] != 'accept_rtadv')
                    ]:
                        try:
                            IpInUse(self.middleware, exclude)(ip)
                        except ValueError as e:
                            verrors.add(f'{schema}.{f}', str(e))
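
    # Illustrative sketch (hypothetical addresses): a prop such as
    # 'ip4_addr=vnet0|192.168.0.10/24,vnet1|192.168.1.10' is reduced by the
    # comprehension above to the bare addresses ['192.168.0.10',
    # '192.168.1.10'], each of which is then checked with IpInUse.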

    @accepts(Str("jail"),
             Dict(
                 "options",
                 Bool("plugin", default=False),
                 additional_attrs=True,
             ))
    def do_update(self, jail, options):
        """Sets a jail property."""
        plugin = options.pop("plugin")
        _, _, iocage = self.check_jail_existence(jail)

        name = options.pop("name", None)

        verrors = ValidationErrors()

        jail = self.query([['id', '=', jail]], {'get': True})

        verrors = self.common_validation(verrors, options, True, jail)

        if name is not None and plugin:
            verrors.add('options.plugin',
                        'Cannot be true while trying to rename')

        if verrors:
            raise verrors

        for prop, val in options.items():
            p = f"{prop}={val}"

            try:
                iocage.set(p, plugin)
            except RuntimeError as err:
                raise CallError(err)

        if name:
            iocage.rename(name)

        return True
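
    # Usage sketch (hypothetical jail name and properties, assuming the usual
    # CRUDService mapping of do_update to 'jail.update'): every key/value pair
    # in `options` is applied as an iocage property:
    #
    #     self.middleware.call_sync('jail.update', 'myjail',
    #                               {'boot': 'on', 'vnet': 'on'})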

    @private
    def common_validation(self, verrors, options, update=False, jail=None):
        if not update:
            # Ensure that api call conforms to format set by iocage for props
            # Example 'key=value'

            for value in options['props']:
                if '=' not in value:
                    verrors.add(
                        'options.props',
                        'Please follow the format specified by iocage for api '
                        'calls, e.g. "key=value"')
                    break

            if verrors:
                raise verrors

            # normalise vnet mac address
            # expected format here is 'vnet0_mac=00-D0-56-F2-B5-12,00-D0-56-F2-B5-13'
            vnet_macs = {
                f.split('=')[0]: f.split('=')[1]
                for f in options['props']
                if any(f'vnet{i}_mac' in f.split('=')[0] for i in range(0, 4))
            }

            self.validate_ips(verrors, options)
        else:
            vnet_macs = {
                key: value
                for key, value in options.items()
                if any(f'vnet{i}_mac' in key for i in range(0, 4))
            }

            exclude_ips = [
                ip.split('|')[1].split('/')[0]
                if '|' in ip else ip.split('/')[0]
                for f in ('ip4_addr', 'ip6_addr') for ip in jail[f].split(',')
                if ip not in ('none', 'DHCP (not running)')
            ]

            self.validate_ips(
                verrors, {'props': [f'{k}={v}' for k, v in options.items()]},
                'options', exclude_ips)

        # validate vnetX_mac addresses
        for key, value in vnet_macs.items():
            if value and value != 'none':
                value = value.replace(',', ' ')
                try:
                    for mac in value.split():
                        MACAddr()(mac)

                    if (len(value.split()) != 2
                            or any(value.split().count(v) > 1
                                   for v in value.split())):
                        raise ValueError('Exception')
                except ValueError:
                    verrors.add(
                        key, 'Please enter two valid and different '
                        f'space/comma-delimited MAC addresses for {key}.')

        return verrors
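
    # Illustrative sketch (hypothetical MACs): the vnetX_mac check above
    # expects exactly two different addresses, e.g.
    # 'vnet0_mac=00-D0-56-F2-B5-12,00-D0-56-F2-B5-13' passes, while a single
    # MAC or two identical MACs is rejected with a validation error.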

    @accepts(Str("jail"))
    def do_delete(self, jail):
        """Takes a jail and destroys it."""
        _, _, iocage = self.check_jail_existence(jail)

        # TODO: Port children checking, release destroying.
        iocage.destroy_jail()

        return True

    @private
    def check_dataset_existence(self):
        try:
            IOCCheck()
        except ioc_exceptions.PoolNotActivated as e:
            raise CallError(e, errno=errno.ENOENT)

    @private
    def check_jail_existence(self, jail, skip=True, callback=None):
        """Wrapper for iocage's API, as a few commands aren't ported to it"""
        try:
            if callback is not None:
                iocage = ioc.IOCage(callback=callback,
                                    skip_jails=skip,
                                    jail=jail)
            else:
                iocage = ioc.IOCage(skip_jails=skip, jail=jail)
            jail, path = iocage.__check_jail_existence__()
        except (SystemExit, RuntimeError):
            raise CallError(f"jail '{jail}' not found!")

        return jail, path, iocage

    @accepts()
    def get_activated_pool(self):
        """Returns the activated pool if there is one, or None"""
        try:
            pool = ioc.IOCage(skip_jails=True).get('', pool=True)
        except RuntimeError as e:
            raise CallError(f'Error occurred getting activated pool: {e}')
        except (ioc_exceptions.PoolNotActivated, FileNotFoundError):
            self.check_dataset_existence()

            try:
                pool = ioc.IOCage(skip_jails=True).get('', pool=True)
            except ioc_exceptions.PoolNotActivated:
                pool = None

        return pool

    @accepts(
        Dict(
            'options', Str('release'),
            Str('server', default='download.freebsd.org'),
            Str('user', default='anonymous'),
            Str('password', default='anonymous@'),
            Str('name', default=None, null=True), Bool('accept', default=True),
            Bool('https', default=True), List('props', default=[]),
            List('files',
                 default=['MANIFEST', 'base.txz', 'lib32.txz', 'doc.txz']),
            Str('branch', default=None, null=True)))
    @job(lock=lambda args: f"jail_fetch:{args[-1]}")
    def fetch(self, job, options):
        """Fetches a release or plugin."""
        fetch_output = {'install_notes': []}
        release = options.get('release', None)
        https = options.pop('https', False)

        post_install = False

        verrors = ValidationErrors()

        self.validate_ips(verrors, options)

        if verrors:
            raise verrors

        def progress_callback(content, exception):
            msg = content['message'].strip('\r\n')
            rel_up = f'* Updating {release} to the latest patch level... '
            nonlocal post_install

            if options['name'] is None:
                if 'Downloading : base.txz' in msg and '100%' in msg:
                    job.set_progress(5, msg)
                elif 'Downloading : lib32.txz' in msg and '100%' in msg:
                    job.set_progress(10, msg)
                elif 'Downloading : doc.txz' in msg and '100%' in msg:
                    job.set_progress(15, msg)
                elif 'Downloading : src.txz' in msg and '100%' in msg:
                    job.set_progress(20, msg)
                if 'Extracting: base.txz' in msg:
                    job.set_progress(25, msg)
                elif 'Extracting: lib32.txz' in msg:
                    job.set_progress(50, msg)
                elif 'Extracting: doc.txz' in msg:
                    job.set_progress(75, msg)
                elif 'Extracting: src.txz' in msg:
                    job.set_progress(90, msg)
                elif rel_up in msg:
                    job.set_progress(95, msg)
                else:
                    job.set_progress(None, msg)
            else:
                if post_install:
                    for split_msg in msg.split('\n'):
                        fetch_output['install_notes'].append(split_msg)

                if '  These pkgs will be installed:' in msg:
                    job.set_progress(50, msg)
                elif 'Installing plugin packages:' in msg:
                    job.set_progress(75, msg)
                elif 'Running post_install.sh' in msg:
                    job.set_progress(90, msg)
                    # Sets each message going forward as important to the user
                    post_install = True
                else:
                    job.set_progress(None, msg)

        self.check_dataset_existence()  # Make sure our datasets exist.
        start_msg = f'{release} being fetched'
        final_msg = f'{release} fetched'

        iocage = ioc.IOCage(callback=progress_callback, silent=False)

        if options["name"] is not None:
            pool = IOCJson().json_get_value('pool')
            iocroot = IOCJson(pool).json_get_value('iocroot')

            options["plugin_file"] = True
            start_msg = 'Starting plugin install'
            final_msg = f"Plugin: {options['name']} installed"
        elif options['name'] is None and https:
            if 'https' not in options['server']:
                options['server'] = f'https://{options["server"]}'

        options["accept"] = True

        job.set_progress(0, start_msg)
        iocage.fetch(**options)

        if post_install and options['name'] is not None:
            plugin_manifest = pathlib.Path(
                f'{iocroot}/.plugin_index/{options["name"]}.json')
            plugin_json = json.loads(plugin_manifest.read_text())
            schema_version = plugin_json.get('plugin_schema', '1')

            if schema_version.isdigit() and int(schema_version) >= 2:
                plugin_output = pathlib.Path(
                    f'{iocroot}/jails/{options["name"]}/root/root/PLUGIN_INFO')

                if plugin_output.is_file():
                    # Otherwise it will be the verbose output from the
                    # post_install script
                    fetch_output['install_notes'] = [
                        x for x in plugin_output.read_text().split('\n') if x
                    ]

                    # This is to get the admin URL and such
                    fetch_output['install_notes'] += job.progress[
                        'description'].split('\n')

        job.set_progress(100, final_msg)

        return fetch_output
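
    # Usage sketch (hypothetical release and plugin names): fetching a plain
    # release versus a plugin; `name` selects a plugin from the index and both
    # go through the same job:
    #
    #     self.middleware.call_sync('jail.fetch', {'release': '11.2-RELEASE'},
    #                               job=True)
    #     self.middleware.call_sync('jail.fetch', {'name': 'plexmediaserver',
    #                                              'release': '11.2-RELEASE'},
    #                               job=True)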

    @accepts(Str("resource", enum=["RELEASE", "TEMPLATE", "PLUGIN"]),
             Bool("remote", default=False))
    def list_resource(self, resource, remote):
        """Returns a JSON list of the supplied resource on the host"""
        self.check_dataset_existence()  # Make sure our datasets exist.
        iocage = ioc.IOCage(skip_jails=True)
        resource = "base" if resource == "RELEASE" else resource.lower()

        if resource == "plugin":
            if remote:
                try:
                    resource_list = self.middleware.call_sync(
                        'cache.get', 'iocage_remote_plugins')

                    return resource_list
                except KeyError:
                    pass

                resource_list = iocage.fetch(list=True,
                                             plugins=True,
                                             header=False)
            else:
                resource_list = iocage.list("all", plugin=True)
                pool = IOCJson().json_get_value("pool")
                iocroot = IOCJson(pool).json_get_value("iocroot")
                index_path = f'{iocroot}/.plugin_index/INDEX'

                if not pathlib.Path(index_path).is_file():
                    index_json = None

                    for plugin in resource_list:
                        plugin += ['N/A', 'N/A']

                    return resource_list
                else:
                    index_fd = open(index_path, 'r')
                    index_json = json.load(index_fd)

            for plugin in resource_list:
                for i, elem in enumerate(plugin):
                    # iocage returns - for None
                    plugin[i] = elem if elem != "-" else None

                if remote:
                    pv = self.get_plugin_version(plugin[2])
                else:
                    pv = self.get_local_plugin_version(plugin[1], index_json,
                                                       iocroot)

                resource_list[resource_list.index(plugin)] = plugin + pv

            if remote:
                self.middleware.call_sync('cache.put', 'iocage_remote_plugins',
                                          resource_list, 86400)
            else:
                index_fd.close()
        elif resource == "base":
            try:
                if remote:
                    resource_list = self.middleware.call_sync(
                        'cache.get', 'iocage_remote_releases')

                    return resource_list
            except KeyError:
                pass

            resource_list = iocage.fetch(list=True, remote=remote, http=True)

            if remote:
                self.middleware.call_sync('cache.put',
                                          'iocage_remote_releases',
                                          resource_list, 86400)
        else:
            resource_list = iocage.list(resource)

        return resource_list

    @accepts(Str("action", enum=["START", "STOP", "RESTART"]))
    def rc_action(self, action):
        """Does specified action on rc enabled (boot=on) jails"""
        iocage = ioc.IOCage(rc=True)

        try:
            if action == "START":
                iocage.start()
            elif action == "STOP":
                iocage.stop()
            else:
                iocage.restart()
        except BaseException as e:
            raise CallError(str(e))

        return True

    @accepts(Str('jail'))
    @job(lock=lambda args: f'jail_start:{args[-1]}')
    def start(self, job, jail):
        """Takes a jail and starts it."""
        uuid, _, iocage = self.check_jail_existence(jail)
        status, _ = IOCList.list_get_jid(uuid)

        if not status:
            try:
                iocage.start()
            except BaseException as e:
                raise CallError(str(e))

        return True

    @accepts(Str("jail"), Bool('force', default=False))
    @job(lock=lambda args: f'jail_stop:{args[-1]}')
    def stop(self, job, jail, force):
        """Takes a jail and stops it."""
        uuid, _, iocage = self.check_jail_existence(jail)
        status, _ = IOCList.list_get_jid(uuid)

        if status:
            try:
                iocage.stop(force=force)
            except BaseException as e:
                raise CallError(str(e))

            return True

    @accepts(Str('jail'))
    @job(lock=lambda args: f"jail_restart:{args[-1]}")
    def restart(self, job, jail):
        """Takes a jail and restarts it."""
        uuid, _, iocage = self.check_jail_existence(jail)
        status, _ = IOCList.list_get_jid(uuid)

        if status:
            try:
                iocage.stop()
            except BaseException as e:
                raise CallError(str(e))

        try:
            iocage.start()
        except BaseException as e:
            raise CallError(str(e))

        return True

    @private
    def get_iocroot(self):
        pool = IOCJson().json_get_value("pool")
        return IOCJson(pool).json_get_value("iocroot")

    @accepts(Str("jail"),
             Dict(
                 "options",
                 Str("action",
                     enum=["ADD", "EDIT", "REMOVE", "REPLACE", "LIST"],
                     required=True),
                 Str("source"),
                 Str("destination"),
                 Str("fstype", default='nullfs'),
                 Str("fsoptions", default='ro'),
                 Str("dump", default='0'),
                 Str("pass", default='0'),
                 Int("index", default=None),
             ))
    def fstab(self, jail, options):
        """Adds an fstab mount to the jail"""
        uuid, _, iocage = self.check_jail_existence(jail, skip=False)
        status, jid = IOCList.list_get_jid(uuid)
        action = options['action'].lower()

        if status and action != 'list':
            raise CallError(
                f'{jail} should not be running when adding a mountpoint')

        verrors = ValidationErrors()

        source = options.get('source')
        if source:
            if not os.path.exists(source):
                verrors.add('options.source',
                            'Provided path for source does not exist')

        destination = options.get('destination')
        if destination:
            destination = f'/{destination}' if destination[
                0] != '/' else destination
            dst = f'{self.get_iocroot()}/jails/{jail}/root'
            if dst not in destination:
                destination = f'{dst}{destination}'

            if os.path.exists(destination):
                if not os.path.isdir(destination):
                    verrors.add(
                        'options.destination',
                        'Destination is not a directory, please provide a valid destination'
                    )
                elif os.listdir(destination):
                    verrors.add('options.destination',
                                'Destination directory should be empty')
            else:
                os.makedirs(destination)

        if action != 'list':
            for f in options:
                if not options.get(f) and f not in ('index', ):
                    verrors.add(f'options.{f}', 'This field is required')

        fstype = options.get('fstype')
        fsoptions = options.get('fsoptions')
        dump = options.get('dump')
        _pass = options.get('pass')
        index = options.get('index')

        if action == 'replace' and index is None:
            verrors.add('options.index',
                        'Index must not be None when replacing fstab entry')

        if verrors:
            raise verrors

        try:
            _list = iocage.fstab(action,
                                 source,
                                 destination,
                                 fstype,
                                 fsoptions,
                                 dump,
                                 _pass,
                                 index=index)
        except ioc_exceptions.ValidationFailed as e:
            # CallError uses strings, the exception message may not always be a
            # list.
            if not isinstance(e.message, str) and isinstance(
                    e.message, Iterable):
                e.message = '\n'.join(e.message)

            self.logger.error(f'{e!r}')
            raise CallError(e.message)

        if action == "list":
            split_list = {}
            system_mounts = ('/root/bin', '/root/boot', '/root/lib',
                             '/root/libexec', '/root/rescue', '/root/sbin',
                             '/root/usr/bin', '/root/usr/include',
                             '/root/usr/lib', '/root/usr/libexec',
                             '/root/usr/sbin', '/root/usr/share',
                             '/root/usr/libdata', '/root/usr/lib32')

            for i in _list:
                fstab_entry = i[1].split('\t')
                _fstab_type = 'SYSTEM' if fstab_entry[0].endswith(
                    system_mounts) else 'USER'

                split_list[i[0]] = {'entry': fstab_entry, 'type': _fstab_type}

            return split_list

        return True
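
    # Usage sketch (hypothetical paths): adding a read-only nullfs mount; the
    # destination is automatically prefixed with the jail's root directory if
    # it is not already:
    #
    #     self.middleware.call_sync('jail.fstab', 'myjail', {
    #         'action': 'ADD',
    #         'source': '/mnt/tank/media',
    #         'destination': '/media',
    #     })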

    @accepts(Str("pool"))
    def activate(self, pool):
        """Activates a pool for iocage usage, and deactivates the rest."""
        zfs = libzfs.ZFS(history=True, history_prefix="<iocage>")
        pools = zfs.pools
        prop = "org.freebsd.ioc:active"
        activated = False

        for _pool in pools:
            if _pool.name == pool:
                ds = zfs.get_dataset(_pool.name)
                ds.properties[prop] = libzfs.ZFSUserProperty("yes")
                activated = True
            else:
                ds = zfs.get_dataset(_pool.name)
                ds.properties[prop] = libzfs.ZFSUserProperty("no")

        return activated

    @accepts(Str("ds_type", enum=["ALL", "JAIL", "TEMPLATE", "RELEASE"]))
    def clean(self, ds_type):
        """Cleans all iocage datasets of ds_type"""

        if ds_type == "JAIL":
            IOCClean().clean_jails()
        elif ds_type == "ALL":
            IOCClean().clean_all()
        elif ds_type == "TEMPLATE":
            IOCClean().clean_templates()

        return True

    @accepts(Str("jail"), List("command", required=True),
             Dict("options", Str("host_user", default="root"),
                  Str("jail_user")))
    @job(lock=lambda args: f"jail_exec:{args[-1]}")
    def exec(self, job, jail, command, options):
        """Issues a command inside a jail."""
        _, _, iocage = self.check_jail_existence(jail, skip=False)

        host_user = options["host_user"]
        jail_user = options.get("jail_user", None)

        if isinstance(command[0], list):
            # iocage wants a flat list, not a list inside a list
            command = list(itertools.chain.from_iterable(command))

        # We may be getting ';', '&&' and so forth. Adding the shell for
        # safety.
        if len(command) == 1:
            command = ["/bin/sh", "-c"] + command

        host_user = "" if jail_user and host_user == "root" else host_user
        try:
            msg = iocage.exec(command,
                              host_user,
                              jail_user,
                              start_jail=True,
                              msg_return=True)
        except BaseException as e:
            raise CallError(str(e))

        return '\n'.join(msg)
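
    # Usage sketch (hypothetical command): single-element commands are wrapped
    # in '/bin/sh -c', so shell syntax such as '&&' works:
    #
    #     output = self.middleware.call_sync(
    #         'jail.exec', 'myjail', ['uname -a'], {}, job=True)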

    @accepts(Str("jail"))
    @job(lock=lambda args: f"jail_update:{args[-1]}")
    def update_to_latest_patch(self, job, jail):
        """Updates specified jail to latest patch level."""
        job.set_progress(0, f'Updating {jail}')
        msg_queue = deque(maxlen=10)

        def progress_callback(content, exception):
            msg = content['message'].strip('\n')
            msg_queue.append(msg)
            final_msg = '\n'.join(msg_queue)

            if 'Inspecting system... done' in msg:
                job.set_progress(20)
            elif 'Preparing to download files... done.' in msg:
                job.set_progress(50)
            elif 'Applying patches... done.' in msg:
                job.set_progress(75)
            elif 'Installing updates... done.' in msg:
                job.set_progress(90)
            elif f'{jail} has been updated successfully' in msg:
                job.set_progress(100)

            job.set_progress(None, description=final_msg)

        _, _, iocage = self.check_jail_existence(jail,
                                                 callback=progress_callback)
        iocage.update()

        return True

    @accepts(Str("jail"),
             Dict("options", Str("release", required=False),
                  Bool("plugin", default=False)))
    @job(lock=lambda args: f"jail_upgrade:{args[-1]}")
    def upgrade(self, job, jail, options):
        """Upgrades specified jail to specified RELEASE."""
        verrors = ValidationErrors()
        release = options.get('release', None)
        plugin = options['plugin']

        if release is None and not plugin:
            verrors.add('options.release',
                        'Must not be None if options.plugin is False.')
            raise verrors

        job.set_progress(0, f'Upgrading {jail}')
        msg_queue = deque(maxlen=10)

        def progress_callback(content, exception):
            msg = content['message'].strip('\n')
            msg_queue.append(msg)
            final_msg = '\n'.join(msg_queue)

            if plugin:
                plugin_progress(job, msg)
            else:
                jail_progress(job, msg)

            job.set_progress(None, description=final_msg)

        def plugin_progress(job, msg):
            if 'Snapshotting' in msg:
                job.set_progress(20)
            elif 'Updating plugin INDEX' in msg:
                job.set_progress(40)
            elif 'Running upgrade' in msg:
                job.set_progress(70)
            elif 'Installing plugin packages' in msg:
                job.set_progress(90)
            elif f'{jail} successfully upgraded' in msg:
                job.set_progress(100)

        def jail_progress(job, msg):
            if 'Inspecting system' in msg:
                job.set_progress(20)
            elif 'Preparing to download files' in msg:
                job.set_progress(50)
            elif 'Applying patches' in msg:
                job.set_progress(75)
            elif 'Installing updates' in msg:
                job.set_progress(90)
            elif f'{jail} successfully upgraded' in msg:
                job.set_progress(100)

        _, _, iocage = self.check_jail_existence(jail,
                                                 callback=progress_callback)
        iocage.upgrade(release=release)

        return True

    @accepts(Str("jail"))
    @job(lock=lambda args: f"jail_export:{args[-1]}")
    def export(self, job, jail):
        """Exports jail to zip file"""
        uuid, path, _ = self.check_jail_existence(jail)
        status, jid = IOCList.list_get_jid(uuid)
        started = False

        if status:
            self.stop(jail)
            started = True

        IOCImage().export_jail(uuid, path)

        if started:
            self.start(jail)

        return True

    @accepts(Str("jail"))
    @job(lock=lambda args: f"jail_import:{args[-1]}")
    def _import(self, job, jail):
        """Imports jail from zip file"""

        IOCImage().import_jail(jail)

        return True

    @private
    def get_plugin_version(self, pkg):
        """
        Fetches a list of pkgs from the http://pkg.cdn.trueos.org/iocage/
        repo and returns a list with the pkg version and plugin revision
        """
        try:
            pkg_dict = self.middleware.call_sync('cache.get',
                                                 'iocage_rpkgdict')
            r_plugins = self.middleware.call_sync('cache.get',
                                                  'iocage_rplugins')
        except KeyError:
            branch = self.get_version()
            r_pkgs = requests.get(
                f'http://pkg.cdn.trueos.org/iocage/{branch}/All')
            r_pkgs.raise_for_status()
            pkg_dict = {}
            for i in r_pkgs.iter_lines():
                i = i.decode().split('"')

                try:
                    pkg, version = i[1].rsplit('-', 1)
                    pkg_dict[pkg] = version
                except (ValueError, IndexError):
                    continue  # It's not a pkg
            self.middleware.call_sync('cache.put', 'iocage_rpkgdict', pkg_dict,
                                      86400)

            r_plugins = requests.get(
                'https://raw.githubusercontent.com/freenas/'
                f'iocage-ix-plugins/{branch}/INDEX')
            r_plugins.raise_for_status()

            r_plugins = r_plugins.json()
            self.middleware.call_sync('cache.put', 'iocage_rplugins',
                                      r_plugins, 86400)

        if pkg == 'bru-server':
            return ['N/A', '1']
        elif pkg == 'sickrage':
            return ['Git branch - master', '1']

        try:
            primary_pkg = r_plugins[pkg]['primary_pkg'].split('/', 1)[-1]

            version = pkg_dict[primary_pkg]
            version = [version.rsplit('%2', 1)[0].replace('.txz', ''), '1']
        except KeyError:
            version = ['N/A', 'N/A']

        return version

    @private
    def get_local_plugin_version(self, plugin, index_json, iocroot):
        """
        Compares the primary_pkg key in the INDEX against the pkg version
        installed inside the jail.
        """
        if index_json is None:
            return ['N/A', 'N/A']

        try:
            base_plugin = plugin.rsplit('_', 1)[0]  # May have multiple
            primary_pkg = index_json[base_plugin]['primary_pkg']
            version = ['N/A', 'N/A']

            # Since these are plugins, we don't want to spin them up just to
            # check a pkg, directly accessing the db is best in this case.
            db_rows = self.read_plugin_pkg_db(
                f'{iocroot}/jails/{plugin}/root/var/db/pkg/local.sqlite',
                primary_pkg)

            for row in db_rows:
                if primary_pkg == row[1] or primary_pkg == row[2]:
                    version = [row[3], '1']
                    break
        except (KeyError, sqlite3.OperationalError):
            version = ['N/A', 'N/A']

        return version

    @private
    def read_plugin_pkg_db(self, db, pkg):
        try:
            conn = sqlite3.connect(db)
        except sqlite3.Error as e:
            raise CallError(e)

        with conn:
            cur = conn.cursor()
            cur.execute(
                'SELECT * FROM packages WHERE origin=? OR name=?', (pkg, pkg))

            rows = cur.fetchall()

            return rows

    @private
    def start_on_boot(self):
        self.logger.debug('Starting jails on boot: PENDING')
        ioc.IOCage(rc=True).start()
        self.logger.debug('Starting jails on boot: SUCCESS')

        return True

    @private
    def stop_on_shutdown(self):
        self.logger.debug('Stopping jails on shutdown: PENDING')
        ioc.IOCage(rc=True).stop()
        self.logger.debug('Stopping jails on shutdown: SUCCESS')

        return True

    @private
    async def terminate(self):
        await SHUTDOWN_LOCK.acquire()

    @private
    def get_version(self):
        """
        Parses os.uname().release to derive the RELEASE branch we need
        """
        r = os.uname().release
        version = f'{round(float(r.split("-")[0]), 1)}-RELEASE'

        return version
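
    # Illustrative sketch (hypothetical host release): on a host where
    # os.uname().release is '11.2-STABLE', this yields the branch
    # '11.2-RELEASE', which get_plugin_version() uses to pick the pkg repo.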
Example #29
0
class ChartReleaseService(Service):
    class Config:
        namespace = 'chart.release'

    @accepts(Str('release_name'),
             Dict(
                 'upgrade_options',
                 Bool('update_container_images', default=True),
                 Dict('values', additional_attrs=True),
                 Str('item_version', default='latest'),
             ))
    @job(lock=lambda args: f'chart_release_upgrade_{args[0]}')
    async def upgrade(self, job, release_name, options):
        """
        Upgrade `release_name` chart release.

        `upgrade_options.item_version` specifies the item version to which the chart release should be upgraded.

        The system will update the container images being used by the `release_name` chart release. Right now this
        can be controlled by the `upgrade_options.update_container_images` option, but that option is deprecated and
        will be removed in the future, at which point the system will always update the container images in use,
        because a chart release upgrade is not considered complete until those images have also been updated to
        their latest versions.

        During upgrade, `upgrade_options.values` can be specified to apply configuration changes to the chart
        release in question.

        When the chart version is upgraded, the system automatically takes a snapshot of the `ix_volumes` in
        question, which can be used for a rollback later on.
        """
        await self.middleware.call('kubernetes.validate_k8s_setup')
        release = await self.middleware.call('chart.release.get_instance',
                                             release_name)
        if not release['update_available'] and not release[
                'container_images_update_available']:
            raise CallError('No update is available for chart release')

        # We need to update container images before upgrading chart version as it's possible that the chart version
        # in question needs newer image hashes.
        if options['update_container_images']:
            # TODO: Always do this in the future
            job.set_progress(10, 'Updating container images')
            await (await self.middleware.call(
                'chart.release.pull_container_images', release_name,
                {'redeploy': False})).wait(raise_error=True)
            job.set_progress(30, 'Updated container images')

        job.set_progress(40, 'Creating snapshot for upgrade')
        # If a snapshot of the volumes already exists with the same name (e.g. from a failed upgrade), remove it,
        # as we want the snapshot to reflect the current point in time
        volumes_ds = os.path.join(release['dataset'], 'volumes/ix_volumes')
        snap_name = f'{volumes_ds}@{release["version"]}'
        if await self.middleware.call('zfs.snapshot.query',
                                      [['id', '=', snap_name]]):
            await self.middleware.call('zfs.snapshot.delete', snap_name,
                                       {'recursive': True})

        await self.middleware.call('zfs.snapshot.create', {
            'dataset': volumes_ds,
            'name': release['version'],
            'recursive': True
        })

        if release['update_available']:
            await self.upgrade_chart_release(job, release, options)
        else:
            await (await
                   self.middleware.call('chart.release.redeploy',
                                        release_name)).wait(raise_error=True)

        chart_release = await self.middleware.call(
            'chart.release.get_instance', release_name)
        self.middleware.send_event('chart.release.query',
                                   'CHANGED',
                                   id=release_name,
                                   fields=chart_release)

        await self.chart_releases_update_checks_internal(
            [['id', '=', release_name]])

        job.set_progress(100, 'Upgrade complete for chart release')

        return chart_release
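
    # Usage sketch (hypothetical release name and values): upgrading to a
    # specific catalog item version while overriding one configuration value;
    # the call is a job, so the returned job can be waited on as elsewhere in
    # this class:
    #
    #     await self.middleware.call('chart.release.upgrade', 'plex', {
    #         'item_version': '1.7.5',
    #         'values': {'timezone': 'UTC'},
    #     })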

    @accepts(Str('release_name'),
             Dict('options', Str('item_version', default='latest',
                                 empty=False)))
    def upgrade_summary(self, release_name, options):
        """
        Retrieve an upgrade summary for `release_name`, including which container images will be updated and, if
        applicable, the changelog for the specified `options.item_version` chart version. If only container images
        need to be updated, the changelog will be `null`.

        If chart release `release_name` does not require an upgrade, an error will be raised.
        """
        release = self.middleware.call_sync('chart.release.query',
                                            [['id', '=', release_name]], {
                                                'extra': {
                                                    'retrieve_resources': True
                                                },
                                                'get': True
                                            })
        if not release['update_available'] and not release[
                'container_images_update_available']:
            raise CallError('No update is available for chart release',
                            errno=errno.ENOENT)

        latest_version = release['human_version']
        changelog = None
        if release['update_available']:
            catalog_item = self.middleware.call_sync(
                'chart.release.get_version', release, options)
            latest_version = catalog_item['human_version']
            changelog = catalog_item['changelog']

        return {
            'container_images_to_update': {
                k: v
                for k, v in release['resources']['container_images'].items()
                if v['update_available']
            },
            'latest_version': latest_version,
            'changelog': changelog,
        }
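
    # Illustrative sketch (hypothetical values) of the summary returned above:
    #
    #     {
    #         'container_images_to_update': {'plex:latest': {...}},
    #         'latest_version': '1.7.5_1.2.0',
    #         'changelog': 'Upstream version bump ...',
    #     }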

    @private
    async def get_version(self, release, options):
        catalog = await self.middleware.call(
            'catalog.query',
            [['id', '=', release['catalog']]],
            {'extra': {
                'item_details': True
            }},
        )
        if not catalog:
            raise CallError(f'Unable to locate {release["catalog"]!r} catalog',
                            errno=errno.ENOENT)
        else:
            catalog = catalog[0]

        current_chart = release['chart_metadata']
        chart = current_chart['name']
        if release['catalog_train'] not in catalog['trains']:
            raise CallError(
                f'Unable to locate {release["catalog_train"]!r} catalog train in {release["catalog"]!r}',
                errno=errno.ENOENT,
            )
        if chart not in catalog['trains'][release['catalog_train']]:
            raise CallError(
                f'Unable to locate {chart!r} catalog item in {release["catalog"]!r} '
                f'catalog\'s {release["catalog_train"]!r} train.',
                errno=errno.ENOENT)

        new_version = options['item_version']
        if new_version == 'latest':
            new_version = await self.middleware.call(
                'chart.release.get_latest_version_from_item_versions',
                catalog['trains'][release['catalog_train']][chart]['versions'])

        if new_version not in catalog['trains'][
                release['catalog_train']][chart]['versions']:
            raise CallError(
                f'Unable to locate specified {new_version!r} item version.')

        verrors = ValidationErrors()
        if parse_version(new_version) <= parse_version(
                current_chart['version']):
            verrors.add(
                'upgrade_options.item_version',
                f'Upgrade version must be greater than {current_chart["version"]!r} current version.'
            )

        verrors.check()

        return catalog['trains'][
            release['catalog_train']][chart]['versions'][new_version]

    async def upgrade_chart_release(self, job, release, options):
        release_name = release['name']

        catalog_item = await self.get_version(release, options)
        await self.middleware.call('catalog.version_supported_error_check',
                                   catalog_item)

        config = await self.middleware.call('chart.release.upgrade_values',
                                            release, catalog_item['location'])

        # We will be performing validation of the values specified. We allow the user to specify values here
        # because the upgraded catalog item version might have a different schema, which potentially means that
        # the upgrade won't work, or even if the new k8s resources are created/deployed, they won't necessarily
        # function as they should because of changed or newly expected params.
        # One tricky bit which we need to account for first is removing any key from the currently configured
        # values which the upgraded release will potentially no longer support. We can safely remove those, as
        # otherwise validation will fail because the new schema does not expect those keys.
        config = clean_values_for_upgrade(config,
                                          catalog_item['schema']['questions'])
        config.update(options['values'])

        config, context = await self.middleware.call(
            'chart.release.normalise_and_validate_values',
            catalog_item,
            config,
            False,
            release['dataset'],
        )
        job.set_progress(
            50, 'Initial validation complete for upgrading chart version')

        # We have validated configuration now

        chart_path = os.path.join(release['path'], 'charts',
                                  catalog_item['version'])
        await self.middleware.run_in_thread(shutil.rmtree,
                                            chart_path,
                                            ignore_errors=True)
        await self.middleware.run_in_thread(shutil.copytree,
                                            catalog_item['location'],
                                            chart_path)

        await self.middleware.call('chart.release.perform_actions', context)

        # Update the context options to reflect that an upgrade is taking place, and from which version to which
        # version it's happening.
        # Helm considers a simple config change an upgrade as well, and we have no way of determining the old/new
        # chart versions during a helm upgrade in the helm template, hence the requirement for a context object.
        config[CONTEXT_KEY_NAME].update({
            'operation': 'UPGRADE',
            'isUpgrade': True,
            'upgradeMetadata': {
                'oldChartVersion': release['chart_metadata']['version'],
                'newChartVersion': catalog_item['version'],
                'preUpgradeRevision': release['version'],
            }
        })

        job.set_progress(60, 'Upgrading chart release version')

        await self.middleware.call('chart.release.helm_action', release_name,
                                   chart_path, config, 'upgrade')

    @private
    def upgrade_values(self, release, new_version_path):
        config = copy.deepcopy(release['config'])
        chart_version = release['chart_metadata']['version']
        migration_path = os.path.join(new_version_path, 'migrations')
        migration_files = [
            os.path.join(migration_path, k)
            for k in (f'migrate_from_{chart_version}', 'migrate')
        ]
        if not os.path.exists(migration_path) or all(not os.access(p, os.X_OK)
                                                     for p in migration_files):
            return config

        # This is guaranteed to exist based on above check
        file_path = next(f for f in migration_files if os.access(f, os.X_OK))

        with tempfile.NamedTemporaryFile(mode='w+') as f:
            f.write(json.dumps(config))
            f.flush()
            cp = subprocess.Popen([file_path, f.name],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
            stdout, stderr = cp.communicate()

        if cp.returncode:
            raise CallError(f'Failed to apply migration: {stderr.decode()}')

        if stdout:
            # We add this as a safety net in case something went wrong with the migration and we get a null response
            # or the chart dev mishandled something - although we don't suppress any exceptions which might be raised
            config = json.loads(stdout.decode())

        return config
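
    # A minimal migration-script sketch (assumed chart layout): the executable
    # 'migrations/migrate' (or 'migrations/migrate_from_<version>') receives
    # the path to a JSON dump of the current config and prints the migrated
    # config on stdout, which upgrade_values() then loads back:
    #
    #     #!/usr/bin/env python3
    #     import json, sys
    #
    #     with open(sys.argv[1]) as f:
    #         config = json.load(f)
    #     config.setdefault('newOption', 'default')  # hypothetical new key
    #     print(json.dumps(config))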

    @periodic(interval=86400)
    @private
    async def periodic_chart_releases_update_checks(self):
        sync_job = await self.middleware.call('catalog.sync_all')
        await sync_job.wait()
        if not await self.middleware.call('service.started', 'kubernetes'):
            return

        await self.chart_releases_update_checks_internal()

    @private
    async def chart_releases_update_checks_internal(self, chart_releases_filters=None):
        chart_releases_filters = chart_releases_filters or []
        # For alert purposes, a chart release is considered eligible for an upgrade/update if either a newer
        # catalog item version is available or any of the container images it uses is outdated.

        catalog_items = {
            f'{c["id"]}_{train}_{item}': c['trains'][train][item]
            for c in await self.middleware.call(
                'catalog.query', [], {'extra': {'item_details': True}})
            for train in c['trains'] for item in c['trains'][train]
        }
        for application in await self.middleware.call('chart.release.query',
                                                      chart_releases_filters):
            if application['container_images_update_available']:
                await self.middleware.call('alert.oneshot_create',
                                           'ChartReleaseUpdate', application)
                continue

            app_id = f'{application["catalog"]}_{application["catalog_train"]}_{application["chart_metadata"]["name"]}'
            catalog_item = catalog_items.get(app_id)
            if not catalog_item:
                continue

            await self.chart_release_update_check(catalog_item, application)

        container_config = await self.middleware.call('container.config')
        if container_config['enable_image_updates']:
            # Fire-and-forget: the image update check runs in the background and is not awaited here.
            asyncio.ensure_future(
                self.middleware.call('container.image.check_update'))

    @private
    async def chart_release_update_check(self, catalog_item, application):
        available_versions = [
            parse_version(version)
            for version, data in catalog_item['versions'].items()
            if data['healthy']
        ]
        if not available_versions:
            return

        available_versions.sort(reverse=True)
        if available_versions[0] > parse_version(
                application['chart_metadata']['version']):
            await self.middleware.call('alert.oneshot_create',
                                       'ChartReleaseUpdate', application)
        else:
            await self.middleware.call('alert.oneshot_delete',
                                       'ChartReleaseUpdate', application['id'])
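    # parse_version() is used above instead of comparing version strings directly because
    # lexicographic comparison mishandles multi-digit components, e.g. (assuming the usual
    # packaging/pkg_resources semantics):
    #   parse_version('1.10.0') > parse_version('1.9.3')   # True
    #   '1.10.0' > '1.9.3'                                 # False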

    @accepts(Str('release_name'),
             Dict(
                 'pull_container_images_options',
                 Bool('redeploy', default=True),
             ))
    @job(lock=lambda args: f'pull_container_images{args[0]}')
    async def pull_container_images(self, job, release_name, options):
        """
        Update container images being used by `release_name` chart release.

        `redeploy`, when set, will redeploy pods so that the chart release uses the newer, updated versions of
        the container images.
        """
        await self.middleware.call('kubernetes.validate_k8s_setup')
        images = [
            {
                'orig_tag': tag,
                **(await self.middleware.call('container.image.parse_image_tag', tag)),
            }
            for tag in (await self.middleware.call(
                'chart.release.query', [['id', '=', release_name]],
                {'extra': {'retrieve_resources': True}, 'get': True},
            ))['resources']['container_images']
        ]
        results = {}

        bulk_job = await self.middleware.call(
            'core.bulk', 'container.image.pull', [
                [{'from_image': f'{image["registry"]}/{image["image"]}', 'tag': image['tag']}]
                for image in images
            ])
        await bulk_job.wait()
        if bulk_job.error:
            raise CallError(
                f'Failed to update container images for {release_name!r} chart release: {bulk_job.error}'
            )

        for image, status in zip(images, bulk_job.result):
            if status['error']:
                results[image['orig_tag']] = f'Failed to pull image: {status["error"]}'
            else:
                results[image['orig_tag']] = 'Updated image'

        if options['redeploy']:
            await job.wrap(await self.middleware.call('chart.release.redeploy',
                                                      release_name))

        return results
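    # Shape of the arguments and result of this job, with a hypothetical release name and
    # image tags, based on the code above:
    #   args:   ['my-release', {'redeploy': True}]
    #   result: {'docker.io/library/nginx:1.21': 'Updated image',
    #            'docker.io/library/redis:6': 'Failed to pull image: <error message>'}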
Example #30
class FilesystemService(Service):
    @accepts(Str('path', required=True), Ref('query-filters'),
             Ref('query-options'))
    def listdir(self, path, filters=None, options=None):
        """
        Get the contents of a directory.

        Each entry of the list consists of:
          name(str): name of the file
          path(str): absolute path of the entry
          realpath(str): absolute real path of the entry (if SYMLINK)
          type(str): DIRECTORY | FILE | SYMLINK | OTHER
          size(int): size of the entry
          mode(int): file mode/permission
          uid(int): user id of entry owner
          gid(int): group id of entry owner
          acl(bool): extended ACL is present on file
        """
        if not os.path.exists(path):
            raise CallError(f'Directory {path} does not exist', errno.ENOENT)

        if not os.path.isdir(path):
            raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)

        rv = []
        for entry in os.scandir(path):
            if entry.is_dir():
                etype = 'DIRECTORY'
            elif entry.is_file():
                etype = 'FILE'
            elif entry.is_symlink():
                etype = 'SYMLINK'
            else:
                etype = 'OTHER'

            data = {
                'name': entry.name,
                'path': entry.path,
                'realpath': os.path.realpath(entry.path) if etype == 'SYMLINK' else entry.path,
                'type': etype,
            }
            try:
                stat = entry.stat()
                data.update({
                    'size': stat.st_size,
                    'mode': stat.st_mode,
                    'acl': not self.acl_is_trivial(data['realpath']),
                    'uid': stat.st_uid,
                    'gid': stat.st_gid,
                })
            except FileNotFoundError:
                data.update({
                    'size': None,
                    'mode': None,
                    'acl': None,
                    'uid': None,
                    'gid': None
                })
            rv.append(data)
        return filter_list(rv, filters=filters or [], options=options or {})
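    # A representative (hypothetical) entry returned by listdir(), matching the keys
    # documented above; 16877 is the integer form of mode 0o40755:
    #   {'name': 'data', 'path': '/mnt/tank/data', 'realpath': '/mnt/tank/data',
    #    'type': 'DIRECTORY', 'size': 12, 'mode': 16877, 'acl': False, 'uid': 0, 'gid': 0}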

    @accepts(Str('path'))
    def stat(self, path):
        """
        Return the filesystem stat(2) for a given `path`.
        """
        try:
            stat = os.stat(path, follow_symlinks=False)
        except FileNotFoundError:
            raise CallError(f'Path {path} not found', errno.ENOENT)

        stat = {
            'size': stat.st_size,
            'mode': stat.st_mode,
            'uid': stat.st_uid,
            'gid': stat.st_gid,
            'atime': stat.st_atime,
            'mtime': stat.st_mtime,
            'ctime': stat.st_ctime,
            'dev': stat.st_dev,
            'inode': stat.st_ino,
            'nlink': stat.st_nlink,
        }

        try:
            stat['user'] = pwd.getpwuid(stat['uid']).pw_name
        except KeyError:
            stat['user'] = None

        try:
            stat['group'] = grp.getgrgid(stat['gid']).gr_name
        except KeyError:
            stat['group'] = None

        stat['acl'] = not self.middleware.call_sync('filesystem.acl_is_trivial', path)

        return stat

    @private
    @accepts(
        Str('path'),
        Str('content', max_length=512000),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    def file_receive(self, path, content, options=None):
        """
        Simplified file receiving method for small files.

        `content` must be a base 64 encoded file content.
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'
        with open(path, openmode) as f:
            f.write(binascii.a2b_base64(content))
        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True
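    # file_receive() expects `content` as base64 text; a minimal sketch of preparing it on
    # the caller's side with the standard library (payload is illustrative):
    #   import base64
    #   content = base64.b64encode(b'hello\n').decode()   # 'aGVsbG8K'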

    @private
    @accepts(
        Str('path'),
        Dict(
            'options',
            Int('offset'),
            Int('maxlen'),
        ),
    )
    def file_get_contents(self, path, options=None):
        """
        Get the contents of a file `path`, base64-encoded.

        DISCLAIMER: DO NOT USE THIS FOR BIG FILES (> 500KB).
        """
        options = options or {}
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as f:
            if options.get('offset'):
                f.seek(options['offset'])
            data = binascii.b2a_base64(f.read(
                options.get('maxlen'))).decode().strip()
        return data
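    # The returned value is base64 text; the caller can recover the raw bytes with the
    # standard library, e.g.:
    #   import base64
    #   raw = base64.b64decode(data)   # bytes of the requested file region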

    @accepts(Str('path'))
    @job(pipes=["output"])
    async def get(self, job, path):
        """
        Job to get contents of `path`.
        """

        if not os.path.isfile(path):
            raise CallError(f'{path} is not a file')

        with open(path, 'rb') as f:
            await self.middleware.run_in_thread(shutil.copyfileobj, f,
                                                job.pipes.output.w)

    @accepts(
        Str('path'),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    @job(pipes=["input"])
    async def put(self, job, path, options=None):
        """
        Job to put contents to `path`.
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'

        with open(path, openmode) as f:
            await self.middleware.run_in_thread(shutil.copyfileobj,
                                                job.pipes.input.r, f)

        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @accepts(Str('path'))
    def statfs(self, path):
        """
        Return stats from the filesystem of a given path.

        Raises:
            CallError(ENOENT) - Path not found
        """
        try:
            statfs = bsd.statfs(path)
        except FileNotFoundError:
            raise CallError('Path not found.', errno.ENOENT)
        return {
            **statfs.__getstate__(),
            'total_bytes': statfs.total_blocks * statfs.blocksize,
            'free_bytes': statfs.free_blocks * statfs.blocksize,
            'avail_bytes': statfs.avail_blocks * statfs.blocksize,
        }
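    # The derived *_bytes fields above are simply block counts multiplied by the block
    # size, e.g. (illustrative numbers) blocksize=4096 and free_blocks=1024 give
    # free_bytes == 4194304.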

    def __convert_to_basic_permset(self, permset):
        """
        Convert "advanced" ACL permset format to basic format using
        bitwise operation and constants defined in py-bsd/bsd/acl.pyx,
        py-bsd/defs.pxd and acl.h.

        If the advanced ACL can't be converted without losing
        information, we return 'OTHER'.

        Reverse process converts the constant's value to a dictionary
        using a bitwise operation.
        """
        perm = 0
        for k, v in permset.items():
            if v:
                perm |= acl.NFS4Perm[k]

        try:
            SimplePerm = (acl.NFS4BasicPermset(perm)).name
        except Exception:
            SimplePerm = 'OTHER'

        return SimplePerm

    def __convert_to_basic_flagset(self, flagset):
        flags = 0
        for k, v in flagset.items():
            if k == "INHERITED":
                continue
            if v:
                flags |= acl.NFS4Flag[k]

        try:
            SimpleFlag = (acl.NFS4BasicFlagset(flags)).name
        except Exception:
            SimpleFlag = 'OTHER'

        return SimpleFlag

    def __convert_to_adv_permset(self, basic_perm):
        permset = {}
        perm_mask = acl.NFS4BasicPermset[basic_perm].value
        for name, member in acl.NFS4Perm.__members__.items():
            if perm_mask & member.value:
                permset.update({name: True})
            else:
                permset.update({name: False})

        return permset

    def __convert_to_adv_flagset(self, basic_flag):
        flagset = {}
        flag_mask = acl.NFS4BasicFlagset[basic_flag].value
        for name, member in acl.NFS4Flag.__members__.items():
            if flag_mask & member.value:
                flagset.update({name: True})
            else:
                flagset.update({name: False})

        return flagset
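    # The basic <-> advanced conversions are intended to round-trip, e.g. (illustrative,
    # assuming no extra bits are set):
    #   adv = self.__convert_to_adv_permset('READ')
    #   self.__convert_to_basic_permset(adv)   # 'READ'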

    def _winacl(self, path, action, uid, gid, options):
        chroot_dir = os.path.dirname(path)
        target = os.path.basename(path)
        winacl = subprocess.run([
            '/usr/local/bin/winacl', '-a', action, '-O',
            str(uid), '-G',
            str(gid), '-rx' if options['traverse'] else '-r', '-c', chroot_dir,
            '-p', target
        ],
                                check=False,
                                capture_output=True)
        if winacl.returncode != 0:
            raise CallError(
                f"Winacl {action} on path {path} failed with error: [{winacl.stderr.decode().strip()}]"
            )

    def _common_perm_path_validate(self, path):
        if not os.path.exists(path):
            raise CallError(f"Path not found: {path}", errno.ENOENT)

        if not os.path.realpath(path).startswith('/mnt/'):
            raise CallError(
                f"Changing permissions on paths outside of /mnt is not permitted: {path}",
                errno.EPERM)

        if os.path.realpath(path) in [
                x['path'] for x in self.middleware.call_sync('pool.query')
        ]:
            raise CallError(
                f"Changing permissions of root level dataset is not permitted: {path}",
                errno.EPERM)

    @accepts(Str('path'))
    def acl_is_trivial(self, path):
        """
        Returns True if the ACL can be fully expressed as a file mode without losing
        any access rules, or if the path does not support NFSv4 ACLs (for example
        a path on a tmpfs filesystem).
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)

        # 64 is the pathconf name for NFSv4 ACL support (presumably _PC_ACL_NFS4 on FreeBSD);
        # a falsey result means the underlying filesystem does not support NFSv4 ACLs.
        has_nfs4_acl_support = os.pathconf(path, 64)
        if not has_nfs4_acl_support:
            return True

        return acl.ACL(file=path).is_trivial

    @accepts(
        Dict(
            'filesystem_ownership', Str('path', required=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict('options', Bool('recursive', default=False),
                 Bool('traverse', default=False))))
    @job(lock="perm_change")
    def chown(self, job, data):
        """
        Change owner or group of file at `path`.

        `uid` and `gid` specify new owner of the file. If either
        key is absent or None, then existing value on the file is not
        changed.

        `recursive` performs action recursively, but does
        not traverse filesystem mount points.

        If `traverse` and `recursive` are specified, then the chown
        operation will traverse filesystem mount points.
        """
        job.set_progress(0, 'Preparing to change owner.')

        self._common_perm_path_validate(data['path'])

        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']
        options = data['options']

        if not options['recursive']:
            job.set_progress(100, 'Finished changing owner.')
            os.chown(data['path'], uid, gid)
        else:
            job.set_progress(10,
                             f'Recursively changing owner of {data["path"]}.')
            self._winacl(data['path'], 'chown', uid, gid, options)
            job.set_progress(100, 'Finished changing owner.')

    @accepts(
        Dict(
            'filesystem_permission', Str('path', required=True),
            UnixPerm('mode', null=True), Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
            )))
    @job(lock="perm_change")
    def setperm(self, job, data):
        """
        Remove extended ACL from specified path.

        If `mode` is specified then the mode will be applied to the
        path and files and subdirectories depending on which `options` are
        selected. Mode should be formatted as string representation of octal
        permissions bits.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `stripacl` setperm will fail if an extended ACL is present on `path`,
        unless `stripacl` is set to True.

        `recursive` remove ACLs recursively, but do not traverse dataset
        boundaries.

        `traverse` remove ACLs from child datasets.

        If no `mode` is set, and `stripacl` is True, then non-trivial ACLs
        will be converted to trivial ACLs. An ACL is trivial if it can be
        expressed as a file mode without losing any access rules.

        """
        job.set_progress(0, 'Preparing to set permissions.')
        options = data['options']
        mode = data.get('mode', None)

        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']

        self._common_perm_path_validate(data['path'])

        acl_is_trivial = self.middleware.call_sync('filesystem.acl_is_trivial',
                                                   data['path'])
        if not acl_is_trivial and not options['stripacl']:
            raise CallError(
                f'Non-trivial ACL present on [{data["path"]}]. Option "stripacl" required to change permission.',
                errno.EINVAL)

        if mode is not None:
            mode = int(mode, 8)

        a = acl.ACL(file=data['path'])
        a.strip()
        a.apply(data['path'])

        if mode:
            os.chmod(data['path'], mode)

        if uid or gid:
            os.chown(data['path'], uid, gid)

        if not options['recursive']:
            job.set_progress(100, 'Finished setting permissions.')
            return

        action = 'clone' if mode else 'strip'
        job.set_progress(
            10, f'Recursively setting permissions on {data["path"]}.')
        self._winacl(data['path'], action, uid, gid, options)
        job.set_progress(100, 'Finished setting permissions.')
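    # `mode` arrives as a string of octal permission bits and is converted with int(mode, 8),
    # e.g. '755' -> 493 (0o755); passing mode=None leaves the file mode untouched.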

    @accepts()
    async def default_acl_choices(self):
        """
        Get list of default ACL types.
        """
        acl_choices = []
        for x in ACLDefault:
            if x.value['visible']:
                acl_choices.append(x.name)

        return acl_choices

    @accepts(Str('acl_type', default='OPEN',
                 enum=[x.name for x in ACLDefault]))
    async def get_default_acl(self, acl_type):
        """
        Returns a default ACL depending on the usage specified by `acl_type`.
        If an admin group is defined, then an entry granting it full control will
        be placed at the top of the ACL.
        """
        acl = []
        admin_group = (await self.middleware.call('smb.config'))['admin_group']
        if acl_type == 'HOME' and (await self.middleware.call(
                'activedirectory.get_state')) == 'HEALTHY':
            acl_type = 'DOMAIN_HOME'
        if admin_group:
            acl.append({
                'tag': 'GROUP',
                'id': (await self.middleware.call(
                    'dscache.get_uncached_group', admin_group))['gr_gid'],
                'perms': {'BASIC': 'FULL_CONTROL'},
                'flags': {'BASIC': 'INHERIT'},
                'type': 'ALLOW',
            })
        acl.extend((ACLDefault[acl_type].value)['acl'])

        return acl

    def _is_inheritable(self, flags):
        """
        Takes ACE flags and returns True if any inheritance bits are set.
        """
        inheritance_flags = [
            'FILE_INHERIT', 'DIRECTORY_INHERIT', 'NO_PROPAGATE_INHERIT',
            'INHERIT_ONLY'
        ]
        for i in inheritance_flags:
            if flags.get(i):
                return True

        return False

    @private
    def canonicalize_acl_order(self, acl):
        """
        Convert flags to advanced, then separate the ACL into two lists: one for ACEs that have been inherited
        and one for ACEs that have not been inherited. Non-inherited ACEs take precedence
        and so they are placed first in the finalized combined list. Within each list, the
        ACEs are ordered as follows:

        1) Deny ACEs that apply to the object itself (NOINHERIT)

        2) Deny ACEs that apply to a subobject of the object (INHERIT)

        3) Allow ACEs that apply to the object itself (NOINHERIT)

        4) Allow ACEs that apply to a subobject of the object (INHERIT)

        See http://docs.microsoft.com/en-us/windows/desktop/secauthz/order-of-aces-in-a-dacl

        The "INHERITED" bit is stripped in filesystem.getacl when generating a BASIC flag type.
        It is best practice to use a non-simplified ACL for canonicalization.
        """
        inherited_aces = []
        final_acl = []
        non_inherited_aces = []
        for entry in acl:
            entry['flags'] = (
                self.__convert_to_adv_flagset(entry['flags']['BASIC'])
                if 'BASIC' in entry['flags'] else entry['flags']
            )
            if entry['flags'].get('INHERITED'):
                inherited_aces.append(entry)
            else:
                non_inherited_aces.append(entry)

        if inherited_aces:
            inherited_aces = sorted(
                inherited_aces,
                key=lambda x: (x['type'] == 'ALLOW', self._is_inheritable(x['flags'])),
            )
        if non_inherited_aces:
            non_inherited_aces = sorted(
                non_inherited_aces,
                key=lambda x: (x['type'] == 'ALLOW', self._is_inheritable(x['flags'])),
            )
        final_acl = non_inherited_aces + inherited_aces
        return final_acl
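    # Illustration (with hypothetical ACEs) of the canonical order produced above:
    #   1. non-inherited DENY  applying to the object itself
    #   2. non-inherited DENY  that is inheritable
    #   3. non-inherited ALLOW applying to the object itself
    #   4. non-inherited ALLOW that is inheritable
    #   ... followed by the inherited ACEs sorted the same way.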

    @accepts(
        Str('path'),
        Bool('simplified', default=True),
    )
    def getacl(self, path, simplified=True):
        """
        Return ACL of a given path.

        `simplified` returns a shortened form of the ACL permset and flags:

        `TRAVERSE` sufficient rights to traverse a directory, but not read contents.

        `READ` sufficient rights to traverse a directory, and read file contents.

        `MODIFY` sufficient rights to traverse, read, write, and modify a file. Equivalent to modify_set.

        `FULL_CONTROL` all permissions.

        If the permissions do not fit within one of the pre-defined simplified permission types, then
        the full ACL entry will be returned.

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled. We also remove it here to avoid confusion.
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)

        stat = os.stat(path)

        a = acl.ACL(file=path)
        fs_acl = a.__getstate__()

        if not simplified:
            advanced_acl = []
            for entry in fs_acl:
                ace = {
                    'tag': (acl.ACLWho[entry['tag']]).value,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': entry['perms'],
                    'flags': entry['flags'],
                }
                if ace['tag'] == 'everyone@' and self.__convert_to_basic_permset(
                        ace['perms']) == 'NOPERMS':
                    continue

                advanced_acl.append(ace)

            return {
                'uid': stat.st_uid,
                'gid': stat.st_gid,
                'acl': advanced_acl
            }

        if simplified:
            simple_acl = []
            for entry in fs_acl:
                ace = {
                    'tag': (acl.ACLWho[entry['tag']]).value,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': {
                        'BASIC':
                        self.__convert_to_basic_permset(entry['perms'])
                    },
                    'flags': {
                        'BASIC':
                        self.__convert_to_basic_flagset(entry['flags'])
                    },
                }
                if ace['tag'] == 'everyone@' and ace['perms'][
                        'BASIC'] == 'NOPERMS':
                    continue

                for key in ['perms', 'flags']:
                    if ace[key]['BASIC'] == 'OTHER':
                        ace[key] = entry[key]

                simple_acl.append(ace)

            return {'uid': stat.st_uid, 'gid': stat.st_gid, 'acl': simple_acl}

    @accepts(
        Dict(
            'filesystem_acl', Str('path', required=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            List('dacl',
                 items=[
                     Dict(
                         'aclentry',
                         Str('tag',
                             enum=[
                                 'owner@', 'group@', 'everyone@', 'USER',
                                 'GROUP'
                             ]),
                         Int('id', null=True),
                         Str('type', enum=['ALLOW', 'DENY']),
                         Dict(
                             'perms',
                             Bool('READ_DATA'),
                             Bool('WRITE_DATA'),
                             Bool('APPEND_DATA'),
                             Bool('READ_NAMED_ATTRS'),
                             Bool('WRITE_NAMED_ATTRS'),
                             Bool('EXECUTE'),
                             Bool('DELETE_CHILD'),
                             Bool('READ_ATTRIBUTES'),
                             Bool('WRITE_ATTRIBUTES'),
                             Bool('DELETE'),
                             Bool('READ_ACL'),
                             Bool('WRITE_ACL'),
                             Bool('WRITE_OWNER'),
                             Bool('SYNCHRONIZE'),
                             Str('BASIC',
                                 enum=[
                                     'FULL_CONTROL', 'MODIFY', 'READ',
                                     'TRAVERSE'
                                 ]),
                         ),
                         Dict(
                             'flags',
                             Bool('FILE_INHERIT'),
                             Bool('DIRECTORY_INHERIT'),
                             Bool('NO_PROPAGATE_INHERIT'),
                             Bool('INHERIT_ONLY'),
                             Bool('INHERITED'),
                             Str('BASIC', enum=['INHERIT', 'NOINHERIT']),
                         ),
                     )
                 ],
                 default=[]),
            Dict('options', Bool('stripacl', default=False),
                 Bool('recursive', default=False),
                 Bool('traverse', default=False),
                 Bool('canonicalize', default=True))))
    @job(lock="perm_change")
    def setacl(self, job, data):
        """
        Set ACL of a given path. Takes the following parameters:
        `path` full path to directory or file.

        `dacl` either a "simplified" ACL or a full ACL.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `recursive` apply the ACL recursively

        `traverse` traverse filesystem boundaries (ZFS datasets).

        `stripacl` convert the ACL to a trivial one. An ACL is trivial if it can be expressed as a file mode
        without losing any access rules.

        `canonicalize` reorder ACL entries so that they are in canonical form as described
        in the Microsoft documentation MS-DTYP 2.4.5 (ACL).

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled.
        """
        job.set_progress(0, 'Preparing to set acl.')
        options = data['options']
        dacl = data.get('dacl', [])

        self._common_perm_path_validate(data['path'])

        if dacl and options['stripacl']:
            raise CallError(
                'Setting ACL and stripping ACL are not permitted simultaneously.',
                errno.EINVAL)

        uid = -1 if data.get('uid', None) is None else data['uid']
        gid = -1 if data.get('gid', None) is None else data['gid']
        if options['stripacl']:
            a = acl.ACL(file=data['path'])
            a.strip()
            a.apply(data['path'])
        else:
            cleaned_acl = []
            lockace_is_present = False
            for entry in dacl:
                ace = {
                    'tag': (acl.ACLWho(entry['tag'])).name,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': (self.__convert_to_adv_permset(entry['perms']['BASIC'])
                              if 'BASIC' in entry['perms'] else entry['perms']),
                    'flags': (self.__convert_to_adv_flagset(entry['flags']['BASIC'])
                              if 'BASIC' in entry['flags'] else entry['flags']),
                }
                if ace['flags']['INHERIT_ONLY'] and not ace['flags'].get(
                        'DIRECTORY_INHERIT', False) and not ace['flags'].get(
                            'FILE_INHERIT', False):
                    raise CallError(
                        'Invalid flag combination. DIRECTORY_INHERIT or FILE_INHERIT must be set if INHERIT_ONLY is set.',
                        errno.EINVAL)
                if ace['tag'] == 'EVERYONE' and self.__convert_to_basic_permset(
                        ace['perms']) == 'NOPERMS':
                    lockace_is_present = True
                cleaned_acl.append(ace)
            if options['canonicalize']:
                cleaned_acl = self.canonicalize_acl_order(cleaned_acl)

            if not lockace_is_present:
                locking_ace = {
                    'tag': 'EVERYONE',
                    'id': None,
                    'type': 'ALLOW',
                    'perms': self.__convert_to_adv_permset('NOPERMS'),
                    'flags': self.__convert_to_adv_flagset('INHERIT')
                }
                cleaned_acl.append(locking_ace)

            a = acl.ACL()
            a.__setstate__(cleaned_acl)
            a.apply(data['path'])

        if not options['recursive']:
            os.chown(data['path'], uid, gid)
            job.set_progress(100, 'Finished setting ACL.')
            return

        job.set_progress(10, f'Recursively setting ACL on {data["path"]}.')
        self._winacl(data['path'], 'clone', uid, gid, options)
        job.set_progress(100, 'Finished setting ACL.')
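    # A minimal (hypothetical) `dacl` entry accepted by setacl(), using the BASIC shorthand
    # defined in the schema above:
    #   {'tag': 'GROUP', 'id': 1001, 'type': 'ALLOW',
    #    'perms': {'BASIC': 'MODIFY'}, 'flags': {'BASIC': 'INHERIT'}}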