Example #1
class CatalogService(CRUDService):

    class Config:
        datastore = 'services.catalog'
        datastore_extend = 'catalog.catalog_extend'
        datastore_extend_context = 'catalog.catalog_extend_context'
        datastore_primary_key = 'label'
        datastore_primary_key_type = 'string'
        cli_namespace = 'app.catalog'

    ENTRY = Dict(
        'catalog_entry',
        Str('label', required=True, validators=[Match(r'^\w+[\w.-]*$')], max_length=60),
        Str('repository', required=True, empty=False),
        Str('branch', required=True, empty=False),
        Str('location', required=True),
        Str('id', required=True),
        List('preferred_trains'),
        Dict('trains', additional_attrs=True),
        Bool('healthy'),
        Bool('error'),
        Bool('builtin'),
        Bool('cached'),
        Dict(
            'caching_progress',
            Str('description', null=True),
            Any('extra', null=True),
            Float('percent', null=True),
            null=True,
        ),
        Dict('caching_job', null=True, additional_attrs=True),
    )
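    # An illustrative entry matching ENTRY (all values below are made-up
    # placeholders, not taken from a live system):
    #
    #     {
    #         'label': 'OFFICIAL', 'repository': 'https://github.com/org/charts.git',
    #         'branch': 'master', 'location': '/mnt/pool/.../catalogs/...',
    #         'id': 'OFFICIAL', 'preferred_trains': ['stable'], 'trains': {},
    #         'healthy': True, 'error': False, 'builtin': True, 'cached': True,
    #         'caching_progress': None, 'caching_job': None,
    #     }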

    @private
    async def catalog_extend_context(self, rows, extra):
        k8s_dataset = (await self.middleware.call('kubernetes.config'))['dataset']
        catalogs_ds = await self.middleware.call(
            'zfs.dataset.query', [['id', '=', os.path.join(k8s_dataset, 'catalogs')]], {
                'extra': {'properties': ['encryption', 'keystatus', 'mountpoint', 'mounted']}
            }
        ) if k8s_dataset else []
        if k8s_dataset and catalogs_ds and (
            catalogs_ds[0]['properties']['mounted']['parsed'] and (
                (catalogs_ds[0]['encrypted'] and catalogs_ds[0]['key_loaded']) or not catalogs_ds[0]['encrypted']
            )
        ):
            catalogs_dir = catalogs_ds[0]['properties']['mountpoint']['parsed']
        else:
            catalogs_dir = os.path.join(TMP_IX_APPS_DIR, 'catalogs')

        extra = extra or {}
        context = {
            'catalogs_dir': catalogs_dir,
            'extra': extra,
            'catalogs_context': {},
        }
        if extra.get('item_details'):
            item_sync_params = await self.middleware.call('catalog.sync_items_params')
            item_jobs = await self.middleware.call(
                'core.get_jobs', [['method', '=', 'catalog.items'], ['state', '=', 'RUNNING']]
            )
            for row in rows:
                label = row['label']
                catalog_info = {
                    'item_job': await self.middleware.call('catalog.items', label, {
                        'cache': True,
                        'cache_only': await self.official_catalog_label() != row['label'],
                        'retrieve_all_trains': extra.get('retrieve_all_trains', True),
                        'trains': extra.get('trains', []),
                    }),
                    'cached': label == OFFICIAL_LABEL or await self.middleware.call('catalog.cached', label),
                    'normalized_progress': None,
                }
                if not catalog_info['cached']:
                    caching_job = filter_list(item_jobs, [['arguments', '=', [row['label'], item_sync_params]]])
                    if caching_job:
                        # We will almost certainly have this job, except right after middleware
                        # starts; it is guaranteed to appear eventually as catalog.sync_all runs
                        # periodically, so let's not trigger a redundant job here
                        caching_job = caching_job[0]
                    else:
                        caching_job = None

                    catalog_info['normalized_progress'] = {
                        'caching_job': caching_job,
                        'caching_progress': caching_job['progress'] if caching_job else None,
                    }
                context['catalogs_context'][label] = catalog_info

        return context

    @private
    async def normalize_data_from_item_job(self, label, catalog_context):
        normalized = {
            'trains': {},
            'cached': catalog_context['cached'],
            'healthy': False,
            'error': True,
            'caching_progress': None,
            'caching_job': None,
        }
        item_job = catalog_context['item_job']
        await item_job.wait()
        if not item_job.error:
            normalized.update({
                'trains': item_job.result,
                'healthy': all(
                    app['healthy'] for train in item_job.result for app in item_job.result[train].values()
                ),
                'cached': label == OFFICIAL_LABEL or await self.middleware.call('catalog.cached', label),
                'error': False,
                'caching_progress': None,
                'caching_job': None,
            })
        return normalized

    @private
    async def catalog_extend(self, catalog, context):
        catalog.update({
            'location': os.path.join(
                context['catalogs_dir'], convert_repository_to_path(catalog['repository'], catalog['branch'])
            ),
            'id': catalog['label'],
        })
        extra = context['extra']
        if extra.get('item_details'):
            catalog_context = context['catalogs_context'][catalog['label']]
            catalog.update(await self.normalize_data_from_item_job(catalog['id'], catalog_context))
            if catalog['cached']:
                return catalog
            else:
                catalog.update(catalog_context['normalized_progress'])
        return catalog

    @private
    async def common_validation(self, catalog, schema, data):
        found_trains = set(catalog['trains'])
        diff = set(data['preferred_trains']) - found_trains
        verrors = ValidationErrors()
        if diff:
            verrors.add(
                f'{schema}.preferred_trains',
                f'{", ".join(diff)} trains were not found in catalog.'
            )
        if not data['preferred_trains']:
            verrors.add(
                f'{schema}.preferred_trains',
                'At least 1 preferred train must be specified for a catalog.'
            )

        verrors.check()

    @accepts(
        Patch(
            'catalog_entry',
            'catalog_create',
            ('add', Bool('force', default=False)),
            ('rm', {'name': 'id'}),
            ('rm', {'name': 'trains'}),
            ('rm', {'name': 'healthy'}),
            ('rm', {'name': 'error'}),
            ('rm', {'name': 'builtin'}),
            ('rm', {'name': 'location'}),
            ('rm', {'name': 'cached'}),
            ('rm', {'name': 'caching_progress'}),
            ('rm', {'name': 'caching_job'}),
        ),
    )
    @job(lock=lambda args: f'catalog_create_{args[0]["label"]}')
    async def do_create(self, job, data):
        """
        `catalog_create.preferred_trains` specifies the trains that are displayed directly to the user in the UI.
        """
        verrors = ValidationErrors()
        # We normalize the label
        data['label'] = data['label'].upper()

        if await self.query([['id', '=', data['label']]]):
            verrors.add('catalog_create.label', 'A catalog with specified label already exists', errno=errno.EEXIST)

        if await self.query([['repository', '=', data['repository']], ['branch', '=', data['branch']]]):
            for k in ('repository', 'branch'):
                verrors.add(
                    f'catalog_create.{k}', 'A catalog with same repository/branch already exists', errno=errno.EEXIST
                )

        verrors.check()

        if not data['preferred_trains']:
            data['preferred_trains'] = ['stable']

        if not data.pop('force'):
            job.set_progress(40, f'Validating {data["label"]!r} catalog')
            # Validate the catalog now to ensure its contents / format are valid
            path = os.path.join(
                TMP_IX_APPS_DIR, 'validate_catalogs', convert_repository_to_path(data['repository'], data['branch'])
            )
            try:
                await self.middleware.call('catalog.update_git_repository', {**data, 'location': path})
                await self.middleware.call('catalog.validate_catalog_from_path', path)
                await self.common_validation(
                    {'trains': await self.middleware.call('catalog.retrieve_train_names', path)}, 'catalog_create', data
                )
            except ValidationErrors as ve:
                verrors.extend(ve)
            except CallError as e:
                verrors.add('catalog_create.label', f'Failed to validate catalog: {e}')
            finally:
                await self.middleware.run_in_thread(shutil.rmtree, path, ignore_errors=True)
        else:
            job.set_progress(50, 'Skipping validation of catalog')

        verrors.check()

        job.set_progress(60, 'Completed Validation')

        await self.middleware.call('datastore.insert', self._config.datastore, data)
        job.set_progress(70, f'Successfully added {data["label"]!r} catalog')

        job.set_progress(80, f'Syncing {data["label"]} catalog')
        sync_job = await self.middleware.call('catalog.sync', data['label'])
        await sync_job.wait()
        if sync_job.error:
            raise CallError(f'Catalog was added successfully but failed to sync: {sync_job.error}')

        job.set_progress(100, f'Successfully synced {data["label"]!r} catalog')

        return await self.get_instance(data['label'])

    @accepts(
        Str('id'),
        Dict(
            'catalog_update',
            List('preferred_trains'),
            update=True
        )
    )
    async def do_update(self, id, data):
        catalog = await self.query([['id', '=', id]], {'extra': {'item_details': True}, 'get': True})
        await self.common_validation(catalog, 'catalog_update', data)

        await self.middleware.call('datastore.update', self._config.datastore, id, data)

        return await self.get_instance(id)

    def do_delete(self, id):
        catalog = self.middleware.call_sync('catalog.get_instance', id)
        if catalog['builtin']:
            raise CallError('Builtin catalogs cannot be deleted')

        ret = self.middleware.call_sync('datastore.delete', self._config.datastore, id)

        if os.path.exists(catalog['location']):
            shutil.rmtree(catalog['location'], ignore_errors=True)

        # Let's delete any unhealthy alert if we had one
        self.middleware.call_sync('alert.oneshot_delete', 'CatalogNotHealthy', id)
        self.middleware.call_sync('alert.oneshot_delete', 'CatalogSyncFailed', id)

        # Remove cached content of the catalog in question so that if a catalog is created again
        # with same label but different repo/branch, we don't reuse old cache
        self.middleware.call_sync('cache.pop', get_cache_key(id))

        return ret

    @private
    async def official_catalog_label(self):
        return OFFICIAL_LABEL
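
A minimal usage sketch for this service (assumptions: the standard middlewared
Python client is available, and the label and repository URL below are
placeholders; `job=True` blocks until the create job completes):

from middlewared.client import Client

with Client() as c:
    # catalog.create is a job, so wait for it to finish
    c.call('catalog.create', {
        'label': 'MYCATALOG',
        'repository': 'https://example.com/my-catalog.git',
        'branch': 'main',
        'preferred_trains': ['stable'],
    }, job=True)
    # Querying with item_details exercises catalog_extend_context above
    catalogs = c.call('catalog.query', [], {'extra': {'item_details': True}})
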
Example #2
class JailService(Service):
    def __init__(self, *args):
        super(JailService, self).__init__(*args)

    @private
    def check_dataset_existence(self):
        from iocage.lib.ioc_check import IOCCheck

        IOCCheck()

    @private
    def check_jail_existence(self, jail):
        self.check_dataset_existence()

        jails, paths = IOCList("uuid").list_datasets()
        _jail = {
            tag: uuid
            for (tag, uuid) in jails.items()
            if uuid.startswith(jail) or tag == jail
        }

        if len(_jail) == 1:
            tag, uuid = next(iter(_jail.items()))
            path = paths[tag]

            return tag, uuid, path
        elif len(_jail) > 1:
            raise RuntimeError("Multiple jails found for {}".format(jail))
        else:
            raise RuntimeError("{} not found!".format(jail))

    @accepts(Str("lst_type", enum=["ALL", "RELEASE", "BASE", "TEMPLATE"]),
             Dict(
                 "options",
                 Bool("full"),
                 Bool("header"),
             ))
    def list(self, lst_type, options=None):
        """Lists 'all', 'release'/'base', or 'template' jail datasets."""
        self.check_dataset_existence()

        lst_type = lst_type.lower()

        if options is None:
            options = {}

        if lst_type == "release":
            lst_type = "base"

        full = options.get("full", True)
        hdr = options.get("header", False)

        if lst_type == "plugins":
            from iocage.lib.ioc_fetch import IOCFetch

            _list = IOCFetch("").fetch_plugin_index("", _list=True)
        else:
            _list = IOCList(lst_type, hdr, full).list_datasets()

        return _list

    @accepts(Str("jail"), Dict(
        "options",
        Str("prop"),
        Bool("plugin"),
    ))
    def set(self, jail, options):
        """Sets a jail property."""
        prop = options["prop"]
        plugin = options["plugin"]

        tag, uuid, path = self.check_jail_existence(jail)

        if "template" in prop.split("=")[0]:
            if "template" in path and prop != "template=no":
                raise RuntimeError(f"{uuid} ({tag}) is already a template!")
            elif "template" not in path and prop != "template=yes":
                raise RuntimeError(f"{uuid} ({tag}) is already a jail!")

        if plugin:
            _prop = prop.split(".")

            return IOCJson(path, cli=True).json_plugin_set_value(_prop)

        IOCJson(path, cli=True).json_set_value(prop)

        return True

    @accepts(Str("jail"), Dict(
        "options",
        Str("prop"),
        Bool("plugin"),
    ))
    def get(self, jail, options):
        """Gets a jail property."""
        prop = options["prop"]
        plugin = options["plugin"]

        tag, uuid, path = self.check_jail_existence(jail)

        if plugin:
            _prop = prop.split(".")
            return IOCJson(path).json_plugin_get_value(_prop)

        if prop == "all":
            return IOCJson(path).json_get_value(prop)
        elif prop == "state":
            status, _ = IOCList.list_get_jid(path.split("/")[3])

            if status:
                return "UP"
            else:
                return "DOWN"

        return IOCJson(path).json_get_value(prop)

    @accepts(
        Dict(
            "options",
            Str("release"),
            Str("server", default="ftp.freebsd.org"),
            Str("user", default="anonymous"),
            Str("password", default="anonymous@"),
            Str("plugin_file"),
            Str("props"),
        ))
    @job(lock=lambda args: f"jail_fetch:{args[-1]}")
    def fetch(self, job, options):
        """Fetches a release or plugin."""
        from iocage.lib.ioc_fetch import IOCFetch
        self.check_dataset_existence()

        release = options["release"]
        server = options["server"]
        user = options["user"]
        password = options["password"]
        plugin_file = options["plugin_file"]
        props = options["props"]

        if plugin_file:
            IOCFetch("", server, user,
                     password).fetch_plugin(plugin_file, props, 0)
            return True

        IOCFetch(release, server, user, password).fetch_release()

        return True

    @accepts(Str("jail"))
    def destroy(self, jail):
        """Takes a jail and destroys it."""
        from iocage.lib.ioc_destroy import IOCDestroy

        tag, uuid, path = self.check_jail_existence(jail)
        conf = IOCJson(path).json_load()
        status, _ = IOCList().list_get_jid(uuid)

        if status:
            from iocage.lib.ioc_stop import IOCStop
            IOCStop(uuid, tag, path, conf, silent=True)

        IOCDestroy().destroy_jail(path)

        return True

    @accepts(Str("jail"))
    def start(self, jail):
        """Takes a jail and starts it."""
        from iocage.lib.ioc_start import IOCStart

        tag, uuid, path = self.check_jail_existence(jail)
        conf = IOCJson(path).json_load()
        status, _ = IOCList().list_get_jid(uuid)

        if not status:
            if conf["type"] in ("jail", "plugin"):
                IOCStart(uuid, tag, path, conf)

                return True
            else:
                raise RuntimeError(f"{jail} must be type jail or plugin to"
                                   " be started")
        else:
            raise RuntimeError(f"{jail} already running.")

    @accepts(Str("jail"))
    def stop(self, jail):
        """Takes a jail and stops it."""
        from iocage.lib.ioc_stop import IOCStop

        tag, uuid, path = self.check_jail_existence(jail)
        conf = IOCJson(path).json_load()
        status, _ = IOCList().list_get_jid(uuid)

        if status:
            if conf["type"] in ("jail", "plugin"):
                IOCStop(uuid, tag, path, conf)

                return True
            else:
                raise RuntimeError(f"{jail} must be type jail or plugin to"
                                   " be stopped")
        else:
            raise RuntimeError(f"{jail} already stopped")

    @accepts(
        Dict(
            "options",
            Str("release"),
            Str("template"),
            Str("pkglist"),
            Str("uuid"),
            Bool("basejail"),
            Bool("empty"),
            Bool("short"),
            List("props"),
        ))
    @job()
    def create(self, job, options):
        """Creates a jail."""
        from iocage.lib.ioc_create import IOCCreate
        self.check_dataset_existence()

        release = options["release"]
        template = options["template"]
        pkglist = options["pkglist"]
        uuid = options["uuid"]
        basejail = options["basejail"]
        empty = options["empty"]
        short = options["short"]
        props = options["props"]
        pool = IOCJson().json_get_value("pool")
        iocroot = IOCJson(pool).json_get_value("iocroot")

        if template:
            release = template

        if not os.path.isdir(f"{iocroot}/releases/{release}") and not \
                template and not empty:
            # jail.fetch is a job; create() runs synchronously, so use call_sync
            self.middleware.call_sync('jail.fetch', {"release": release}).wait()

        IOCCreate(release,
                  props,
                  0,
                  pkglist,
                  template=template,
                  short=short,
                  uuid=uuid,
                  basejail=basejail,
                  empty=empty).create_jail()

        return True

    @accepts(Str("jail"),
             Dict(
                 "options",
                 Str("action"),
                 Str("source"),
                 Str("destination"),
                 Str("fstype"),
                 Str("fsoptions"),
                 Str("dump"),
                 Str("_pass"),
             ))
    def fstab(self, jail, options):
        """
        Adds an fstab mount to the jail, mounts if the jail is running.
        """
        from iocage.lib.ioc_fstab import IOCFstab
        self.check_dataset_existence()

        tag, uuid, path = self.check_jail_existence(jail)
        action = options["action"]
        source = options["source"]
        destination = options["destination"]
        fstype = options["fstype"]
        fsoptions = options["fsoptions"]
        dump = options["dump"]
        _pass = options["_pass"]

        IOCFstab(uuid, tag, action, source, destination, fstype, fsoptions,
                 dump, _pass)

        return True
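    # Illustrative options for fstab() (all values are made-up placeholders):
    # {'action': 'add', 'source': '/mnt/tank/data', 'destination': '/data',
    #  'fstype': 'nullfs', 'fsoptions': 'ro', 'dump': '0', '_pass': '0'}
    # appends a read-only nullfs mount to the jail's fstab and mounts it if
    # the jail is running.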

    @accepts(Str("pool"))
    def activate(self, pool):
        """Activates a pool for iocage usage, and deactivates the rest."""
        import libzfs

        zfs = libzfs.ZFS(history=True, history_prefix="<iocage>")
        pools = zfs.pools
        prop = "org.freebsd.ioc:active"

        for _pool in pools:
            if _pool.name == pool:
                ds = zfs.get_dataset(_pool.name)
                ds.properties[prop] = libzfs.ZFSUserProperty("yes")
            else:
                ds = zfs.get_dataset(_pool.name)
                ds.properties[prop] = libzfs.ZFSUserProperty("no")

        return True

    @accepts(Str("ds_type", enum=["ALL", "JAIL", "TEMPLATE", "RELEASE"]))
    def clean(self, ds_type):
        """Cleans all iocage datasets of ds_type"""
        from iocage.lib.ioc_clean import IOCClean

        if ds_type == "JAIL":
            IOCClean().clean_jails()
        elif ds_type == "ALL":
            IOCClean().clean_all()
        elif ds_type == "RELEASE":
            # Release cleanup is not implemented here
            pass
        elif ds_type == "TEMPLATE":
            IOCClean().clean_templates()

        return True

    @accepts(Str("jail"), List("command"),
             Dict("options", Str("host_user", default="root"),
                  Str("jail_user")))
    def exec(self, jail, command, options):
        """Issues a command inside a jail."""
        from iocage.lib.ioc_exec import IOCExec

        tag, uuid, path = self.check_jail_existence(jail)
        host_user = options["host_user"]
        jail_user = options["jail_user"]

        # We may be getting ';', '&&' and so forth. Adding the shell for
        # safety.
        if len(command) == 1:
            command = ["/bin/sh", "-c"] + command

        msg, _ = IOCExec(command, uuid, tag, path, host_user,
                         jail_user).exec_jail()

        return msg.decode("utf-8")

    @accepts(Str("jail"))
    @job(lock=lambda args: f"jail_update:{args[-1]}")
    def update(self, job, jail):
        """Updates specified jail to latest patch level."""
        from iocage.lib.ioc_fetch import IOCFetch

        tag, uuid, path = self.check_jail_existence(jail)
        status, jid = IOCList.list_get_jid(uuid)
        conf = IOCJson(path).json_load()
        started = False

        if conf["type"] == "jail":
            if not status:
                self.start(jail)
                started = True
        else:
            return False

        IOCFetch(conf["cloned_release"]).fetch_update(True, uuid, tag)

        if started:
            self.stop(jail)

        return True

    @accepts(Str("jail"), Str("release"))
    @job(lock=lambda args: f"jail_upgrade:{args[-1]}")
    def upgrade(self, job, jail, release):
        """Upgrades specified jail to specified RELEASE."""
        from iocage.lib.ioc_upgrade import IOCUpgrade

        tag, uuid, path = self.check_jail_existence(jail)
        status, jid = IOCList.list_get_jid(uuid)
        conf = IOCJson(path).json_load()
        root_path = f"{path}/root"
        started = False

        if conf["type"] == "jail":
            if not status:
                self.start(jail)
                started = True
        else:
            return False

        IOCUpgrade(conf, release, root_path).upgrade_jail()

        if started:
            self.stop(jail)

        return True

    @accepts(Str("jail"))
    @job(lock=lambda args: f"jail_export:{args[-1]}")
    def export(self, job, jail):
        """Exports jail to zip file"""
        from iocage.lib.ioc_image import IOCImage
        tag, uuid, path = self.check_jail_existence(jail)
        status, jid = IOCList.list_get_jid(uuid)
        started = False

        if status:
            self.stop(jail)
            started = True

        IOCImage().export_jail(uuid, tag, path)

        if started:
            self.start(jail)

        return True

    @accepts(Str("jail"))
    @job(lock=lambda args: f"jail_import:{args[-1]}")
    def _import(self, job, jail):
        """Imports jail from zip file"""
        from iocage.lib.ioc_image import IOCImage

        IOCImage().import_jail(jail)

        return True
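
A sketch of driving these jail operations from the middlewared Python client
(the release string and jail name are placeholder assumptions; `job=True`
waits on the fetch and create jobs):

from middlewared.client import Client

with Client() as c:
    # Download the release first; jail.fetch is a job
    c.call('jail.fetch', {'release': '11.1-RELEASE'}, job=True)
    # Create and start a jail based on that release
    c.call('jail.create', {
        'release': '11.1-RELEASE',
        'uuid': 'myjail',
        'props': ['boot=on'],
    }, job=True)
    c.call('jail.start', 'myjail')
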
Example #3
class ReportingService(ConfigService):
    class Config:
        datastore = 'system.reporting'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__rrds = {}
        for name, klass in RRD_PLUGINS.items():
            self.__rrds[name] = klass(self.middleware)

    @accepts(
        Dict('reporting_update',
             Bool('cpu_in_percentage'),
             Str('graphite'),
             Int('graph_age', validators=[Range(min=1)]),
             Int('graph_points', validators=[Range(min=1)]),
             Bool('confirm_rrd_destroy'),
             update=True))
    async def do_update(self, data):
        """
        Configure Reporting Database settings.

        If `cpu_in_percentage` is `true`, collectd reports CPU usage in percentage instead of "jiffies".

        `graphite` specifies a destination hostname or IP for collectd data sent by the Graphite plugin.

        `graph_age` specifies the maximum age of stored graphs in months. `graph_points` is the number of points for
        each hourly, daily, weekly, etc. graph. Changing these requires destroying the current reporting database,
        so when these fields are changed, an additional `confirm_rrd_destroy: true` flag must be present.

        .. examples(websocket)::

          Update reporting settings

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "reporting.update",
                "params": [{
                    "cpu_in_percentage": false,
                    "graphite": "",
                }]
            }

          Recreate reporting database with new settings

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "reporting.update",
                "params": [{
                    "graph_age": 12,
                    "graph_points": 1200,
                    "confirm_rrd_destroy": true,
                }]
            }
        """

        confirm_rrd_destroy = data.pop('confirm_rrd_destroy', False)

        old = await self.config()

        new = copy.deepcopy(old)
        new.update(data)

        verrors = ValidationErrors()

        destroy_database = False
        for k in ['graph_age', 'graph_points']:
            if old[k] != new[k]:
                destroy_database = True

                if not confirm_rrd_destroy:
                    verrors.add(
                        f'reporting_update.{k}',
                        _('Changing this option requires destroying the reporting database. This action must be '
                          'confirmed by setting confirm_rrd_destroy flag'),
                    )

        if verrors:
            raise verrors

        await self.middleware.call('datastore.update', self._config.datastore,
                                   old['id'], new,
                                   {'prefix': self._config.datastore_prefix})

        if destroy_database:
            await self.middleware.call('service.stop', 'collectd')
            await self.middleware.call('service.stop', 'rrdcached')
            await run('sh',
                      '-c',
                      'rm -rfx /var/db/collectd/rrd/*',
                      check=False)
            await self.middleware.call('reporting.setup')
            await self.middleware.call('service.start', 'rrdcached')

        await self.middleware.call('service.restart', 'collectd')

        return await self.config()

    @private
    def setup(self):
        systemdatasetconfig = self.middleware.call_sync('systemdataset.config')
        if not systemdatasetconfig['path']:
            self.middleware.logger.error('System dataset is not mounted')
            return False

        rrd_mount = f'{systemdatasetconfig["path"]}/rrd-{systemdatasetconfig["uuid"]}'
        if not os.path.exists(rrd_mount):
            self.middleware.logger.error(
                f'{rrd_mount} does not exist or is not a directory')
            return False

        # Ensure that collectd working path is a symlink to system dataset
        pwd = '/var/db/collectd/rrd'
        if os.path.exists(pwd) and (not os.path.isdir(pwd)
                                    or not os.path.islink(pwd)):
            shutil.move(pwd, f'{pwd}.{time.strftime("%Y%m%d%H%M%S")}')
        if not os.path.exists(pwd):
            os.symlink(rrd_mount, pwd)

        # Migrate legacy RAMDisk
        persist_file = '/data/rrd_dir.tar.bz2'
        if os.path.isfile(persist_file):
            with tarfile.open(persist_file) as tar:
                if 'collectd/rrd' in tar.getnames():
                    tar.extractall(pwd, get_members(tar, 'collectd/rrd/'))

            os.unlink(persist_file)

        hostname = self.middleware.call_sync('system.info')['hostname']
        if not hostname:
            hostname = self.middleware.call_sync(
                'network.configuration.config')['hostname_local']

        # Migrate from old version, where `hostname` was a real directory and `localhost` was a symlink.
        # Skip the case where `hostname` is "localhost", so symlink was not (and is not) needed.
        if (hostname != 'localhost'
                and os.path.isdir(os.path.join(pwd, hostname))
                and not os.path.islink(os.path.join(pwd, hostname))):
            if os.path.exists(os.path.join(pwd, 'localhost')):
                if os.path.islink(os.path.join(pwd, 'localhost')):
                    os.unlink(os.path.join(pwd, 'localhost'))
                else:
                    # This should not happen, but just in case
                    shutil.move(
                        os.path.join(pwd, 'localhost'),
                        os.path.join(
                            pwd,
                            f'localhost.bak.{time.strftime("%Y%m%d%H%M%S")}'))
            shutil.move(os.path.join(pwd, hostname),
                        os.path.join(pwd, 'localhost'))

        # Remove all directories except "localhost" and its backups (that may be erroneously created by
        # running collectd before this script)
        to_remove_dirs = [
            os.path.join(pwd, d) for d in os.listdir(pwd)
            if not d.startswith('localhost')
            and os.path.isdir(os.path.join(pwd, d))
        ]
        for r_dir in to_remove_dirs:
            subprocess.run(['rm', '-rfx', r_dir])

        # Remove all symlinks (that are stale if hostname was changed).
        to_remove_symlinks = [
            os.path.join(pwd, l) for l in os.listdir(pwd)
            if os.path.islink(os.path.join(pwd, l))
        ]
        for r_symlink in to_remove_symlinks:
            os.unlink(r_symlink)

        # Create "localhost" directory if it does not exist
        if not os.path.exists(os.path.join(pwd, 'localhost')):
            os.makedirs(os.path.join(pwd, 'localhost'))

        # Create "${hostname}" -> "localhost" symlink if necessary
        if hostname != 'localhost':
            os.symlink(os.path.join(pwd, 'localhost'),
                       os.path.join(pwd, hostname))

        # Let's return a positive value to indicate that necessary collectd operations were performed successfully
        return True

    @filterable
    def graphs(self, filters, options):
        return filter_list(
            [i.__getstate__() for i in self.__rrds.values() if i.has_data()],
            filters, options)

    def __rquery_to_start_end(self, query):
        unit = query.get('unit')
        if unit:
            verrors = ValidationErrors()
            for i in ('start', 'end'):
                if i in query:
                    verrors.add(
                        f'reporting_query.{i}',
                        f'{i!r} should only be used if "unit" attribute is not provided.',
                    )
            verrors.check()
        else:
            if 'start' not in query:
                unit = 'HOUR'
            else:
                starttime = query['start']
                endtime = query.get('end') or 'now'

        if unit:
            unit = unit[0].lower()
            page = query['page']
            starttime = f'end-{page + 1}{unit}'
            if not page:
                endtime = 'now'
            else:
                endtime = f'now-{page}{unit}'
        return starttime, endtime
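    # Worked examples for __rquery_to_start_end (rrdtool-style time specs):
    #   {'unit': 'HOUR', 'page': 0} -> ('end-1h', 'now')
    #   {'unit': 'DAY', 'page': 2}  -> ('end-3d', 'now-2d')
    #   {'start': '-4h'}            -> ('-4h', 'now')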

    @accepts(
        List('graphs',
             items=[
                 Dict(
                     'graph',
                     Str('name', required=True),
                     Str('identifier', default=None, null=True),
                 ),
             ],
             empty=False),
        Dict(
            'reporting_query',
            Str('unit', enum=['HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR']),
            Int('page', default=0),
            Str('start', empty=False),
            Str('end', empty=False),
            Bool('aggregate', default=True),
            register=True,
        ))
    def get_data(self, graphs, query):
        """
        Get reporting data for given graphs.

        List of possible graphs can be retrieved using `reporting.graphs` call.

        For the time period of the graph either `unit` and `page` OR `start` and `end` should be
        used, not both.

        `aggregate` will return aggregated data (e.g. min, max, mean) for each graph.

        .. examples(websocket)::

          Get graph data of "nfsstat" from the last hour.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "reporting.get_data",
                "params": [
                    [{"name": "nfsstat"}],
                    {"unit": "HOUR"},
                ]
            }

        """
        starttime, endtime = self.__rquery_to_start_end(query)
        rv = []
        for i in graphs:
            try:
                rrd = self.__rrds[i['name']]
            except KeyError:
                raise CallError(f'Graph {i["name"]!r} not found.',
                                errno.ENOENT)
            rv.append(
                rrd.export(i['identifier'],
                           starttime,
                           endtime,
                           aggregate=query['aggregate']))
        return rv

    @private
    @accepts(Ref('reporting_query'))
    def get_all(self, query):
        starttime, endtime = self.__rquery_to_start_end(query)
        rv = []
        for rrd in self.__rrds.values():
            idents = rrd.get_identifiers()
            if idents is None:
                idents = [None]
            for ident in idents:
                rv.append(
                    rrd.export(ident,
                               starttime,
                               endtime,
                               aggregate=query['aggregate']))
        return rv

    @private
    def get_plugin_and_rrd_types(self, name_idents):
        rv = []
        for name, identifier in name_idents:
            rrd = self.__rrds[name]
            rv.append(((name, identifier), rrd.plugin,
                       rrd.get_rrd_types(identifier)))
        return rv
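
The websocket example from the docstring above, issued through the middlewared
Python client instead (a sketch; which graphs exist depends on the system):

from middlewared.client import Client

with Client() as c:
    # Last hour of data for the "nfsstat" graph, aggregated by default
    data = c.call('reporting.get_data',
                  [{'name': 'nfsstat'}],
                  {'unit': 'HOUR', 'page': 0})
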
Example #4
class SMBService(SystemServiceService):
    class Config:
        service = 'cifs'
        service_verb = 'restart'
        datastore = 'services.cifs'
        datastore_extend = 'smb.smb_extend'
        datastore_prefix = 'cifs_srv_'

    @private
    async def smb_extend(self, smb):
        """Extend smb for netbios."""
        smb['netbiosname_local'] = smb['netbiosname']
        if not await self.middleware.call('system.is_freenas'
                                          ) and await self.middleware.call(
                                              'failover.node') == 'B':
            smb['netbiosname_local'] = smb['netbiosname_b']

        smb['netbiosalias'] = (smb['netbiosalias'] or '').split()

        smb['loglevel'] = LOGLEVEL_MAP.get(smb['loglevel'])

        return smb

    async def __validate_netbios_name(self, name):
        return RE_NETBIOSNAME.match(name)

    async def unixcharset_choices(self):
        return await self.generate_choices([
            'UTF-8', 'ISO-8859-1', 'ISO-8859-15', 'GB2312', 'EUC-JP', 'ASCII'
        ])

    @private
    async def generate_choices(self, initial):
        def key_cp(encoding):
            cp = re.compile(
                r"(?P<name>CP|GB|ISO-8859-|UTF-)(?P<num>\d+)").match(encoding)
            if cp:
                return tuple((cp.group('name'), int(cp.group('num'), 10)))
            else:
                return tuple((encoding, float('inf')))

        charset = await self.common_charset_choices()
        return {
            v: v
            for v in
            [c for c in sorted(charset, key=key_cp) if c not in initial] +
            initial
        }

    @accepts()
    async def bindip_choices(self):
        """
        List of valid choices for IP addresses to which to bind the SMB service.
        Addresses assigned by DHCP are excluded from the results.
        """
        choices = {}
        for i in await self.middleware.call('interface.ip_in_use'):
            choices[i['address']] = i['address']
        return choices

    @accepts()
    async def domain_choices(self):
        """
        List of domains visible to winbindd. Returns empty list if winbindd is
        stopped.
        """
        domains = []
        wb = await run([SMBCmd.WBINFO.value, '-m'], check=False)
        if wb.returncode == 0:
            domains = wb.stdout.decode().splitlines()

        return domains

    @private
    async def common_charset_choices(self):
        def check_codec(encoding):
            try:
                return encoding.upper() if codecs.lookup(encoding) else False
            except LookupError:
                return False

        proc = await Popen(['/usr/bin/iconv', '-l'],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
        output = (await proc.communicate())[0].decode()

        encodings = set()
        for line in output.splitlines():
            enc = [e for e in line.split() if check_codec(e)]

            if enc:
                cp = enc[0]
                for e in enc:
                    if e in ('UTF-8', 'ASCII', 'GB2312', 'HZ-GB-2312',
                             'CP1361'):
                        cp = e
                        break

                encodings.add(cp)

        return encodings

    @private
    async def store_ldap_admin_password(self):
        """
        This is required if the LDAP directory service is enabled. The LDAP admin DN and
        password are stored in the private/secrets.tdb file.
        """
        ldap = await self.middleware.call('datastore.config',
                                          'directoryservice.ldap')
        if not ldap['ldap_enable']:
            return True

        set_pass = await run(
            ['/usr/local/bin/smbpasswd', '-w', ldap['ldap_bindpw']],
            check=False)
        if set_pass.returncode != 0:
            self.logger.debug(
                f"Failed to set LDAP bindpw in secrets.tdb: {set_pass.stdout.decode()}"
            )
            return False

        return True

    @private
    def getparm(self, parm, section):
        """
        Get a parameter from the smb4.conf file. This is more reliable than
        'testparm --parameter-name'. testparm will fail in a variety of
        conditions without returning the parameter's value.
        """
        try:
            if section.upper() == 'GLOBAL':
                try:
                    LP_CTX.load(SMBPath.GLOBALCONF.platform())
                except Exception as e:
                    self.logger.warning("Failed to reload smb.conf: %s", e)

                return LP_CTX.get(parm)
            else:
                return self.middleware.call_sync('sharing.smb.reg_getparm',
                                                 section, parm)

        except Exception as e:
            raise CallError(
                f'Attempt to query smb4.conf parameter [{parm}] failed with error: {e}'
            )

    @private
    async def get_next_rid(self):
        next_rid = (await self.config())['next_rid']
        if next_rid == 0:
            try:
                private_dir = await self.middleware.call(
                    "smb.getparm", "private directory", "GLOBAL")
                next_rid = passdb.PDB(
                    f"tdbsam:{private_dir}/passdb.tdb").new_rid()
            except Exception:
                self.logger.warning(
                    "Failed to initialize RID counter from passdb. "
                    "Using default value for initialization.",
                    exc_info=True)
                next_rid = 5000

        await self.middleware.call('datastore.update', 'services.cifs', 1,
                                   {'next_rid': next_rid + 1},
                                   {'prefix': 'cifs_srv_'})
        return next_rid

    @private
    async def setup_directories(self):
        for p in SMBPath:
            if p == SMBPath.STATEDIR:
                path = await self.middleware.call("smb.getparm",
                                                  "state directory", "global")
            elif p == SMBPath.PRIVATEDIR:
                path = await self.middleware.call("smb.getparm", "privatedir",
                                                  "global")
            else:
                path = p.platform()

            try:
                if not await self.middleware.call('filesystem.acl_is_trivial',
                                                  path):
                    self.logger.warning(
                        "Inappropriate ACL detected on path [%s], stripping ACL",
                        path)
                    stripacl = await run(['setfacl', '-b', path], check=False)
                    if stripacl.returncode != 0:
                        self.logger.warning(
                            "Failed to strip ACL from path %s: %s", path,
                            stripacl.stderr.decode())
            except CallError:
                # Currently the only time CallError is raised here is on ENOENT, which may be expected
                pass

            if not os.path.exists(path):
                if p.is_dir():
                    os.mkdir(path, p.mode())
            else:
                os.chmod(path, p.mode())

    @private
    @job(lock="smb_configure")
    async def configure(self, job):
        job.set_progress(0, 'Preparing to configure SMB.')
        data = await self.config()
        job.set_progress(10, 'Generating SMB config.')
        await self.middleware.call('etc.generate', 'smb')

        # The following hack will be removed once we make our own samba package
        if osc.IS_LINUX:
            os.remove("/etc/samba/smb.conf")
            os.symlink("/etc/smb4.conf", "/etc/samba/smb.conf")

        job.set_progress(20, 'Setting up SMB directories.')
        await self.setup_directories()
        job.set_progress(30, 'Setting up server SID.')
        await self.middleware.call('smb.set_sid', data['cifs_SID'])

        if await self.middleware.call("smb.getparm", "passdb backend",
                                      "global") == "tdbsam":
            job.set_progress(40, 'Synchronizing passdb and groupmap.')
            pdb_job = await self.middleware.call("smb.synchronize_passdb")
            grp_job = await self.middleware.call(
                "smb.synchronize_group_mappings")
            await pdb_job.wait()
            await grp_job.wait()
            await self.middleware.call("admonitor.start")
            job.set_progress(60, 'Generating SMB share configuration.')
            await self.middleware.call("etc.generate", "smb_share")

        job.set_progress(70, 'Checking SMB server status.')
        if await self.middleware.call("service.started", "cifs"):
            job.set_progress(80, 'Restarting SMB service.')
            await self.middleware.call("service.restart", "cifs")
        job.set_progress(100, 'Finished configuring SMB.')

    @private
    async def get_smb_ha_mode(self):
        if await self.middleware.call('cache.has_key', 'SMB_HA_MODE'):
            return await self.middleware.call('cache.get', 'SMB_HA_MODE')

        if not await self.middleware.call('system.is_freenas'
                                          ) and await self.middleware.call(
                                              'failover.licensed'):
            system_dataset = await self.middleware.call('systemdataset.config')
            if system_dataset['pool'] != await self.middleware.call(
                    'boot.pool_name'):
                hamode = SMBHAMODE['UNIFIED'].name
            else:
                hamode = SMBHAMODE['LEGACY'].name
        else:
            hamode = SMBHAMODE['STANDALONE'].name

        await self.middleware.call('cache.put', 'SMB_HA_MODE', hamode)
        return hamode

    @private
    async def reset_smb_ha_mode(self):
        await self.middleware.call('cache.pop', 'SMB_HA_MODE')
        return await self.get_smb_ha_mode()

    @accepts(
        Dict(
            'smb_update',
            Str('netbiosname', max_length=15),
            Str('netbiosname_b', max_length=15),
            List('netbiosalias',
                 default=[],
                 items=[Str('netbios_alias', max_length=15)]),
            Str('workgroup'),
            Str('description'),
            Bool('enable_smb1'),
            Str('unixcharset'),
            Str('loglevel',
                enum=['NONE', 'MINIMUM', 'NORMAL', 'FULL', 'DEBUG']),
            Bool('syslog'),
            Bool('aapl_extensions'),
            Bool('localmaster'),
            Str('guest'),
            Str('admin_group', required=False, default=None, null=True),
            Str('filemask'),
            Str('dirmask'),
            Bool('ntlmv1_auth'),
            List('bindip', items=[IPAddr('ip')], default=[]),
            Str('smb_options', max_length=None),
            update=True,
        ))
    async def do_update(self, data):
        """
        Update SMB Service Configuration.

        `netbiosname` defaults to the original hostname of the system.

        `workgroup` and `netbiosname` should have different values.

        `enable_smb1` allows legacy SMB clients to connect to the server when enabled.

        `localmaster` when set, determines if the system participates in a browser election.

        `domain_logons` is used to provide the netlogon service for older Windows clients if enabled.

        `guest` attribute is specified to select the account to be used for guest access. It defaults to "nobody".

        `nullpw` when enabled allows users to log in without a password.

        `hostlookup` when enabled, allows using hostnames rather than IP addresses in "hostsallow"/"hostsdeny" fields
        of SMB Shares.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if data.get('unixcharset') and data[
                'unixcharset'] not in await self.unixcharset_choices():
            verrors.add('smb_update.unixcharset',
                        'Please provide a valid value for unixcharset')

        for i in ('workgroup', 'netbiosname', 'netbiosname_b', 'netbiosalias'):
            if i not in data or not data[i]:
                continue
            if i == 'netbiosalias':
                for idx, item in enumerate(data[i]):
                    if not await self.__validate_netbios_name(item):
                        verrors.add(f'smb_update.{i}.{idx}',
                                    f'Invalid NetBIOS name: {item}')
            else:
                if not await self.__validate_netbios_name(data[i]):
                    verrors.add(f'smb_update.{i}',
                                f'Invalid NetBIOS name: {data[i]}')

        if new['netbiosname'] and new['netbiosname'].lower(
        ) == new['workgroup'].lower():
            verrors.add('smb_update.netbiosname',
                        'NetBIOS and Workgroup must be unique')

        if data.get('bindip'):
            bindip_choices = list((await self.bindip_choices()).keys())
            for idx, item in enumerate(data['bindip']):
                if item not in bindip_choices:
                    verrors.add(
                        f'smb_update.bindip.{idx}',
                        f'IP address [{item}] is not a configured address for this server'
                    )

        for i in ('filemask', 'dirmask'):
            if i not in data or not data[i]:
                continue
            try:
                if int(data[i], 8) & ~0o11777:
                    raise ValueError('Not a valid octal mask')
            except (ValueError, TypeError):
                verrors.add(f'smb_update.{i}', 'Not a valid mask')

        if new['admin_group'] and new['admin_group'] != old['admin_group']:
            await self.middleware.call('smb.add_admin_group',
                                       new['admin_group'])

        if verrors:
            raise verrors

        # TODO: consider using bidict
        for k, v in LOGLEVEL_MAP.items():
            if new['loglevel'] == v:
                new['loglevel'] = k
                break

        await self.compress(new)

        await self._update_service(old, new)
        await self.reset_smb_ha_mode()

        return await self.config()

    @private
    async def compress(self, data):
        data['netbiosalias'] = ' '.join(data['netbiosalias'])
        data.pop('netbiosname_local', None)
        data.pop('next_rid')
        return data
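
A sketch of updating the SMB configuration through the middlewared Python
client (values are placeholders; the validation above requires `netbiosname`
and `workgroup` to differ):

from middlewared.client import Client

with Client() as c:
    c.call('smb.update', {
        'netbiosname': 'mynas',
        'workgroup': 'WORKGROUP',
        'description': 'Example SMB server',
        'enable_smb1': False,
    })
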
Example #5
class ServiceService(CRUDService):

    SERVICE_DEFS = {
        's3': ServiceDefinition('minio', '/var/run/minio.pid'),
        'ssh': ServiceDefinition('sshd', '/var/run/sshd.pid'),
        'rsync': ServiceDefinition('rsync', '/var/run/rsyncd.pid'),
        'nfs': ServiceDefinition('nfsd', None),
        'afp': ServiceDefinition('netatalk', None),
        'cifs': ServiceDefinition('smbd', '/var/run/samba4/smbd.pid'),
        'dynamicdns': ServiceDefinition('inadyn', None),
        'snmp': ServiceDefinition('snmpd', '/var/run/net_snmpd.pid'),
        'ftp': ServiceDefinition('proftpd', '/var/run/proftpd.pid'),
        'tftp': ServiceDefinition('inetd', '/var/run/inetd.pid'),
        'iscsitarget': ServiceDefinition('ctld', '/var/run/ctld.pid'),
        'lldp': ServiceDefinition('ladvd', '/var/run/ladvd.pid'),
        'ups': ServiceDefinition('upsd', '/var/db/nut/upsd.pid'),
        'upsmon': ServiceDefinition('upsmon', '/var/db/nut/upsmon.pid'),
        'smartd': ServiceDefinition('smartd', 'smartd-daemon', '/var/run/smartd-daemon.pid'),
        'webshell': ServiceDefinition(None, '/var/run/webshell.pid'),
        'webdav': ServiceDefinition('httpd', '/var/run/httpd.pid'),
        'netdata': ServiceDefinition('netdata', '/var/db/netdata/netdata.pid'),
        'asigra': ServiceDefinition('asigra', '/var/run/dssystem.pid')
    }

    @filterable
    async def query(self, filters=None, options=None):
        if options is None:
            options = {}
        options['prefix'] = 'srv_'

        services = await self.middleware.call('datastore.query', 'services.services', filters, options)

        # In case a single service has been requested
        if not isinstance(services, list):
            services = [services]

        jobs = {
            asyncio.ensure_future(self._get_status(entry)): entry
            for entry in services
        }
        if jobs:
            done, pending = await asyncio.wait(list(jobs.keys()), timeout=15)

        def result(task):
            """
            Method to handle results of the coroutines.
            In case of error or timeout, provide UNKNOWN state.
            """
            result = None
            try:
                if task in done:
                    result = task.result()
            except Exception:
                pass
            if result is None:
                entry = jobs.get(task)
                self.logger.warning('Failed to get status for %s', entry['service'])
                entry['state'] = 'UNKNOWN'
                entry['pids'] = []
                return entry
            else:
                return result

        services = list(map(result, jobs))
        return filter_list(services, filters, options)

    @accepts(
        Str('id_or_name'),
        Dict(
            'service-update',
            Bool('enable', default=False),
        ),
    )
    async def do_update(self, id_or_name, data):
        """
        Update service entry of `id_or_name`.

        Currently it only accepts the `enable` option, which controls whether the
        service should start on boot.

        """
        if not id_or_name.isdigit():
            svc = await self.middleware.call('datastore.query', 'services.services', [('srv_service', '=', id_or_name)])
            if not svc:
                raise CallError(f'Service {id_or_name} not found.', errno.ENOENT)
            id_or_name = svc[0]['id']

        return await self.middleware.call('datastore.update', 'services.services', id_or_name, {'srv_enable': data['enable']})

    @accepts(
        Str('service'),
        Dict(
            'service-control',
            Bool('onetime', default=True),
            Bool('wait', default=None, null=True),
            Bool('sync', default=None, null=True),
            register=True,
        ),
    )
    async def start(self, service, options=None):
        """ Start the service specified by `service`.

        The helper will use the method self._start_[service]() to start the service.
        If the method does not exist, it falls back to service(8)."""
        await self.middleware.call_hook('service.pre_action', service, 'start', options)
        sn = self._started_notify("start", service)
        await self._simplecmd("start", service, options)
        return await self.started(service, sn)

    async def started(self, service, sn=None):
        """
        Test if service specified by `service` has been started.
        """
        if sn:
            await self.middleware.run_in_thread(sn.join)

        try:
            svc = await self.query([('service', '=', service)], {'get': True})
            self.middleware.send_event('service.query', 'CHANGED', fields=svc)
            return svc['state'] == 'RUNNING'
        except IndexError:
            f = getattr(self, '_started_' + service, None)
            if callable(f):
                if inspect.iscoroutinefunction(f):
                    return (await f())[0]
                else:
                    return f()[0]
            else:
                return (await self._started(service))[0]

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def stop(self, service, options=None):
        """ Stop the service specified by `service`.

        The helper will use the method self._stop_[service]() to stop the service.
        If the method does not exist, it falls back to service(8)."""
        await self.middleware.call_hook('service.pre_action', service, 'stop', options)
        sn = self._started_notify("stop", service)
        await self._simplecmd("stop", service, options)
        return await self.started(service, sn)

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def restart(self, service, options=None):
        """
        Restart the service specified by `service`.

        The helper will use the method self._restart_[service]() to restart the service.
        If the method does not exist, it falls back to service(8)."""
        await self.middleware.call_hook('service.pre_action', service, 'restart', options)
        sn = self._started_notify("restart", service)
        await self._simplecmd("restart", service, options)
        return await self.started(service, sn)

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def reload(self, service, options=None):
        """
        Reload the service specified by `service`.

        The helper will use the method self._reload_[service]() to reload the service.
        If the method does not exist, the helper falls back to restarting the
        service instead."""
        await self.middleware.call_hook('service.pre_action', service, 'reload', options)
        try:
            await self._simplecmd("reload", service, options)
        except Exception:
            await self.restart(service, options)
        return await self.started(service)

    async def _get_status(self, service):
        f = getattr(self, '_started_' + service['service'], None)
        if callable(f):
            if inspect.iscoroutinefunction(f):
                running, pids = await f()
            else:
                running, pids = f()
        else:
            running, pids = await self._started(service['service'])

        if running:
            state = 'RUNNING'
        else:
            state = 'STOPPED'

        service['state'] = state
        service['pids'] = pids
        return service

    async def _simplecmd(self, action, what, options=None):
        self.logger.debug("Calling: %s(%s) ", action, what)
        f = getattr(self, '_' + action + '_' + what, None)
        if f is None:
            # Provide generic start/stop/restart verbs for rc.d scripts
            if what in self.SERVICE_DEFS:
                if self.SERVICE_DEFS[what].rc_script:
                    what = self.SERVICE_DEFS[what].rc_script
            if action in ("start", "stop", "restart", "reload"):
                if action == 'restart':
                    await self._system("/usr/sbin/service " + what + " forcestop ")
                await self._service(what, action, **(options or {}))
            else:
                raise ValueError("Internal error: Unknown command")
        else:
            call = f(**(options or {}))
            if inspect.iscoroutinefunction(f):
                await call

    async def _system(self, cmd, options=None):
        stdout = DEVNULL
        if options and 'stdout' in options:
            stdout = options['stdout']
        stderr = DEVNULL
        if options and 'stderr' in options:
            stderr = options['stderr']

        proc = await Popen(cmd, stdout=stdout, stderr=stderr, shell=True, close_fds=True)
        await proc.communicate()
        return proc.returncode

    async def _service(self, service, verb, **options):
        onetime = options.pop('onetime', None)
        force = options.pop('force', None)
        quiet = options.pop('quiet', None)
        extra = options.pop('extra', '')

        # 'force' takes precedence over 'onetime', which takes precedence
        # over 'quiet'; they are mutually exclusive
        preverb = ''
        if force:
            preverb = 'force'
        elif onetime:
            preverb = 'one'
        elif quiet:
            preverb = 'quiet'

        return await self._system('/usr/sbin/service {} {}{} {}'.format(
            service,
            preverb,
            verb,
            extra,
        ), options)
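
    # Illustration of the preverb composition above (a sketch, not part of the
    # original listing; the '-v' flag is made up). With the rc(8) convention of
    # prepending 'force'/'one'/'quiet' to the verb, these calls run:
    #
    #     await self._service('nginx', 'restart', force=True)
    #     # -> /usr/sbin/service nginx forcerestart
    #     await self._service('inadyn', 'start', quiet=True, extra='-v')
    #     # -> /usr/sbin/service inadyn quietstart -v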

    def _started_notify(self, verb, what):
        """
        The check for started [or not] processes is currently done in 2 steps
        This is the first step which involves a thread StartNotify that watch for event
        before actually start/stop rc.d scripts

        Returns:
            StartNotify object if the service is known or None otherwise
        """

        if what in self.SERVICE_DEFS:
            sn = StartNotify(verb=verb, pidfile=self.SERVICE_DEFS[what].pidfile)
            sn.start()
            return sn
        else:
            return None

    async def _started(self, what, notify=None):
        """
        This is the second step::
        Wait for the StartNotify thread to finish and then check for the
        status of pidfile/procname using pgrep

        Returns:
            True whether the service is alive, False otherwise
        """

        if what in self.SERVICE_DEFS:
            if notify:
                await self.middleware.run_in_thread(notify.join)

            if self.SERVICE_DEFS[what].pidfile:
                pgrep = "/bin/pgrep -F {}{}".format(
                    self.SERVICE_DEFS[what].pidfile,
                    ' ' + self.SERVICE_DEFS[what].procname if self.SERVICE_DEFS[what].procname else '',
                )
            else:
                pgrep = "/bin/pgrep {}".format(self.SERVICE_DEFS[what].procname)
            proc = await Popen(pgrep, shell=True, stdout=PIPE, stderr=PIPE, close_fds=True)
            data = (await proc.communicate())[0].decode()

            if proc.returncode == 0:
                return True, [
                    int(i)
                    for i in data.strip().split('\n') if i.isdigit()
                ]
        return False, []
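
    # Example of the pgrep invocation built above (an illustration assuming a
    # SERVICE_DEFS entry with pidfile '/var/run/sshd.pid' and procname 'sshd';
    # both values are hypothetical):
    #
    #     /bin/pgrep -F /var/run/sshd.pid sshd
    #
    # A zero exit status means the service is alive; stdout carries the PIDs
    # that are parsed into the returned list.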

    async def _start_asigra(self, **kwargs):
        await self.middleware.call('asigra.setup_filesystems')
        await self.middleware.call('asigra.setup_postgresql')
        await self._service("postgresql", "start", force=True, **kwargs)
        await self.middleware.call('asigra.setup_asigra')
        await self.middleware.call('etc.generate', 'asigra')
        await self._service("dssystem", "start", force=True, **kwargs)

    async def _stop_asigra(self, **kwargs):
        await self._service("dssystem", "stop", force=True, **kwargs)
        await self._service("postgresql", "stop", force=True, **kwargs)

    async def _restart_asigra(self, **kwargs):
        await self._stop_asigra(**kwargs)
        await self._start_asigra(**kwargs)

    async def _started_asigra(self, **kwargs):
        if await self._service("dssystem", "status", force=True, **kwargs) != 0:
            return False, []
        return True, []

    async def _start_webdav(self, **kwargs):
        await self.middleware.call('etc.generate', 'webdav')
        await self._service("apache24", "start", **kwargs)

    async def _stop_webdav(self, **kwargs):
        await self._service("apache24", "stop", **kwargs)

    async def _restart_webdav(self, **kwargs):
        await self._service("apache24", "stop", force=True, **kwargs)
        await self.middleware.call('etc.generate', 'webdav')
        await self._service("apache24", "restart", **kwargs)

    async def _reload_webdav(self, **kwargs):
        await self.middleware.call('etc.generate', 'webdav')
        await self._service("apache24", "reload", **kwargs)

    async def _restart_django(self, **kwargs):
        await self._service("django", "restart", **kwargs)

    async def _start_webshell(self, **kwargs):
        await self._system("/usr/local/bin/python /usr/local/www/freenasUI/tools/webshell.py")

    async def _restart_webshell(self, **kwargs):
        try:
            with open('/var/run/webshell.pid', 'r') as f:
                pid = f.read()
                os.kill(int(pid), signal.SIGTERM)
                time.sleep(0.2)
                os.kill(int(pid), signal.SIGKILL)
        except Exception:
            pass
        await self._system("ulimit -n 1024 && /usr/local/bin/python /usr/local/www/freenasUI/tools/webshell.py")

    async def _restart_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "start", force=True, **kwargs)
        await self._service("ctld", "stop", force=True, **kwargs)
        await self._service("ix-ctld", "start", quiet=True, **kwargs)
        await self._service("ctld", "restart", **kwargs)

    async def _start_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "start", quiet=True, **kwargs)
        await self._service("ctld", "start", **kwargs)

    async def _stop_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "stop", force=True, **kwargs)
        await self._service("ctld", "stop", force=True, **kwargs)

    async def _reload_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "start", quiet=True, **kwargs)
        await self._service("ctld", "reload", **kwargs)

    async def _start_collectd(self, **kwargs):
        await self.middleware.call('etc.generate', 'collectd')
        await self._service("collectd", "restart", **kwargs)

    async def _restart_collectd(self, **kwargs):
        await self._service("collectd", "stop", **kwargs)
        await self._start_collectd(**kwargs)

    async def _start_sysctl(self, **kwargs):
        await self._service("ix-sysctl", "start", quiet=True, **kwargs)

    async def _reload_sysctl(self, **kwargs):
        await self._service("ix-sysctl", "reload", **kwargs)

    async def _start_network(self, **kwargs):
        await self.middleware.call('interface.sync')
        await self.middleware.call('route.sync')

    async def _reload_named(self, **kwargs):
        await self._service("named", "reload", **kwargs)

    async def _reload_hostname(self, **kwargs):
        await self._system('/bin/hostname ""')
        await self._service("ix-hostname", "start", quiet=True, **kwargs)
        await self._service("hostname", "start", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", quiet=True, **kwargs)
        await self._restart_collectd(**kwargs)

    async def _reload_resolvconf(self, **kwargs):
        await self._reload_hostname()
        await self._service("ix-resolv", "start", quiet=True, **kwargs)

    async def _reload_networkgeneral(self, **kwargs):
        await self._reload_resolvconf()
        await self._service("routing", "restart", **kwargs)

    async def _reload_timeservices(self, **kwargs):
        await self._service("ix-localtime", "start", quiet=True, **kwargs)
        await self._service("ix-ntpd", "start", quiet=True, **kwargs)
        await self._service("ntpd", "restart", **kwargs)
        settings = await self.middleware.call(
            'datastore.query',
            'system.settings',
            [],
            {'order_by': ['-id'], 'get': True}
        )
        os.environ['TZ'] = settings['stg_timezone']
        time.tzset()

    async def _start_smartd(self, **kwargs):
        await self.middleware.call("etc.generate", "smartd")
        await self._service("smartd-daemon", "start", **kwargs)

    async def _reload_smartd(self, **kwargs):
        await self.middleware.call("etc.generate", "smartd")
        await self._service("smartd-daemon", "reload", **kwargs)

    async def _restart_smartd(self, **kwargs):
        await self.middleware.call("etc.generate", "smartd")
        await self._service("smartd-daemon", "stop", force=True, **kwargs)
        await self._service("smartd-daemon", "restart", **kwargs)

    async def _reload_ssh(self, **kwargs):
        await self._service("ix-sshd", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("openssh", "reload", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _start_ssh(self, **kwargs):
        await self._service("ix-sshd", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("openssh", "start", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _stop_ssh(self, **kwargs):
        await self._service("openssh", "stop", force=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)

    async def _restart_ssh(self, **kwargs):
        await self._service("ix-sshd", "start", quiet=True, **kwargs)
        await self._service("openssh", "stop", force=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("openssh", "restart", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _start_ssl(self, **kwargs):
        await self.middleware.call('etc.generate', 'ssl')

    async def _start_s3(self, **kwargs):
        await self.middleware.call('etc.generate', 's3')
        await self._service("minio", "start", quiet=True, stdout=None, stderr=None, **kwargs)

    async def _reload_s3(self, **kwargs):
        await self.middleware.call('etc.generate', 's3')
        await self._service("minio", "restart", quiet=True, stdout=None, stderr=None, **kwargs)

    async def _reload_rsync(self, **kwargs):
        await self._service("ix-rsyncd", "start", quiet=True, **kwargs)
        await self._service("rsyncd", "restart", **kwargs)

    async def _restart_rsync(self, **kwargs):
        await self._stop_rsync()
        await self._start_rsync()

    async def _start_rsync(self, **kwargs):
        await self._service("ix-rsyncd", "start", quiet=True, **kwargs)
        await self._service("rsyncd", "start", **kwargs)

    async def _stop_rsync(self, **kwargs):
        await self._service("rsyncd", "stop", force=True, **kwargs)

    async def _started_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl status"):
            res = True
        return res, []

    async def _start_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl start"):
            res = True
        return res

    async def _restart_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl restart"):
            res = True
        return res

    async def _stop_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl stop"):
            res = True
        return res

    async def _started_ldap(self, **kwargs):
        if await self._system('/usr/sbin/service ix-ldap status') != 0:
            return False, []
        return await self.middleware.call('notifier.ldap_status'), []

    async def _start_ldap(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl start"):
            res = True
        return res

    async def _stop_ldap(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl stop"):
            res = True
        return res

    async def _restart_ldap(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl restart"):
            res = True
        return res

    async def _start_lldp(self, **kwargs):
        await self._service("ladvd", "start", **kwargs)

    async def _stop_lldp(self, **kwargs):
        await self._service("ladvd", "stop", force=True, **kwargs)

    async def _restart_lldp(self, **kwargs):
        await self._service("ladvd", "stop", force=True, **kwargs)
        await self._service("ladvd", "restart", **kwargs)

    async def _clear_activedirectory_config(self):
        await self._system("/bin/rm -f /etc/directoryservice/ActiveDirectory/config")

    async def _started_activedirectory(self, **kwargs):
        # Perform a wbinfo -t because it's the most accurate single test we have to
        # detect problems with AD join. The default winbind timeout is 60 seconds (as of Samba 4.7).
        # This can be controlled by the smb4.conf parameter "winbind request timeout = "
        if await self._system('/usr/local/bin/wbinfo -t') != 0:
            self.logger.debug('AD monitor: wbinfo -t failed')
            return False, []
        return True, []

    async def _start_activedirectory(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/ActiveDirectory/ctl start"):
            res = True
        return res

    async def _stop_activedirectory(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/ActiveDirectory/ctl stop"):
            res = True
        return res

    async def _restart_activedirectory(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/ActiveDirectory/ctl restart"):
            res = True
        return res

    async def _reload_activedirectory(self, **kwargs):
        # Steps required to force winbind to connect to new DC if DC it's connected to goes down
        # We may need to expand the list of operations below to include fresh kinit. Some
        # information about winbind connection is stored in samba's gencache. In test cases, flushing
        # gencache (net cache flush) was not required to do this.
        await self._service("samba_server", "stop", force=True, **kwargs)
        await self._service("samba_server", "start", quiet=True, **kwargs)

    async def _started_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl status"):
            res = True
        return res, []

    async def _start_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl start"):
            res = True
        return res

    async def _stop_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl stop"):
            res = True
        return res

    async def _restart_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl restart"):
            res = True
        return res

    async def _restart_syslogd(self, **kwargs):
        await self._service("ix-syslogd", "start", quiet=True, **kwargs)
        await self._system("/etc/local/rc.d/syslog-ng restart")

    async def _start_syslogd(self, **kwargs):
        await self._service("ix-syslogd", "start", quiet=True, **kwargs)
        await self._system("/etc/local/rc.d/syslog-ng start")

    async def _stop_syslogd(self, **kwargs):
        await self._system("/etc/local/rc.d/syslog-ng stop")

    async def _reload_syslogd(self, **kwargs):
        await self._service("ix-syslogd", "start", quiet=True, **kwargs)
        await self._system("/etc/local/rc.d/syslog-ng reload")

    async def _start_tftp(self, **kwargs):
        await self._service("ix-inetd", "start", quiet=True, **kwargs)
        await self._service("inetd", "start", **kwargs)

    async def _reload_tftp(self, **kwargs):
        await self._service("ix-inetd", "start", quiet=True, **kwargs)
        await self._service("inetd", "stop", force=True, **kwargs)
        await self._service("inetd", "restart", **kwargs)

    async def _restart_tftp(self, **kwargs):
        await self._service("ix-inetd", "start", quiet=True, **kwargs)
        await self._service("inetd", "stop", force=True, **kwargs)
        await self._service("inetd", "restart", **kwargs)

    async def _restart_cron(self, **kwargs):
        await self._service("ix-crontab", "start", quiet=True, **kwargs)

    async def _start_motd(self, **kwargs):
        await self._service("ix-motd", "start", quiet=True, **kwargs)
        await self._service("motd", "start", quiet=True, **kwargs)

    async def _start_ttys(self, **kwargs):
        await self._service("ix-ttys", "start", quiet=True, **kwargs)

    async def _reload_ftp(self, **kwargs):
        await self._service("ix-proftpd", "start", quiet=True, **kwargs)
        await self._service("proftpd", "restart", **kwargs)

    async def _restart_ftp(self, **kwargs):
        await self._stop_ftp()
        await self._start_ftp()

    async def _start_ftp(self, **kwargs):
        await self._service("ix-proftpd", "start", quiet=True, **kwargs)
        await self._service("proftpd", "start", **kwargs)

    async def _stop_ftp(self, **kwargs):
        await self._service("proftpd", "stop", force=True, **kwargs)

    async def _start_ups(self, **kwargs):
        await self._service("ix-ups", "start", quiet=True, **kwargs)
        await self._service("nut", "start", **kwargs)
        await self._service("nut_upsmon", "start", **kwargs)
        await self._service("nut_upslog", "start", **kwargs)

    async def _stop_ups(self, **kwargs):
        await self._service("nut_upslog", "stop", force=True, **kwargs)
        await self._service("nut_upsmon", "stop", force=True, **kwargs)
        await self._service("nut", "stop", force=True, **kwargs)

    async def _restart_ups(self, **kwargs):
        await self._service("ix-ups", "start", quiet=True, **kwargs)
        await self._service("nut", "stop", force=True, **kwargs)
        await self._service("nut_upsmon", "stop", force=True, **kwargs)
        await self._service("nut_upslog", "stop", force=True, **kwargs)
        await self._service("nut", "restart", **kwargs)
        await self._service("nut_upsmon", "restart", **kwargs)
        await self._service("nut_upslog", "restart", **kwargs)

    async def _started_ups(self, **kwargs):
        ups = await self.middleware.call(
            'datastore.query', 'services.ups', [], {'order_by': ['-id'], 'get': True}
        )
        mode = ups['ups_mode']
        if mode == "master":
            svc = "ups"
        else:
            svc = "upsmon"
        return await self._started(svc)

    async def _start_afp(self, **kwargs):
        await self._service("ix-afpd", "start", **kwargs)
        await self._service("netatalk", "start", **kwargs)

    async def _stop_afp(self, **kwargs):
        await self._service("netatalk", "stop", force=True, **kwargs)
        # when netatalk stops, a stuck afpd or cnid_metad can get left
        # behind, which can cause issues when restarting netatalk.
        await self._system("pkill -9 afpd")
        await self._system("pkill -9 cnid_metad")

    async def _restart_afp(self, **kwargs):
        await self._stop_afp()
        await self._start_afp()

    async def _reload_afp(self, **kwargs):
        await self._service("ix-afpd", "start", quiet=True, **kwargs)
        await self._system("killall -1 netatalk")

    async def _reload_nfs(self, **kwargs):
        await self.middleware.call("etc.generate", "nfsd")

    async def _restart_nfs(self, **kwargs):
        await self._stop_nfs(**kwargs)
        await self._start_nfs(**kwargs)

    async def _stop_nfs(self, **kwargs):
        await self._service("lockd", "stop", force=True, **kwargs)
        await self._service("statd", "stop", force=True, **kwargs)
        await self._service("nfsd", "stop", force=True, **kwargs)
        await self._service("mountd", "stop", force=True, **kwargs)
        await self._service("nfsuserd", "stop", force=True, **kwargs)
        await self._service("gssd", "stop", force=True, **kwargs)
        await self._service("rpcbind", "stop", force=True, **kwargs)

    async def _start_nfs(self, **kwargs):
        nfs = await self.middleware.call('datastore.config', 'services.nfs')
        await self.middleware.call("etc.generate", "nfsd")
        await self._service("rpcbind", "start", quiet=True, **kwargs)
        await self._service("gssd", "start", quiet=True, **kwargs)
        # Workaround to work with "onetime", since the rc scripts depend on rc flags.
        if nfs['nfs_srv_v4']:
            sysctl.filter('vfs.nfsd.server_max_nfsvers')[0].value = 4
            if nfs['nfs_srv_v4_v3owner']:
                # Per RFC7530, sending NFSv3 style UID/GIDs across the wire is now allowed
                # You must have both of these sysctl's set to allow the desired functionality
                sysctl.filter('vfs.nfsd.enable_stringtouid')[0].value = 1
                sysctl.filter('vfs.nfs.enable_uidtostring')[0].value = 1
                await self._service("nfsuserd", "stop", force=True, **kwargs)
            else:
                sysctl.filter('vfs.nfsd.enable_stringtouid')[0].value = 0
                sysctl.filter('vfs.nfs.enable_uidtostring')[0].value = 0
                await self._service("nfsuserd", "start", quiet=True, **kwargs)
        else:
            sysctl.filter('vfs.nfsd.server_max_nfsvers')[0].value = 3
            if nfs['nfs_srv_16']:
                await self._service("nfsuserd", "start", quiet=True, **kwargs)
        await self._service("mountd", "start", quiet=True, **kwargs)
        await self._service("nfsd", "start", quiet=True, **kwargs)
        await self._service("statd", "start", quiet=True, **kwargs)
        await self._service("lockd", "start", quiet=True, **kwargs)

    async def _start_dynamicdns(self, **kwargs):
        await self._service("ix-inadyn", "start", quiet=True, **kwargs)
        await self._service("inadyn", "start", **kwargs)

    async def _restart_dynamicdns(self, **kwargs):
        await self._service("ix-inadyn", "start", quiet=True, **kwargs)
        await self._service("inadyn", "stop", force=True, **kwargs)
        await self._service("inadyn", "restart", **kwargs)

    async def _reload_dynamicdns(self, **kwargs):
        await self._service("ix-inadyn", "start", quiet=True, **kwargs)
        await self._service("inadyn", "stop", force=True, **kwargs)
        await self._service("inadyn", "restart", **kwargs)

    async def _restart_system(self, **kwargs):
        asyncio.ensure_future(self._system("/bin/sleep 3 && /sbin/shutdown -r now"))

    async def _stop_system(self, **kwargs):
        asyncio.ensure_future(self._system("/bin/sleep 3 && /sbin/shutdown -p now"))

    async def _reload_cifs(self, **kwargs):
        await self.middleware.call("etc.generate", "smb_share")
        await self._service("samba_server", "reload", force=True, **kwargs)
        await self._service("mdnsd", "restart", **kwargs)
        # After mdnsd is restarted we need to reload netatalk to have it re-register
        # with mdns. Ticket #7133
        await self._service("netatalk", "reload", **kwargs)

    async def _restart_cifs(self, **kwargs):
        await self.middleware.call("etc.generate", "smb")
        await self.middleware.call("etc.generate", "smb_share")
        await self._service("samba_server", "stop", force=True, **kwargs)
        await self._service("samba_server", "restart", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", **kwargs)
        # After mdnsd is restarted we need to reload netatalk to have it re-register
        # with mdns. Ticket #7133
        await self._service("netatalk", "reload", **kwargs)

    async def _start_cifs(self, **kwargs):
        await self.middleware.call("etc.generate", "smb")
        await self.middleware.call("etc.generate", "smb_share")
        await self._service("samba_server", "start", quiet=True, **kwargs)

    async def _stop_cifs(self, **kwargs):
        await self._service("samba_server", "stop", force=True, **kwargs)

    async def _started_cifs(self, **kwargs):
        if await self._service("samba_server", "status", quiet=True, onetime=True, **kwargs):
            return False, []
        else:
            return True, []

    async def _start_snmp(self, **kwargs):
        await self._service("ix-snmpd", "start", quiet=True, **kwargs)
        await self._service("snmpd", "start", quiet=True, **kwargs)
        await self._service("snmp-agent", "start", quiet=True, **kwargs)

    async def _stop_snmp(self, **kwargs):
        await self._service("snmp-agent", "stop", quiet=True, **kwargs)
        await self._service("snmpd", "stop", quiet=True, **kwargs)

    async def _restart_snmp(self, **kwargs):
        await self._service("snmp-agent", "stop", quiet=True, **kwargs)
        await self._service("snmpd", "stop", force=True, **kwargs)
        await self._service("ix-snmpd", "start", quiet=True, **kwargs)
        await self._service("snmpd", "start", quiet=True, **kwargs)
        await self._service("snmp-agent", "start", quiet=True, **kwargs)

    async def _reload_snmp(self, **kwargs):
        await self._service("snmp-agent", "stop", quiet=True, **kwargs)
        await self._service("snmpd", "stop", force=True, **kwargs)
        await self._service("ix-snmpd", "start", quiet=True, **kwargs)
        await self._service("snmpd", "start", quiet=True, **kwargs)
        await self._service("snmp-agent", "start", quiet=True, **kwargs)

    async def _restart_http(self, **kwargs):
        await self.middleware.call("etc.generate", "nginx")
        await self._service("ix_register", "reload", **kwargs)
        await self._service("nginx", "restart", **kwargs)

    async def _reload_http(self, **kwargs):
        await self.middleware.call("etc.generate", "nginx")
        await self._service("ix_register", "reload", **kwargs)
        await self._service("nginx", "reload", **kwargs)

    async def _reload_loader(self, **kwargs):
        await self._service("ix-loader", "reload", **kwargs)

    async def _start_loader(self, **kwargs):
        await self._service("ix-loader", "start", quiet=True, **kwargs)

    async def _restart_disk(self, **kwargs):
        await self._reload_disk(**kwargs)

    async def _reload_disk(self, **kwargs):
        await self._service("ix-fstab", "start", quiet=True, **kwargs)
        await self._service("mountlate", "start", quiet=True, **kwargs)
        # Restarting rrdcached can take a long time. There is no
        # benefit in waiting for it, since even if it fails it will not
        # tell the user anything useful.
        asyncio.ensure_future(self.restart("collectd", kwargs))

    async def _reload_user(self, **kwargs):
        await self.middleware.call("etc.generate", "user")
        await self._service("ix-aliases", "start", quiet=True, **kwargs)
        await self._service("ix-sudoers", "start", quiet=True, **kwargs)
        await self.reload("cifs", kwargs)

    async def _restart_system_datasets(self, **kwargs):
        systemdataset = await self.middleware.call('systemdataset.setup')
        if not systemdataset:
            return None
        if systemdataset['syslog']:
            await self.restart("syslogd", kwargs)
        await self.restart("cifs", kwargs)

        # Restarting rrdcached can take a long time. There is no
        # benefit in waiting for it, since even if it fails it will not
        # tell the user anything useful.
        asyncio.ensure_future(self.restart("rrdcached", kwargs))
        asyncio.ensure_future(self.start("collectd", kwargs))

    async def _start_netdata(self, **kwargs):
        await self.middleware.call('etc.generate', 'netdata')
        await self._service('netdata', 'start', **kwargs)

    async def _restart_netdata(self, **kwargs):
        await self._service('netdata', 'stop')
        await self._start_netdata(**kwargs)
Example No. 6
class ConsulService(Service):

    INFLUXDB_API = [
        'host', 'username', 'password', 'database', 'series-name', 'enabled'
    ]
    SLACK_API = [
        'cluster-name', 'url', 'channel', 'username', 'icon-url', 'detailed',
        'enabled'
    ]
    MATTERMOST_API = [
        'cluster', 'url', 'username', 'password', 'team', 'channel', 'enabled'
    ]
    PAGERDUTY_API = ['service-key', 'client-name', 'enabled']
    HIPCHAT_API = [
        'from', 'cluster-name', 'base-url', 'room-id', 'auth-token', 'enabled'
    ]
    OPSGENIE_API = ['cluster-name', 'api-key', 'enabled']
    AWSSNS_API = ['region', 'topic-arn', 'enabled']
    VICTOROPS_API = ['api-key', 'routing-key', 'enabled']

    @accepts(Str('key'), Any('value'))
    def set_kv(self, key, value):
        """
        Sets `key` with `value` in Consul KV.

        Returns:
                    bool: True if it added successful the value or otherwise False.
        """
        c = consul.Consul()
        return c.kv.put(str(key), str(value))

    @accepts(Str('key'))
    def get_kv(self, key):
        """
        Gets value of `key` in Consul KV.

        Returns:
                    str: Return the value or an empty string.
        """
        c = consul.Consul()
        index, data = c.kv.get(key)
        if data is not None:
            return data['Value'].decode("utf-8")
        else:
            return ""

    @accepts(Str('key'))
    def delete_kv(self, key):
        """
        Delete a `key` in Consul KV.

        Returns:
                    bool: True if it could delete the data or otherwise False.
        """
        c = consul.Consul()
        return c.kv.delete(str(key))
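
    # Round-trip sketch for the three KV helpers above (an illustration, not
    # part of the original listing):
    #
    #     svc.set_kv('consul-alerts/config/notifiers/slack/enabled', True)
    #     svc.get_kv('consul-alerts/config/notifiers/slack/enabled')    # -> 'True'
    #     svc.delete_kv('consul-alerts/config/notifiers/slack/enabled')
    #
    # Note that set_kv() stores str(value), so a boolean comes back as its
    # string form.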

    def _convert_keys(self, data):
        """
        Transforms key values that contains "_" to values with "-"

        Returns:
                    dict: With the values on keys using "-".
        """
        for key in data.keys():
            new_key = key.replace("_", "-")
            if new_key != key:
                data[new_key] = data[key]
                del data[key]

        return data
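
    # Example (an illustration): _convert_keys({'cluster_name': 'ha1'}) returns
    # {'cluster-name': 'ha1'}; only keys containing "_" are rewritten, and the
    # dict is modified in place.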

    def _api_keywords(self, api_list, data):
        """
        Helper to convert the API list into a dict.

        Returns:
                    dict: With the API_LIST.
        """
        new_dict = {k: data.get(k, None) for k in api_list}

        return new_dict

    def _insert_keys(self, prefix, data, api_keywords):
        """
        Helper to insert keys into consul.

        Note: because 'from' is a reserved word in Python, we can't
        use it directly and instead we use hfrom and convert it later.
        """
        new_dict = self._api_keywords(api_keywords, data)

        for k, v in new_dict.items():
            if k == 'hfrom':
                k = 'from'
            self.set_kv(prefix + k, v)

    def _delete_keys(self, prefix, data, api_keywords):
        """
        Helper to delete keys from consul.

        Note: the same 'from'/'hfrom' handling applies as explained in _insert_keys().
        """
        new_dict = self._api_keywords(api_keywords, data)

        for k in new_dict.keys():
            if k == 'hfrom':
                k = 'from'
            self.delete_kv(prefix + k)

    def do_create(self, data):
        """
        Helper to insert keys into consul based on the service API.
        """
        consul_prefix = 'consul-alerts/config/notifiers/'
        cdata = self._convert_keys(data)

        alert_service = data.pop('consulalert-type')
        consul_prefix = consul_prefix + alert_service.lower() + '/'

        if alert_service == 'InfluxDB':
            self._insert_keys(consul_prefix, cdata, self.INFLUXDB_API)
        elif alert_service == 'Slack':
            self._insert_keys(consul_prefix, cdata, self.SLACK_API)
        elif alert_service == 'Mattermost':
            self._insert_keys(consul_prefix, cdata, self.MATTERMOST_API)
        elif alert_service == 'PagerDuty':
            self._insert_keys(consul_prefix, cdata, self.PAGERDUTY_API)
        elif alert_service == 'HipChat':
            self._insert_keys(consul_prefix, cdata, self.HIPCHAT_API)
        elif alert_service == 'OpsGenie':
            self._insert_keys(consul_prefix, cdata, self.OPSGENIE_API)
        elif alert_service == 'AWS-SNS':
            self._insert_keys(consul_prefix, cdata, self.AWSSNS_API)
        elif alert_service == 'VictorOps':
            self._insert_keys(consul_prefix, cdata, self.VICTOROPS_API)
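
    # Sketch of the resulting KV layout (an illustration derived from the
    # prefixes above): creating a Slack notifier writes one key per SLACK_API
    # entry, e.g.
    #
    #     consul-alerts/config/notifiers/slack/cluster-name
    #     consul-alerts/config/notifiers/slack/url
    #     consul-alerts/config/notifiers/slack/channel
    #     ...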

    def do_delete(self, alert_service, data):
        """
        Helper to delete the keys from consul based on the service API.
        """
        consul_prefix = 'consul-alerts/config/notifiers/' + alert_service.lower() + '/'
        cdata = self._convert_keys(data)

        if alert_service == 'InfluxDB':
            self._delete_keys(consul_prefix, cdata, self.INFLUXDB_API)
        elif alert_service == 'Slack':
            self._delete_keys(consul_prefix, cdata, self.SLACK_API)
        elif alert_service == 'Mattermost':
            self._delete_keys(consul_prefix, cdata, self.MATTERMOST_API)
        elif alert_service == 'PagerDuty':
            self._delete_keys(consul_prefix, cdata, self.PAGERDUTY_API)
        elif alert_service == 'HipChat':
            self._delete_keys(consul_prefix, cdata, self.HIPCHAT_API)
        elif alert_service == 'OpsGenie':
            self._delete_keys(consul_prefix, cdata, self.OPSGENIE_API)
        elif alert_service == 'AWS-SNS':
            self._delete_keys(consul_prefix, cdata, self.AWSSNS_API)
        elif alert_service == 'VictorOps':
            self._delete_keys(consul_prefix, cdata, self.VICTOROPS_API)
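
# A minimal sketch (an assumption, not part of the original listing) of how the
# if/elif chains in do_create()/do_delete() could collapse into a single
# dispatch table; API_MAP is a hypothetical name introduced here:
API_MAP = {
    'InfluxDB': ConsulService.INFLUXDB_API,
    'Slack': ConsulService.SLACK_API,
    'Mattermost': ConsulService.MATTERMOST_API,
    'PagerDuty': ConsulService.PAGERDUTY_API,
    'HipChat': ConsulService.HIPCHAT_API,
    'OpsGenie': ConsulService.OPSGENIE_API,
    'AWS-SNS': ConsulService.AWSSNS_API,
    'VictorOps': ConsulService.VICTOROPS_API,
}
# do_create() would then reduce to:
#
#     api_list = API_MAP.get(alert_service)
#     if api_list is not None:
#         self._insert_keys(consul_prefix, cdata, api_list)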
Example No. 7
class ZFSDatasetService(CRUDService):
    class Config:
        namespace = 'zfs.dataset'
        private = True
        process_pool = True

    def locked_datasets(self, names=None):
        query_filters = []
        if names is not None:
            # Drop any name whose ancestor is already in the list; querying
            # the ancestor will return its children as well.
            names_optimized = []
            for name in sorted(names, key=len):
                if not any(
                    name.startswith(f'{existing_name}/')
                    for existing_name in names_optimized
                ):
                    names_optimized.append(name)

            query_filters.append(['id', 'in', names_optimized])

        result = self.flatten_datasets(
            self.query(query_filters, {
                'extra': {
                    'flat': False,  # so child datasets are also queried
                    'properties': ['encryption', 'keystatus', 'mountpoint'],
                },
            })
        )

        post_filters = [['encrypted', '=', True]]

        try:
            about_to_lock_dataset = self.middleware.call_sync(
                'cache.get', 'about_to_lock_dataset')
        except KeyError:
            about_to_lock_dataset = None

        post_filters.append([
            'OR', [['key_loaded', '=', False]] +
            ([['id', '=', about_to_lock_dataset],
              ['id', '^', f'{about_to_lock_dataset}/']]
             if about_to_lock_dataset else [])
        ])

        return [{
            'id': dataset['id'],
            'mountpoint': dataset['properties'].get('mountpoint', {}).get('value'),
        } for dataset in filter_list(result, post_filters)]

    def flatten_datasets(self, datasets):
        return sum(
            [[deepcopy(ds)] + self.flatten_datasets(ds.get('children') or [])
             for ds in datasets], [])
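
    # Example (an illustration): given
    #     [{'id': 'tank', 'children': [{'id': 'tank/a', 'children': []}]}]
    # flatten_datasets() returns two entries, 'tank' (still carrying its
    # 'children' key) followed by 'tank/a'.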

    @filterable
    def query(self, filters, options):
        """
        In `query-options` we can provide `extra` arguments which control which data should be retrieved
        for a dataset.

        `query-options.extra.snapshots` is a boolean which, when set, retrieves snapshots for the dataset
        in question by adding a `snapshots` key to the dataset data.

        `query-options.extra.retrieve_children` is a boolean which defaults to true. When true, all
        children datasets are retrieved as well, which can incur a performance penalty; when false,
        children are skipped and the penalty is avoided.

        `query-options.extra.properties` is a list of properties to retrieve. If null (the default),
        all properties are retrieved; if empty, no properties are retrieved (`mountpoint` is special in
        this case and is controlled by the `query-options.extra.mountpoint` attribute).

        We provide two ways for zfs.dataset.query to return dataset data. The first is a flat structure
        (the default), where all datasets in the system are returned as separate objects which also
        contain all the data there is for their children. This retrieval type is slightly slower because
        of the duplicates present in each object.
        The second is hierarchical, where only top-level datasets are returned in the list and all of
        their children are nested under the `children` key. This retrieval type is slightly faster.
        These options are controlled by the `query-options.extra.flat` attribute, which defaults to true.

        `query-options.extra.user_properties` controls whether user-defined properties of datasets are
        retrieved or not.

        While we provide a way to exclude all properties from data retrieval, the single attribute
        `query-options.extra.retrieve_properties`, when set to false, ensures that no property is
        retrieved at all, overriding any other property retrieval attribute.
        """
        options = options or {}
        extra = options.get('extra', {}).copy()
        props = extra.get('properties', None)
        flat = extra.get('flat', True)
        user_properties = extra.get('user_properties', True)
        retrieve_properties = extra.get('retrieve_properties', True)
        retrieve_children = extra.get('retrieve_children', True)
        snapshots = extra.get('snapshots')
        if not retrieve_properties:
            # This is a short hand version where consumer can specify that they don't want any property to
            # be retrieved
            user_properties = False
            props = []

        with libzfs.ZFS() as zfs:
            # Handle the `id` filter specially to avoid querying all datasets
            kwargs = dict(
                props=props,
                user_props=user_properties,
                snapshots=snapshots,
                retrieve_children=retrieve_children,
            )
            if filters and filters[0][0] == 'id':
                if filters[0][1] == '=':
                    kwargs['datasets'] = [filters[0][2]]
                if filters[0][1] == 'in':
                    kwargs['datasets'] = filters[0][2]

            datasets = zfs.datasets_serialized(**kwargs)
            if flat:
                datasets = self.flatten_datasets(datasets)
            else:
                datasets = list(datasets)

        return filter_list(datasets, filters, options)
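
    # Usage sketch (an assumption about the call site, matching the docstring
    # above): a hierarchical, property-free listing of one pool could be
    # requested with
    #
    #     self.middleware.call_sync('zfs.dataset.query', [['id', '=', 'tank']], {
    #         'extra': {'flat': False, 'retrieve_properties': False},
    #     })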

    def query_for_quota_alert(self):
        return [{
            k: v
            for k, v in dataset['properties'].items() if k in [
                "name", "quota", "available", "refquota", "usedbydataset",
                "mounted", "mountpoint", "org.freenas:quota_warning",
                "org.freenas:quota_critical", "org.freenas:refquota_warning",
                "org.freenas:refquota_critical"
            ]
        } for dataset in self.query()]

    def common_load_dataset_checks(self, ds):
        self.common_encryption_checks(ds)
        if ds.key_loaded:
            raise CallError(f'{ds.name} key is already loaded')

    def common_encryption_checks(self, ds):
        if not ds.encrypted:
            raise CallError(f'{ds.name} is not encrypted')

    def path_to_dataset(self, path):
        with libzfs.ZFS() as zfs:
            try:
                zh = zfs.get_dataset_by_path(path)
                ds_name = zh.name
            except libzfs.ZFSException:
                ds_name = None

        return ds_name

    def get_quota(self, ds, quota_type):
        if quota_type == 'dataset':
            dataset = self.query([('id', '=', ds)], {'get': True})
            return [{
                'quota_type': 'DATASET',
                'id': ds,
                'name': ds,
                'quota': int(dataset['properties']['quota']['rawvalue']),
                'refquota': int(dataset['properties']['refquota']['rawvalue']),
                'used_bytes': int(dataset['properties']['used']['rawvalue']),
            }]

        quota_list = []
        quota_get = subprocess.run(
            [
                'zfs', f'{quota_type}space', '-H', '-n', '-p', '-o',
                'name,used,quota,objquota,objused', ds
            ],
            capture_output=True,
            check=False,
        )
        if quota_get.returncode != 0:
            raise CallError(
                f'Failed to get {quota_type} quota for {ds}: [{quota_get.stderr.decode()}]'
            )

        for quota in quota_get.stdout.decode().splitlines():
            m = quota.split('\t')
            if len(m) != 5:
                self.logger.debug('Invalid %s quota: %s', quota_type.lower(),
                                  quota)
                continue

            entry = {
                'quota_type': quota_type.upper(),
                'id': int(m[0]),
                'name': None,
                'quota': int(m[2]),
                'used_bytes': int(m[1]),
                'used_percent': 0,
                'obj_quota': int(m[3]) if m[3] != '-' else 0,
                'obj_used': int(m[4]) if m[4] != '-' else 0,
                'obj_used_percent': 0,
            }
            if entry['quota'] > 0:
                entry['used_percent'] = entry['used_bytes'] / entry['quota'] * 100

            if entry['obj_quota'] > 0:
                entry['obj_used_percent'] = entry['obj_used'] / entry['obj_quota'] * 100

            try:
                if entry['quota_type'] == 'USER':
                    entry['name'] = (self.middleware.call_sync(
                        'user.get_user_obj', {'uid': entry['id']}))['pw_name']
                else:
                    entry['name'] = (self.middleware.call_sync(
                        'group.get_group_obj',
                        {'gid': entry['id']}))['gr_name']

            except Exception:
                self.logger.debug('Unable to resolve %s id %d to name',
                                  quota_type.lower(), entry['id'])

            quota_list.append(entry)

        return quota_list
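
    # Parsing example (an illustration with made-up numbers): a line of
    # `zfs userspace -H -n -p -o name,used,quota,objquota,objused tank` such as
    #
    #     1000\t1048576\t10485760\t-\t42
    #
    # becomes {'quota_type': 'USER', 'id': 1000, 'used_bytes': 1048576,
    # 'quota': 10485760, 'used_percent': 10.0, 'obj_quota': 0, 'obj_used': 42,
    # 'obj_used_percent': 0, ...} before the uid is resolved to a name.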

    def set_quota(self, ds, quota_list):
        cmd = ['zfs', 'set']
        cmd.extend(quota_list)
        cmd.append(ds)
        # stderr must be captured for the error message below
        quota_set = subprocess.run(cmd, capture_output=True, check=False)
        if quota_set.returncode != 0:
            raise CallError(
                f'Failed to set userspace quota on {ds}: [{quota_set.stderr.decode()}]'
            )

    @accepts(
        Str('id'),
        Dict(
            'load_key_options',
            Bool('mount', default=True),
            Bool('recursive', default=False),
            Any('key', default=None, null=True),
            Str('key_location', default=None, null=True),
        ),
    )
    def load_key(self, id, options):
        mount_ds = options.pop('mount')
        recursive = options.pop('recursive')
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_load_dataset_checks(ds)
                ds.load_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to load key for {id}', exc_info=True)
            raise CallError(f'Failed to load key for {id}: {e}')
        else:
            if mount_ds:
                self.mount(id, {'recursive': recursive})

    @accepts(Str('name'), List('params', private=True))
    @job()
    def bulk_process(self, job, name, params):
        f = getattr(self, name, None)
        if not f:
            raise CallError(f'{name} method not found in zfs.dataset')

        statuses = []
        for i in params:
            result = error = None
            try:
                result = f(*i)
            except Exception as e:
                error = str(e)
            finally:
                statuses.append({'result': result, 'error': error})

        return statuses
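
    # Usage sketch (an assumption about the call site): `params` is a list of
    # argument lists, one per call, so mounting two datasets via the `mount`
    # method could look like
    #
    #     await self.middleware.call('zfs.dataset.bulk_process', 'mount', [
    #         ['tank/enc1', {'recursive': True}],
    #         ['tank/enc2', {'recursive': False}],
    #     ])
    #
    # Each call's result or stringified exception lands in the returned statuses.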

    @accepts(Str('id'),
             Dict(
                 'check_key',
                 Any('key', default=None, null=True),
                 Str('key_location', default=None, null=True),
             ))
    def check_key(self, id, options):
        """
        Returns `true` if the `key` is valid, `false` otherwise.
        """
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_encryption_checks(ds)
                return ds.check_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to check key for {id}', exc_info=True)
            raise CallError(f'Failed to check key for {id}: {e}')

    @accepts(Str('id'),
             Dict(
                 'unload_key_options',
                 Bool('recursive', default=False),
                 Bool('force_umount', default=False),
                 Bool('umount', default=False),
             ))
    def unload_key(self, id, options):
        force = options.pop('force_umount')
        if options.pop('umount') and self.middleware.call_sync(
                'zfs.dataset.get_instance', id)['mountpoint']:
            self.umount(id, {'force': force})
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_encryption_checks(ds)
                if not ds.key_loaded:
                    raise CallError(f'{id}\'s key is not loaded')
                ds.unload_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to unload key for {id}', exc_info=True)
            raise CallError(f'Failed to unload key for {id}: {e}')

    @accepts(
        Str('id'),
        Dict(
            'change_key_options',
            Dict('encryption_properties', Str('keyformat'), Str('keylocation'),
                 Int('pbkdf2iters')),
            Bool('load_key', default=True),
            Any('key', default=None, null=True),
        ),
    )
    def change_key(self, id, options):
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_encryption_checks(ds)
                ds.change_key(props=options['encryption_properties'],
                              load_key=options['load_key'],
                              key=options['key'])
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to change key for {id}', exc_info=True)
            raise CallError(f'Failed to change key for {id}: {e}')

    @accepts(Str('id'),
             Dict(
                 'change_encryption_root_options',
                 Bool('load_key', default=True),
             ))
    def change_encryption_root(self, id, options):
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                ds.change_key(load_key=options['load_key'], inherit=True)
        except libzfs.ZFSException as e:
            raise CallError(f'Failed to change encryption root for {id}: {e}')

    @accepts(
        Dict(
            'dataset_create',
            Str('name', required=True),
            Str('type', enum=['FILESYSTEM', 'VOLUME'], default='FILESYSTEM'),
            Dict(
                'properties',
                Bool('sparse'),
                additional_attrs=True,
            ),
        ))
    def do_create(self, data):
        """
        Creates a ZFS dataset.
        """

        verrors = ValidationErrors()

        if '/' not in data['name']:
            verrors.add('name', 'You need a full name, e.g. pool/newdataset')

        if verrors:
            raise verrors

        properties = data.get('properties') or {}
        sparse = properties.pop('sparse', False)
        # copy the remaining properties ('sparse' was popped above)
        params = dict(properties)

        # it's important that we set xattr=sa for various
        # performance reasons related to ea handling
        # pool.dataset.create already sets this by default
        # so mirror the behavior here
        if data['type'] == 'FILESYSTEM' and 'xattr' not in params:
            params['xattr'] = 'sa'

        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(data['name'].split('/')[0])
                pool.create(data['name'],
                            params,
                            fstype=getattr(libzfs.DatasetType, data['type']),
                            sparse_vol=sparse)
        except libzfs.ZFSException as e:
            self.logger.error('Failed to create dataset', exc_info=True)
            raise CallError(f'Failed to create dataset: {e}')

    @accepts(
        Str('id'),
        Dict(
            'dataset_update',
            Dict(
                'properties',
                additional_attrs=True,
            ),
        ),
    )
    def do_update(self, id, data):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(id)

                if 'properties' in data:
                    properties = data['properties'].copy()
                    # Re-insert quota/refquota so they end up last and are
                    # applied after any reservations
                    for k in ['quota', 'refquota']:
                        if k in properties:
                            properties[k] = properties.pop(k)
                    for k, v in properties.items():

                        # If prop already exists we just update it,
                        # otherwise create a user property
                        prop = dataset.properties.get(k)
                        try:
                            if prop:
                                if v.get('source') == 'INHERIT':
                                    prop.inherit(
                                        recursive=v.get('recursive', False))
                                elif 'value' in v and (
                                    prop.value != v['value'] or prop.source.name == 'INHERITED'
                                ):
                                    prop.value = v['value']
                                elif 'parsed' in v and (
                                    prop.parsed != v['parsed'] or prop.source.name == 'INHERITED'
                                ):
                                    prop.parsed = v['parsed']
                            else:
                                if v.get('source') == 'INHERIT':
                                    pass
                                else:
                                    if 'value' not in v:
                                        raise ValidationError(
                                            'properties',
                                            f'properties.{k} needs a "value" attribute'
                                        )
                                    if ':' not in k:
                                        raise ValidationError(
                                            'properties',
                                            'User property needs a colon (:) in its name'
                                        )
                                    prop = libzfs.ZFSUserProperty(v['value'])
                                    dataset.properties[k] = prop
                        except libzfs.ZFSException as e:
                            raise ZFSSetPropertyError(k, str(e))

        except libzfs.ZFSException as e:
            self.logger.error('Failed to update dataset', exc_info=True)
            raise CallError(f'Failed to update dataset: {e}')

    def do_delete(self, id, options=None):
        options = options or {}
        force = options.get('force', False)
        recursive = options.get('recursive', False)

        args = []
        if force:
            args += ['-f']
        if recursive:
            args += ['-r']

        # If the dataset has a receive_resume_token, abort the interrupted receive
        # first, or ZFS will say "cannot destroy 'pool/dataset': dataset already exists"
        recv_run = subprocess.run(['zfs', 'recv', '-A', id],
                                  stdout=subprocess.DEVNULL,
                                  stderr=subprocess.DEVNULL)
        # Destroying may take a long time; let's not use py-libzfs as it would block
        # other ZFS operations.
        try:
            subprocess.run(
                ['zfs', 'destroy'] + args + [id],
                text=True,
                capture_output=True,
                check=True,
            )
        except subprocess.CalledProcessError as e:
            if recv_run.returncode == 0 and e.stderr.strip().endswith(
                    'dataset does not exist'):
                # The `zfs recv -A` above might have deleted this dataset if it was created by a `zfs recv` operation
                return
            self.logger.error('Failed to delete dataset', exc_info=True)
            error = e.stderr.strip()
            errno_ = errno.EFAULT
            if "Device busy" in error or "dataset is busy" in error:
                errno_ = errno.EBUSY
            raise CallError(f'Failed to delete dataset: {error}', errno_)

    @accepts(Str('name'), Dict('options', Bool('recursive', default=False)))
    def mount(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                if options['recursive']:
                    dataset.mount_recursive()
                else:
                    dataset.mount()
        except libzfs.ZFSException as e:
            self.logger.error('Failed to mount dataset', exc_info=True)
            raise CallError(f'Failed to mount dataset: {e}')

    @accepts(Str('name'), Dict('options', Bool('force', default=False)))
    def umount(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.umount(force=options['force'])
        except libzfs.ZFSException as e:
            self.logger.error('Failed to umount dataset', exc_info=True)
            raise CallError(f'Failed to umount dataset: {e}')

    @accepts(Str('dataset'),
             Dict('options', Str('new_name', required=True, empty=False),
                  Bool('recursive', default=False)))
    def rename(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.rename(options['new_name'],
                               recursive=options['recursive'])
        except libzfs.ZFSException as e:
            self.logger.error('Failed to rename dataset', exc_info=True)
            raise CallError(f'Failed to rename dataset: {e}')

    def promote(self, name):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.promote()
        except libzfs.ZFSException as e:
            self.logger.error('Failed to promote dataset', exc_info=True)
            raise CallError(f'Failed to promote dataset: {e}')

    def inherit(self, name, prop, recursive=False):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                zprop = dataset.properties.get(prop)
                if not zprop:
                    raise CallError(f'Property {prop!r} not found.',
                                    errno.ENOENT)
                zprop.inherit(recursive=recursive)
        except libzfs.ZFSException as e:
            raise CallError(str(e))
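The delete path above deliberately shells out to the zfs CLI instead of going through py-libzfs, so a long-running destroy cannot block other ZFS operations. A minimal standalone sketch of the same flow (destroy_dataset is a hypothetical helper, not part of the service):

import subprocess


def destroy_dataset(dataset, force=False, recursive=False):
    """Sketch of do_delete() above: abort any interrupted receive, then destroy."""
    args = (['-f'] if force else []) + (['-r'] if recursive else [])
    # Abort a partially received stream first; otherwise ZFS can refuse the
    # destroy with "dataset already exists".
    recv = subprocess.run(['zfs', 'recv', '-A', dataset],
                          stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    try:
        subprocess.run(['zfs', 'destroy'] + args + [dataset],
                       text=True, capture_output=True, check=True)
    except subprocess.CalledProcessError as e:
        if recv.returncode == 0 and e.stderr.strip().endswith('dataset does not exist'):
            # The `zfs recv -A` may already have removed a dataset that only
            # existed as an in-progress receive.
            return
        raise RuntimeError(f'Failed to delete dataset: {e.stderr.strip()}')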
Example No. 8
class NFSService(SystemServiceService):
    class Config:
        service = "nfs"
        service_verb = "restart"
        datastore_prefix = "nfs_srv_"
        datastore_extend = 'nfs.nfs_extend'
        cli_namespace = "service.nfs"

    @private
    async def nfs_extend(self, nfs):
        keytab_has_nfs = await self.middleware.call(
            "kerberos.keytab.has_nfs_principal")
        nfs["v4_krb_enabled"] = (nfs["v4_krb"] or keytab_has_nfs)
        nfs["userd_manage_gids"] = nfs.pop("16")
        return nfs

    @private
    async def nfs_compress(self, nfs):
        nfs.pop("v4_krb_enabled")
        nfs["16"] = nfs.pop("userd_manage_gids")
        return nfs

    @accepts()
    async def bindip_choices(self):
        """
        Returns IP choices for the NFS service to use
        """
        return {
            d['address']: d['address']
            for d in await self.middleware.call('interface.ip_in_use',
                                                {'static': True})
        }

    @private
    async def bindip(self, config):
        bindip = [
            addr for addr in config['bindip'] if addr not in ['0.0.0.0', '::']
        ]
        if osc.IS_LINUX:
            bindip = bindip[:1]

        if bindip:
            found = False
            for iface in await self.middleware.call('interface.query'):
                for alias in iface['state']['aliases']:
                    if alias['address'] in bindip:
                        found = True
                        break
                if found:
                    break
        else:
            found = True

        if found:
            await self.middleware.call('alert.oneshot_delete',
                                       'NFSBindAddress', None)

            return bindip
        else:
            if await self.middleware.call('cache.has_key',
                                          'interfaces_are_set_up'):
                await self.middleware.call('alert.oneshot_create',
                                           'NFSBindAddress', None)

            return []

    @accepts(
        Dict('nfs_update',
             Int('servers', validators=[Range(min=1, max=256)]),
             Bool('udp'),
             Bool('allow_nonroot'),
             Bool('v4'),
             Bool('v4_v3owner'),
             Bool('v4_krb'),
             Str('v4_domain'),
             List('bindip', items=[IPAddr('ip')]),
             Int('mountd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Int('rpcstatd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Int('rpclockd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Bool('userd_manage_gids'),
             Bool('mountd_log'),
             Bool('statd_lockd_log'),
             update=True))
    async def do_update(self, data):
        """
        Update NFS Service Configuration.

        `servers` represents number of servers to create.

        When `allow_nonroot` is set, it allows non-root mount requests to be served.

        `bindip` is a list of IPs on which NFS will listen for requests. When unset/empty, NFS listens on
        all available addresses.

        `v4` when set means that we switch from NFSv3 to NFSv4.

        `v4_v3owner` when set means that the system will use the NFSv3 ownership model for NFSv4.

        `v4_krb` will force NFS shares to fail if the Kerberos ticket is unavailable.

        `v4_domain` overrides the default DNS domain name for NFSv4.

        `mountd_port` specifies the port mountd(8) binds to.

        `rpcstatd_port` specifies the port rpc.statd(8) binds to.

        `rpclockd_port` specifies the port rpc.lockd(8) binds to.

        .. examples(websocket)::

          Update NFS Service Configuration to listen on 192.168.0.10 and use NFSv4

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.resilver.update",
                "params": [{
                    "bindip": [
                        "192.168.0.10"
                    ],
                    "v4": true
                }]
            }
        """
        if data.get("v4") is False:
            data.setdefault("v4_v3owner", False)

        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        keytab_has_nfs = await self.middleware.call(
            "kerberos.keytab.has_nfs_principal")
        new_v4_krb_enabled = new["v4_krb"] or keytab_has_nfs

        if await self.middleware.call("failover.licensed"
                                      ) and new["v4"] and new_v4_krb_enabled:
            gc = await self.middleware.call("datastore.config",
                                            "network.globalconfiguration")
            if not gc["gc_hostname_virtual"] or not gc["gc_domain"]:
                verrors.add(
                    "nfs_update.v4",
                    "Enabling kerberos authentication on TrueNAS HA requires setting the virtual hostname and "
                    "domain")

        if osc.IS_LINUX:
            if len(new['bindip']) > 1:
                verrors.add(
                    'nfs_update.bindip',
                    'Listening on more than one address is not supported')
        bindip_choices = await self.bindip_choices()
        for i, bindip in enumerate(new['bindip']):
            if bindip not in bindip_choices:
                verrors.add(f'nfs_update.bindip.{i}',
                            'Please provide a valid ip address')

        if new["v4"] and new_v4_krb_enabled and await self.middleware.call(
                'activedirectory.get_state') != "DISABLED":
            """
            In environments with kerberized NFSv4 enabled, we need to tell winbindd to not prefix
            usernames with the short form of the AD domain. Directly update the db and regenerate
            the smb.conf to avoid having a service disruption due to restarting the samba server.
            """
            if await self.middleware.call('smb.get_smb_ha_mode') == 'LEGACY':
                raise ValidationError(
                    'nfs_update.v4',
                    'Enabling kerberos authentication on TrueNAS HA requires '
                    'the system dataset to be located on a data pool.')
            ad = await self.middleware.call('activedirectory.config')
            await self.middleware.call('datastore.update',
                                       'directoryservice.activedirectory',
                                       ad['id'],
                                       {'ad_use_default_domain': True})
            await self.middleware.call('etc.generate', 'smb')
            await self.middleware.call('service.reload', 'cifs')

        if not new["v4"] and new["v4_v3owner"]:
            verrors.add("nfs_update.v4_v3owner",
                        "This option requires enabling NFSv4")

        if new["v4_v3owner"] and new["userd_manage_gids"]:
            verrors.add(
                "nfs_update.userd_manage_gids",
                "This option is incompatible with NFSv3 ownership model for NFSv4"
            )

        if not new["v4"] and new["v4_domain"]:
            verrors.add("nfs_update.v4_domain",
                        "This option does not apply to NFSv3")

        if verrors:
            raise verrors

        await self.nfs_compress(new)

        await self._update_service(old, new)

        await self.nfs_extend(new)

        return new
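The bindip() helper above reduces the configured bind list to addresses that actually exist on an interface, treating an empty result as "listen everywhere". A self-contained sketch of that filtering, assuming `ifaces` has the shape returned by interface.query:

def filter_bindip(bindip, ifaces, is_linux=False):
    # Drop wildcard addresses; NFS interprets an empty list as "all addresses".
    candidates = [addr for addr in bindip if addr not in ('0.0.0.0', '::')]
    if is_linux:
        # On Linux, NFS can listen on at most one address.
        candidates = candidates[:1]
    if not candidates:
        return []
    configured = {
        alias['address']
        for iface in ifaces
        for alias in iface['state']['aliases']
    }
    # Keep the list only if at least one candidate is actually configured.
    return candidates if configured.intersection(candidates) else []


# e.g. filter_bindip(['192.168.0.10', '0.0.0.0'],
#                    [{'state': {'aliases': [{'address': '192.168.0.10'}]}}])
# -> ['192.168.0.10']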
Example No. 9
class CredentialsService(CRUDService):
    class Config:
        namespace = "cloudsync.credentials"

        datastore = "system.cloudcredentials"

    @accepts(
        Dict(
            "cloud_sync_credentials_verify",
            Str("provider", required=True),
            Dict("attributes", additional_attrs=True, required=True),
        ))
    async def verify(self, data):
        data = dict(data, name="")
        await self._validate("cloud_sync_credentials_create", data)

        with RcloneConfig({"credentials": data}) as config:
            proc = await run([
                "rclone", "--config", config.config_path, "lsjson", "remote:"
            ],
                             check=False,
                             encoding="utf8")
            if proc.returncode == 0:
                return {"valid": True}
            else:
                return {"valid": False, "error": proc.stderr}

    @accepts(
        Dict(
            "cloud_sync_credentials_create",
            Str("name", required=True),
            Str("provider", required=True),
            Dict("attributes", additional_attrs=True, required=True),
            register=True,
        ))
    async def do_create(self, data):
        await self._validate("cloud_sync_credentials_create", data)

        data["id"] = await self.middleware.call(
            "datastore.insert",
            "system.cloudcredentials",
            data,
        )
        return data

    @accepts(Int("id"),
             Patch("cloud_sync_credentials_create",
                   "cloud_sync_credentials_update", ("attr", {
                       "update": True
                   })))
    async def do_update(self, id, data):
        old = await self._get_instance(id)

        new = old.copy()
        new.update(data)

        await self._validate("cloud_sync_credentials_update", new, id)

        await self.middleware.call(
            "datastore.update",
            "system.cloudcredentials",
            id,
            new,
        )

        data["id"] = id

        return data

    @accepts(Int("id"))
    async def do_delete(self, id):
        await self.middleware.call(
            "datastore.delete",
            "system.cloudcredentials",
            id,
        )

    async def _validate(self, schema_name, data, id=None):
        verrors = ValidationErrors()

        await self._ensure_unique(verrors, schema_name, "name", data["name"],
                                  id)

        if data["provider"] not in REMOTES:
            verrors.add(f"{schema_name}.provider", "Invalid provider")
        else:
            provider = REMOTES[data["provider"]]

            attributes_verrors = validate_attributes(
                provider.credentials_schema, data)
            verrors.add_child(f"{schema_name}.attributes", attributes_verrors)

        if verrors:
            raise verrors
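verify() above renders the submitted credentials into a temporary rclone config and lists the remote root; a zero exit code means the credentials work. The same check against an already-written config file, as a stdlib-only sketch (the config path and remote name are placeholders):

import subprocess


def verify_rclone_remote(config_path, remote='remote'):
    # `rclone lsjson` exits non-zero if the credentials are rejected.
    proc = subprocess.run(
        ['rclone', '--config', config_path, 'lsjson', f'{remote}:'],
        capture_output=True, encoding='utf8',
    )
    if proc.returncode == 0:
        return {'valid': True}
    return {'valid': False, 'error': proc.stderr}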
Example No. 10
class TrueNASService(Service):
    @accepts()
    async def get_chassis_hardware(self):
        """
        Returns what type of hardware this is, detected from dmidecode.

        TRUENAS-X10-HA-D
        TRUENAS-X10-S
        TRUENAS-X20-HA-D
        TRUENAS-X20-S
        TRUENAS-M40-HA
        TRUENAS-M40-S
        TRUENAS-M50-HA
        TRUENAS-M50-S
        TRUENAS-M60-HA
        TRUENAS-M60-S
        TRUENAS-Z20-S
        TRUENAS-Z20-HA-D
        TRUENAS-Z30-HA-D
        TRUENAS-Z30-S
        TRUENAS-Z35-HA-D
        TRUENAS-Z35-S
        TRUENAS-Z50-HA-D
        TRUENAS-Z50-S

        Nothing in dmidecode but an M, X or Z class machine:
        (Note this means production didn't burn the hardware model
        into SMBIOS. We can detect this case by looking at the
        motherboard)
        TRUENAS-M
        TRUENAS-X
        TRUENAS-Z

        Detected by the motherboard model:
        TRUENAS-SBB

        Pretty much anything else with a SM X8 board:
        (X8DTH was popular but there are a few other boards out there)
        TRUENAS-SM

        Really NFI about hardware at this point.  TrueNAS on a Dell?
        TRUENAS-UNKNOWN
        """

        chassis = await run('dmidecode',
                            '-s',
                            'system-product-name',
                            check=False)
        chassis = chassis.stdout.decode(errors='ignore').split('\n',
                                                               1)[0].strip()
        if chassis.startswith(('TRUENAS-M', 'TRUENAS-X', 'TRUENAS-Z')):
            return chassis
        # We don't match a burned-in name for an M, X or Z series.  Let's catch
        # the case where we are an M, X or Z. (shame on you production!)
        motherboard = await run('dmidecode',
                                '-s',
                                'baseboard-manufacturer',
                                check=False)
        motherboard = motherboard.stdout.decode(errors='ignore').split(
            '\n', 1)[0].strip()
        motherboard_model = await run('dmidecode',
                                      '-s',
                                      'baseboard-product-name',
                                      check=False)
        motherboard_model = motherboard_model.stdout.decode(
            errors='ignore').split('\n', 1)[0].strip()
        if motherboard_model == 'X11DPi-NT' or motherboard_model == 'X11SPi-TF':
            return 'TRUENAS-M'
        if motherboard_model == 'iXsystems TrueNAS X10':
            return 'TRUENAS-X'
        if motherboard == 'GIGABYTE':
            return 'TRUENAS-Z'

        # Are we an SBB?  We can tell this because all SBBs used
        # the same motherboard: X8DTS
        if motherboard_model == 'X8DTS':
            return 'TRUENAS-SBB'

        # Most likely we are an X8DTH at this point, but there are some
        # unicorns that used various X8 boards, so we're going to make
        # allowances
        if motherboard_model.startswith('X8'):
            return 'TRUENAS-SM'

        # Give up
        return 'TRUENAS-UNKNOWN'

    @accepts()
    def get_eula(self):
        """
        Returns the TrueNAS End-User License Agreement (EULA).
        """
        if not os.path.exists(EULA_FILE):
            return
        with open(EULA_FILE, 'r', encoding='utf8') as f:
            return f.read()

    @accepts()
    async def is_eula_accepted(self):
        """
        Returns whether the EULA is accepted or not.
        """
        return not os.path.exists(EULA_PENDING_PATH)

    @accepts()
    async def accept_eula(self):
        """
        Accept TrueNAS EULA.
        """
        try:
            os.unlink(EULA_PENDING_PATH)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    @private
    async def unaccept_eula(self):
        with open(EULA_PENDING_PATH, "w"):
            pass

    @accepts()
    async def get_customer_information(self):
        """
        Returns stored customer information.
        """
        result = await self.__fetch_customer_information()
        return result

    @accepts(
        Dict(
            'customer_information_update',
            Str('company'),
            Dict('administrative_user', *user_attrs),
            Dict('technical_user', *user_attrs),
            Dict(
                'reseller',
                Str('company'),
                Str('first_name'),
                Str('last_name'),
                Str('title'),
                Str('office_phone'),
                Str('mobile_phone'),
            ),
            Dict(
                'physical_location',
                Str('address'),
                Str('city'),
                Str('state'),
                Str('zip'),
                Str('country'),
                Str('contact_name'),
                Str('contact_phone_number'),
                Str('contact_email'),
            ),
            Str('primary_use_case'),
            Str('other_primary_use_case'),
        ))
    async def update_customer_information(self, data):
        """
        Updates customer information.
        """
        customer_information = await self.__fetch_customer_information()

        await self.middleware.call('datastore.update',
                                   'truenas.customerinformation',
                                   customer_information["id"], {
                                       "data": json.dumps(data),
                                       "updated_at": datetime.utcnow(),
                                   })

        return customer_information

    async def __fetch_customer_information(self):
        result = await self.middleware.call('datastore.config',
                                            'truenas.customerinformation')
        result["immutable_data"] = await self.__fetch_customer_information_immutable_data()
        result["data"] = json.loads(result["data"])
        result["needs_update"] = datetime.utcnow() - result["updated_at"] > timedelta(days=365)
        return result

    async def __fetch_customer_information_immutable_data(self):
        license = (await self.middleware.call('system.info'))['license']
        if license is None:
            return None

        return {
            "serial_number": license['system_serial'],
            "serial_number_ha": license['system_serial_ha'],
            "support_level": license['contract_type'].title(),
            "support_start_date": license['contract_start'].isoformat(),
            "support_end_date": license['contract_end'].isoformat(),
        }

    @accepts()
    async def is_production(self):
        """
        Returns if system is marked as production.
        """
        return await self.middleware.call('keyvalue.get', 'truenas:production',
                                          False)

    @accepts(Bool('production'), Bool('attach_debug', default=False))
    async def set_production(self, production, attach_debug):
        """
        Sets system production state and optionally sends initial debug.
        """
        was_production = await self.is_production()
        await self.middleware.call('keyvalue.set', 'truenas:production',
                                   production)

        if not was_production and production:
            serial = (await self.middleware.call('system.info'))["system_serial"]
            await self.middleware.call(
                'support.new_ticket', {
                    "title":
                    f"System has been just put into production ({serial})",
                    "body": "This system has been just put into production",
                    "attach_debug": attach_debug,
                    "category": "Installation/Setup",
                    "criticality": "Inquiry",
                    "environment": "Production",
                    "name": "Automatic Alert",
                    "email": "*****@*****.**",
                    "phone": "-",
                })
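get_chassis_hardware() above is a decision tree over three dmidecode strings (system product, baseboard manufacturer, baseboard product). Extracted as a pure function it can be tested without running dmidecode; classify_chassis is a hypothetical helper mirroring the branches above:

def classify_chassis(chassis, motherboard, motherboard_model):
    if chassis.startswith(('TRUENAS-M', 'TRUENAS-X', 'TRUENAS-Z')):
        return chassis  # model burned into SMBIOS
    if motherboard_model in ('X11DPi-NT', 'X11SPi-TF'):
        return 'TRUENAS-M'
    if motherboard_model == 'iXsystems TrueNAS X10':
        return 'TRUENAS-X'
    if motherboard == 'GIGABYTE':
        return 'TRUENAS-Z'
    if motherboard_model == 'X8DTS':
        return 'TRUENAS-SBB'  # all SBBs used the X8DTS board
    if motherboard_model.startswith('X8'):
        return 'TRUENAS-SM'
    return 'TRUENAS-UNKNOWN'


assert classify_chassis('TRUENAS-M40-HA', '', '') == 'TRUENAS-M40-HA'
assert classify_chassis('To be filled by O.E.M.', 'GIGABYTE', 'MZ31') == 'TRUENAS-Z'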
Example No. 11
class SharingNFSService(SharingService):

    path_field = 'paths'
    share_task_type = 'NFS'

    class Config:
        namespace = "sharing.nfs"
        datastore = "sharing.nfs_share"
        datastore_prefix = "nfs_"
        datastore_extend = "sharing.nfs.extend"
        cli_namespace = "sharing.nfs"

    async def human_identifier(self, share_task):
        return ', '.join(share_task[self.path_field])

    @private
    async def sharing_task_datasets(self, data):
        return [
            os.path.relpath(path, '/mnt') for path in data[self.path_field]
        ]

    @private
    async def sharing_task_determine_locked(self, data, locked_datasets):
        for path in data[self.path_field]:
            if await self.middleware.call(
                    'pool.dataset.path_in_locked_datasets', path,
                    locked_datasets):
                return True
        return False

    @accepts(
        Dict(
            "sharingnfs_create",
            List("paths", items=[Dir("path")], empty=False),
            List("aliases", items=[Str("path", validators=[Match(r"^/.*")])]),
            Str("comment", default=""),
            List("networks", items=[IPAddr("network", network=True)]),
            List("hosts", items=[Str("host")]),
            Bool("alldirs", default=False),
            Bool("ro", default=False),
            Bool("quiet", default=False),
            Str("maproot_user", required=False, default=None, null=True),
            Str("maproot_group", required=False, default=None, null=True),
            Str("mapall_user", required=False, default=None, null=True),
            Str("mapall_group", required=False, default=None, null=True),
            List(
                "security",
                items=[
                    Str("provider", enum=["SYS", "KRB5", "KRB5I", "KRB5P"])
                ],
            ),
            Bool("enabled", default=True),
            register=True,
            strict=True,
        ))
    async def do_create(self, data):
        """
        Create a NFS Share.

        `paths` is a list of valid paths which are configured to be shared on this share.

        `aliases` is a list of aliases for each path (or an empty list if aliases are not used).

        `networks` is a list of authorized networks that are allowed to access the share, given in
        "network/mask" CIDR notation. If empty, all networks are allowed.

        `hosts` is a list of IPs/hostnames which are allowed to access the share. If empty, all IPs/hostnames are
        allowed.

        `alldirs` is a boolean value which when set indicates that the client can mount any subdirectories of the
        selected pool or dataset.
        """
        verrors = ValidationErrors()

        await self.validate(data, "sharingnfs_create", verrors)

        if verrors:
            raise verrors

        await self.compress(data)
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        await self.extend(data)

        await self._service_change("nfs", "reload")

        return await self.get_instance(data["id"])

    @accepts(Int("id"),
             Patch("sharingnfs_create", "sharingnfs_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Update NFS Share of `id`.
        """
        verrors = ValidationErrors()
        old = await self.get_instance(id)

        new = old.copy()
        new.update(data)

        await self.validate(new, "sharingnfs_update", verrors, old=old)

        if verrors:
            raise verrors

        await self.compress(new)
        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, new,
                                   {"prefix": self._config.datastore_prefix})

        await self._service_change("nfs", "reload")

        return await self.get_instance(id)

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete NFS Share of `id`.
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
        await self._service_change("nfs", "reload")

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        if len(data["aliases"]):
            if not osc.IS_LINUX:
                verrors.add(
                    f"{schema_name}.aliases",
                    "This field is only supported on SCALE",
                )

            if len(data["aliases"]) != len(data["paths"]):
                verrors.add(
                    f"{schema_name}.aliases",
                    "This field should be either empty of have the same number of elements as paths",
                )

        if data["alldirs"] and len(data["paths"]) > 1:
            verrors.add(
                f"{schema_name}.alldirs",
                "This option can only be used for shares that contain single path"
            )

        # if any of the `paths` that were passed to us by user are within the gluster volume
        # mountpoint then we need to pass the `gluster_bypass` kwarg so that we don't raise a
        # validation error complaining about using a gluster path within the zpool mountpoint
        bypass = any('.glusterfs' in i
                     for i in data["paths"] + data["aliases"])

        # need to make sure that the nfs share is within the zpool mountpoint
        for idx, i in enumerate(data["paths"]):
            await check_path_resides_within_volume(
                verrors,
                self.middleware,
                f'{schema_name}.paths.{idx}',
                i,
                gluster_bypass=bypass)

        await self.middleware.run_in_thread(self.validate_paths, data,
                                            schema_name, verrors)

        filters = []
        if old:
            filters.append(["id", "!=", old["id"]])
        other_shares = await self.middleware.call("sharing.nfs.query", filters)
        dns_cache = await self.resolve_hostnames(
            sum([share["hosts"]
                 for share in other_shares], []) + data["hosts"])
        await self.middleware.run_in_thread(self.validate_hosts_and_networks,
                                            other_shares, data, schema_name,
                                            verrors, dns_cache)

        for k in ["maproot", "mapall"]:
            if not data[f"{k}_user"] and not data[f"{k}_group"]:
                pass
            elif not data[f"{k}_user"] and data[f"{k}_group"]:
                verrors.add(
                    f"{schema_name}.{k}_user",
                    "This field is required when map group is specified")
            else:
                user = group = None
                with contextlib.suppress(KeyError):
                    user = await self.middleware.call(
                        'dscache.get_uncached_user', data[f'{k}_user'])

                if not user:
                    verrors.add(f"{schema_name}.{k}_user", "User not found")

                if data[f'{k}_group']:
                    with contextlib.suppress(KeyError):
                        group = await self.middleware.call(
                            'dscache.get_uncached_group', data[f'{k}_group'])

                    if not group:
                        verrors.add(f"{schema_name}.{k}_group",
                                    "Group not found")

        if data["maproot_user"] and data["mapall_user"]:
            verrors.add(f"{schema_name}.mapall_user",
                        "maproot_user disqualifies mapall_user")

        if data["security"]:
            nfs_config = await self.middleware.call("nfs.config")
            if not nfs_config["v4"]:
                verrors.add(f"{schema_name}.security",
                            "This is not allowed when NFS v4 is disabled")

    @private
    def validate_paths(self, data, schema_name, verrors):
        if osc.IS_LINUX:
            # Ganesha does not have such a restriction, each path is a different share
            return

        dev = None
        for i, path in enumerate(data["paths"]):
            stat = os.stat(path)
            if dev is None:
                dev = stat.st_dev
            else:
                if dev != stat.st_dev:
                    verrors.add(
                        f'{schema_name}.paths.{i}',
                        'Paths for a NFS share must reside within the same filesystem'
                    )

    @private
    async def resolve_hostnames(self, hostnames):
        hostnames = list(set(hostnames))

        async def resolve(hostname):
            try:
                return (await asyncio.wait_for(
                    self.middleware.run_in_thread(socket.getaddrinfo, hostname,
                                                  None), 5))[0][4][0]
            except Exception as e:
                self.logger.warning("Unable to resolve host %r: %r", hostname,
                                    e)
                return None

        resolved_hostnames = await asyncio_map(resolve, hostnames, 8)

        return dict(zip(hostnames, resolved_hostnames))

    @private
    def validate_hosts_and_networks(self, other_shares, data, schema_name,
                                    verrors, dns_cache):
        dev = os.stat(data["paths"][0]).st_dev

        used_networks = set()
        for share in other_shares:
            try:
                share_dev = os.stat(share["paths"][0]).st_dev
            except Exception:
                self.logger.warning("Failed to stat first path for %r",
                                    share,
                                    exc_info=True)
                continue

            if share_dev == dev:
                for host in share["hosts"]:
                    host = dns_cache[host]
                    if host is None:
                        continue

                    try:
                        network = ipaddress.ip_network(host)
                    except Exception:
                        self.logger.warning("Got invalid host %r", host)
                        continue
                    else:
                        used_networks.add(network)

                for network in share["networks"]:
                    try:
                        network = ipaddress.ip_network(network, strict=False)
                    except Exception:
                        self.logger.warning("Got invalid network %r", network)
                        continue
                    else:
                        used_networks.add(network)

                if not share["hosts"] and not share["networks"]:
                    used_networks.add(ipaddress.ip_network("0.0.0.0/0"))
                    used_networks.add(ipaddress.ip_network("::/0"))

        for host in set(data["hosts"]):
            host = dns_cache[host]
            if host is None:
                continue

            network = ipaddress.ip_network(host)
            if network in used_networks:
                verrors.add(
                    f"{schema_name}.hosts",
                    f"Another NFS share already exports this dataset for {host}"
                )

            used_networks.add(network)

        for network in set(data["networks"]):
            network = ipaddress.ip_network(network, strict=False)

            if network in used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    f"Another NFS share already exports this dataset for {network}"
                )

            used_networks.add(network)

        if not data["hosts"] and not data["networks"]:
            if used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    "Another NFS share already exports this dataset for some network"
                )

    @private
    async def extend(self, data):
        data["networks"] = data.pop("network").split()
        data["hosts"] = data["hosts"].split()
        data["security"] = [s.upper() for s in data["security"]]
        return data

    @private
    async def compress(self, data):
        data["network"] = " ".join(data.pop("networks"))
        data["hosts"] = " ".join(data["hosts"])
        data["security"] = [s.lower() for s in data["security"]]
        data.pop(self.locked_field, None)
        return data
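validate_hosts_and_networks() above normalizes every host and network to an ipaddress.ip_network and rejects a new export whose network is already used by another share of the same filesystem. The core collision check, condensed into a sketch (`existing` is a hypothetical list of networks already exporting the filesystem):

import ipaddress


def find_collisions(existing, hosts, networks):
    used = {ipaddress.ip_network(n, strict=False) for n in existing}
    collisions = []
    # Bare hosts normalize to /32 (or /128) networks; networks are taken as-is.
    for value in list(hosts) + list(networks):
        network = ipaddress.ip_network(value, strict=False)
        if network in used:
            collisions.append(str(network))
        used.add(network)
    return collisions


# find_collisions(['192.168.0.0/24'], ['10.0.0.1'], ['192.168.0.0/24'])
# -> ['192.168.0.0/24']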
Example No. 12
from datetime import datetime, timedelta
import errno
import json
import os

from middlewared.schema import accepts, Bool, Dict, Str
from middlewared.service import Service, private
import middlewared.sqlalchemy as sa
from middlewared.utils import run

EULA_FILE = '/usr/local/share/truenas/eula.html'
EULA_PENDING_PATH = "/data/truenas-eula-pending"

user_attrs = [
    Str('first_name'),
    Str('last_name'),
    Str('title'),
    Str('office_phone'),
    Str('mobile_phone'),
    Str('primary_email'),
    Str('secondary_email'),
    Str('address'),
    Str('city'),
    Str('state'),
    Str('zip'),
    Str('country'),
]


class TruenasCustomerInformationModel(sa.Model):
Example No. 13
class LDAPService(ConfigService):
    class Config:
        service = "ldap"
        datastore = 'directoryservice.ldap'
        datastore_extend = "ldap.ldap_extend"
        datastore_prefix = "ldap_"

    @private
    async def ldap_extend(self, data):
        data['hostname'] = data['hostname'].split()
        for key in ["ssl", "idmap_backend", "schema"]:
            data[key] = data[key].upper()

        for key in ["certificate", "kerberos_realm"]:
            if data[key] is not None:
                data[key] = data[key]["id"]

        return data

    @private
    async def ldap_compress(self, data):
        data['hostname'] = ','.join(data['hostname'])
        for key in ["ssl", "idmap_backend", "schema"]:
            data[key] = data[key].lower()

        if not data['bindpw']:
            data.pop('bindpw')

        return data

    @private
    async def ldap_validate(self, ldap):
        port = 636 if SSL(ldap['ssl']) == SSL.USESSL else 389
        for h in ldap['hostname']:
            await self.middleware.call('ldap.port_is_listening', h, port,
                                       ldap['dns_timeout'])
        await self.middleware.call('ldap.validate_credentials')

    @accepts(
        Dict('ldap_update',
             List('hostname', required=True),
             Str('basedn', required=True),
             Str('binddn'),
             Str('bindpw', private=True),
             Bool('anonbind', default=False),
             Str('usersuffix'),
             Str('groupsuffix'),
             Str('passwordsuffix'),
             Str('machinesuffix'),
             Str('sudosuffix'),
             Str('ssl', default='OFF', enum=['OFF', 'ON', 'START_TLS']),
             Int('certificate', null=True),
             Bool('disable_freenas_cache'),
             Int('timeout', default=30),
             Int('dns_timeout', default=5),
             Str('idmap_backend', default='LDAP', enum=['SCRIPT', 'LDAP']),
             Int('kerberos_realm', null=True),
             Str('kerberos_principal'),
             Bool('has_samba_schema', default=False),
             Str('auxiliary_parameters', default='', max_length=None),
             Str('schema', default='RFC2307', enum=['RFC2307', 'RFC2307BIS']),
             Bool('enable'),
             update=True))
    async def do_update(self, data):
        """
        Update LDAP Service Configuration.

        """
        must_reload = False
        old = await self.config()
        new = old.copy()
        new.update(data)
        if old != new:
            must_reload = True
            if new['enable']:
                try:
                    await self.middleware.call('ldap.ldap_validate', new)
                except Exception as e:
                    raise ValidationError('ldap_update', str(e))

        await self.ldap_compress(new)
        await self.middleware.call('datastore.update', 'directoryservice.ldap',
                                   old['id'], new, {'prefix': 'ldap_'})

        if must_reload:
            if new['enable']:
                await self.middleware.call('ldap.start')
            else:
                await self.middleware.call('ldap.stop')

        return await self.config()

    @private
    def port_is_listening(self, host, port, timeout=1):
        ret = False

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if timeout:
            s.settimeout(timeout)

        try:
            s.connect((host, port))
            ret = True

        except Exception as e:
            raise CallError(e)

        finally:
            s.close()

        return ret

    @private
    def validate_credentials(self, ldap=None):
        ret = False
        if ldap is None:
            ldap = self.middleware.call_sync('ldap.config')

        with LDAPQuery(conf=ldap, logger=self.logger,
                       hosts=ldap['hostname']) as LDAP:
            ret = LDAP.validate_credentials()

        return ret

    @private
    def get_samba_domains(self, ldap=None):
        ret = []
        if ldap is None:
            ldap = self.middleware.call_sync('ldap.config')

        with LDAPQuery(conf=ldap, logger=self.logger,
                       hosts=ldap['hostname']) as LDAP:
            ret = LDAP.get_samba_domains()

        return ret

    @private
    def get_root_DSE(self, ldap=None):
        """
        root DSE is defined in RFC4512, and must include the following:

        `namingContexts` naming contexts held in the LDAP server

        `subschemaSubentry` subschema entries known by the LDAP server

        `altServer` alternative servers in case this one is unavailable

        `supportedExtension` list of supported extended operations

        `supportedControl` list of supported controls

        `supportedSASLMechanisms` recognized Simple Authentication and Security Layer
        (SASL) [RFC4422] mechanisms.

        `supportedLDAPVersion` LDAP versions implemented by the LDAP server

        In practice, many LDAP servers do not return all of this data.
        """
        ret = []
        if ldap is None:
            ldap = self.middleware.call_sync('ldap.config')

        with LDAPQuery(conf=ldap, logger=self.logger,
                       hosts=ldap['hostname']) as LDAP:
            ret = LDAP.get_root_DSE()

        return ret

    @private
    def get_dn(self, dn=None, ldap=None):
        """
        Outputs contents of specified DN in JSON. By default will target the basedn.
        """
        ret = []
        if ldap is None:
            ldap = self.middleware.call_sync('ldap.config')

        if dn is None:
            dn = ldap['basedn']
        with LDAPQuery(conf=ldap, logger=self.logger,
                       hosts=ldap['hostname']) as LDAP:
            ret = LDAP.get_dn(dn)

        return ret

    @private
    async def started(self):
        ldap = await self.config()
        if not ldap['enable']:
            return False

        try:
            ret = await asyncio.wait_for(self.middleware.call(
                'ldap.get_root_DSE', ldap),
                                         timeout=ldap['timeout'])
        except asyncio.TimeoutError:
            raise CallError(
                f'LDAP status check timed out after {ldap["timeout"]} seconds.',
                errno.ETIMEDOUT)

        if ret:
            await self.__set_state(DSStatus['HEALTHY'])
        else:
            await self.__set_state(DSStatus['FAULTED'])

        return bool(ret)

    @private
    async def get_workgroup(self, ldap=None):
        ret = None
        smb = await self.middleware.call('smb.config')
        if ldap is None:
            ldap = await self.config()

        try:
            ret = await asyncio.wait_for(self.middleware.call(
                'ldap.get_samba_domains', ldap),
                                         timeout=ldap['timeout'])
        except asyncio.TimeoutError:
            raise CallError(
                f'ldap.get_workgroup timed out after {ldap["timeout"]} seconds.',
                errno.ETIMEDOUT)

        if len(ret) > 1:
            raise CallError(
                f'Multiple Samba Domains detected in LDAP environment: {ret}',
                errno.EINVAL)

        ret = ret[0]['data']['sambaDomainName'][0] if ret else []

        if ret and smb['workgroup'] != ret:
            self.logger.debug(
                f'Updating SMB workgroup to match the LDAP domain name [{ret}]'
            )
            await self.middleware.call('datastore.update', 'services.cifs',
                                       smb['id'], {'cifs_srv_workgroup': ret})

        return ret

    @private
    async def __set_state(self, state):
        await self.middleware.call('cache.put', 'LDAP_State', state.name)

    @accepts()
    async def get_state(self):
        """
        Check the state of the LDAP Directory Service.
        See DSStatus for definitions of return values.
        :DISABLED: Service is not enabled.
        If, for some reason, the cache entry indicating Directory Service state
        does not exist, re-run a status check to generate a key, then return it.
        """
        ldap = await self.config()
        if not ldap['enable']:
            return 'DISABLED'
        else:
            try:
                return (await self.middleware.call('cache.get', 'LDAP_State'))
            except KeyError:
                await self.started()
                return (await self.middleware.call('cache.get', 'LDAP_State'))

    @private
    async def nslcd_cmd(self, cmd):
        nslcd = await run(['service', 'nslcd', cmd], check=False)
        if nslcd.returncode != 0:
            raise CallError(
                f'nslcd failed to {cmd} with error: {nslcd.stderr.decode()}',
                errno.EFAULT)

    @private
    async def nslcd_status(self):
        nslcd = await run(['service', 'nslcd', 'onestatus'], check=False)
        return nslcd.returncode == 0

    @private
    async def start(self):
        """
        Refuse to start the service if it is already in the process of starting or stopping.
        If state is 'HEALTHY' or 'FAULTED', then stop the service first before restarting it to ensure
        that the service begins in a clean state.
        """
        ldap = await self.config()

        ldap_state = await self.middleware.call('ldap.get_state')
        if ldap_state in ['LEAVING', 'JOINING']:
            raise CallError(
                f'LDAP state is [{ldap_state}]. Please wait until directory service operation completes.',
                errno.EBUSY)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   ldap['id'], {'ldap_enable': True})
        if ldap['kerberos_realm']:
            await self.middleware.call('kerberos.start')

        await self.middleware.call('etc.generate', 'rc')
        await self.middleware.call('etc.generate', 'nss')
        await self.middleware.call('etc.generate', 'ldap')
        await self.middleware.call('etc.generate', 'pam')
        has_samba_schema = bool(await self.middleware.call('ldap.get_workgroup'))

        if not await self.nslcd_status():
            await self.nslcd_cmd('onestart')
        else:
            await self.nslcd_cmd('onerestart')

        if has_samba_schema:
            await self.middleware.call('etc.generate', 'smb')
            await self.middleware.call('smb.store_ldap_admin_password')
            await self.middleware.call('service.restart', 'cifs')

        await self.middleware.call('ldap.fill_cache')

    @private
    async def stop(self):
        ldap = await self.config()
        await self.middleware.call('datastore.update', self._config.datastore,
                                   ldap['id'], {'ldap_enable': False})
        await self.__set_state(DSStatus['LEAVING'])
        await self.middleware.call('etc.generate', 'rc')
        await self.middleware.call('etc.generate', 'nss')
        await self.middleware.call('etc.generate', 'ldap')
        await self.middleware.call('etc.generate', 'pam')
        if ldap['has_samba_schema']:
            await self.middleware.call('etc.generate', 'smb')
            await self.middleware.call('service.restart', 'cifs')
        await self.middleware.call('cache.pop', 'LDAP_State')
        await self.middleware.call('cache.pop', 'LDAP_cache')
        await self.nslcd_cmd('onestop')

    @private
    @job(lock='fill_ldap_cache')
    def fill_cache(self, job, force=False):
        user_next_index = group_next_index = 100000000
        cache_data = {'users': [], 'groups': []}

        if self.middleware.call_sync('cache.has_key',
                                     'LDAP_cache') and not force:
            raise CallError(
                'LDAP cache already exists. Refusing to generate cache.')

        self.middleware.call_sync('cache.pop', 'LDAP_cache')

        if (self.middleware.call_sync('ldap.config'))['disable_freenas_cache']:
            self.middleware.call_sync('cache.put', 'LDAP_cache', cache_data)
            self.logger.debug('LDAP cache is disabled. Bypassing cache fill.')
            return

        pwd_list = pwd.getpwall()
        grp_list = grp.getgrall()

        local_uid_list = list(u['uid']
                              for u in self.middleware.call_sync('user.query'))
        local_gid_list = list(
            g['gid'] for g in self.middleware.call_sync('group.query'))

        for u in pwd_list:
            is_local_user = u.pw_uid in local_uid_list
            if is_local_user:
                continue

            cache_data['users'].append({
                'id': user_next_index,
                'uid': u.pw_uid,
                'username': u.pw_name,
                'unixhash': None,
                'smbhash': None,
                'group': {},
                'home': '',
                'shell': '',
                'full_name': u.pw_gecos,
                'builtin': False,
                'email': '',
                'password_disabled': False,
                'locked': False,
                'sudo': False,
                'microsoft_account': False,
                'attributes': {},
                'groups': [],
                'sshpubkey': None,
                'local': False
            })
            user_next_index += 1

        for g in grp_list:
            is_local_group = g.gr_gid in local_gid_list
            if is_local_group:
                continue

            cache_data['groups'].append({
                'id': group_next_index,
                'gid': g.gr_gid,
                'group': g.gr_name,
                'builtin': False,
                'sudo': False,
                'users': [],
                'local': False
            })
            group_next_index += 1

        self.middleware.call_sync('cache.put', 'LDAP_cache', cache_data)

    @private
    async def get_cache(self):
        if not await self.middleware.call('cache.has_key', 'LDAP_cache'):
            await self.middleware.call('ldap.fill_cache')
            self.logger.debug('cache fill is in progress.')
            return {'users': [], 'groups': []}
        return await self.middleware.call('cache.get', 'LDAP_cache')
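port_is_listening() above is a plain TCP connect probe; ldap_validate() runs it against every configured hostname before attempting to bind. A standalone stdlib sketch of the same probe, which returns False on failure instead of raising CallError:

import socket


def ldap_port_is_listening(host, port=389, timeout=5):
    # Plain TCP reachability check; does not attempt an LDAP bind.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(timeout)
    try:
        s.connect((host, port))
        return True
    except OSError:
        return False
    finally:
        s.close()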
Example No. 14
class ServiceService(CRUDService):

    SERVICE_DEFS = {
        's3': ('minio', '/var/run/minio.pid'),
        'ssh': ('sshd', '/var/run/sshd.pid'),
        'rsync': ('rsync', '/var/run/rsyncd.pid'),
        'nfs': ('nfsd', None),
        'afp': ('netatalk', None),
        'cifs': ('smbd', '/var/run/samba4/smbd.pid'),
        'dynamicdns': ('inadyn-mt', None),
        'snmp': ('snmpd', '/var/run/net_snmpd.pid'),
        'ftp': ('proftpd', '/var/run/proftpd.pid'),
        'tftp': ('inetd', '/var/run/inetd.pid'),
        'iscsitarget': ('ctld', '/var/run/ctld.pid'),
        'lldp': ('ladvd', '/var/run/ladvd.pid'),
        'ups': ('upsd', '/var/db/nut/upsd.pid'),
        'upsmon': ('upsmon', '/var/db/nut/upsmon.pid'),
        'smartd': ('smartd', '/var/run/smartd.pid'),
        'webshell': (None, '/var/run/webshell.pid'),
        'webdav': ('httpd', '/var/run/httpd.pid'),
        'netdata': ('netdata', '/var/db/netdata/netdata.pid')
    }

    @filterable
    async def query(self, filters=None, options=None):
        if options is None:
            options = {}
        options['prefix'] = 'srv_'

        services = await self.middleware.call('datastore.query', 'services.services', filters, options)

        # In case a single service has been requested
        if not isinstance(services, list):
            services = [services]

        jobs = {
            asyncio.ensure_future(self._get_status(entry)): entry
            for entry in services
        }
        await asyncio.wait(list(jobs.keys()), timeout=15)

        def result(task):
            """
            Handle the result of each status task.
            If a task has timed out or failed, provide UNKNOWN state.
            """
            try:
                result = task.result()
            except Exception:
                result = None
                self.logger.warning('Failed to get status', exc_info=True)
            if result is None:
                entry = jobs.get(task)
                entry['state'] = 'UNKNOWN'
                entry['pids'] = []
                return entry
            else:
                return result

        services = list(map(result, jobs))
        return filter_list(services, filters, options)

    @accepts(
        Int('id'),
        Dict(
            'service-update',
            Bool('enable'),
        ),
    )
    async def do_update(self, id, data):
        """
        Update service entry of `id`.

        Currently it only accepts the `enable` option, which controls whether the
        service should start on boot.

        """
        return await self.middleware.call('datastore.update', 'services.services', id, {'srv_enable': data['enable']})

    @accepts(
        Str('service'),
        Dict(
            'service-control',
            Bool('onetime'),
        ),
    )
    async def start(self, service, options=None):
        """ Start the service specified by `service`.

        The helper will use method self._start_[service]() to start the service.
        If the method does not exist, it falls back to using service(8)."""
        if options is None:
            options = {
                'onetime': True,
            }
        await self.middleware.call_hook('service.pre_start', service)
        sn = self._started_notify("start", service)
        await self._simplecmd("start", service, options)
        return await self.started(service, sn)

    async def started(self, service, sn=None):
        """
        Test if service specified by `service` has been started.
        """
        if sn:
            await self.middleware.threaded(sn.join)

        try:
            svc = await self.query([('service', '=', service)], {'get': True})
            self.middleware.send_event('service.query', 'CHANGED', fields=svc)
            return svc['state'] == 'RUNNING'
        except IndexError:
            f = getattr(self, '_started_' + service, None)
            if callable(f):
                if inspect.iscoroutinefunction(f):
                    return (await f())[0]
                else:
                    return f()[0]
            else:
                return (await self._started(service))[0]

    @accepts(
        Str('service'),
        Dict(
            'service-control',
            Bool('onetime'),
        ),
    )
    async def stop(self, service, options=None):
        """ Stop the service specified by `service`.

        The helper will use method self._stop_[service]() to stop the service.
        If the method does not exist, it falls back to using service(8)."""
        if options is None:
            options = {
                'onetime': True,
            }
        await self.middleware.call_hook('service.pre_stop', service)
        sn = self._started_notify("stop", service)
        await self._simplecmd("stop", service, options)
        return await self.started(service, sn)

    @accepts(
        Str('service'),
        Dict(
            'service-control',
            Bool('onetime'),
        ),
    )
    async def restart(self, service, options=None):
        """
        Restart the service specified by `service`.

        The helper will use method self._restart_[service]() to restart the service.
        If the method does not exist, it falls back to using service(8)."""
        if options is None:
            options = {
                'onetime': True,
            }
        await self.middleware.call_hook('service.pre_restart', service)
        sn = self._started_notify("restart", service)
        await self._simplecmd("restart", service, options)
        return await self.started(service, sn)

    @accepts(
        Str('service'),
        Dict(
            'service-control',
            Bool('onetime'),
        ),
    )
    async def reload(self, service, options=None):
        """
        Reload the service specified by `service`.

        The helper will use method self._reload_[service]() to reload the service.
        If the method does not exist, the helper will try self.restart() on the
        service instead."""
        if options is None:
            options = {
                'onetime': True,
            }
        await self.middleware.call_hook('service.pre_reload', service)
        try:
            await self._simplecmd("reload", service, options)
        except Exception:
            await self.restart(service, options)
        return await self.started(service)

    async def _get_status(self, service):
        f = getattr(self, '_started_' + service['service'], None)
        if callable(f):
            if inspect.iscoroutinefunction(f):
                running, pids = await f()
            else:
                running, pids = f()
        else:
            running, pids = await self._started(service['service'])

        if running:
            state = 'RUNNING'
        else:
            if service['enable']:
                state = 'CRASHED'
            else:
                state = 'STOPPED'

        service['state'] = state
        service['pids'] = pids
        return service

    async def _simplecmd(self, action, what, options=None):
        self.logger.debug("Calling: %s(%s) ", action, what)
        f = getattr(self, '_' + action + '_' + what, None)
        if f is None:
            # Provide generic start/stop/restart verbs for rc.d scripts
            if what in self.SERVICE_DEFS:
                procname, pidfile = self.SERVICE_DEFS[what]
                if procname:
                    what = procname
            if action in ("start", "stop", "restart", "reload"):
                if action == 'restart':
                    await self._system("/usr/sbin/service " + what + " forcestop ")
                await self._service(what, action, **options)
            else:
                raise ValueError("Internal error: Unknown command")
        else:
            call = f(**(options or {}))
            if inspect.iscoroutinefunction(f):
                await call

    async def _system(self, cmd, options=None):
        stdout = DEVNULL
        if options and 'stdout' in options:
            stdout = options['stdout']
        stderr = DEVNULL
        if options and 'stderr' in options:
            stderr = options['stderr']

        proc = await Popen(cmd, stdout=stdout, stderr=stderr, shell=True, close_fds=True)
        await proc.communicate()
        return proc.returncode

    async def _service(self, service, verb, **options):
        onetime = options.get('onetime')
        force = options.get('force')
        quiet = options.get('quiet')

        # force takes precedence over onetime, which takes precedence over quiet;
        # they are mutually exclusive
        preverb = ''
        if force:
            preverb = 'force'
        elif onetime:
            preverb = 'one'
        elif quiet:
            preverb = 'quiet'

        return await self._system('/usr/sbin/service {} {}{}'.format(
            service,
            preverb,
            verb,
        ), options)

    def _started_notify(self, verb, what):
        """
        The check for started [or not] processes is currently done in 2 steps.
        This is the first step, which starts a StartNotify thread that watches for
        events before the rc.d scripts are actually started/stopped.

        Returns:
            StartNotify object if the service is known, or None otherwise
        """

        if what in self.SERVICE_DEFS:
            procname, pidfile = self.SERVICE_DEFS[what]
            sn = StartNotify(verb=verb, pidfile=pidfile)
            sn.start()
            return sn
        else:
            return None

    async def _started(self, what, notify=None):
        """
        This is the second step:
        Wait for the StartNotify thread to finish and then check the
        status of pidfile/procname using pgrep.

        Returns:
            True if the service is alive, False otherwise
        """

        if what in self.SERVICE_DEFS:
            procname, pidfile = self.SERVICE_DEFS[what]
            if notify:
                await self.middleware.threaded(notify.join)

            if pidfile:
                pgrep = "/bin/pgrep -F {}{}".format(
                    pidfile,
                    ' ' + procname if procname else '',
                )
            else:
                pgrep = "/bin/pgrep {}".format(procname)
            proc = await Popen(pgrep, shell=True, stdout=PIPE, stderr=PIPE, close_fds=True)
            data = (await proc.communicate())[0].decode()

            if proc.returncode == 0:
                return True, [
                    int(i)
                    for i in data.strip().split('\n') if i.isdigit()
                ]
        return False, []
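
    # Illustrative sketch: for a SERVICE_DEFS entry of
    # ('proftpd', '/var/run/proftpd.pid') (hypothetical values) the check
    # above runs "/bin/pgrep -F /var/run/proftpd.pid proftpd", while an entry
    # with no pidfile falls back to plain "/bin/pgrep <procname>".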

    async def _start_webdav(self, **kwargs):
        await self._service("ix-apache", "start", force=True, **kwargs)
        await self._service("apache24", "start", **kwargs)

    async def _stop_webdav(self, **kwargs):
        await self._service("apache24", "stop", **kwargs)

    async def _restart_webdav(self, **kwargs):
        await self._service("apache24", "stop", force=True, **kwargs)
        await self._service("ix-apache", "start", force=True, **kwargs)
        await self._service("apache24", "restart", **kwargs)

    async def _reload_webdav(self, **kwargs):
        await self._service("ix-apache", "start", force=True, **kwargs)
        await self._service("apache24", "reload", **kwargs)

    async def _restart_django(self, **kwargs):
        await self._service("django", "restart", **kwargs)

    async def _start_webshell(self, **kwargs):
        await self._system("/usr/local/bin/python /usr/local/www/freenasUI/tools/webshell.py")

    async def _restart_webshell(self, **kwargs):
        try:
            with open('/var/run/webshell.pid', 'r') as f:
                pid = f.read()
                os.kill(int(pid), signal.SIGTERM)
                # use asyncio.sleep so the event loop is not blocked
                await asyncio.sleep(0.2)
                os.kill(int(pid), signal.SIGKILL)
        except Exception:
            pass
        await self._system("ulimit -n 1024 && /usr/local/bin/python /usr/local/www/freenasUI/tools/webshell.py")

    async def _restart_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "start", force=True, **kwargs)
        await self._service("ctld", "stop", force=True, **kwargs)
        await self._service("ix-ctld", "start", quiet=True, **kwargs)
        await self._service("ctld", "restart", **kwargs)

    async def _start_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "start", quiet=True, **kwargs)
        await self._service("ctld", "start", **kwargs)

    async def _stop_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "stop", force=True, **kwargs)
        await self._service("ctld", "stop", force=True, **kwargs)

    async def _reload_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "start", quiet=True, **kwargs)
        await self._service("ctld", "reload", **kwargs)

    async def _start_collectd(self, **kwargs):
        await self._service("ix-collectd", "start", quiet=True, **kwargs)
        await self._service("collectd", "restart", **kwargs)

    async def _restart_collectd(self, **kwargs):
        await self._service("collectd", "stop", **kwargs)
        await self._service("ix-collectd", "start", quiet=True, **kwargs)
        await self._service("collectd", "start", **kwargs)

    async def _start_sysctl(self, **kwargs):
        await self._service("sysctl", "start", **kwargs)
        await self._service("ix-sysctl", "start", quiet=True, **kwargs)

    async def _reload_sysctl(self, **kwargs):
        await self._service("sysctl", "start", **kwargs)
        await self._service("ix-sysctl", "reload", **kwargs)

    async def _start_network(self, **kwargs):
        await self.middleware.call('interfaces.sync')
        await self.middleware.call('routes.sync')

    async def _stop_jails(self, **kwargs):
        for jail in await self.middleware.call('datastore.query', 'jails.jails'):
            await self.middleware.call('notifier.warden', 'stop', [], {'jail': jail['jail_host']})

    async def _start_jails(self, **kwargs):
        await self._service("ix-warden", "start", **kwargs)
        for jail in await self.middleware.call('datastore.query', 'jails.jails'):
            if jail['jail_autostart']:
                await self.middleware.call('notifier.warden', 'start', [], {'jail': jail['jail_host']})
        await self._service("ix-plugins", "start", **kwargs)
        await self.reload("http", kwargs)

    async def _restart_jails(self, **kwargs):
        await self._stop_jails()
        await self._start_jails()

    async def _stop_pbid(self, **kwargs):
        await self._service("pbid", "stop", **kwargs)

    async def _start_pbid(self, **kwargs):
        await self._service("pbid", "start", **kwargs)

    async def _restart_pbid(self, **kwargs):
        await self._service("pbid", "restart", **kwargs)

    async def _reload_named(self, **kwargs):
        await self._service("named", "reload", **kwargs)

    async def _reload_hostname(self, **kwargs):
        await self._system('/bin/hostname ""')
        await self._service("ix-hostname", "start", quiet=True, **kwargs)
        await self._service("hostname", "start", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", quiet=True, **kwargs)
        await self._service("collectd", "stop", **kwargs)
        await self._service("ix-collectd", "start", quiet=True, **kwargs)
        await self._service("collectd", "start", **kwargs)

    async def _reload_resolvconf(self, **kwargs):
        await self._reload_hostname()
        await self._service("ix-resolv", "start", quiet=True, **kwargs)

    async def _reload_networkgeneral(self, **kwargs):
        await self._reload_resolvconf()
        await self._service("routing", "restart", **kwargs)

    async def _reload_timeservices(self, **kwargs):
        await self._service("ix-localtime", "start", quiet=True, **kwargs)
        await self._service("ix-ntpd", "start", quiet=True, **kwargs)
        await self._service("ntpd", "restart", **kwargs)
        os.environ['TZ'] = (await self.middleware.call('datastore.query', 'system.settings', [], {'order_by': ['-id'], 'get': True}))['stg_timezone']
        time.tzset()

    async def _restart_smartd(self, **kwargs):
        await self._service("ix-smartd", "start", quiet=True, **kwargs)
        await self._service("smartd", "stop", force=True, **kwargs)
        await self._service("smartd", "restart", **kwargs)

    async def _reload_ssh(self, **kwargs):
        await self._service("ix-sshd", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("openssh", "reload", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _start_ssh(self, **kwargs):
        await self._service("ix-sshd", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("openssh", "start", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _stop_ssh(self, **kwargs):
        await self._service("openssh", "stop", force=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)

    async def _restart_ssh(self, **kwargs):
        await self._service("ix-sshd", "start", quiet=True, **kwargs)
        await self._service("openssh", "stop", force=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("openssh", "restart", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _start_s3(self, **kwargs):
        await self._service("minio", "start", quiet=True, stdout=None, stderr=None, **kwargs)

    async def _reload_rsync(self, **kwargs):
        await self._service("ix-rsyncd", "start", quiet=True, **kwargs)
        await self._service("rsyncd", "restart", **kwargs)

    async def _restart_rsync(self, **kwargs):
        await self._stop_rsync()
        await self._start_rsync()

    async def _start_rsync(self, **kwargs):
        await self._service("ix-rsyncd", "start", quiet=True, **kwargs)
        await self._service("rsyncd", "start", **kwargs)

    async def _stop_rsync(self, **kwargs):
        await self._service("rsyncd", "stop", force=True, **kwargs)

    async def _started_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl status"):
            res = True
        return res, []

    async def _start_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl start"):
            res = True
        return res

    async def _restart_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl restart"):
            res = True
        return res

    async def _stop_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl stop"):
            res = True
        return res

    async def _started_ldap(self, **kwargs):
        if await self._system('/usr/sbin/service ix-ldap status') != 0:
            return False, []
        return await self.middleware.call('notifier.ldap_status'), []

    async def _start_ldap(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl start"):
            res = True
        return res

    async def _stop_ldap(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl stop"):
            res = True
        return res

    async def _restart_ldap(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl restart"):
            res = True
        return res

    async def _start_lldp(self, **kwargs):
        await self._service("ladvd", "start", **kwargs)

    async def _stop_lldp(self, **kwargs):
        await self._service("ladvd", "stop", force=True, **kwargs)

    async def _restart_lldp(self, **kwargs):
        await self._service("ladvd", "stop", force=True, **kwargs)
        await self._service("ladvd", "restart", **kwargs)

    async def _clear_activedirectory_config(self):
        await self._system("/bin/rm -f /etc/directoryservice/ActiveDirectory/config")

    async def _started_activedirectory(self, **kwargs):
        for srv in ('kinit', 'activedirectory', ):
            if await self._system('/usr/sbin/service ix-%s status' % (srv, )) != 0:
                return False, []
        if await self._system('/usr/local/bin/wbinfo -p') != 0:
            return False, []
        if await self._system('/usr/local/bin/wbinfo -t') != 0:
            return False, []
        return True, []

    async def _start_activedirectory(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/ActiveDirectory/ctl start"):
            res = True
        return res

    async def _stop_activedirectory(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/ActiveDirectory/ctl stop"):
            res = True
        return res

    async def _restart_activedirectory(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/ActiveDirectory/ctl restart"):
            res = True
        return res

    async def _started_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl status"):
            res = True
        return res, []

    async def _start_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl start"):
            res = True
        return res

    async def _stop_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl stop"):
            res = True
        return res

    async def _restart_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl restart"):
            res = True
        return res

    async def _restart_syslogd(self, **kwargs):
        await self._service("ix-syslogd", "start", quiet=True, **kwargs)
        await self._system("/etc/local/rc.d/syslog-ng restart")

    async def _start_syslogd(self, **kwargs):
        await self._service("ix-syslogd", "start", quiet=True, **kwargs)
        await self._system("/etc/local/rc.d/syslog-ng start")

    async def _stop_syslogd(self, **kwargs):
        await self._system("/etc/local/rc.d/syslog-ng stop")

    async def _reload_syslogd(self, **kwargs):
        await self._service("ix-syslogd", "start", quiet=True, **kwargs)
        await self._system("/etc/local/rc.d/syslog-ng reload")

    async def _start_tftp(self, **kwargs):
        await self._service("ix-inetd", "start", quiet=True, **kwargs)
        await self._service("inetd", "start", **kwargs)

    async def _reload_tftp(self, **kwargs):
        await self._service("ix-inetd", "start", quiet=True, **kwargs)
        await self._service("inetd", "stop", force=True, **kwargs)
        await self._service("inetd", "restart", **kwargs)

    async def _restart_tftp(self, **kwargs):
        await self._service("ix-inetd", "start", quiet=True, **kwargs)
        await self._service("inetd", "stop", force=True, **kwargs)
        await self._service("inetd", "restart", **kwargs)

    async def _restart_cron(self, **kwargs):
        await self._service("ix-crontab", "start", quiet=True, **kwargs)

    async def _start_motd(self, **kwargs):
        await self._service("ix-motd", "start", quiet=True, **kwargs)
        await self._service("motd", "start", quiet=True, **kwargs)

    async def _start_ttys(self, **kwargs):
        await self._service("ix-ttys", "start", quiet=True, **kwargs)

    async def _reload_ftp(self, **kwargs):
        await self._service("ix-proftpd", "start", quiet=True, **kwargs)
        await self._service("proftpd", "restart", **kwargs)

    async def _restart_ftp(self, **kwargs):
        await self._stop_ftp()
        await self._start_ftp()

    async def _start_ftp(self, **kwargs):
        await self._service("ix-proftpd", "start", quiet=True, **kwargs)
        await self._service("proftpd", "start", **kwargs)

    async def _stop_ftp(self, **kwargs):
        await self._service("proftpd", "stop", force=True, **kwargs)

    async def _start_ups(self, **kwargs):
        await self._service("ix-ups", "start", quiet=True, **kwargs)
        await self._service("nut", "start", **kwargs)
        await self._service("nut_upsmon", "start", **kwargs)
        await self._service("nut_upslog", "start", **kwargs)

    async def _stop_ups(self, **kwargs):
        await self._service("nut_upslog", "stop", force=True, **kwargs)
        await self._service("nut_upsmon", "stop", force=True, **kwargs)
        await self._service("nut", "stop", force=True, **kwargs)

    async def _restart_ups(self, **kwargs):
        await self._service("ix-ups", "start", quiet=True, **kwargs)
        await self._service("nut", "stop", force=True, **kwargs)
        await self._service("nut_upsmon", "stop", force=True, **kwargs)
        await self._service("nut_upslog", "stop", force=True, **kwargs)
        await self._service("nut", "restart", **kwargs)
        await self._service("nut_upsmon", "restart", **kwargs)
        await self._service("nut_upslog", "restart", **kwargs)

    async def _started_ups(self, **kwargs):
        mode = (await self.middleware.call('datastore.query', 'services.ups', [], {'order_by': ['-id'], 'get': True}))['ups_mode']
        if mode == "master":
            svc = "ups"
        else:
            svc = "upsmon"
        return await self._started(svc)

    async def _start_afp(self, **kwargs):
        await self._service("ix-afpd", "start", **kwargs)
        await self._service("netatalk", "start", **kwargs)

    async def _stop_afp(self, **kwargs):
        await self._service("netatalk", "stop", force=True, **kwargs)
        # If afpd or cnid_metad is stuck when netatalk stops, they get left
        # behind, which can cause issues when netatalk is restarted.
        await self._system("pkill -9 afpd")
        await self._system("pkill -9 cnid_metad")

    async def _restart_afp(self, **kwargs):
        await self._stop_afp()
        await self._start_afp()

    async def _reload_afp(self, **kwargs):
        await self._service("ix-afpd", "start", quiet=True, **kwargs)
        await self._system("killall -1 netatalk")

    async def _reload_nfs(self, **kwargs):
        await self._service("ix-nfsd", "start", quiet=True, **kwargs)

    async def _restart_nfs(self, **kwargs):
        await self._stop_nfs(**kwargs)
        await self._start_nfs(**kwargs)

    async def _stop_nfs(self, **kwargs):
        await self._service("lockd", "stop", force=True, **kwargs)
        await self._service("statd", "stop", force=True, **kwargs)
        await self._service("nfsd", "stop", force=True, **kwargs)
        await self._service("mountd", "stop", force=True, **kwargs)
        await self._service("nfsuserd", "stop", force=True, **kwargs)
        await self._service("gssd", "stop", force=True, **kwargs)
        await self._service("rpcbind", "stop", force=True, **kwargs)
        if not await self.middleware.call('system.is_freenas'):
            await self._service("vaaiserver", "stop", force=True, **kwargs)

    async def _start_nfs(self, **kwargs):
        await self._service("ix-nfsd", "start", quiet=True, **kwargs)
        await self._service("rpcbind", "start", quiet=True, **kwargs)
        await self._service("gssd", "start", quiet=True, **kwargs)
        await self._service("nfsuserd", "start", quiet=True, **kwargs)
        await self._service("mountd", "start", quiet=True, **kwargs)
        await self._service("nfsd", "start", quiet=True, **kwargs)
        await self._service("statd", "start", quiet=True, **kwargs)
        await self._service("lockd", "start", quiet=True, **kwargs)
        if not await self.middleware.call('system.is_freenas'):
            await self._service("vaaiserver", "start", quiet=True, **kwargs)

    async def _force_stop_jail(self, **kwargs):
        await self._service("jail", "stop", force=True, **kwargs)

    async def _start_plugins(self, jail=None, plugin=None, **kwargs):
        if jail and plugin:
            await self._system("/usr/sbin/service ix-plugins forcestart %s:%s" % (jail, plugin))
        else:
            await self._service("ix-plugins", "start", force=True, **kwargs)

    async def _stop_plugins(self, jail=None, plugin=None, **kwargs):
        if jail and plugin:
            await self._system("/usr/sbin/service ix-plugins forcestop %s:%s" % (jail, plugin))
        else:
            await self._service("ix-plugins", "stop", force=True, **kwargs)

    async def _restart_plugins(self, jail=None, plugin=None):
        await self._stop_plugins(jail=jail, plugin=plugin)
        await self._start_plugins(jail=jail, plugin=plugin)

    async def _started_plugins(self, jail=None, plugin=None, **kwargs):
        res = False
        if jail and plugin:
            if await self._system("/usr/sbin/service ix-plugins status %s:%s" % (jail, plugin)) == 0:
                res = True
        else:
            if await self._service("ix-plugins", "status", **kwargs) == 0:
                res = True
        return res, []

    async def _restart_dynamicdns(self, **kwargs):
        await self._service("ix-inadyn", "start", quiet=True, **kwargs)
        await self._service("inadyn-mt", "stop", force=True, **kwargs)
        await self._service("inadyn-mt", "restart", **kwargs)

    async def _restart_system(self, **kwargs):
        asyncio.ensure_future(self._system("/bin/sleep 3 && /sbin/shutdown -r now"))

    async def _stop_system(self, **kwargs):
        asyncio.ensure_future(self._system("/bin/sleep 3 && /sbin/shutdown -p now"))

    async def _reload_cifs(self, **kwargs):
        await self._service("ix-pre-samba", "start", quiet=True, **kwargs)
        await self._service("samba_server", "reload", force=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", **kwargs)
        # After mdns is restarted we need to reload netatalk to have it re-register
        # with mdns. Ticket #7133
        await self._service("netatalk", "reload", **kwargs)

    async def _restart_cifs(self, **kwargs):
        await self._service("ix-pre-samba", "start", quiet=True, **kwargs)
        await self._service("samba_server", "stop", force=True, **kwargs)
        await self._service("samba_server", "restart", quiet=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", **kwargs)
        # After mdns is restarted we need to reload netatalk to have it re-register
        # with mdns. Ticket #7133
        await self._service("netatalk", "reload", **kwargs)

    async def _start_cifs(self, **kwargs):
        await self._service("ix-pre-samba", "start", quiet=True, **kwargs)
        await self._service("samba_server", "start", quiet=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)

    async def _stop_cifs(self, **kwargs):
        await self._service("samba_server", "stop", force=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)

    async def _start_snmp(self, **kwargs):
        await self._service("ix-snmpd", "start", quiet=True, **kwargs)
        await self._service("snmpd", "start", quiet=True, **kwargs)

    async def _stop_snmp(self, **kwargs):
        await self._service("snmpd", "stop", quiet=True, **kwargs)
        # The following is required in addition to just `snmpd`
        # to kill the `freenas-snmpd.py` daemon
        await self._service("ix-snmpd", "stop", quiet=True, **kwargs)

    async def _restart_snmp(self, **kwargs):
        await self._service("ix-snmpd", "start", quiet=True, **kwargs)
        await self._service("snmpd", "stop", force=True, **kwargs)
        await self._service("snmpd", "start", quiet=True, **kwargs)

    async def _restart_http(self, **kwargs):
        await self._service("ix-nginx", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("nginx", "restart", **kwargs)

    async def _reload_http(self, **kwargs):
        await self._service("ix-nginx", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("nginx", "reload", **kwargs)

    async def _reload_loader(self, **kwargs):
        await self._service("ix-loader", "reload", **kwargs)

    async def _start_loader(self, **kwargs):
        await self._service("ix-loader", "start", quiet=True, **kwargs)

    def __saver_loaded(self):
        # Deliberately synchronous: os.popen blocks and the callers below need
        # the boolean result immediately (declaring this `async` would make
        # `not self.__saver_loaded()` always evaluate a truthy coroutine).
        pipe = os.popen("kldstat|grep daemon_saver")
        out = pipe.read().strip('\n')
        pipe.close()
        return len(out) > 0

    async def _start_saver(self, **kwargs):
        if not self.__saver_loaded():
            await self._system("kldload daemon_saver")

    async def _stop_saver(self, **kwargs):
        if self.__saver_loaded():
            await self._system("kldunload daemon_saver")

    async def _restart_saver(self, **kwargs):
        await self._stop_saver()
        await self._start_saver()

    async def _reload_disk(self, **kwargs):
        await self._service("ix-fstab", "start", quiet=True, **kwargs)
        await self._service("ix-swap", "start", quiet=True, **kwargs)
        await self._service("swap", "start", quiet=True, **kwargs)
        await self._service("mountlate", "start", quiet=True, **kwargs)
        # Restarting collectd may take a long time and there is no
        # benefit in waiting for it, since even if it fails it won't
        # tell the user anything useful.
        asyncio.ensure_future(self.restart("collectd", kwargs))

    async def _reload_user(self, **kwargs):
        await self._service("ix-passwd", "start", quiet=True, **kwargs)
        await self._service("ix-aliases", "start", quiet=True, **kwargs)
        await self._service("ix-sudoers", "start", quiet=True, **kwargs)
        await self.reload("cifs", kwargs)

    async def _restart_system_datasets(self, **kwargs):
        systemdataset = await self.middleware.call('notifier.system_dataset_create')
        if not systemdataset:
            return None
        systemdataset = await self.middleware.call('datastore.query', 'system.systemdataset', [], {'get': True})
        if systemdataset['sys_syslog_usedataset']:
            await self.restart("syslogd", kwargs)
        await self.restart("cifs", kwargs)
        if systemdataset['sys_rrd_usedataset']:
            # Restarting collectd may take a long time and there is no
            # benefit in waiting for it, since even if it fails it won't
            # tell the user anything useful.
            asyncio.ensure_future(self.restart("collectd", kwargs))
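
# A minimal standalone sketch (not middleware code) of the rc.d command
# composition implemented by `_service` above; `compose_rc_command` is a
# hypothetical helper written only for illustration.
def compose_rc_command(service, verb, force=False, onetime=False, quiet=False):
    # force takes precedence over onetime, which takes precedence over quiet
    preverb = 'force' if force else 'one' if onetime else 'quiet' if quiet else ''
    return '/usr/sbin/service {} {}{}'.format(service, preverb, verb)

assert compose_rc_command('proftpd', 'stop', force=True) == '/usr/sbin/service proftpd forcestop'
assert compose_rc_command('ix-sshd', 'start', quiet=True) == '/usr/sbin/service ix-sshd quietstart'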
Example #15
class SystemDatasetService(ConfigService):
    class Config:
        datastore = 'system.systemdataset'
        datastore_extend = 'systemdataset.config_extend'
        datastore_prefix = 'sys_'
        cli_namespace = 'system.system_dataset'

    @private
    async def config_extend(self, config):

        # Treat empty system dataset pool as boot pool
        boot_pool = await self.middleware.call('boot.pool_name')
        if not config['pool']:
            config['pool'] = boot_pool
        # Add `is_decrypted` dynamic attribute
        if config['pool'] == boot_pool:
            config['is_decrypted'] = True
        else:
            pool = await self.middleware.call('pool.query',
                                              [('name', '=', config['pool'])])
            if pool:
                config['is_decrypted'] = pool[0]['is_decrypted']
            else:
                config['is_decrypted'] = False

        if config['is_decrypted']:
            config['basename'] = f'{config["pool"]}/.system'
        else:
            config['basename'] = None

        # Make `uuid` point to the uuid of the current node
        config['uuid_a'] = config['uuid']
        if await self.middleware.call('system.is_enterprise'):
            if await self.middleware.call('failover.node') == 'B':
                config['uuid'] = config['uuid_b']

        if not config['uuid']:
            config['uuid'] = uuid.uuid4().hex
            if (
                await self.middleware.call('system.is_enterprise') and
                await self.middleware.call('failover.node') == 'B'
            ):
                attr = 'uuid_b'
                config[attr] = config['uuid']
            else:
                attr = 'uuid'
            await self.middleware.call('datastore.update',
                                       'system.systemdataset', config['id'],
                                       {f'sys_{attr}': config['uuid']})

        config['syslog'] = config.pop('syslog_usedataset')

        if not os.path.exists(SYSDATASET_PATH) or not os.path.ismount(
                SYSDATASET_PATH):
            config['path'] = None
        else:
            config['path'] = SYSDATASET_PATH

        return config
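
    # Illustrative shape of the extended config returned above (all values
    # are made-up examples, not real output):
    #     {'pool': 'tank', 'is_decrypted': True, 'basename': 'tank/.system',
    #      'uuid': '<hex>', 'syslog': True, 'path': SYSDATASET_PATH, ...}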

    @accepts()
    async def pool_choices(self):
        """
        Retrieve pool choices which can be used for configuring system dataset.
        """
        boot_pool = await self.middleware.call('boot.pool_name')
        current_pool = (await self.config())['pool']
        pools = [
            p['name'] for p in await self.middleware.call(
                'pool.query', [['encrypt', '!=', 2]])
        ]
        valid_root_ds = [
            ds['id'] for ds in await self.middleware.call(
                'pool.dataset.query',
                [['key_format.value', '!=', 'PASSPHRASE'], ['locked', '!=', True]],
                {'extra': {'retrieve_children': False}},
            )
        ]
        return {
            p: p
            for p in set([boot_pool, current_pool] +
                         [ds for ds in valid_root_ds if ds in pools])
        }

    @accepts(
        Dict('sysdataset_update',
             Str('pool', null=True),
             Str('pool_exclude', null=True),
             Bool('syslog'),
             update=True))
    @job(lock='sysdataset_update')
    async def do_update(self, job, data):
        """
        Update System Dataset Service Configuration.

        `pool` is the name of a valid pool configured in the system which will be used to host the system dataset.

        `pool_exclude` can be specified to make sure that we don't place the system dataset on that pool if `pool`
        is not provided.
        """
        config = await self.config()

        new = config.copy()
        new.update(data)

        verrors = ValidationErrors()
        if new['pool'] != config['pool']:
            ad_enabled = (await self.middleware.call(
                'activedirectory.get_state')) != 'DISABLED'
            if ad_enabled:
                verrors.add(
                    'sysdataset_update.pool',
                    'System dataset location may not be moved while the Active Directory service is enabled.',
                    errno.EPERM)

        if new['pool'] and new['pool'] != await self.middleware.call(
                'boot.pool_name'):
            pool = await self.middleware.call('pool.query',
                                              [['name', '=', new['pool']]])
            if not pool:
                verrors.add('sysdataset_update.pool',
                            f'Pool "{new["pool"]}" not found', errno.ENOENT)
            elif pool[0]['encrypt'] == 2:
                # This will cover two cases - passphrase being set for a pool and that it might be locked as well
                verrors.add(
                    'sysdataset_update.pool',
                    f'Pool "{new["pool"]}" has an encryption passphrase set. '
                    'The system dataset cannot be placed on this pool.')
            elif await self.middleware.call(
                'pool.dataset.query', [
                    ['name', '=', new['pool']], ['encrypted', '=', True],
                    ['OR', [['key_format.value', '=', 'PASSPHRASE'], ['locked', '=', True]]],
                ]
            ):
                verrors.add(
                    'sysdataset_update.pool',
                    'The system dataset cannot be placed on a pool '
                    'which has the root dataset encrypted with a passphrase or is locked.'
                )
        elif not new['pool']:
            for pool in await self.middleware.call('pool.query',
                                                   [['encrypt', '!=', 2]]):
                if data.get('pool_exclude') == pool['name'] or await self.middleware.call(
                    'pool.dataset.query', [
                        ['name', '=', pool['name']],
                        ['OR', [['key_format.value', '=', 'PASSPHRASE'], ['locked', '=', True]]],
                    ]
                ):
                    continue
                new['pool'] = pool['name']
                break
            else:
                # If a data pool could not be found, reset it to blank,
                # which will eventually mean it's back to the boot pool (temporarily)
                new['pool'] = ''
        verrors.check()

        new['syslog_usedataset'] = new['syslog']

        update_dict = new.copy()
        for key in ('is_decrypted', 'basename', 'uuid_a', 'syslog', 'path',
                    'pool_exclude'):
            update_dict.pop(key, None)

        await self.middleware.call('datastore.update', 'system.systemdataset',
                                   config['id'], update_dict,
                                   {'prefix': 'sys_'})

        new = await self.config()

        if config['pool'] != new['pool']:
            await self.migrate(config['pool'], new['pool'])

        await self.setup(True, data.get('pool_exclude'))

        if config['syslog'] != new['syslog']:
            await self.middleware.call('service.restart', 'syslogd')

        if await self.middleware.call('failover.licensed'):
            if await self.middleware.call('failover.status') == 'MASTER':
                try:
                    await self.middleware.call('failover.call_remote',
                                               'system.reboot')
                except Exception as e:
                    self.logger.debug(
                        'Failed to reboot standby storage controller after system dataset change: %s',
                        e)

        return await self.config()

    @accepts(Bool('mount', default=True),
             Str('exclude_pool', default=None, null=True))
    @private
    async def setup(self, mount, exclude_pool):

        # FIXME: corefile for LINUX
        if osc.IS_FREEBSD:
            # We default kern.corefile value
            await run('sysctl', "kern.corefile='/var/tmp/%N.core'")

        config = await self.config()
        dbconfig = await self.middleware.call(
            'datastore.config', self._config.datastore,
            {'prefix': self._config.datastore_prefix})

        boot_pool = await self.middleware.call('boot.pool_name')
        if (await self.middleware.call('failover.licensed')
                and await self.middleware.call('failover.status') == 'BACKUP'
                and config.get('basename')
                and config['basename'] != f'{boot_pool}/.system'):
            try:
                os.unlink(SYSDATASET_PATH)
            except OSError:
                pass
            return

        # If the system dataset is configured in a data pool, we need to make sure it exists.
        # In case it does not, we need to use another one.
        if config['pool'] != boot_pool and not await self.middleware.call(
                'pool.query', [('name', '=', config['pool'])]):
            job = await self.middleware.call('systemdataset.update', {
                'pool': None,
                'pool_exclude': exclude_pool,
            })
            await job.wait()
            if job.error:
                raise CallError(job.error)
            return

        # If we don't have a pool configured in the database, try to find the
        # first data pool to put it on.
        if not dbconfig['pool']:
            pool = None
            for p in await self.middleware.call('pool.query',
                                                [('encrypt', '!=', '2')],
                                                {'order_by': ['encrypt']}):
                if (exclude_pool and p['name'] == exclude_pool) or await self.middleware.call(
                    'pool.dataset.query', [
                        ['name', '=', p['name']],
                        ['OR', [['key_format.value', '=', 'PASSPHRASE'], ['locked', '=', True]]],
                    ]
                ):
                    continue
                if p['is_decrypted']:
                    pool = p
                    break
            if pool:
                job = await self.middleware.call('systemdataset.update',
                                                 {'pool': pool['name']})
                await job.wait()
                if job.error:
                    raise CallError(job.error)
                return

        if not config['basename']:
            if os.path.exists(SYSDATASET_PATH):
                try:
                    os.rmdir(SYSDATASET_PATH)
                except Exception:
                    self.logger.debug('Failed to remove system dataset dir',
                                      exc_info=True)
            return config

        if not config['is_decrypted']:
            return

        if await self.__setup_datasets(config['pool'], config['uuid']):
            # There is no need to wait for this to finish.
            # Restarting rrdcached will ensure that we start/restart collectd as well.
            asyncio.ensure_future(
                self.middleware.call('service.restart', 'rrdcached'))

        if not os.path.isdir(SYSDATASET_PATH):
            if os.path.exists(SYSDATASET_PATH):
                os.unlink(SYSDATASET_PATH)
            os.makedirs(SYSDATASET_PATH)

        acltype = await self.middleware.call('zfs.dataset.query',
                                             [('id', '=', config['basename'])])
        if acltype and acltype[0]['properties']['acltype']['value'] == 'off':
            await self.middleware.call(
                'zfs.dataset.update',
                config['basename'],
                {'properties': {
                    'acltype': {
                        'value': 'off'
                    }
                }},
            )

        if mount:

            await self.__mount(config['pool'], config['uuid'])

            corepath = f'{SYSDATASET_PATH}/cores'
            if os.path.exists(corepath):
                # FIXME: corefile for LINUX
                if osc.IS_FREEBSD:
                    # FIXME: sysctl module not working
                    await run('sysctl', f"kern.corefile='{corepath}/%N.core'")
                os.chmod(corepath, 0o775)

                if await self.middleware.call('keyvalue.get', 'run_migration',
                                              False):
                    try:
                        cores = Path(corepath)
                        for corefile in cores.iterdir():
                            corefile.unlink()
                    except Exception:
                        self.logger.warning("Failed to clear old core files.",
                                            exc_info=True)

            await self.__nfsv4link(config)

            if osc.IS_LINUX:
                await self.middleware.call('etc.generate', 'glusterd')

            await self.middleware.call('smb.setup_directories')
            # The following should be backgrounded since they may be quite
            # long-running.
            await self.middleware.call('smb.configure', False)
            await self.middleware.call('dscache.initialize')

        return config

    async def __setup_datasets(self, pool, uuid):
        """
        Make sure system datasets for `pool` exist and have the right mountpoint property
        """
        createdds = False
        datasets = [i[0] for i in self.__get_datasets(pool, uuid)]
        datasets_prop = {
            i['id']: i['properties']
            for i in await self.middleware.call('zfs.dataset.query', [(
                'id', 'in', datasets)])
        }
        for dataset in datasets:
            is_cores_ds = dataset.endswith('/cores')
            dataset_quota = {'quota': '1G'} if is_cores_ds else {}
            if dataset not in datasets_prop:
                await self.middleware.call(
                    'zfs.dataset.create', {
                        'name': dataset,
                        'properties': {
                            'mountpoint': 'legacy',
                            **dataset_quota
                        },
                    })
                createdds = True
            elif is_cores_ds and datasets_prop[dataset]['used']['parsed'] >= 1024 ** 3:
                try:
                    await self.middleware.call('zfs.dataset.delete', dataset, {
                        'force': True,
                        'recursive': True
                    })
                    await self.middleware.call(
                        'zfs.dataset.create', {
                            'name': dataset,
                            'properties': {
                                'mountpoint': 'legacy',
                                **dataset_quota
                            },
                        })
                except Exception:
                    self.logger.warning("Failed to replace dataset [%s].",
                                        dataset,
                                        exc_info=True)
            else:
                update_props_dict = {}
                if datasets_prop[dataset]['mountpoint']['value'] != 'legacy':
                    update_props_dict['mountpoint'] = {'value': 'legacy'}
                if dataset_quota and datasets_prop[dataset]['quota']['value'] != '1G':
                    update_props_dict['quota'] = {'value': '1G'}
                if update_props_dict:
                    await self.middleware.call(
                        'zfs.dataset.update',
                        dataset,
                        {'properties': update_props_dict},
                    )
        return createdds
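
    # Added note: the cores dataset is created with a 1G quota; once its
    # `used` property reaches 1024 ** 3 bytes it is destroyed and recreated
    # rather than pruned file-by-file, while other existing datasets only get
    # their mountpoint/quota properties re-asserted when they have drifted.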

    async def __mount(self, pool, uuid, path=SYSDATASET_PATH):

        for dataset, name in self.__get_datasets(pool, uuid):
            if name:
                mountpoint = f'{path}/{name}'
            else:
                mountpoint = path
            if os.path.ismount(mountpoint):
                continue
            if not os.path.isdir(mountpoint):
                os.mkdir(mountpoint)
            await run('mount', '-t', 'zfs', dataset, mountpoint, check=True)

        if osc.IS_LINUX:

            # make sure the glustereventsd webhook dir and
            # config file exist
            init_job = await self.middleware.call('gluster.eventsd.init')
            await init_job.wait()
            if init_job.error:
                self.logger.error(
                    'Failed to initialize %s directory with error: %s',
                    CTDBConfig.CTDB_VOL_NAME.value, init_job.error)

    async def __umount(self, pool, uuid):

        for dataset, name in reversed(self.__get_datasets(pool, uuid)):
            try:
                await run('umount', '-f', dataset)
            except subprocess.CalledProcessError as e:
                stderr = e.stderr.decode()
                if 'no mount point specified' in stderr:
                    # Already unmounted
                    continue
                raise CallError(f'Unable to umount {dataset}: {stderr}')

    def __get_datasets(self, pool, uuid):
        # The Linux-only datasets must be appended via a parenthesized
        # conditional; a bare `if osc.IS_LINUX` here would be parsed as the
        # comprehension filter and drop every child dataset on FreeBSD.
        return [(f'{pool}/.system', '')] + [
            (f'{pool}/.system/{i}', i) for i in [
                'cores', 'samba4', f'syslog-{uuid}', f'rrd-{uuid}',
                f'configs-{uuid}', 'webui', 'services'
            ] + (['glusterd', CTDBConfig.CTDB_VOL_NAME.value] if osc.IS_LINUX else [])
        ]

    async def __nfsv4link(self, config):
        syspath = config['path']
        if not syspath:
            return None

        restartfiles = [
            "/var/db/nfs-stablerestart", "/var/db/nfs-stablerestart.bak"
        ]
        if (
            await self.middleware.call('failover.licensed') and
            await self.middleware.call('failover.status') == 'BACKUP'
        ):
            return None

        for item in restartfiles:
            if os.path.exists(item):
                if os.path.isfile(item) and not os.path.islink(item):
                    # It's an honest-to-goodness file; this shouldn't ever happen... but
                    path = os.path.join(syspath, os.path.basename(item))
                    if not os.path.isfile(path):
                        # there's no file in the system dataset, so copy over what we have
                        # being careful to nuke anything that is there that happens to
                        # have the same name.
                        if os.path.exists(path):
                            shutil.rmtree(path)
                        shutil.copy(item, path)
                    # Nuke the original file and create a symlink to it
                    # We don't need to worry about creating the file on the system dataset
                    # because it's either been copied over, or was already there.
                    os.unlink(item)
                    os.symlink(path, item)
                elif os.path.isdir(item):
                    # Pathological case that should never happen
                    shutil.rmtree(item)
                    self.__createlink(syspath, item)
                else:
                    if not os.path.exists(os.readlink(item)):
                        # Dead symlink or some other nastiness.
                        shutil.rmtree(item)
                        self.__createlink(syspath, item)
            else:
                # We can get here if item is a dead symlink
                if os.path.islink(item):
                    os.unlink(item)
                self.__createlink(syspath, item)

    def __createlink(self, syspath, item):
        path = os.path.join(syspath, os.path.basename(item))
        if not os.path.isfile(path):
            if os.path.exists(path):
                # There's something here but it's not a file.
                shutil.rmtree(path)
            open(path, 'w').close()
        os.symlink(path, item)

    @private
    async def migrate(self, _from, _to):

        config = await self.config()

        await self.__setup_datasets(_to, config['uuid'])

        if _from:
            path = '/tmp/system.new'
            if not os.path.exists(path):
                os.mkdir(path)
            else:
                # Make sure we clean up any previous attempts
                await run('umount', '-R', path, check=False)
        else:
            path = SYSDATASET_PATH
        await self.__mount(_to, config['uuid'], path=path)

        restart = ['collectd', 'rrdcached', 'syslogd']

        if await self.middleware.call('service.started', 'cifs'):
            restart.insert(0, 'cifs')
        for service in ['open-vm-tools', 'webdav']:
            restart.append(service)

        if await self.middleware.call('service.started', 'glusterd'):
            restart.append('glusterd')

        try:
            if osc.IS_LINUX:
                await self.middleware.call('cache.put', 'use_syslog_dataset',
                                           False)
                await self.middleware.call('service.restart', 'syslogd')
                if await self.middleware.call('service.started', 'glusterd'):
                    restart.insert(0, 'glusterd')

            # Middleware itself will log to the syslog dataset.
            # This may be prone to a race condition since we don't wait for the
            # workers to stop logging; however, all the work done before the
            # umount seems to make it seamless.
            await self.middleware.call('core.stop_logging')

            for i in restart:
                await self.middleware.call('service.stop', i)

            if _from:
                cp = await run('rsync',
                               '-az',
                               f'{SYSDATASET_PATH}/',
                               '/tmp/system.new',
                               check=False)
                if cp.returncode == 0:
                    await self.__umount(_from, config['uuid'])
                    await self.__umount(_to, config['uuid'])
                    await self.__mount(_to, config['uuid'], SYSDATASET_PATH)
                    proc = await Popen(
                        f'zfs list -H -o name {_from}/.system|xargs zfs destroy -r',
                        shell=True)
                    await proc.communicate()

                    os.rmdir('/tmp/system.new')
                else:
                    raise CallError(
                        f'Failed to rsync from {SYSDATASET_PATH}: {cp.stderr.decode()}'
                    )
        finally:
            if osc.IS_LINUX:
                await self.middleware.call('cache.pop', 'use_syslog_dataset')

            restart.reverse()
            for i in restart:
                await self.middleware.call('service.start', i)

        await self.__nfsv4link(config)
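
# A small standalone sketch (illustration only) of the naming scheme built by
# `__get_datasets` above; `get_system_datasets` and the 'ctdb_shared_vol'
# stand-in for CTDBConfig.CTDB_VOL_NAME are hypothetical.
def get_system_datasets(pool, uuid, is_linux=True):
    names = ['cores', 'samba4', f'syslog-{uuid}', f'rrd-{uuid}',
             f'configs-{uuid}', 'webui', 'services']
    if is_linux:
        names += ['glusterd', 'ctdb_shared_vol']
    return [(f'{pool}/.system', '')] + [(f'{pool}/.system/{n}', n) for n in names]

# e.g. get_system_datasets('tank', 'abc123')[:2] ==
#     [('tank/.system', ''), ('tank/.system/cores', 'cores')]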
Example #16
class CloudSyncService(CRUDService):
    class Config:
        datastore = "tasks.cloudsync"
        datastore_extend = "cloudsync._extend"

    @filterable
    async def query(self, filters=None, options=None):
        tasks_or_task = await super().query(filters, options)

        jobs = {}
        for j in await self.middleware.call(
                "core.get_jobs", [("method", "=", "cloudsync.sync")],
            {"order_by": ["id"]}):
            try:
                task_id = int(j["arguments"][0])
            except (IndexError, ValueError):
                continue

            if task_id in jobs and jobs[task_id]["state"] == "RUNNING":
                continue

            jobs[task_id] = j

        if isinstance(tasks_or_task, list):
            for task in tasks_or_task:
                task["job"] = jobs.get(task["id"])
        else:
            tasks_or_task["job"] = jobs.get(tasks_or_task["id"])

        return tasks_or_task

    @private
    async def _extend(self, cloud_sync):
        cloud_sync["credentials"] = cloud_sync.pop("credential")

        cloud_sync["encryption_password"] = await self.middleware.call(
            "pwenc.decrypt", cloud_sync["encryption_password"])
        cloud_sync["encryption_salt"] = await self.middleware.call(
            "pwenc.decrypt", cloud_sync["encryption_salt"])

        Cron.convert_db_format_to_schedule(cloud_sync)

        return cloud_sync

    @private
    async def _compress(self, cloud_sync):
        cloud_sync["credential"] = cloud_sync.pop("credentials")

        cloud_sync["encryption_password"] = await self.middleware.call(
            "pwenc.encrypt", cloud_sync["encryption_password"])
        cloud_sync["encryption_salt"] = await self.middleware.call(
            "pwenc.encrypt", cloud_sync["encryption_salt"])

        Cron.convert_schedule_to_db_format(cloud_sync)

        cloud_sync.pop('job', None)

        return cloud_sync
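
    # Added note: _compress is the inverse of _extend above -- it renames
    # `credentials` back to the `credential` column, re-encrypts the
    # password/salt, folds the schedule back into its DB columns and drops
    # the computed `job` key before the row is written to the datastore.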

    @private
    async def _get_credentials(self, credentials_id):
        try:
            return await self.middleware.call("datastore.query",
                                              "system.cloudcredentials",
                                              [("id", "=", credentials_id)],
                                              {"get": True})
        except IndexError:
            return None

    @private
    async def _basic_validate(self, verrors, name, data):
        if data["encryption"]:
            if not data["encryption_password"]:
                verrors.add(
                    f"{name}.encryption_password",
                    "This field is required when encryption is enabled")

        credentials = await self._get_credentials(data["credentials"])
        if not credentials:
            verrors.add(f"{name}.credentials", "Invalid credentials")

        try:
            shlex.split(data["args"])
        except ValueError as e:
            verrors.add(f"{name}.args", f"Parse error: {e.args[0]}")

        if verrors:
            raise verrors

        provider = REMOTES[credentials["provider"]]

        schema = []

        if provider.buckets:
            schema.append(Str("bucket", required=True, empty=False))

        schema.append(Str("folder", required=True))

        schema.extend(provider.task_schema)

        schema.extend(self.common_task_schema(provider))

        attributes_verrors = validate_attributes(schema,
                                                 data,
                                                 additional_attrs=True)

        if not attributes_verrors:
            await provider.pre_save_task(data, credentials, verrors)

        verrors.add_child(f"{name}.attributes", attributes_verrors)

    @private
    async def _validate(self, verrors, name, data):
        await self._basic_validate(verrors, name, data)

        for i, (limit1, limit2) in enumerate(zip(data["bwlimit"], data["bwlimit"][1:])):
            if limit1["time"] >= limit2["time"]:
                verrors.add(
                    f"{name}.bwlimit.{i + 1}.time",
                    f"Invalid time order: {limit1['time']}, {limit2['time']}")

        if data["snapshot"]:
            if data["direction"] != "PUSH":
                verrors.add(f"{name}.snapshot",
                            "This option can only be enabled for PUSH tasks")

    @private
    async def _validate_folder(self, verrors, name, data):
        if data["direction"] == "PULL":
            folder = data["attributes"]["folder"].rstrip("/")
            if folder:
                folder_parent = os.path.normpath(os.path.join(folder, ".."))
                if folder_parent == ".":
                    folder_parent = ""
                folder_basename = os.path.basename(folder)
                ls = await self.list_directory(
                    dict(
                        credentials=data["credentials"],
                        encryption=data["encryption"],
                        filename_encryption=data["filename_encryption"],
                        encryption_password=data["encryption_password"],
                        encryption_salt=data["encryption_salt"],
                        attributes=dict(data["attributes"],
                                        folder=folder_parent),
                        args=data["args"],
                    ))
                for item in ls:
                    if item["Name"] == folder_basename:
                        if not item["IsDir"]:
                            verrors.add(f"{name}.attributes.folder",
                                        "This is not a directory")
                        break
                else:
                    verrors.add(f"{name}.attributes.folder",
                                "Directory does not exist")

        if data["direction"] == "PUSH":
            credentials = await self._get_credentials(data["credentials"])

            provider = REMOTES[credentials["provider"]]

            if provider.readonly:
                verrors.add(f"{name}.direction", "This remote is read-only")

    @accepts(
        Dict(
            "cloud_sync_create",
            Str("description", default=""),
            Str("direction", enum=["PUSH", "PULL"], required=True),
            Str("transfer_mode", enum=["SYNC", "COPY", "MOVE"], required=True),
            Str("path", required=True),
            Int("credentials", required=True),
            Bool("encryption", default=False),
            Bool("filename_encryption", default=False),
            Str("encryption_password", default=""),
            Str("encryption_salt", default=""),
            Cron("schedule", required=True),
            Bool("follow_symlinks", default=False),
            Int("transfers",
                null=True,
                default=None,
                validators=[Range(min=1)]),
            List("bwlimit",
                 default=[],
                 items=[
                     Dict(
                         "cloud_sync_bwlimit", Str("time",
                                                   validators=[Time()]),
                         Int("bandwidth", validators=[Range(min=1)],
                             null=True))
                 ]),
            List("exclude", default=[], items=[Str("path", empty=False)]),
            Dict("attributes", additional_attrs=True, required=True),
            Bool("snapshot", default=False),
            Str("pre_script", default=""),
            Str("post_script", default=""),
            Str("args", default=""),
            Bool("enabled", default=True),
            register=True,
        ))
    async def do_create(self, cloud_sync):
        """
        Creates a new cloud_sync entry.

        .. examples(websocket)::

          Create a new cloud_sync using amazon s3 attributes, which is supposed to run every hour.

            :::javascript
            {
              "id": "6841f242-840a-11e6-a437-00e04d680384",
              "msg": "method",
              "method": "cloudsync.create",
              "params": [{
                "description": "s3 sync",
                "path": "/mnt/tank",
                "credentials": 1,
                "minute": "00",
                "hour": "*",
                "daymonth": "*",
                "month": "*",
                "attributes": {
                  "bucket": "mybucket",
                  "folder": ""
                },
                "enabled": true
              }]
            }
        """

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        cloud_sync["id"] = await self.middleware.call("datastore.insert",
                                                      "tasks.cloudsync",
                                                      cloud_sync)
        await self.middleware.call("service.restart", "cron")

        cloud_sync = await self._extend(cloud_sync)
        return cloud_sync

    @accepts(Int("id"),
             Patch("cloud_sync_create", "cloud_sync_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Updates the cloud_sync entry `id` with `data`.
        """
        cloud_sync = await self._get_instance(id)

        # credentials is a foreign key for now
        if cloud_sync["credentials"]:
            cloud_sync["credentials"] = cloud_sync["credentials"]["id"]

        cloud_sync.update(data)

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        await self.middleware.call("datastore.update", "tasks.cloudsync", id,
                                   cloud_sync)
        await self.middleware.call("service.restart", "cron")

        cloud_sync = await self._extend(cloud_sync)
        return cloud_sync

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Deletes cloud_sync entry `id`.
        """
        await self.middleware.call("datastore.delete", "tasks.cloudsync", id)
        await self.middleware.call("service.restart", "cron")

    @accepts(Int("credentials_id"))
    async def list_buckets(self, credentials_id):
        credentials = await self._get_credentials(credentials_id)
        if not credentials:
            raise CallError("Invalid credentials")

        provider = REMOTES[credentials["provider"]]

        if not provider.buckets:
            raise CallError("This provider does not use buckets")

        return await self.ls({"credentials": credentials}, "")

    @accepts(
        Dict(
            "cloud_sync_ls",
            Int("credentials", required=True),
            Bool("encryption", default=False),
            Bool("filename_encryption", default=False),
            Str("encryption_password", default=""),
            Str("encryption_salt", default=""),
            Dict("attributes", required=True, additional_attrs=True),
            Str("args", default=""),
        ))
    async def list_directory(self, cloud_sync):
        verrors = ValidationErrors()

        await self._basic_validate(verrors, "cloud_sync", dict(cloud_sync))

        if verrors:
            raise verrors

        credentials = await self._get_credentials(cloud_sync["credentials"])

        if REMOTES[credentials["provider"]].buckets:
            path = f"{cloud_sync['attributes']['bucket']}/{cloud_sync['attributes']['folder']}"
        else:
            path = cloud_sync["attributes"]["folder"]

        return await self.ls(dict(cloud_sync, credentials=credentials), path)

    @private
    async def ls(self, config, path):
        with RcloneConfig(config) as config:
            proc = await run([
                "rclone", "--config", config.config_path, "lsjson",
                "remote:" + path
            ],
                             check=False,
                             encoding="utf8")
            if proc.returncode == 0:
                return json.loads(proc.stdout)
            else:
                raise CallError(proc.stderr)

    @item_method
    @accepts(Int("id"))
    @job(lock=lambda args: "cloud_sync:{}".format(args[-1]),
         lock_queue_size=1,
         logs=True)
    async def sync(self, job, id):
        """
        Run the cloud_sync job `id`, syncing the local data to remote.
        """

        cloud_sync = await self._get_instance(id)

        return await rclone(self.middleware, job, cloud_sync)

    @accepts()
    async def providers(self):
        return sorted(
            [
                {
                    "name": provider.name,
                    "title": provider.title,
                    "credentials_schema": [
                        {
                            "property": field.name,
                            "schema": field.to_json_schema(),
                        }
                        for field in provider.credentials_schema
                    ],
                    "credentials_oauth": (
                        f"{OAUTH_URL}/{provider.name.lower()}"
                        if provider.credentials_oauth else None
                    ),
                    "buckets": provider.buckets,
                    "bucket_title": provider.bucket_title,
                    "task_schema": [
                        {
                            "property": field.name,
                            "schema": field.to_json_schema(),
                        }
                        for field in provider.task_schema + self.common_task_schema(provider)
                    ],
                }
                for provider in REMOTES.values()
            ],
            key=lambda provider: provider["title"].lower(),
        )

    def common_task_schema(self, provider):
        schema = []

        if provider.fast_list:
            schema.append(
                Bool("fast_list",
                     default=False,
                     title="Use --fast-list",
                     description=textwrap.dedent("""\
                Use fewer transactions in exchange for more RAM. This may also speed up or slow down your
                transfer. See [rclone documentation](https://rclone.org/docs/#fast-list) for more details.
            """).rstrip()))

        return schema
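A minimal usage sketch for the service above (not part of the original listing), assuming a reachable middleware instance, the `middlewared.client` library, and an existing cloud credential with id 1; the `schedule` keys follow the middleware's Cron schema:

from middlewared.client import Client

with Client() as c:
    # Create an hourly PUSH task. `attributes` follows the provider's task
    # schema, here an S3-style bucket/folder pair.
    task = c.call('cloudsync.create', {
        'description': 's3 sync',
        'direction': 'PUSH',
        'transfer_mode': 'SYNC',
        'path': '/mnt/tank',
        'credentials': 1,
        'schedule': {'minute': '00', 'hour': '*', 'dom': '*',
                     'month': '*', 'dow': '*'},
        'attributes': {'bucket': 'mybucket', 'folder': ''},
    })
    # Run the job immediately instead of waiting for cron; returns a job id.
    job_id = c.call('cloudsync.sync', task['id'])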
Example No. 17
class FCPortService(CRUDService):
    @filterable
    def query(self, filters, options):
        node = None
        if self.middleware.call_sync("failover.licensed"):
            node = self.middleware.call_sync("failover.node")

        fcportmap = {}
        for fbtt in self.middleware.call_sync("datastore.query",
                                              "services.fibrechanneltotarget"):
            fcportmap[fbtt["fc_port"]] = fbtt["fc_target"]

        proc = subprocess.Popen([
            "/usr/sbin/ctladm",
            "portlist",
            "-x",
        ],
                                stdout=subprocess.PIPE,
                                encoding="utf8")
        data = proc.communicate()[0]
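        # ctladm portlist -x emits an XML port list; each camtgt frontend
        # corresponds to an FC port, so walk them to build one result row
        # per port (virtual ports get a "/vport" suffix on the name).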
        doc = etree.fromstring(data)
        results = []
        for e in doc.xpath("//frontend_type[text()='camtgt']"):
            tag_port = e.getparent()
            name = tag_port.xpath("./port_name")[0].text
            reg = re.search(r"\d+", name)
            if reg:
                port = reg.group(0)
            else:
                port = "0"
            vport = tag_port.xpath("./physical_port")[0].text
            if vport != "0":
                name += f"/{vport}"
            state = "NO_LINK"
            speed = None
            wwpn = None
            if vport == "0":
                mibname = port
            else:
                mibname = f"{port}.chan{vport}"
            mib = f"dev.isp.{mibname}.loopstate"
            loopstate = sysctl.filter(mib)
            if loopstate:
                loopstate = loopstate[0].value
                if loopstate > 0 and loopstate < 10:
                    state = "SCANNING"
                elif loopstate == 10:
                    state = "READY"
                if loopstate > 0:
                    speedres = sysctl.filter(f"dev.isp.{mibname}.speed")
                    if speedres:
                        speed = speedres[0].value
            mib = f"dev.isp.{mibname}.wwpn"
            _filter = sysctl.filter(mib)
            if _filter:
                wwpn = f"naa.{_filter[0].value:x}"
            if name in fcportmap:
                targetobj = fcportmap[name]
                if targetobj is not None:
                    mode = "TARGET"
                    target = fcportmap[name]["id"]
                else:
                    mode = "INITIATOR"
                    target = None
            else:
                mode = "DISABLED"
                target = None
            initiators = []
            for i in tag_port.xpath("./initiator"):
                initiators.append(i.text)

            if node:
                for ha_e in doc.xpath("//frontend_type[text()='ha']"):
                    parent = ha_e.getparent()
                    port_name = parent.xpath("./port_name")[0].text
                    if ":" in port_name:
                        port_name = port_name.split(":", 1)[1]
                    physical_port = parent.xpath("./physical_port")[0].text
                    if physical_port != "0":
                        port_name += f"/{physical_port}"
                    if port_name != name:
                        continue
                    for i in parent.xpath("./initiator"):
                        initiators.append(
                            f"{i.text} (TrueNAS Controller {'2' if node == 'A' else '1'})"
                        )

            results.append(
                dict(
                    id=name,
                    port=port,
                    vport=vport,
                    name=name,
                    wwpn=wwpn,
                    mode=mode,
                    target=target,
                    state=state,
                    speed=speed,
                    initiators=initiators,
                ))

        return filter_list(results,
                           filters=filters or [],
                           options=options or {})

    @accepts(
        Str("id"),
        Dict(
            "fcport_update",
            Str("mode",
                enum=["INITIATOR", "TARGET", "DISABLED"],
                required=True),
            Int("target", null=True, default=None),
        ),
    )
    def do_update(self, id, data):
        verrors = ValidationErrors()

        if data["mode"] == "TARGET":
            if data["target"] is None:
                verrors.add("fcport_update.target",
                            "This field is required when mode is TARGET")
            else:
                try:
                    self.middleware.call_sync("iscsi.target.query",
                                              [["id", "=", data["target"]]],
                                              {"get": True})
                except IndexError:
                    verrors.add("fcport_update.target",
                                "This target does not exist")

        if verrors:
            raise verrors

        self.middleware.call_sync("datastore.delete",
                                  "services.fibrechanneltotarget",
                                  [["fc_port", "=", id]])

        port = id.replace("isp", "").replace("/", ",")
        if "," in port:
            port_number, vport = port.split(",", 1)
            mibname = f"{port_number}.chan{vport}"
        else:
            mibname = port

        role = sysctl.filter(f"dev.isp.{mibname}.role")
        if role:
            role = role[0]
        tun_var = f"hint.isp.{mibname}.role"

        set_sysctl = {}
        reload_loader = False

        if data["mode"] == "INITATOR":
            if role:
                # From disabled to initiator, just set sysctl
                if role.value == 0:
                    role.value = 2
                # From target to initiator, reload ctld then set to 2
                elif role.value == 1:
                    set_sysctl[mibname] = 2

            try:
                tun = self.middleware.call_sync("tunable.query",
                                                [["var", "=", tun_var]],
                                                {"get": True})
            except IndexError:
                self.middleware.call_sync("tunable.insert", {
                    "var": tun_var,
                    "value": "2",
                    "type": "LOADER",
                })
                reload_loader = True
            else:
                if tun["value"] != "2":
                    self.middleware.call_sync("tunable.update", tun["id"], {
                        "value": "2",
                    })
                    reload_loader = True

        if data["mode"] == "DISABLED":
            if role:
                # From initiator to disabled, just set sysctl
                if role.value == 2:
                    role.value = 0

            try:
                tun = self.middleware.call_sync("tunable.query",
                                                [["var", "=", tun_var]],
                                                {"get": True})
            except IndexError:
                pass
            else:
                self.middleware.call_sync("tunable.delete", tun["id"])
                reload_loader = True

        if data["mode"] == "TARGET":
            if role:
                # From initiator to target, first set sysctl
                if role.value == 2:
                    role.value = 0

            try:
                tun = self.middleware.call_sync("tunable.query",
                                                [["var", "=", tun_var]],
                                                {"get": True})
            except IndexError:
                pass
            else:
                self.middleware.call_sync("tunable.delete", tun["id"])
                reload_loader = True

        if data["mode"] != "DISABLED":
            self.middleware.call_sync("datastore.insert",
                                      "services.fibrechanneltotarget", {
                                          "fc_port": id,
                                          "fc_target": data["target"],
                                      })

        self.middleware.call_sync("service.reload", "iscsitarget")

        for mibname, val in set_sysctl.items():
            role = sysctl.filter(f"dev.isp.{mibname}.role")
            if role:
                role = role[0]
                role.value = val

        if reload_loader:
            self.middleware.call_sync("service.reload", "loader")

        return self.middleware.run_coroutine(self._get_instance(id))
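A hedged sketch of driving the port-mode state machine above from a client; the port id 'isp0' and target id 1 are placeholders for an existing FC port and iSCSI target:

from middlewared.client import Client

with Client() as c:
    # Map port isp0 to iSCSI target 1. `target` is required when mode is
    # TARGET (enforced by do_update's validation).
    c.call('fcport.update', 'isp0', {'mode': 'TARGET', 'target': 1})

    # Disable the port again; this removes the fibrechanneltotarget row and
    # deletes the loader tunable, which triggers a loader reload.
    c.call('fcport.update', 'isp0', {'mode': 'DISABLED', 'target': None})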
Example No. 18
class ClusterCacheService(Service):
    tdb_options = {
        "cluster": True,
        "data_type": "STRING"
    }

    class Config:
        private = True

    @accepts(Str('key'))
    async def get(self, key):
        """
        Get `key` from cache.

        Raises:
            KeyError: not found in the cache
            CallError: issue with clustered key-value store

        Uses CLOCK_REALTIME (rather than a monotonic clock) because
        expiration timestamps must be comparable across cluster nodes.
        """
        payload = {
            "name": 'middlewared',
            "key": key,
            "tdb-options": self.tdb_options
        }
        try:
            tdb_value = await self.middleware.call('tdb.fetch', payload)
        except MatchNotFound:
            raise KeyError(key)

        expires = float(tdb_value[:13])
        now = time.clock_gettime(time.CLOCK_REALTIME)
        if expires and now > expires:
            await self.middleware.call('tdb.remove', payload)
            raise KeyError(f'{key} has expired')

        is_encrypted = bool(int(tdb_value[13]))
        if is_encrypted:
            raise NotImplementedError

        data = json.loads(tdb_value[18:])
        return data

    @accepts(Str('key'))
    async def pop(self, key):
        """
        Removes and returns `key` from cache.
        """
        payload = {
            "name": 'middlewared',
            "key": key,
            "tdb-options": self.tdb_options
        }
        try:
            tdb_value = await self.middleware.call('tdb.fetch', payload)
        except MatchNotFound:
            tdb_value = None

        if tdb_value:
            await self.middleware.call('tdb.remove', payload)
            # Will uncomment / add handling for private entries
            # once there's a cluster-wide method for encrypting data
            # is_encrypted = bool(int(tdb_value[13]))
            tdb_value = json.loads(tdb_value[18:])

        return tdb_value

    @accepts(Str('key'))
    async def has_key(self, key):
        try:
            await self.middleware.call('tdb.fetch', {
                "name": 'middlewared',
                "key": key,
                "tdb-options": self.tdb_options
            })
            return True
        except MatchNotFound:
            return False

    @accepts(
        Str('key'),
        Dict('value', additional_attrs=True),
        Int('timeout', default=0),
        Dict(
            'options',
            Str('flag', enum=["CREATE", "UPDATE"], default=None, null=True),
            Bool('private', default=False),
        )
    )
    async def put(self, key, value, timeout, options):
        """
        Put `key` of `value` in the cache. `timeout` specifies time limit
        after which it will be removed.

        The following options are supported:
        `flag` optionally specifies insertion behavior.
        The `CREATE` flag raises KeyError if the entry already exists; the
        `UPDATE` flag raises KeyError if the entry does not exist. When no
        flag is specified, the entry is simply inserted.

        `private` determines whether data should be encrypted before being
        committed to the underlying storage backend.
        """
        if options['private']:
            # will implement in later commit
            raise NotImplementedError

        if timeout != 0:
            ts = f'{time.clock_gettime(time.CLOCK_REALTIME) + timeout:.2f}'
        else:
            ts = '0000000000.00'

        tdb_key = key

        # This format must not be changed without careful consideration:
        # chars 0-12 hold the expiration timestamp, char 13 the `private`
        # flag, and chars 14-17 are zero padding reserved for future boolean
        # options; the JSON payload starts at offset 18.
        tdb_val = f'{ts}{int(options["private"])}0000{json.dumps(value)}'

        if options['flag']:
            has_entry = False
            try:
                has_entry = bool(await self.get(tdb_key))
            except KeyError:
                pass

            if options['flag'] == "CREATE" and has_entry:
                raise KeyError(key)

            if options['flag'] == "UPDATE" and not has_entry:
                raise KeyError(key)

        await self.middleware.call('tdb.store', {
            'name': 'middlewared',
            'key': tdb_key,
            'value': {'payload': tdb_val},
            'tdb-options': self.tdb_options
        })
        return

    @filterable
    async def query(self, filters, options):
        def cache_convert_fn(tdb_key, tdb_val, entries):
            entries.append({
                "key": tdb_key,
                "timeout": float(tdb_val[:12]),
                "private": bool(int(tdb_val[14])),
                "value": json.loads(tdb_val[18:])
            })
            return True

        if not filters:
            filters = []
        if not options:
            options = {}

        parsed = []
        tdb_entries = await self.middleware.call('tdb.entries', {
            'name': 'middlewared',
            'tdb-options': self.tdb_options
        })
        for entry in tdb_entries:
            cache_convert_fn(entry['key'], entry['val'], parsed)

        return filter_list(parsed, filters, options)
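Because the service is marked private, it is consumed from other middleware plugins rather than from external clients. A minimal sketch, assuming `self` is any middleware Service:

async def remember_ctdb_state(self):
    # Store a value for 60 seconds; CREATE raises KeyError if it exists.
    await self.middleware.call(
        'clustercache.put', 'ctdb_state', {'healthy': True}, 60,
        {'flag': 'CREATE', 'private': False})
    try:
        return await self.middleware.call('clustercache.get', 'ctdb_state')
    except KeyError:
        # The entry expired (or was never written).
        return None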
Example No. 19
class ZFSSnapshot(CRUDService):
    class Config:
        datastore_primary_key_type = 'string'
        namespace = 'zfs.snapshot'
        process_pool = True
        cli_namespace = 'storage.snapshot'

    @filterable
    def query(self, filters, options):
        """
        Query all ZFS Snapshots with `query-filters` and `query-options`.
        """
        # Special case for faster listing of snapshot names (#53149)
        if (options and options.get('select') == ['name']
                and (not filters
                     or filter_getattrs(filters).issubset({'name', 'pool'}))):
            with libzfs.ZFS() as zfs:
                snaps = zfs.snapshots_serialized(['name'])

            if filters or len(options) > 1:
                return filter_list(snaps, filters, options)
            return snaps

        with libzfs.ZFS() as zfs:
            # Handle `id` filter to avoid getting all snapshots first
            kwargs = dict(holds=False, mounted=False)
            if filters and len(filters) == 1 and list(
                    filters[0][:2]) == ['id', '=']:
                kwargs['datasets'] = [filters[0][2]]

            snapshots = zfs.snapshots_serialized(**kwargs)

        # FIXME: awful performance with hundreds/thousands of snapshots
        return filter_list(snapshots, filters, options)

    @accepts(
        Dict(
            'snapshot_create',
            Str('dataset', required=True, empty=False),
            Str('name', empty=False),
            Str('naming_schema',
                empty=False,
                validators=[ReplicationSnapshotNamingSchema()]),
            Bool('recursive', default=False),
            Bool('vmware_sync', default=False),
            Dict('properties', additional_attrs=True),
        ))
    def do_create(self, data):
        """
        Take a snapshot from a given dataset.
        """

        dataset = data['dataset']
        recursive = data['recursive']
        properties = data['properties']

        verrors = ValidationErrors()

        if 'name' in data and 'naming_schema' in data:
            verrors.add(
                'snapshot_create.naming_schema',
                'You can\'t specify name and naming schema at the same time')
        elif 'name' in data:
            name = data['name']
        elif 'naming_schema' in data:
            # We can't do `strftime` here because we are in the process pool and `TZ` environment variable update
            # is not propagated here.
            name = self.middleware.call_sync('replication.new_snapshot_name',
                                             data['naming_schema'])
        else:
            verrors.add('snapshot_create.naming_schema',
                        'You must specify either name or naming schema')

        if verrors:
            raise verrors

        vmware_context = None
        if data['vmware_sync']:
            vmware_context = self.middleware.call_sync('vmware.snapshot_begin',
                                                       dataset, recursive)

        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(dataset)
                ds.snapshot(f'{dataset}@{name}',
                            recursive=recursive,
                            fsopts=properties)

                if vmware_context and vmware_context['vmsynced']:
                    ds.properties['freenas:vmsynced'] = libzfs.ZFSUserProperty(
                        'Y')

            self.logger.info(f"Snapshot taken: {dataset}@{name}")
        except libzfs.ZFSException as err:
            self.logger.error(f'Failed to snapshot {dataset}@{name}: {err}')
            raise CallError(f'Failed to snapshot {dataset}@{name}: {err}')
        else:
            return self.middleware.call_sync('zfs.snapshot.get_instance',
                                             f'{dataset}@{name}')
        finally:
            if vmware_context:
                self.middleware.call_sync('vmware.snapshot_end',
                                          vmware_context)

    @accepts(
        Dict('snapshot_remove', Str('dataset', required=True),
             Str('name', required=True), Bool('defer_delete')))
    def remove(self, data):
        """
        Remove a snapshot from a given dataset.

        Returns:
            bool: True on success, otherwise False.
        """
        self.logger.debug(
            'zfs.snapshot.remove is deprecated, use zfs.snapshot.delete')
        snapshot_name = data['dataset'] + '@' + data['name']
        try:
            self.do_delete(snapshot_name,
                           {'defer': data.get('defer_delete') or False})
        except Exception:
            return False
        return True

    @accepts(
        Str('id'),
        Dict(
            'options',
            Bool('defer', default=False),
            Bool('recursive', default=False),
        ),
    )
    def do_delete(self, id, options):
        """
        Delete snapshot of name `id`.

        `options.defer` will defer the deletion of snapshot.
        """
        try:
            with libzfs.ZFS() as zfs:
                snap = zfs.get_snapshot(id)
                snap.delete(defer=options['defer'],
                            recursive=options['recursive'])
        except libzfs.ZFSException as e:
            raise CallError(str(e))
        else:
            return True

    @accepts(Dict(
        'snapshot_clone',
        Str('snapshot'),
        Str('dataset_dst'),
    ))
    def clone(self, data):
        """
        Clone a given snapshot to a new dataset.

        Returns:
            bool: True on success, otherwise False.
        """

        snapshot = data.get('snapshot', '')
        dataset_dst = data.get('dataset_dst', '')

        if not snapshot or not dataset_dst:
            return False

        try:
            with libzfs.ZFS() as zfs:
                snp = zfs.get_snapshot(snapshot)
                snp.clone(dataset_dst)
                dataset = zfs.get_dataset(dataset_dst)
                if dataset.type.name == 'FILESYSTEM':
                    dataset.mount_recursive()
            self.logger.info("Cloned snapshot {0} to dataset {1}".format(
                snapshot, dataset_dst))
            return True
        except libzfs.ZFSException as err:
            self.logger.error("{0}".format(err))
            raise CallError(f'Failed to clone snapshot: {err}')

    @accepts(
        Str('id'),
        Dict(
            'options',
            Bool('recursive', default=False),
            Bool('recursive_clones', default=False),
            Bool('force', default=False),
            Bool('recursive_rollback', default=False),
        ),
    )
    def rollback(self, id, options):
        """
        Rollback to a given snapshot `id`.

        `options.recursive` will destroy any snapshots and bookmarks more recent than the one
        specified.

        `options.recursive_clones` is just like `recursive` but will also destroy any clones.

        `options.force` will force unmount of any clones.

        `options.recursive_rollback` will do a complete recursive rollback of each child snapshot of `id`. If
        any child does not have the specified snapshot, this operation will fail.
        """
        args = []
        if options['force']:
            args += ['-f']
        if options['recursive']:
            args += ['-r']
        if options['recursive_clones']:
            args += ['-R']

        if options['recursive_rollback']:
            dataset, snap_name = id.rsplit('@', 1)
            datasets = {
                f'{ds["id"]}@{snap_name}'
                for ds in self.middleware.call_sync('zfs.dataset.query', [[
                    'OR', [['id', '^', f'{dataset}/'], ['id', '=', dataset]]
                ]])
            }

            for snap in filter(
                    lambda sn: self.middleware.call_sync(
                        'zfs.snapshot.query', [['id', '=', sn]]), datasets):
                self.rollback_impl(args, snap)

        else:
            self.rollback_impl(args, id)

    @private
    def rollback_impl(self, args, id):
        try:
            subprocess.run(
                ['zfs', 'rollback'] + args + [id],
                text=True,
                capture_output=True,
                check=True,
            )
        except subprocess.CalledProcessError as e:
            raise CallError(f'Failed to rollback snapshot: {e.stderr.strip()}')
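A short usage sketch for the snapshot service above; the dataset and snapshot names are placeholders, and the calls assume the service is reachable through the middleware client:

from middlewared.client import Client

with Client() as c:
    # Take a recursive snapshot with an explicit `name` (mutually exclusive
    # with `naming_schema`, as enforced in do_create).
    c.call('zfs.snapshot.create', {
        'dataset': 'tank/data',
        'name': 'manual-2021-01-01',
        'recursive': True,
    })

    # Roll back, destroying snapshots newer than this one (and their clones),
    # force-unmounting clones if necessary.
    c.call('zfs.snapshot.rollback', 'tank/data@manual-2021-01-01',
           {'recursive': True, 'recursive_clones': True, 'force': True})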
Example No. 20
class CacheService(Service):

    class Config:
        private = True

    def __init__(self, *args, **kwargs):
        super(CacheService, self).__init__(*args, **kwargs)
        self.__cache = {}
        self.kv_tuple = namedtuple('Cache', ['value', 'timeout'])

    @accepts(Str('key'))
    def has_key(self, key):
        """
        Check if given `key` is in cache.
        """
        return key in self.__cache

    @accepts(Str('key'))
    def get(self, key):
        """
        Get `key` from cache.

        Raises:
            KeyError: not found in the cache
        """

        if self.__cache[key].timeout > 0:
            self.get_timeout(key)

        return self.__cache[key].value

    @accepts(Str('key'), Any('value'), Int('timeout', default=0))
    def put(self, key, value, timeout):
        """
        Put `key` of `value` in the cache.
        """

        if timeout != 0:
            timeout = time.monotonic() + timeout

        v = self.kv_tuple(value=value, timeout=timeout)
        self.__cache[key] = v

    @accepts(Str('key'))
    def pop(self, key):
        """
        Removes and returns `key` from cache.
        """
        cache = self.__cache.pop(key, None)

        if cache is not None:
            cache = cache.value

        return cache

    @private
    def get_timeout(self, key):
        """
        Check if 'key' has expired
        """
        now = time.monotonic()
        value, timeout = self.__cache[key]

        if now >= timeout:
            # Bust the cache
            del self.__cache[key]

            raise KeyError(f'{key} has expired')

    @private
    def get_or_put(self, key, timeout, method):
        try:
            return self.get(key)
        except KeyError:
            value = method()
            self.put(key, value, timeout)
            return value
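This private cache is reached through self.middleware.call from other plugins. A minimal sketch of the memoization pattern that get_or_put enables, where build_report is a hypothetical expensive helper:

def cached_report(self):
    try:
        return self.middleware.call_sync('cache.get', 'report')
    except KeyError:
        # Missing or expired: recompute and cache for five minutes.
        report = self.build_report()  # hypothetical helper
        self.middleware.call_sync('cache.put', 'report', report, 300)
        return report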
Example No. 21
class ZFSPoolService(CRUDService):
    class Config:
        namespace = 'zfs.pool'
        private = True
        process_pool = True

    @filterable
    def query(self, filters, options):
        # We should not return datasets here; there is zfs.dataset.query for that
        state_kwargs = {'datasets_recursive': False}
        with libzfs.ZFS() as zfs:
            # Handle `id` filter specially to avoid getting all pools first
            if filters and len(filters) == 1 and list(
                    filters[0][:2]) == ['id', '=']:
                try:
                    pools = [
                        zfs.get(filters[0][2]).__getstate__(**state_kwargs)
                    ]
                except libzfs.ZFSException:
                    pools = []
            else:
                pools = [i.__getstate__(**state_kwargs) for i in zfs.pools]
        return filter_list(pools, filters, options)

    @accepts(
        Dict(
            'zfspool_create',
            Str('name', required=True),
            List('vdevs',
                 items=[
                     Dict(
                         'vdev',
                         Str('root',
                             enum=[
                                 'DATA', 'CACHE', 'LOG', 'SPARE', 'SPECIAL',
                                 'DEDUP'
                             ],
                             required=True),
                         Str('type',
                             enum=[
                                 'RAIDZ1', 'RAIDZ2', 'RAIDZ3', 'MIRROR',
                                 'STRIPE'
                             ],
                             required=True),
                         List('devices', items=[Str('disk')], required=True),
                     ),
                 ],
                 required=True),
            Dict('options', additional_attrs=True),
            Dict('fsoptions', additional_attrs=True),
        ), )
    def do_create(self, data):
        with libzfs.ZFS() as zfs:
            topology = convert_topology(zfs, data['vdevs'])
            zfs.create(data['name'], topology, data['options'],
                       data['fsoptions'])

        return self.middleware.call_sync('zfs.pool.get_instance', data['name'])

    @accepts(Str('pool'),
             Dict(
                 'options',
                 Dict('properties', additional_attrs=True),
             ))
    def do_update(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                for k, v in options['properties'].items():
                    prop = pool.properties[k]
                    if 'value' in v:
                        prop.value = v['value']
                    elif 'parsed' in v:
                        prop.parsed = v['parsed']
        except libzfs.ZFSException as e:
            raise CallError(str(e))

    @accepts(Str('pool'), Dict(
        'options',
        Bool('force', default=False),
    ))
    def do_delete(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                zfs.destroy(name, force=options['force'])
        except libzfs.ZFSException as e:
            errno_ = errno.EFAULT
            if e.code == libzfs.Error.UMOUNTFAILED:
                errno_ = errno.EBUSY
            raise CallError(str(e), errno_)

    @accepts(Str('pool', required=True))
    def upgrade(self, pool):
        try:
            with libzfs.ZFS() as zfs:
                zfs.get(pool).upgrade()
        except libzfs.ZFSException as e:
            raise CallError(str(e))

    @accepts(Str('pool'), Dict(
        'options',
        Bool('force', default=False),
    ))
    def export(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                # FIXME: force not yet implemented
                pool = zfs.get(name)
                zfs.export_pool(pool)
        except libzfs.ZFSException as e:
            raise CallError(str(e))

    @accepts(Str('pool'))
    def get_devices(self, name):
        try:
            with libzfs.ZFS() as zfs:
                return [i.replace('/dev/', '') for i in zfs.get(name).disks]
        except libzfs.ZFSException as e:
            raise CallError(str(e), errno.ENOENT)

    @accepts(
        Str('name'),
        List('new', default=None, null=True),
        List('existing',
             items=[
                 Dict(
                     'attachvdev',
                     Str('target'),
                     Str('type', enum=['DISK']),
                     Str('path'),
                 ),
             ],
             null=True,
             default=None),
    )
    @job()
    def extend(self, job, name, new, existing):
        """
        Extend a zfs pool `name` with `new` vdevs or attach to `existing` vdevs.
        """

        if new is None and existing is None:
            raise CallError('New or existing vdevs must be provided',
                            errno.EINVAL)

        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)

                if new:
                    topology = convert_topology(zfs, new)
                    pool.attach_vdevs(topology)

                # Make sure we can find all target vdevs
                for i in (existing or []):
                    target = find_vdev(pool, i['target'])
                    if target is None:
                        raise CallError(
                            f"Failed to find vdev for {i['target']}",
                            errno.EINVAL)
                    i['target'] = target

                for i in (existing or []):
                    newvdev = libzfs.ZFSVdev(zfs, i['type'].lower())
                    newvdev.path = i['path']
                    i['target'].attach(newvdev)

        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)

    def __zfs_vdev_operation(self, name, label, op, *args):
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                target = find_vdev(pool, label)
                if target is None:
                    raise CallError(f'Failed to find vdev for {label}',
                                    errno.EINVAL)
                op(target, *args)
        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)

    @accepts(Str('pool'), Str('label'),
             Dict('options', Bool('clear_label', default=False)))
    def detach(self, name, label, options):
        """
        Detach device `label` from the pool `pool`.
        """
        self.detach_remove_impl('detach', name, label, options)

    def detach_remove_impl(self, op, name, label, options):
        def impl(target):
            getattr(target, op)()
            if options['clear_label']:
                self.clear_label(target.path)

        self.__zfs_vdev_operation(name, label, impl)

    @accepts(Str('device'))
    def clear_label(self, device):
        """
        Clear label from `device`.
        """
        try:
            libzfs.clear_label(device)
        except (libzfs.ZFSException, OSError) as e:
            raise CallError(str(e))

    @accepts(Str('pool'), Str('label'))
    def offline(self, name, label):
        """
        Offline device `label` from the pool `pool`.
        """
        self.__zfs_vdev_operation(name, label, lambda target: target.offline())

    @accepts(Str('pool'), Str('label'), Bool('expand', default=False))
    def online(self, name, label, expand):
        """
        Online device `label` from the pool `pool`.
        """
        self.__zfs_vdev_operation(name, label,
                                  lambda target, *args: target.online(*args),
                                  expand)

    @accepts(Str('pool'), Str('label'),
             Dict('options', Bool('clear_label', default=False)))
    def remove(self, name, label, options):
        """
        Remove device `label` from the pool `pool`.
        """
        self.detach_remove_impl('remove', name, label, options)

    @accepts(Str('pool'), Str('label'), Str('dev'))
    def replace(self, name, label, dev):
        """
        Replace device `label` with `dev` in pool `name`.
        """
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                target = find_vdev(pool, label)
                if target is None:
                    raise CallError(f'Failed to find vdev for {label}',
                                    errno.EINVAL)

                newvdev = libzfs.ZFSVdev(zfs, 'disk')
                newvdev.path = f'/dev/{dev}'
                # FIXME: Replacing using the old path is not working for some
                # reason, so let's use the guid for now.
                target.path = str(target.guid)
                target.replace(newvdev)
        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)

    @accepts(Str('name', required=True),
             Str('action', enum=['START', 'STOP', 'PAUSE'], default='START'))
    @job(lock=lambda i: f'{i[0]}-{i[1] if len(i) >= 2 else "START"}')
    def scrub(self, job, name, action):
        """
        Start/Stop/Pause a scrub on pool `name`.
        """
        if action != 'PAUSE':
            try:
                with libzfs.ZFS() as zfs:
                    pool = zfs.get(name)

                    if action == 'START':
                        pool.start_scrub()
                    else:
                        pool.stop_scrub()
            except libzfs.ZFSException as e:
                raise CallError(str(e), e.code)
        else:
            proc = subprocess.Popen(f'zpool scrub -p {name}'.split(' '),
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc.communicate()

            if proc.returncode != 0:
                raise CallError('Unable to pause scrubbing')

        def watch():
            while True:
                with libzfs.ZFS() as zfs:
                    scrub = zfs.get(name).scrub.__getstate__()

                if scrub['pause']:
                    job.set_progress(100, 'Scrub paused')
                    break

                if scrub['function'] != 'SCRUB':
                    break

                if scrub['state'] == 'FINISHED':
                    job.set_progress(100, 'Scrub finished')
                    break

                if scrub['state'] == 'CANCELED':
                    break

                if scrub['state'] == 'SCANNING':
                    job.set_progress(scrub['percentage'], 'Scrubbing')
                time.sleep(1)
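
        # For START we block until the scrub completes (or is paused or
        # canceled), updating job progress from the watcher thread.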

        if action == 'START':
            t = threading.Thread(target=watch, daemon=True)
            t.start()
            t.join()

    @accepts()
    def find_import(self):
        with libzfs.ZFS() as zfs:
            return [i.__getstate__() for i in zfs.find_import()]

    @accepts(
        Str('name_or_guid'),
        Dict('options', additional_attrs=True),
        Bool('any_host', default=True),
        Str('cachefile', null=True, default=None),
        Str('new_name', null=True, default=None),
    )
    def import_pool(self, name_or_guid, options, any_host, cachefile,
                    new_name):
        found = False
        with libzfs.ZFS() as zfs:
            for pool in zfs.find_import(cachefile=cachefile,
                                        search_paths=['/dev/disk/by-partuuid']
                                        if osc.IS_LINUX else None):
                if pool.name == name_or_guid or str(pool.guid) == name_or_guid:
                    found = pool
                    break

            if not found:
                raise CallError(f'Pool {name_or_guid} not found.',
                                errno.ENOENT)

            try:
                zfs.import_pool(found,
                                new_name or found.name,
                                options,
                                any_host=any_host)
            except libzfs.ZFSException as e:
                # We only log if some datasets failed to mount after pool import
                if e.code != libzfs.Error.MOUNTFAILED:
                    raise
                else:
                    self.logger.error(
                        'Failed to mount datasets after importing "%s" pool: %s',
                        name_or_guid,
                        str(e),
                        exc_info=True)

    @accepts(Str('pool'))
    def find_not_online(self, pool):
        pool = self.middleware.call_sync('zfs.pool.query', [['id', '=', pool]],
                                         {'get': True})

        unavails = []
        for nodes in pool['groups'].values():
            for node in nodes:
                unavails.extend(self.__find_not_online(node))
        return unavails

    def __find_not_online(self, node):
        if len(node['children']) == 0 and node['status'] not in ('ONLINE',
                                                                 'AVAIL'):
            return [node]

        unavails = []
        for child in node['children']:
            unavails.extend(self.__find_not_online(child))
        return unavails

    def get_vdev(self, name, vname):
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                vdev = find_vdev(pool, vname)
                if not vdev:
                    raise CallError(f'{vname} not found in {name}',
                                    errno.ENOENT)
                return vdev.__getstate__()
        except libzfs.ZFSException as e:
            raise CallError(str(e))
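A hedged sketch of the vdev payloads `extend` expects; zfs.pool is private, so calls come from other middleware plugins, and the pool, disk, and vdev identifiers are placeholders:

async def grow_pool(self):
    # Add a new two-disk mirror to the DATA topology of pool 'tank'.
    await self.middleware.call('zfs.pool.extend', 'tank', [{
        'root': 'DATA',
        'type': 'MIRROR',
        'devices': ['sdc', 'sdd'],
    }], None)

    # Or attach a disk to an existing vdev (identified by label or guid),
    # converting it into a mirror.
    await self.middleware.call('zfs.pool.extend', 'tank', None, [{
        'target': 'sda',
        'type': 'DISK',
        'path': '/dev/sdb',
    }])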
Example No. 22
class DSCache(Service):

    class Config:
        private = True

    @accepts(
        Str('directory_service', required=True, enum=["ACTIVEDIRECTORY", "LDAP"]),
        Str('idtype', enum=['USER', 'GROUP'], required=True),
        Dict('cache_entry', additional_attrs=True),
    )
    async def insert(self, ds, idtype, entry):
        if idtype == "GROUP":
            id_key = "gid"
            name_key = "name"
        else:
            id_key = "uid"
            name_key = "username"

        ops = [
            {"action": "SET", "key": f'ID_{entry[id_key]}', "val": entry},
            {"action": "SET", "key": f'NAME_{entry[name_key]}', "val": entry}
        ]
        await self.middleware.call('tdb.batch_ops', {
            "name": f'{ds.lower()}_{idtype.lower()}',
            "ops": ops
        })
        return True

    @accepts(
        Str('directory_service', required=True, enum=["ACTIVEDIRECTORY", "LDAP"]),
        Dict(
            'principal_info',
            Str('idtype', enum=['USER', 'GROUP']),
            Str('who'),
            Int('id'),
        ),
        Dict('options', Bool('synthesize', default=False))
    )
    async def retrieve(self, ds, data, options):
        who_str = data.get('who')
        who_id = data.get('id')
        if who_str is None and who_id is None:
            raise CallError("`who` or `id` entry is required to uniquely "
                            "identify the entry to be retrieved.")

        tdb_name = f'{ds.lower()}_{data["idtype"].lower()}'
        prefix = "NAME" if who_str else "ID"
        tdb_key = f'{prefix}_{who_str if who_str else who_id}'

        try:
            entry = await self.middleware.call("tdb.fetch", {"name": tdb_name, "key": tdb_key})
        except MatchNotFound:
            entry = None

        if not entry and options['synthesize']:
            """
            if cache lacks entry, create one from passwd / grp info,
            insert into cache and return synthesized value.
            get_uncached_* will raise KeyError if NSS lookup fails.
            """
            try:
                if data['idtype'] == 'USER':
                    pwdobj = await self.middleware.call('dscache.get_uncached_user',
                                                        who_str, who_id)
                    entry = await self.middleware.call('idmap.synthetic_user',
                                                       ds.lower(), pwdobj)
                else:
                    grpobj = await self.middleware.call('dscache.get_uncached_group',
                                                        who_str, who_id)
                    entry = await self.middleware.call('idmap.synthetic_group',
                                                       ds.lower(), grpobj)
                await self.insert(ds, data['idtype'], entry)
            except KeyError:
                entry = None

        elif not entry:
            raise KeyError(who_str if who_str else who_id)

        return entry

    @accepts(
        Str('ds', required=True, enum=["ACTIVEDIRECTORY", "LDAP"]),
        Str('idtype', required=True, enum=["USER", "GROUP"]),
    )
    async def entries(self, ds, idtype):
        entries = await self.middleware.call('tdb.entries', {
            'name': f'{ds.lower()}_{idtype.lower()}',
            'query-filters': [('key', '^', 'ID')]
        })
        return [x['val'] for x in entries]

    def get_uncached_user(self, username=None, uid=None, getgroups=False):
        """
        Returns dictionary containing pwd_struct data for
        the specified user or uid. Will raise an exception
        if the user does not exist. This method is appropriate
        for user validation.
        """
        if username:
            u = pwd.getpwnam(username)
        elif uid is not None:
            u = pwd.getpwuid(uid)
        else:
            return {}

        user_obj = {
            'pw_name': u.pw_name,
            'pw_uid': u.pw_uid,
            'pw_gid': u.pw_gid,
            'pw_gecos': u.pw_gecos,
            'pw_dir': u.pw_dir,
            'pw_shell': u.pw_shell,
        }
        if getgroups:
            user_obj['grouplist'] = os.getgrouplist(u.pw_name, u.pw_gid)

        return user_obj

    def get_uncached_group(self, groupname=None, gid=None):
        """
        Returns dictionary containing grp_struct data for
        the specified group or gid. Will raise an exception
        if the group does not exist. This method is appropriate
        for group validation.
        """
        if groupname:
            g = grp.getgrnam(groupname)
        elif gid is not None:
            g = grp.getgrgid(gid)
        else:
            return {}
        return {
            'gr_name': g.gr_name,
            'gr_gid': g.gr_gid,
            'gr_mem': g.gr_mem
        }

    @accepts(
        Str('objtype', enum=['USERS', 'GROUPS'], default='USERS'),
        Ref('query-filters'),
        Ref('query-options'),
    )
    async def query(self, objtype, filters, options):
        """
        Query User / Group cache with `query-filters` and `query-options`.

        `objtype`: 'USERS' or 'GROUPS'
        """
        res = []
        ds_state = await self.middleware.call('directoryservices.get_state')
        enabled_ds = None
        extra = options.get("extra", {})

        is_name_check = bool(filters and len(filters) == 1 and filters[0][0] in ['username', 'name'])
        is_id_check = bool(filters and len(filters) == 1 and filters[0][0] in ['uid', 'gid'])

        res.extend((await self.middleware.call(f'{objtype.lower()[:-1]}.query', filters, options)))

        for dstype, state in ds_state.items():
            if state != 'DISABLED':
                enabled_ds = dstype
                break

        if not enabled_ds:
            return res

        if is_name_check and filters[0][1] == '=':
            # exists in local sqlite database, return results
            if res:
                return res

            entry = await self.retrieve(enabled_ds.upper(), {
                'idtype': objtype[:-1],
                'who': filters[0][2],
            }, {'synthesize': True})
            return [entry] if entry else []

        if is_id_check and filters[0][1] == '=':
            # exists in local sqlite database, return results
            if res:
                return res

            entry = await self.retrieve(enabled_ds.upper(), {
                'idtype': objtype[:-1],
                'id': filters[0][2],
            }, {'synthesize': True})
            return [entry] if entry else []

        entries = await self.entries(enabled_ds.upper(), objtype[:-1])
        if 'SMB' in extra.get('additional_information', []):
            for entry in entries:
                sid = await self.middleware.call('idmap.unixid_to_sid', {
                    'id_type': objtype[:-1],
                    'id': entry[f'{objtype[0].lower()}id'],
                })
                name_key = "username" if objtype == 'USERS' else 'group'
                entry.update({
                    'nt_name': entry[name_key],
                    'sid': sid,
                })

        entries_by_id = sorted(entries, key=lambda i: i['id'])
        res.extend(filter_list(entries_by_id, filters, options))
        return res

    @job(lock="dscache_refresh")
    async def refresh(self, job):
        """
        This is called from a cronjob every 24 hours and when a user clicks on the
        UI button to 'rebuild directory service cache'.
        """
        for ds in ['activedirectory', 'ldap']:
            await self.middleware.call('tdb.wipe', {'name': f'{ds}_user'})
            await self.middleware.call('tdb.wipe', {'name': f'{ds}_group'})

            ds_state = await self.middleware.call(f'{ds}.get_state')

            if ds_state == 'HEALTHY':
                await job.wrap(await self.middleware.call(f'{ds}.fill_cache', True))
            elif ds_state != 'DISABLED':
                self.logger.debug('Unable to refresh [%s] cache, state is: %s', ds, ds_state)
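A minimal sketch of querying the cache from another plugin, assuming Active Directory is the enabled directory service; a single name-equality filter is answered from (or synthesized into) the cache instead of listing every entry:

async def lookup_domain_user(self):
    users = await self.middleware.call(
        'dscache.query', 'USERS',
        [['username', '=', 'DOMAIN\\alice']], {})
    return users[0] if users else None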
Example No. 23
class SharingSMBService(CRUDService):
    class Config:
        namespace = 'sharing.smb'
        datastore = 'sharing.cifs_share'
        datastore_prefix = 'cifs_'
        datastore_extend = 'sharing.smb.extend'

    @accepts(
        Dict('sharingsmb_create',
             Str('purpose',
                 enum=[x.name for x in SMBSharePreset],
                 default=SMBSharePreset.DEFAULT_SHARE.name),
             Str('path', required=True),
             Str('path_suffix', default=''),
             Bool('home', default=False),
             Str('name', max_length=80),
             Str('comment', default=''),
             Bool('ro', default=False),
             Bool('browsable', default=True),
             Bool('timemachine', default=False),
             Bool('recyclebin', default=False),
             Bool('guestok', default=False),
             Bool('abe', default=False),
             List('hostsallow', default=[]),
             List('hostsdeny', default=[]),
             Bool('aapl_name_mangling', default=False),
             Bool('acl', default=True),
             Bool('durablehandle', default=True),
             Bool('shadowcopy', default=True),
             Bool('streams', default=True),
             Bool('fsrvp', default=False),
             Str('auxsmbconf', max_length=None, default=''),
             Bool('enabled', default=True),
             register=True))
    async def do_create(self, data):
        """
        Create a SMB Share.

        `purpose` applies common configuration presets depending on intended purpose.

        `timemachine` when set, enables Time Machine backups for this share.

        `ro` when enabled, prohibits write access to the share.

        `guestok` when enabled, allows access to this share without a password.

        `hostsallow` is a list of hostnames / IP addresses which have access to this share.

        `hostsdeny` is a list of hostnames / IP addresses which are not allowed access to this share. If only a
        handful of hostnames should be allowed access, `hostsdeny` can be set to "ALL", which denies access to
        all hostnames except the ones listed in `hostsallow`.

        `acl` enables support for storing the SMB Security Descriptor as a Filesystem ACL.

        `streams` enables support for storing alternate datastreams as filesystem extended attributes.

        `fsrvp` enables support for the filesystem remote VSS protocol. This allows clients to create
        ZFS snapshots through RPC.

        `shadowcopy` enables support for the volume shadow copy service.

        `auxsmbconf` is a string of additional smb4.conf parameters not covered by the system's API.
        """
        verrors = ValidationErrors()
        path = data['path']

        await self.clean(data, 'sharingsmb_create', verrors)
        await self.validate(data, 'sharingsmb_create', verrors)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.apply_presets(data)
        await self.compress(data)
        vuid = await self.generate_vuid(data['timemachine'])
        data.update({'vuid': vuid})
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.middleware.call('sharing.smb.reg_addshare', data)
        await self.extend(data)  # We should do this in the insert call?

        enable_aapl = await self.check_aapl(data)

        if enable_aapl:
            await self._service_change('cifs', 'restart')
        else:
            await self._service_change('cifs', 'reload')

        return data

    @accepts(Int('id'),
             Patch('sharingsmb_create', 'sharingsmb_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update SMB Share of `id`.
        """
        verrors = ValidationErrors()
        path = data.get('path')

        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })

        new = old.copy()
        new.update(data)

        oldname = 'homes' if old['home'] else old['name']
        newname = 'homes' if new['home'] else new['name']

        new['vuid'] = await self.generate_vuid(new['timemachine'], new['vuid'])
        await self.clean(new, 'sharingsmb_update', verrors, id=id)
        await self.validate(new, 'sharingsmb_update', verrors, old=old)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        if old['purpose'] != new['purpose']:
            await self.apply_presets(new)

        await self.compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        enable_aapl = await self.check_aapl(new)
        if newname != oldname:
            # This is disruptive change. Share is actually being removed and replaced.
            # Forcibly closes any existing SMB sessions.
            await self.close_share(oldname)
            try:
                await self.middleware.call('sharing.smb.reg_delshare', oldname)
            except Exception:
                self.logger.warning('Failed to remove stale share [%s]',
                                    old['name'],
                                    exc_info=True)
            await self.middleware.call('sharing.smb.reg_addshare', new)
        else:
            diff = await self.middleware.call(
                'sharing.smb.diff_middleware_and_registry', new['name'], new)
            share_name = new['name'] if not new['home'] else 'homes'
            await self.middleware.call('sharing.smb.apply_conf_diff',
                                       'REGISTRY', share_name, diff)

        await self.extend(new)  # Same question as in do_create

        if enable_aapl:
            await self._service_change('cifs', 'restart')
        else:
            await self._service_change('cifs', 'reload')

        return new

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete SMB Share of `id`. This will forcibly disconnect SMB clients
        that are accessing the share.
        """
        share = await self._get_instance(id)
        result = await self.middleware.call('datastore.delete',
                                            self._config.datastore, id)
        await self.close_share(share['name'])
        try:
            await self.middleware.call(
                'smb.sharesec._delete',
                share['name'] if not share['home'] else 'homes')
        except Exception:
            self.logger.debug('Failed to delete share ACL for [%s].',
                              share['name'],
                              exc_info=True)

        try:
            await self.middleware.call(
                'sharing.smb.reg_delshare',
                share['name'] if not share['home'] else 'homes')
        except Exception:
            self.logger.warning('Failed to remove registry entry for [%s].',
                                share['name'],
                                exc_info=True)

        if share['timemachine']:
            await self.middleware.call('service.restart', 'mdns')

        return result

    @private
    async def check_aapl(self, data):
        """
        Returns whether we changed the global aapl support settings.
        """
        aapl_extensions = (
            await self.middleware.call('smb.config'))['aapl_extensions']

        if not aapl_extensions and data['timemachine']:
            await self.middleware.call('datastore.update', 'services_cifs', 1,
                                       {'cifs_srv_aapl_extensions': True})
            return True

        return False

    @private
    async def close_share(self, share_name):
        c = await run(
            [SMBCmd.SMBCONTROL.value, 'smbd', 'close-share', share_name],
            check=False)
        if c.returncode != 0:
            self.logger.warning('Failed to close smb share [%s]: [%s]',
                                share_name,
                                c.stderr.decode().strip())

    @private
    async def clean(self, data, schema_name, verrors, id=None):
        data['name'] = await self.name_exists(data, schema_name, verrors, id)

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        home_result = await self.home_exists(data['home'], schema_name,
                                             verrors, old)

        if home_result:
            verrors.add(f'{schema_name}.home',
                        'Only one share is allowed to be a home share.')
        elif not home_result and not data['path']:
            verrors.add(f'{schema_name}.path', 'This field is required.')

        if data['path']:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   f"{schema_name}.path",
                                                   data['path'])

        if not data['acl'] and not await self.middleware.call(
                'filesystem.acl_is_trivial', data['path']):
            verrors.add(
                f'{schema_name}.acl',
                f'ACL detected on {data["path"]}. ACLs must be stripped prior to creation '
                'of SMB share.')

        if data.get('name') and data['name'].lower() in [
                'global', 'homes', 'printers'
        ]:
            verrors.add(
                f'{schema_name}.name',
                f'{data["name"]} is a reserved section name, please select another one'
            )
        if data.get('path_suffix') and len(data['path_suffix'].split('/')) > 2:
            verrors.add(
                f'{schema_name}.path_suffix',
                'Path suffix may not contain more than two components.')

    @private
    async def home_exists(self, home, schema_name, verrors, old=None):
        home_filters = [('home', '=', True)]
        home_result = None

        if home:
            if old and old['id'] is not None:
                id = old['id']

                if not old['home']:
                    # Exclude the share being updated from the check; if it was
                    # already the home share there is no conflict to report.
                    home_filters.append(('id', '!=', id))
                    home_result = await self.middleware.call(
                        'datastore.query', self._config.datastore,
                        home_filters,
                        {'prefix': self._config.datastore_prefix})

        return home_result

    @private
    async def auxsmbconf_dict(self, aux, direction="TO"):
        ret = None
        if direction == 'TO':
            ret = {}
            for entry in aux.splitlines():
                try:
                    kv = entry.split('=', 1)
                    ret[kv[0].strip()] = kv[1].strip()
                except Exception:
                    self.logger.debug(
                        "Share contains invalid auxiliary parameter: [%s]",
                        entry)
            return ret

        if direction == 'FROM':
            return '\n'.join([f'{k}={v}' for k, v in aux.items()])

    @private
    async def name_exists(self, data, schema_name, verrors, id=None):
        name = data['name']
        path = data['path']

        if path and not name:
            name = path.rsplit('/', 1)[-1]

        name_filters = [('name', '=', name)]

        if id is not None:
            name_filters.append(('id', '!=', id))

        name_result = await self.middleware.call(
            'datastore.query', self._config.datastore, name_filters,
            {'prefix': self._config.datastore_prefix})

        if name_result:
            verrors.add(f'{schema_name}.name',
                        'A share with this name already exists.')

        return name

    @private
    async def extend(self, data):
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()
        if data['fsrvp']:
            data['shadowcopy'] = True

        if 'share_acl' in data:
            data.pop('share_acl')

        return data

    @private
    async def compress(self, data):
        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])

        return data

    @private
    async def generate_vuid(self, timemachine, vuid=""):
        try:
            if timemachine and vuid:
                uuid.UUID(vuid, version=4)
        except ValueError:
            self.logger.debug(
                'Time machine VUID string (%s) is invalid. Regenerating.', vuid)
            vuid = ""

        if timemachine and not vuid:
            vuid = str(uuid.uuid4())

        return vuid

    @private
    async def apply_presets(self, data):
        """
        Apply settings from presets. Only include auxiliary parameters
        from preset if user-defined aux parameters already exist. In this
        case user-defined takes precedence.
        """
        params = (SMBSharePreset[data["purpose"]].value)["params"].copy()
        aux = params.pop("auxsmbconf")
        data.update(params)
        if data["auxsmbconf"]:
            preset_aux = await self.auxsmbconf_dict(aux, direction="TO")
            data_aux = await self.auxsmbconf_dict(data["auxsmbconf"],
                                                  direction="TO")
            preset_aux.update(data_aux)
            data["auxsmbconf"] = await self.auxsmbconf_dict(preset_aux,
                                                            direction="FROM")

        return data

    @accepts()
    async def presets(self):
        """
        Retrieve pre-defined configuration sets for specific use-cases. These parameter
        combinations are often non-obvious, but beneficial in these scenarios.
        """
        return {x.name: x.value for x in SMBSharePreset}
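
A minimal usage sketch of the service above, assuming a running TrueNAS middleware and hypothetical pool/share names; the field names follow the `sharingsmb_create` schema documented in do_create. Note how `hostsdeny` set to "ALL" combined with a populated `hostsallow` produces the allow-list behaviour described in the docstring:

from middlewared.client import Client

with Client() as c:
    share = c.call('sharing.smb.create', {
        'path': '/mnt/tank/media',         # assumed dataset mountpoint
        'name': 'media',
        'hostsallow': ['192.168.0.0/24'],  # only this subnet may connect
        'hostsdeny': ['ALL'],              # everyone else is denied
        'guestok': False,
    })
    print(share['id'])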
Exemplo n.º 24
0
class BootService(Service):

    @accepts(
        Str('dev'),
        Dict(
            'options',
            Int('size'),
            Int('swap_size'),
        )
    )
    @private
    async def format(self, dev, options):
        """
        Format a given disk `dev` using the appropriate partition layout
        """
        job = await self.middleware.call('disk.wipe', dev, 'QUICK')
        await job.wait()
        if job.error:
            raise CallError(job.error)

        disk_details = await self.middleware.call('device.get_disk', dev)
        if not disk_details:
            raise CallError(f'Details for {dev} not found.')

        swap_size = options.get('swap_size')
        commands = []
        partitions = []
        efi_boot = (await self.middleware.call('boot.get_boot_type')) == 'EFI'
        if osc.IS_FREEBSD:
            commands.append(('gpart', 'create', '-s', 'gpt', '-f', 'active', f'/dev/{dev}'))
            # FreeBSD requires 272629760 bytes (260 MiB) for an EFI partition
            # and 524288 bytes (512 KiB) for a BIOS boot partition
            partitions.append(
                ('efi' if efi_boot else 'freebsd-boot', 272629760 if efi_boot else 524288),
            )
            if options.get('swap_size'):
                partitions.append(('freebsd-swap', options['swap_size']))
            if options.get('size'):
                partitions.append(('freebsd-zfs', options['size']))
        else:
            partitions.extend([
                ('BIOS boot partition', 1048576),  # We allot 1MiB to bios boot partition
                ('EFI System', 536870912)   # We allot 512MiB for EFI partition
            ])
            if swap_size:
                partitions.append(('Linux swap', swap_size))
            if options.get('size'):
                partitions.append(('Solaris /usr & Mac ZFS', options['size']))

        # Linux reserves around 73 sectors and FreeBSD around 80 for the GPT
        # tables plus our 4096-byte alignment offset on the boot disk
        partitions.append((
            'GPT partition table', (73 if osc.IS_LINUX else 80) * disk_details['sectorsize']
        ))
        total_partition_size = sum(map(lambda y: y[1], partitions))
        if disk_details['size'] < total_partition_size:
            partitions = [
                '%s, %s blocks' % (p[0], '{:,}'.format(int(p[1] / disk_details['sectorsize']))) for p in partitions
            ]
            partitions.append(
                'total of %s blocks' % '{:,}'.format(int(total_partition_size / disk_details['sectorsize']))
            )
            raise CallError(
                f'The new device ({dev}, {disk_details["size"]/(1024**3)} GB, {disk_details["blocks"]} blocks) '
                f'does not have enough space to hold the required new partitions ({", ".join(partitions)}). '
                'New mirrored devices might require more space than existing devices due to changes in the '
                'booting procedure.'
            )

        if osc.IS_LINUX:
            zfs_part_size = f'+{int(options["size"]/1024)}K' if options.get('size') else 0
            commands.extend((
                ['sgdisk', f'-a{int(4096/disk_details["sectorsize"])}', '-n1:0:+1024K', '-t1:EF02', f'/dev/{dev}'],
                ['sgdisk', '-n2:0:+524288K', '-t2:EF00', f'/dev/{dev}'],
                ['sgdisk', f'-n3:0:{zfs_part_size}', '-t3:BF01', f'/dev/{dev}'],
            ))
        else:
            if efi_boot:
                efi_size = 260
                commands.extend((
                    ['gpart', 'add', '-t', 'efi', '-i', '1', '-s', f'{efi_size}m', dev],
                    ['newfs_msdos', '-F', '16', f'/dev/{dev}p1'],
                ))
            else:
                commands.extend((
                    ['gpart', 'add', '-t', 'freebsd-boot', '-i', '1', '-s', '512k', dev],
                    ['gpart', 'set', '-a', 'active', dev],
                ))

        if swap_size:
            if osc.IS_LINUX:
                commands.insert(2, [
                    'sgdisk',
                    f'-n4:0:+{int(swap_size / 1024)}K',
                    '-t4:8200', f'/dev/{dev}'
                ])
            else:
                commands.append([
                    'gpart', 'add', '-t', 'freebsd-swap', '-i', '3',
                    '-s', str(options['swap_size']) + 'B', dev
                ])

        if osc.IS_FREEBSD:
            commands.append(
                ['gpart', 'add', '-t', 'freebsd-zfs', '-i', '2', '-a', '4k'] + (
                    ['-s', str(options['size']) + 'B'] if options.get('size') else []
                ) + [dev]
            )

        for command in commands:
            p = await run(*command, check=False)
            if p.returncode != 0:
                raise CallError(
                    '{} failed:\n{}{}'.format(' '.join(command), p.stdout.decode('utf-8'), p.stderr.decode('utf-8'))
                )

        if osc.IS_LINUX:
            await self.middleware.call('device.settle_udev_events')
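
For reference, a sketch of the sgdisk invocations the Linux branch above ends up issuing, assuming a hypothetical /dev/sdx with 512-byte sectors, a 2 GiB swap and no explicit `size` (the swap command is insert()ed at position 2, so it runs before the ZFS step):

# sgdisk -a8 -n1:0:+1024K -t1:EF02 /dev/sdx     # 1 MiB BIOS boot partition
# sgdisk -n2:0:+524288K -t2:EF00 /dev/sdx       # 512 MiB EFI system partition
# sgdisk -n4:0:+2097152K -t4:8200 /dev/sdx      # 2 GiB swap
# sgdisk -n3:0:0 -t3:BF01 /dev/sdx              # ZFS partition takes the rest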
Exemplo n.º 25
0
class BootEnvService(CRUDService):
    class Config:
        cli_namespace = 'system.bootenv'

    BE_TOOL = 'zectl' if osc.IS_LINUX else 'beadm'

    @filterable
    def query(self, filters, options):
        """
        Query all Boot Environments with `query-filters` and `query-options`.
        """
        results = []

        cp = subprocess.run([self.BE_TOOL, 'list', '-H'],
                            capture_output=True,
                            text=True)
        datasets_origins = [
            d['properties']['origin']['parsed']
            for d in self.middleware.call_sync('zfs.dataset.query')
        ]
        boot_pool = self.middleware.call_sync('boot.pool_name')
        for line in cp.stdout.strip().split('\n'):
            fields = line.split('\t')
            name = fields[0]
            if len(fields) > 5 and fields[5] != '-':
                name = fields[5]
            be = {
                'id': name,
                'realname': fields[0],
                'name': name,
                'active': fields[1],
                'activated': 'n' in fields[1].lower(),
                'can_activate': False,
                'mountpoint': fields[2],
                'space': None if osc.IS_LINUX else fields[3],
                'created': datetime.strptime(fields[3 if osc.IS_LINUX else 4], '%Y-%m-%d %H:%M'),
                'keep': None,
                'rawspace': None,
            }

            ds = self.middleware.call_sync('zfs.dataset.query', [
                ('id', '=', rf'{boot_pool}/ROOT/{fields[0]}'),
            ], {'extra': {
                'snapshots': True
            }})
            if ds:
                ds = ds[0]
                snapshot = None
                origin = ds['properties']['origin']['parsed']
                if '@' in origin:
                    snapshot = self.middleware.call_sync(
                        'zfs.snapshot.query', [('id', '=', origin)])
                    if snapshot:
                        snapshot = snapshot[0]
                if f'{self.BE_TOOL}:keep' in ds['properties']:
                    if ds['properties'][f'{self.BE_TOOL}:keep'][
                            'value'] == 'True':
                        be['keep'] = True
                    elif ds['properties'][f'{self.BE_TOOL}:keep'][
                            'value'] == 'False':
                        be['keep'] = False

                # When a BE is deleted, the following actions happen:
                # 1) Its descendants (if any) are promoted once
                # 2) The BE is deleted
                # 3) Filesystems dependent on the BE's origin are promoted
                # 4) The origin is deleted
                #
                # We want to know how much space will be freed when a BE is removed.
                # We classify a BE as one of 2 types:
                # 1) BE without descendants
                # 2) BE with descendants
                #
                # For (1), the space freed is the "usedbydataset" property plus the space freed by its "origin".
                # For (2), the space freed is nominally the same, but it cannot be accurately determined because
                # the descendants are promoted once for this BE and the final origin of the current BE depends on
                # the last descendant promoted. So we ignore the origin here and rely only on the space the BE
                # currently consumes as a best-effort prediction.
                # There is also the "usedbysnapshots" property; for that we retrieve all snapshots of the dataset
                # and, for any that have not been cloned into a dataset, count their space as freed when this
                # dataset is deleted. We also factor in the space consumed by children.

                be['rawspace'] = ds['properties']['usedbydataset'][
                    'parsed'] + ds['properties']['usedbychildren']['parsed']

                children = False
                for snap in ds['snapshots']:
                    if snap['name'] not in datasets_origins:
                        be['rawspace'] += snap['properties']['used']['parsed']
                    else:
                        children = True

                if snapshot and not children:
                    # This indicates the current BE is a leaf and it is safe to add the BE's origin
                    # space to the space freed when it is deleted.
                    be['rawspace'] += snapshot['properties']['used']['parsed']

                # Render rawspace (bytes) as a human-readable size with a
                # binary suffix, e.g. 3221225472 -> '3.0G'.
                size = float(be['rawspace'])
                for suffix in ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
                    if size < 1024 or suffix == 'Z':
                        break
                    size /= 1024
                be['space'] = f'{round(size, 2)}{suffix}'

                if osc.IS_FREEBSD:
                    be['can_activate'] = 'truenas:kernel_version' not in ds[
                        'properties']
                if osc.IS_LINUX:
                    be['can_activate'] = ('truenas:kernel_version'
                                          in ds['properties']
                                          or 'truenas:12' in ds['properties'])

            results.append(be)
        return filter_list(results, filters, options)

    @item_method
    @accepts(Str('id'))
    def activate(self, oid):
        """
        Activates boot environment `id`.
        """
        be = self.middleware.call_sync('bootenv.query', [['id', '=', oid]],
                                       {'get': True})
        if not be['can_activate']:
            raise CallError('This BE cannot be activated')

        try:
            subprocess.run([self.BE_TOOL, 'activate', oid],
                           capture_output=True,
                           text=True,
                           check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to activate BE: {cpe.stdout.strip()}')
        else:
            return True

    @item_method
    @accepts(Str('id'), Dict(
        'attributes',
        Bool('keep', default=False),
    ))
    async def set_attribute(self, oid, attrs):
        """
        Sets attributes for boot environment `id`.

        Currently only `keep` attribute is allowed.
        """
        boot_pool = await self.middleware.call('boot.pool_name')
        boot_env = await self.get_instance(oid)
        dsname = f'{boot_pool}/ROOT/{boot_env["realname"]}'
        ds = await self.middleware.call('zfs.dataset.query',
                                        [('id', '=', dsname)])
        if not ds:
            raise CallError(f'BE {oid!r} does not exist.', errno.ENOENT)
        await self.middleware.call(
            'zfs.dataset.update', dsname, {
                'properties': {
                    f'{self.BE_TOOL}:keep': {
                        'value': str(attrs['keep'])
                    }
                },
            })
        return True

    @accepts(
        Dict(
            'bootenv_create',
            Str('name', required=True, validators=[Match(RE_BE_NAME)]),
            Str('source'),
        ))
    async def do_create(self, data):
        """
        Create a new boot environment using `name`.

        If a new boot environment is desired which is a clone of another boot environment, `source` can be passed.
        Then, a new boot environment of `name` is created using boot environment `source` by cloning it.

        Ensure that `name` and `source` are valid boot environment names.
        """
        verrors = ValidationErrors()
        await self._clean_be_name(verrors, 'bootenv_create', data['name'])
        verrors.check()

        args = [self.BE_TOOL, 'create']
        source = data.get('source')
        if source:
            args += [
                '-e',
                os.path.join(await self.middleware.call('boot.pool_name'),
                             'ROOT', source) if osc.IS_LINUX else source
            ]
        args.append(data['name'])
        try:
            await run(args, encoding='utf8', check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to create boot environment: {cpe.stdout}')
        return data['name']

    @accepts(Str('id'),
             Dict(
                 'bootenv_update',
                 Str('name', required=True, validators=[Match(RE_BE_NAME)]),
             ))
    async def do_update(self, oid, data):
        """
        Update `id` boot environment name with a new provided valid `name`.
        """
        be = await self._get_instance(oid)

        verrors = ValidationErrors()
        await self._clean_be_name(verrors, 'bootenv_update', data['name'])
        verrors.check()

        try:
            await run(self.BE_TOOL,
                      'rename',
                      oid,
                      data['name'],
                      encoding='utf8',
                      check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to update boot environment: {cpe.stdout}')
        return data['name']

    async def _clean_be_name(self, verrors, schema, name):
        beadm_names = (await (await Popen(
            f"{self.BE_TOOL} list -H | awk '{{print ${1 if osc.IS_LINUX else 7}}}'",
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )).communicate())[0].decode().split('\n')
        if name in filter(None, beadm_names):
            verrors.add(f'{schema}.name', f'The name "{name}" already exists',
                        errno.EEXIST)

    @accepts(Str('id'))
    @job(lock=lambda args: f'bootenv_delete_{args[0]}')
    async def do_delete(self, job, oid):
        """
        Delete `id` boot environment. This removes the clone from the system.
        """
        be = await self._get_instance(oid)
        try:
            await run(self.BE_TOOL,
                      'destroy',
                      '-F',
                      be['id'],
                      encoding='utf8',
                      check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to delete boot environment: {cpe.stdout}')
        return True
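
A minimal usage sketch (hypothetical BE names), cloning an existing boot environment and activating the clone through the middleware client, mirroring the do_create and activate methods above:

from middlewared.client import Client

with Client() as c:
    # Clone the 'default' BE into a new one, then make it the boot target
    c.call('bootenv.create', {'name': 'pre-upgrade', 'source': 'default'})
    c.call('bootenv.activate', 'pre-upgrade')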
Exemplo n.º 26
0
class SNMPTrapAlertService(ThreadedAlertService):
    title = "SNMP Trap"

    schema = Dict(
        "snmp_attributes",
        Str("host", required=True),
        Int("port", required=True),
        Bool("v3", required=True),
        # v1/v2
        Str("community", null=True, default=None, empty=False),
        # v3
        Str("v3_username", null=True, default=None, empty=False),
        Str("v3_authkey", null=True, default=None),
        Str("v3_privkey", null=True, default=None),
        Str("v3_authprotocol",
            enum=[
                None, "MD5", "SHA", "128SHA224", "192SHA256", "256SHA384",
                "384SHA512"
            ],
            null=True,
            default=None),
        Str("v3_privprotocol",
            enum=[
                None, "DES", "3DESEDE", "AESCFB128", "AESCFB192", "AESCFB256",
                "AESBLUMENTHALCFB192", "AESBLUMENTHALCFB256"
            ],
            null=True,
            default=None),
    )

    def __init__(self, middleware, attributes):
        super().__init__(middleware, attributes)

        self.initialized = False

    def send_sync(self, alerts, gone_alerts, new_alerts):
        if self.attributes["host"] in ("localhost", "127.0.0.1", "::1"):
            if not self.middleware.call_sync("service.started", "snmp"):
                self.logger.trace(
                    "Local SNMP service not started, not sending traps")
                return

        if not self.initialized:
            self.snmp_engine = pysnmp.hlapi.SnmpEngine()
            if self.attributes["v3"]:
                self.auth_data = pysnmp.hlapi.UsmUserData(
                    self.attributes["v3_username"] or "",
                    self.attributes["v3_authkey"],
                    self.attributes["v3_privkey"],
                    {
                        None: pysnmp.hlapi.usmNoAuthProtocol,
                        "MD5": pysnmp.hlapi.usmHMACMD5AuthProtocol,
                        "SHA": pysnmp.hlapi.usmHMACSHAAuthProtocol,
                        "128SHA224": pysnmp.hlapi.usmHMAC128SHA224AuthProtocol,
                        "192SHA256": pysnmp.hlapi.usmHMAC192SHA256AuthProtocol,
                        "256SHA384": pysnmp.hlapi.usmHMAC256SHA384AuthProtocol,
                        "384SHA512": pysnmp.hlapi.usmHMAC384SHA512AuthProtocol,
                    }[self.attributes["v3_authprotocol"]],
                    {
                        None: pysnmp.hlapi.usmNoPrivProtocol,
                        "DES": pysnmp.hlapi.usmDESPrivProtocol,
                        "3DESEDE": pysnmp.hlapi.usm3DESEDEPrivProtocol,
                        "AESCFB128": pysnmp.hlapi.usmAesCfb128Protocol,
                        "AESCFB192": pysnmp.hlapi.usmAesCfb192Protocol,
                        "AESCFB256": pysnmp.hlapi.usmAesCfb256Protocol,
                        "AESBLUMENTHALCFB192": pysnmp.hlapi.usmAesBlumenthalCfb192Protocol,
                        "AESBLUMENTHALCFB256": pysnmp.hlapi.usmAesBlumenthalCfb256Protocol,
                    }[self.attributes["v3_privprotocol"]],
                )
            else:
                self.auth_data = pysnmp.hlapi.CommunityData(
                    self.attributes["community"])
            self.transport_target = pysnmp.hlapi.UdpTransportTarget(
                (self.attributes["host"], self.attributes["port"]))
            self.context_data = pysnmp.hlapi.ContextData()

            mib_builder = pysnmp.smi.builder.MibBuilder()
            mib_sources = mib_builder.getMibSources() + (
                pysnmp.smi.builder.DirMibSource(
                    "/usr/local/share/pysnmp/mibs"), )
            mib_builder.setMibSources(*mib_sources)
            mib_builder.loadModules("FREENAS-MIB")
            self.snmp_alert_level_type = mib_builder.importSymbols(
                "FREENAS-MIB", "AlertLevelType")[0]
            mib_view_controller = pysnmp.smi.view.MibViewController(
                mib_builder)
            self.snmp_alert = pysnmp.hlapi.ObjectIdentity("FREENAS-MIB", "alert"). \
                resolveWithMib(mib_view_controller)
            self.snmp_alert_id = pysnmp.hlapi.ObjectIdentity("FREENAS-MIB", "alertId"). \
                resolveWithMib(mib_view_controller)
            self.snmp_alert_level = pysnmp.hlapi.ObjectIdentity("FREENAS-MIB", "alertLevel"). \
                resolveWithMib(mib_view_controller)
            self.snmp_alert_message = pysnmp.hlapi.ObjectIdentity("FREENAS-MIB", "alertMessage"). \
                resolveWithMib(mib_view_controller)
            self.snmp_alert_cancellation = pysnmp.hlapi.ObjectIdentity("FREENAS-MIB", "alertCancellation"). \
                resolveWithMib(mib_view_controller)

            self.initialized = True

        classes = (self.middleware.call_sync("alertclasses.config"))["classes"]

        for alert in gone_alerts:
            error_indication, error_status, error_index, var_binds = next(
                pysnmp.hlapi.sendNotification(
                    self.snmp_engine, self.auth_data, self.transport_target,
                    self.context_data, "trap",
                    pysnmp.hlapi.NotificationType(
                        self.snmp_alert_cancellation).addVarBinds(
                            (pysnmp.hlapi.ObjectIdentifier(self.snmp_alert_id),
                             pysnmp.hlapi.OctetString(alert.uuid)))))

            if error_indication:
                self.logger.error(f"Failed to send SNMP trap: %s",
                                  error_indication)

        for alert in new_alerts:
            error_indication, error_status, error_index, var_binds = next(
                pysnmp.hlapi.sendNotification(
                    self.snmp_engine, self.auth_data, self.transport_target,
                    self.context_data, "trap",
                    pysnmp.hlapi.NotificationType(self.snmp_alert).addVarBinds(
                        (pysnmp.hlapi.ObjectIdentifier(self.snmp_alert_id),
                         pysnmp.hlapi.OctetString(alert.uuid)),
                        (pysnmp.hlapi.ObjectIdentifier(self.snmp_alert_level),
                         self.snmp_alert_level_type(
                             self.snmp_alert_level_type.namedValues.getValue(
                                 classes.get(alert.klass.name, {}).get(
                                     "level",
                                     alert.klass.level.name).lower()))),
                        (pysnmp.hlapi.ObjectIdentifier(
                            self.snmp_alert_message),
                         pysnmp.hlapi.OctetString(alert.formatted)))))

            if error_indication:
                self.logger.warning(f"Failed to send SNMP trap: %s",
                                    error_indication)
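
For comparison, the canonical standalone pysnmp flow that send_sync() wraps looks roughly like this: a v2c sketch with assumed host and community values, sending the generic warmStart trap rather than the FREENAS-MIB notifications used above:

import pysnmp.hlapi

error_indication, error_status, error_index, var_binds = next(
    pysnmp.hlapi.sendNotification(
        pysnmp.hlapi.SnmpEngine(),
        pysnmp.hlapi.CommunityData('public'),                 # assumed community
        pysnmp.hlapi.UdpTransportTarget(('127.0.0.1', 162)),  # assumed receiver
        pysnmp.hlapi.ContextData(),
        'trap',
        pysnmp.hlapi.NotificationType(
            pysnmp.hlapi.ObjectIdentity('1.3.6.1.6.3.1.1.5.2')),  # warmStart
    )
)
if error_indication:
    print(error_indication)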
Exemplo n.º 27
0
class GlusterVolumeService(CRUDService):
    class Config:
        namespace = 'gluster.volume'

    def __volume_wrapper(self, method, *args, **kwargs):

        result = b''

        try:
            result = method(*args, **kwargs)
        except GlusterCmdException as e:
            # the gluster cli utility will return stderr
            # to stdout and vice versa on certain failures.
            # account for this and decode appropriately
            rc, out, err = e.args[0]
            err = err if err else out
            if isinstance(err, bytes):
                err = err.decode()
            raise CallError(f'{err.strip()}')

        if isinstance(result, bytes):
            return result.decode().strip()

        return result

    @private
    def removebrick_volume(self, name, data):

        temp = data.pop('bricks')
        op = data.pop('operation')

        bricks = []
        for i in temp:
            peer = i['peer_name']
            path = i['peer_path']
            brick = peer + ':' + path
            bricks.append(brick)
        # TODO
        # glustercli-python has a bug where, if given the "force" option, it
        # concatenates it with the "start" option. This is wrong; "start" and
        # "force" are mutually exclusive
        # (i.e. gluster volume name remove-brick peer:path start OR force)
        result = None
        op = op.lower()
        if op == 'start':
            result = self.__volume_wrapper(volume.bricks.remove_start, name,
                                           bricks, **data)
        elif op == 'stop':
            result = self.__volume_wrapper(volume.bricks.remove_stop, name,
                                           bricks, **data)
        elif op == 'commit':
            result = self.__volume_wrapper(volume.bricks.remove_commit, name,
                                           bricks, **data)
        elif op == 'status':
            result = self.__volume_wrapper(volume.bricks.remove_status, name,
                                           bricks, **data)

        return result

    @private
    def replacebrick_volume(self, name, data):

        src = data.pop('src_brick')
        new = data.pop('new_brick')

        src_peer = src['peer_name']
        src_path = src['peer_path']
        src_brick = src_peer + ':' + src_path

        new_peer = new['peer_name']
        new_path = new['peer_path']
        new_brick = new_peer + ':' + new_path

        result = self.__volume_wrapper(volume.bricks.replace_commit, name,
                                       src_brick, new_brick, **data)

        return result

    @accepts(
        Dict(
            'glustervolume_create',
            Str('name', required=True),
            List('bricks',
                 items=[
                     Dict(
                         'brick',
                         Str('peer_name', required=True),
                         Str('peer_path', required=True),
                     ),
                 ],
                 required=True),
            Int('replica'),
            Int('arbiter'),
            Int('disperse'),
            Int('disperse_data'),
            Int('redundancy'),
            Bool('force'),
        ))
    @job(lock=GLUSTER_JOB_LOCK)
    def do_create(self, job, data):
        """
        Create a gluster volume.

        `name` Name to be given to the gluster volume
        `bricks` List of brick paths
            `peer_name` IP or DNS name of the peer.
            `peer_path` The full path of the brick

        `replica` Number of replica bricks
        `arbiter` Number of arbiter bricks
        `disperse` Number of disperse bricks
        `disperse_data` Number of disperse data bricks
        `redundancy` Number of redundancy bricks
        `force` Create volume forcefully, ignoring potential warnings
        """

        name = data.pop('name')
        temp = data.pop('bricks')

        bricks = []
        for i in temp:
            peer = i['peer_name']
            path = i['peer_path']
            brick = peer + ':' + path
            bricks.append(brick)

        return self.__volume_wrapper(volume.create, name, bricks, **data)

    @item_method
    @accepts(
        Str('name', required=True),
        Dict('data', Bool('force')),
    )
    @job(lock=GLUSTER_JOB_LOCK)
    def start(self, job, name, data):
        """
        Start a gluster volume.

        `name` Name of gluster volume
        `force` Forcefully start the gluster volume
        """

        return self.__volume_wrapper(volume.start, name, **data)

    @item_method
    @accepts(
        Str('name', required=True),
        Dict('data', Bool('force')),
    )
    @job(lock=GLUSTER_JOB_LOCK)
    def restart(self, job, name, data):
        """
        Restart a gluster volume.

        `name` Name of gluster volume
        `force` Forcefully restart the gluster volume
        """

        return self.__volume_wrapper(volume.restart, name, **data)

    @item_method
    @accepts(
        Str('name', required=True),
        Dict('data', Bool('force')),
    )
    @job(lock=GLUSTER_JOB_LOCK)
    def stop(self, job, name, data):
        """
        Stop a gluster volume.

        `name` Name of gluster volume
        `force` Forcefully stop the gluster volume
        """

        return self.__volume_wrapper(volume.stop, name, **data)

    @accepts(Dict(
        'glustervolume_delete',
        Str('name', required=True),
    ))
    @job(lock=GLUSTER_JOB_LOCK)
    def do_delete(self, job, data):
        """
        Delete a gluster volume.

        `name` Name of the volume to be deleted
        """

        return self.__volume_wrapper(volume.delete, data['name'])

    @item_method
    @accepts(Str('name'))
    @job(lock=GLUSTER_JOB_LOCK)
    def info(self, job, name):
        """
        Return information about gluster volume(s).

        `name` Name of the gluster volume
        """

        rv = {}
        rv['volname'] = name

        return self.__volume_wrapper(volume.info, **rv)

    @item_method
    @accepts(Str('name', required=True),
             Dict(
                 'data',
                 Bool('verbose', default=True),
             ))
    @job(lock=GLUSTER_JOB_LOCK)
    def status(self, job, name, data):
        """
        Return detailed information about gluster volume(s).

        `name` Name of the gluster volume
        `verbose` If False, only return brick information
            for gluster volume with `name`.
        """

        rv = {}
        rv['volname'] = name
        rv['group_subvols'] = data.pop('verbose')

        return self.__volume_wrapper(volume.status_detail, **rv)

    @accepts()
    @job(lock=GLUSTER_JOB_LOCK)
    def list(self, job):
        """
        Return list of gluster volumes.
        """

        return self.__volume_wrapper(volume.vollist)

    @item_method
    @accepts(Str('name', required=True),
             Dict(
                 'data',
                 Str('opt'),
                 Bool('force'),
             ))
    @job(lock=GLUSTER_JOB_LOCK)
    def optreset(self, job, name, data):
        """
        Reset volume options. If `opt` is not provided, all options
        will be reset.

        `name` Name of the gluster volume
        `opt` Name of the option to reset
        `force` Forcefully reset option(s)
        """

        return self.__volume_wrapper(volume.optreset, name, **data)

    @item_method
    @accepts(
        Str('name', required=True),
        Dict('opts', required=True, additional_attrs=True),
    )
    @job(lock=GLUSTER_JOB_LOCK)
    def optset(self, job, name, data):
        """
        Set gluster volume options.

        `name` Name of the gluster volume
        `opts` Dict where
            --key-- is the name of the option
            --value-- is the value to be given to the option
        """

        data = data.pop("opts")

        return self.__volume_wrapper(volume.optset, name, data)

    @item_method
    @accepts(Str('name', required=True),
             Dict(
                 'data',
                 List('bricks',
                      items=[
                          Dict(
                              'brick',
                              Str('peer_name', required=True),
                              Str('peer_path', required=True),
                          ),
                      ],
                      required=True),
                 Int('replica'),
                 Int('arbiter'),
                 Bool('force'),
             ))
    @job(lock=GLUSTER_JOB_LOCK)
    def addbrick(self, job, name, data):
        """
        Add bricks to a gluster volume.

        `name` Gluster volume name
        `bricks` List of brick paths.
            `peer_name` IP or DNS name of the peer
            `peer_path` The full path of the brick to be added

        `replica` Replica count
        `arbiter` Arbiter count
        `force` Forcefully add brick(s)
        """

        temp = data.pop('bricks')

        bricks = []
        for i in temp:
            peer = i['peer_name']
            path = i['peer_path']
            brick = peer + ':' + path
            bricks.append(brick)

        return self.__volume_wrapper(volume.bricks.add, name, bricks, **data)

    @item_method
    @accepts(Str('name', required=True),
             Dict(
                 'data',
                 List('bricks',
                      items=[
                          Dict(
                              'brick',
                              Str('peer_name', required=True),
                              Str('peer_path', required=True),
                          ),
                      ],
                      required=True),
                 Str('operation',
                     enum=['START', 'STOP', 'COMMIT', 'STATUS'],
                     required=True),
                 Int('replica'),
             ))
    @job(lock=GLUSTER_JOB_LOCK)
    def removebrick(self, job, name, data):
        """
        Perform a remove operation on the brick(s) in the gluster volume.

        `name` Gluster volume name
        `bricks` List of brick paths.
            `peer_name` IP or DNS name of the peer
            `peer_path` The full path of the brick

        `operation` The operation to be performed
            `START` Start the removal of the brick(s)
            `STOP` Stop the removal of the brick(s)
            `COMMIT` Commit the removal of the brick(s)
            `STATUS` Display status of the removal of the brick(s)

        `replica` Replica count
        `force` Forcefully run the removal operation.
        """

        return self.removebrick_volume(name, data)

    @item_method
    @accepts(Str('name', required=True),
             Dict(
                 'data',
                 Dict(
                     'src_brick',
                     Str('peer_name', required=True),
                     Str('peer_path', required=True),
                     required=True,
                 ),
                 Dict(
                     'new_brick',
                     Str('peer_name', required=True),
                     Str('peer_path', required=True),
                     required=True,
                 ),
                 Bool('force'),
             ))
    @job(lock=GLUSTER_JOB_LOCK)
    def replacebrick(self, job, name, data):
        """
        Commit the replacement of a brick.

        `name` Gluster volume name
        `src_brick` Brick to be replaced
            `peer_name` IP or DNS name of the peer
            `peer_path` The full path of the brick

        `new_brick` New replacement brick
            `peer_name` IP or DNS name of the peer
            `peer_path` The full path of the brick

        `force` Forcefully replace bricks
        """

        return self.replacebrick_volume(name, data)

    @item_method
    @accepts(Str('name', required=True),
             Dict(
                 'data',
                 Bool('enable', required=True),
             ))
    @job(lock=GLUSTER_JOB_LOCK)
    def quota(self, job, name, data):
        """
        Enable/Disable the quota for a given gluster volume.

        `name` Gluster volume name
        `enable` enable quota (True) or disable it (False)
        """

        return self.__volume_wrapper(
            quota.enable if data['enable'] else quota.disable, name)
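
The brick dictionaries accepted by the methods above are flattened into GlusterFS's "peer:path" notation before being handed to glustercli. A small sketch of that convention with hypothetical peers:

bricks = [
    {'peer_name': '192.168.0.10', 'peer_path': '/mnt/tank/brick0'},
    {'peer_name': '192.168.0.11', 'peer_path': '/mnt/tank/brick0'},
]
flat = [f"{b['peer_name']}:{b['peer_path']}" for b in bricks]
# ['192.168.0.10:/mnt/tank/brick0', '192.168.0.11:/mnt/tank/brick0']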
Exemplo n.º 28
0
class OpenVPNClientService(SystemServiceService):
    class Config:
        namespace = 'openvpn.client'
        service = 'openvpn_client'
        service_model = 'openvpnclient'
        service_verb = 'restart'
        datastore_extend = 'openvpn.client.client_extend'

    @private
    async def client_extend(self, data):
        data['client_certificate'] = data['client_certificate']['id'] if data['client_certificate'] else None
        data['root_ca'] = data['root_ca']['id'] if data['root_ca'] else None
        data['tls_crypt_auth_enabled'] = bool(data['tls_crypt_auth'])
        return data

    @accepts()
    async def authentication_algorithm_choices(self):
        """
        Returns a dictionary of valid authentication algorithms which can be used with the OpenVPN client.
        """
        return OpenVPN.digests()

    @accepts()
    async def cipher_choices(self):
        """
        Returns a dictionary of valid ciphers which can be used with the OpenVPN client.
        """
        return OpenVPN.ciphers()

    @private
    async def validate(self, data, schema_name):
        verrors, data = await OpenVPN.common_validation(
            self.middleware, data, schema_name, 'client')

        if not data.get('remote'):
            verrors.add(f'{schema_name}.remote', 'This field is required.')

        if not await self.validate_nobind(data):
            verrors.add(
                f'{schema_name}.nobind',
                'Please enable this to concurrently run OpenVPN Server/Client on the same local port.'
            )

        verrors.check()

        return data

    @private
    async def validate_nobind(self, config):
        if (
            await self.middleware.call('service.started', 'openvpn_server') and
            config['port'] == (await self.middleware.call('openvpn.server.config'))['port'] and
            not config['nobind']
        ):
            return False
        return True

    @private
    async def config_valid(self):
        config = await self.config()
        if not config['root_ca']:
            raise CallError('Please configure root_ca first.')
        elif not await self.middleware.call(
            'certificateauthority.query',
            [['id', '=', config['root_ca']], ['revoked', '=', False]]
        ):
            raise CallError('Root CA has been revoked. Please select another Root CA.')

        if not config['client_certificate']:
            raise CallError('Please configure client certificate first.')
        elif not await self.middleware.call(
            'certificate.query',
            [['id', '=', config['client_certificate']], ['revoked', '=', False]]
        ):
            raise CallError('Client certificate has been revoked. Please select another Client certificate.')

        if not config['remote']:
            raise CallError('Please configure remote first.')

        if not await self.validate_nobind(config):
            raise CallError(
                'Please enable "nobind" to concurrently run OpenVPN Server/Client on the same local port.'
            )

    @accepts(
        Dict('openvpn_client_update',
             Bool('nobind'),
             Bool('tls_crypt_auth_enabled'),
             Int('client_certificate'),
             Int('root_ca'),
             Int('port', validators=[Port()]),
             Str('additional_parameters'),
             Str('authentication_algorithm', null=True),
             Str('cipher', null=True),
             Str('compression', null=True, enum=['LZO', 'LZ4']),
             Str('device_type', enum=['TUN', 'TAP']),
             Str('protocol', enum=['UDP', 'TCP']),
             Str('remote'),
             Str('tls_crypt_auth', null=True),
             update=True))
    async def do_update(self, data):
        """
        Update OpenVPN Client configuration.

        `remote` can be a valid ip address / domain which openvpn will try to connect to.

        `nobind` must be enabled if OpenVPN client / server are to run concurrently.
        """
        old_config = await self.config()
        config = old_config.copy()

        config.update(data)

        config = await self.validate(config, 'openvpn_client_update')

        await self._update_service(old_config, config)

        return await self.config()
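
The `nobind` constraint enforced by validate_nobind() above reduces to a single predicate; a standalone sketch (names are illustrative only):

def nobind_ok(server_started, server_port, client_port, nobind):
    # A client sharing its local port with a running server must set nobind.
    return not (server_started and client_port == server_port and not nobind)

assert nobind_ok(True, 1194, 1195, False)      # different ports: fine
assert not nobind_ok(True, 1194, 1194, False)  # same port without nobind: rejected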
Exemplo n.º 29
0
class OpenVPNServerService(SystemServiceService):

    class Config:
        namespace = 'openvpn.server'
        service = 'openvpn_server'
        service_model = 'openvpnserver'
        service_verb = 'restart'
        datastore_extend = 'openvpn.server.server_extend'
        cli_namespace = 'service.openvpn.server'

    ENTRY = Dict(
        'openvpn_server_entry',
        Bool('tls_crypt_auth_enabled', required=True),
        Int('id', required=True),
        Int('netmask', validators=[Range(min=0, max=128)], required=True),
        Int('server_certificate', null=True, required=True),
        Int('port', validators=[Port()], required=True),
        Int('root_ca', null=True, required=True),
        IPAddr('server', required=True),
        Str('additional_parameters', required=True),
        Str('authentication_algorithm', null=True, required=True),
        Str('cipher', null=True, required=True),
        Str('compression', null=True, enum=['LZO', 'LZ4'], required=True),
        Str('device_type', enum=['TUN', 'TAP'], required=True),
        Str('protocol', enum=PROTOCOLS, required=True),
        Str('tls_crypt_auth', null=True, required=True),
        Str('topology', null=True, enum=['NET30', 'P2P', 'SUBNET'], required=True),
        Str('interface', required=True),
    )

    @private
    async def server_extend(self, data):
        data.update({
            'server_certificate': None if not data['server_certificate'] else data['server_certificate']['id'],
            'root_ca': None if not data['root_ca'] else data['root_ca']['id'],
            'tls_crypt_auth_enabled': bool(data['tls_crypt_auth']),
            'interface': 'openvpn-server',
        })
        return data

    @private
    async def config_valid(self):
        config = await self.config()
        if not config['root_ca']:
            raise CallError('Please configure root_ca first.')
        else:
            if not await self.middleware.call(
                'certificateauthority.query', [
                    ['id', '=', config['root_ca']],
                    ['revoked', '=', False]
                ]
            ):
                raise CallError('Root CA has been revoked. Please select another Root CA.')

        if not config['server_certificate']:
            raise CallError('Please configure server certificate first.')
        else:
            if not await self.middleware.call(
                'certificate.query', [
                    ['id', '=', config['server_certificate']],
                    ['revoked', '=', False]
                ]
            ):
                raise CallError('Server certificate has been revoked. Please select another Server certificate.')

        if not await self.validate_nobind(config):
            raise CallError(
                'Please enable "nobind" on OpenVPN Client to concurrently run OpenVPN Server/Client '
                'on the same local port without any issues.'
            )

    @accepts()
    @returns(Dict(
        'openvpn_authentication_algorithm_choices',
        additional_attrs=True,
        register=True,
        example={'RSA-SHA512': '512 bit digest size'}
    ))
    async def authentication_algorithm_choices(self):
        """
        Returns a dictionary of valid authentication algorithms which can be used with OpenVPN server.
        """
        return OpenVPN.digests()

    @accepts()
    @returns(Dict(
        'openvpn_cipher_choices',
        additional_attrs=True,
        example={'RC2-40-CBC': '(40 bit key by default, 64 bit block)'},
        register=True,
    ))
    async def cipher_choices(self):
        """
        Returns a dictionary of valid ciphers which can be used with OpenVPN server.
        """
        return OpenVPN.ciphers()

    @private
    async def validate(self, data, schema_name):
        verrors, data = await OpenVPN.common_validation(
            self.middleware, data, schema_name, 'server'
        )

        if not await self.validate_nobind(data):
            verrors.add(
                f'{schema_name}.nobind',
                'Please enable "nobind" on OpenVPN Client to concurrently run OpenVPN Server/Client '
                'on the same local port without any issues.'
            )

        if ipaddress.ip_address(data['server']).version == 4 and data['netmask'] > 32:
            verrors.add(
                f'{schema_name}.netmask',
                'For IPv4 server addresses please provide a netmask value from 0-32.'
            )

        verrors.check()

        return data

    @private
    async def validate_nobind(self, config):
        client_config = await self.middleware.call('openvpn.client.config')
        if (
            await self.middleware.call(
                'service.started',
                'openvpn_client'
            ) and config['port'] == client_config['port'] and not client_config['nobind']
        ):
            return False
        else:
            return True

    @private
    async def generate_static_key(self):
        keyfile = tempfile.NamedTemporaryFile(mode='w+', dir='/tmp/')
        await run(
            ['openvpn', '--genkey', '--secret', keyfile.name]
        )
        keyfile.seek(0)
        key = keyfile.read()
        keyfile.close()
        return key.strip()

    @accepts()
    @returns(Ref('openvpn_server_entry'))
    async def renew_static_key(self):
        """
        Reset OpenVPN server's TLS static key which will be used to encrypt/authenticate control channel packets.
        """
        return await self.update({
            'tls_crypt_auth': (await self.generate_static_key()),
            'tls_crypt_auth_enabled': True
        })

    @accepts(
        Int('client_certificate_id'),
        Str('server_address', null=True, default=None)
    )
    @returns(Str('openvpn_client_config', max_length=None))
    async def client_configuration_generation(self, client_certificate_id, server_address):
        """
        Returns a configuration for OpenVPN client which can be used with any client to connect to FN/TN OpenVPN
        server.

        `client_certificate_id` should be a valid certificate issued for use with OpenVPN client service.

        `server_address` if specified auto-fills the remote directive in the OpenVPN configuration enabling the end
        user to use the file without making any edits to connect to OpenVPN server.
        """
        await self.config_valid()
        config = await self.config()
        root_ca = await self.middleware.call(
            'certificateauthority.query', [
                ['id', '=', config['root_ca']]
            ], {
                'get': True
            }
        )
        client_cert = await self.middleware.call(
            'certificate.query', [
                ['id', '=', client_certificate_id],
                ['revoked', '=', False]
            ]
        )
        if not client_cert:
            raise CallError(
                'Please provide a client certificate id for a certificate which exists on '
                'the system and hasn\'t been marked as revoked.'
            )
        else:
            client_cert = client_cert[0]
            verrors = (
                await OpenVPN.common_validation(
                    self.middleware, {
                        **config,
                        'client_certificate': client_certificate_id
                    }, '', 'client'
                )
            )[0]
            if verrors:
                err_str = '\n'.join([f'{i + 1}) {error.errmsg}' for i, error in enumerate(verrors.errors)])

                raise CallError(
                    f'Please ensure provided client certificate is valid, following errors were found:\n{err_str}'
                )

        client_config = [
            'client',
            f'dev {config["device_type"].lower()}',
            f'proto {config["protocol"].lower()}',
            f'port {config["port"]}',
            f'remote "{server_address or "PLEASE FILL OUT SERVER DOMAIN/IP HERE"}"',
            'user nobody',
            'group nobody',
            'persist-key',
            'persist-tun',
            '<ca>',
            f'{root_ca["certificate"]}',
            '</ca>',
            '<cert>',
            client_cert['certificate'],
            '</cert>',
            '<key>',
            client_cert['privatekey'],
            '</key>',
            'verb 3',
            'remote-cert-tls server',
            f'compress {config["compression"].lower()}' if config['compression'] else None,
            f'auth {config["authentication_algorithm"]}' if config['authentication_algorithm'] else None,
            f'cipher {config["cipher"]}' if config['cipher'] else None,
        ]

        if config['tls_crypt_auth_enabled']:
            client_config.extend([
                '<tls-crypt>',
                config['tls_crypt_auth'],
                '</tls-crypt>'
            ])

        return '\n'.join(filter(bool, client_config)).strip()
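
    # Usage sketch (hedged; assumes the 'openvpn.server' namespace and a
    # certificate with id 5 issued for client use; both are illustrative):
    #
    #   from middlewared.client import Client
    #   with Client() as c:
    #       profile = c.call(
    #           'openvpn.server.client_configuration_generation', 5, '192.0.2.10'
    #       )
    #   with open('client.ovpn', 'w') as f:
    #       f.write(profile)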

    @accepts(
        Patch(
            'openvpn_server_entry', 'openvpn_server_update',
            ('rm', {'name': 'id'}),
            ('rm', {'name': 'interface'}),
            ('attr', {'update': True}),
        ),
    )
    async def do_update(self, data):
        """
        Update OpenVPN Server configuration.

        When `tls_crypt_auth_enabled` is enabled and `tls_crypt_auth` is not provided, a static key is automatically
        generated to be used with the OpenVPN server.
        """
        old_config = await self.config()
        old_config.pop('interface')
        config = old_config.copy()

        config.update(data)

        # If tls_crypt_auth_enabled is set and we don't have a tls_crypt_auth key,
        # generate one automatically
        if config['tls_crypt_auth_enabled'] and not config['tls_crypt_auth']:
            config['tls_crypt_auth'] = await self.generate_static_key()

        config = await self.validate(config, 'openvpn_server_update')

        await self._update_service(old_config, config)

        return await self.config()
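
    # Usage sketch for do_update (hedged; assumes the 'openvpn.server'
    # namespace): enabling tls-crypt without supplying a key lets the service
    # auto-generate one, as described in the docstring above.
    #
    #   from middlewared.client import Client
    #   with Client() as c:
    #       c.call('openvpn.server.update', {'tls_crypt_auth_enabled': True})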
Example #30
class ChartReleaseService(Service):
    class Config:
        namespace = 'chart.release'

    @accepts(Str('release_name'),
             Dict(
                 'rollback_options',
                 Bool('force_rollback', default=False),
                 Bool('recreate_resources', default=False),
                 Bool('rollback_snapshot', default=True),
                 Str('item_version', required=True),
             ))
    @returns(Ref('chart_release_entry'))
    @job(lock=lambda args: f'chart_release_rollback_{args[0]}')
    async def rollback(self, job, release_name, options):
        """
        Rollback a chart release to a previous chart version.

        `item_version` is the version to which we want to rollback the chart release.

        `rollback_snapshot` is a boolean which, when set, rolls back snapshots of any PVCs or ix_volumes being
        consumed by the chart release.

        `force_rollback` is a boolean which, when set, forces the rollback operation to move forward even if no
        snapshots are found. This is only useful when `rollback_snapshot` is set.

        `recreate_resources` is a boolean which deletes and then recreates the kubernetes resources on rollback
        of the chart release. It should be used with caution: if the chart release is consuming immutable objects
        like a PVC, the rollback operation can't be performed and will fail, as helm tries to do a 3-way patch for
        the rollback.

        Rollback restores the actual configuration of the release at the specified `item_version`, along with any
        associated `ix_volumes` and any PVCs which were consuming the chart release storage class.
        """
        await self.middleware.call('kubernetes.validate_k8s_setup')
        release = await self.middleware.call('chart.release.query',
                                             [['id', '=', release_name]], {
                                                 'extra': {
                                                     'history': True,
                                                     'retrieve_resources': True
                                                 },
                                                 'get': True,
                                             })
        rollback_version = options['item_version']
        if rollback_version not in release['history']:
            raise CallError(
                f'Unable to find {rollback_version!r} item version in {release_name!r} history',
                errno=errno.ENOENT)

        chart_path = os.path.join(release['path'], 'charts', rollback_version)
        if not await self.middleware.run_in_thread(
                lambda: os.path.exists(chart_path)):
            raise CallError(
                f'Unable to locate {chart_path!r} path for rolling back',
                errno=errno.ENOENT)

        chart_details = await self.middleware.call(
            'catalog.item_version_details', chart_path)
        await self.middleware.call('catalog.version_supported_error_check',
                                   chart_details)

        history_item = release['history'][rollback_version]
        history_ver = str(history_item['version'])
        force_rollback = options['force_rollback']
        helm_force_flag = options['recreate_resources']

        # If the helm force flag is specified, check whether the chart release is consuming any PVCs; if it is,
        # don't initiate a rollback as helm is destined to fail it
        if helm_force_flag and release['resources']['persistent_volume_claims']:
            raise CallError(
                f'Unable to rollback {release_name!r} as the chart release is consuming a PVC. '
                'Please unset recreate_resources to proceed with rollback.')

        # TODO: Remove the logic for ix_volumes as going forward we will only snapshot volumes and roll
        #  that back
        snap_data = {'volumes': False, 'volumes/ix_volumes': False}
        for snap in snap_data:
            volumes_ds = os.path.join(release['dataset'], snap)
            snap_name = f'{volumes_ds}@{history_ver}'
            if await self.middleware.call('zfs.snapshot.query',
                                          [['id', '=', snap_name]]):
                snap_data[snap] = snap_name

        if options['rollback_snapshot'] and not any(
                snap_data.values()) and not force_rollback:
            raise CallError(
                f'Unable to locate {", ".join(snap_data.keys())!r} snapshot(s) for {release_name!r} volumes',
                errno=errno.ENOENT)

        current_dataset_paths = {
            os.path.join('/mnt', d['id'])
            for d in await self.middleware.call('zfs.dataset.query', [[
                'id', '^',
                f'{os.path.join(release["dataset"], "volumes/ix_volumes")}/'
            ]])
        }
        history_datasets = {
            d['hostPath']
            for d in history_item['config'].get('ixVolumes', [])
        }
        if history_datasets - current_dataset_paths:
            raise CallError(
                'Please specify a rollback version where the following iX Volumes are not being used, as they '
                f'no longer exist: {", ".join(d.split("/")[-1] for d in history_datasets - current_dataset_paths)}'
            )

        job.set_progress(25, 'Initial validation complete')

        # TODO: Upstream helm does not have the ability to force stop a release; until we have that ability,
        #  make a best effort to scale down scalable workloads and then scale them back up
        job.set_progress(45, 'Scaling down workloads')
        scale_stats = await (await self.middleware.call(
            'chart.release.scale', release_name,
            {'replica_count': 0})).wait(raise_error=True)

        job.set_progress(50, 'Rolling back chart release')

        command = []
        if helm_force_flag:
            command.append('--force')

        cp = await run(
            [
                'helm', 'rollback', release_name, history_ver, '-n',
                get_namespace(release_name), '--recreate-pods'
            ] + command,
            check=False,
        )
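
        # Equivalent CLI invocation of the run() call above (sketch; assumes
        # get_namespace() maps the release to its 'ix-<release>' namespace):
        #
        #   helm rollback <release_name> <revision> -n ix-<release_name> \
        #       --recreate-pods [--force]
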
        await self.middleware.call('chart.release.sync_secrets_for_release',
                                   release_name)
        await self.middleware.call('chart.release.refresh_events_state',
                                   release_name)

        # Helm rollback is a bit tricky: it utilizes the rollout functionality of kubernetes and rolls back the
        # resources to the specified version. However, if the rollback fails partway for any reason, it's possible
        # that some k8s resources were rolled back to the previous version whereas others were not. In that case
        # we check whether helm treats the chart release as being on the previous version; if it does, we should
        # still roll back the snapshots and raise the error afterwards. If helm does not recognize the chart
        # release as being on a previous version, we can raise the error right away.
        current_version = (await self.middleware.call(
            'chart.release.get_instance',
            release_name))['chart_metadata']['version']
        if current_version != rollback_version and cp.returncode:
            raise CallError(
                f'Failed to rollback {release_name!r} chart release to {rollback_version!r}: {cp.stderr.decode()}'
            )

        # We are going to remove old chart version copies
        await self.middleware.call(
            'chart.release.remove_old_upgraded_chart_version_copies',
            os.path.join(release['path'], 'charts'),
            rollback_version,
        )

        if options['rollback_snapshot'] and any(snap_data.values()):
            # Roll back only the first (most encompassing) snapshot found; the rollback is recursive, so a
            # snapshot of the parent `volumes` dataset also covers `volumes/ix_volumes`
            for snap_name in filter(bool, snap_data.values()):
                await self.middleware.call(
                    'zfs.snapshot.rollback', snap_name, {
                        'force': True,
                        'recursive': True,
                        'recursive_clones': True,
                        'recursive_rollback': True,
                    })
                break

        await self.middleware.call(
            'chart.release.scale_release_internal',
            release['resources'],
            None,
            scale_stats['before_scale'],
            True,
        )

        job.set_progress(100, 'Rollback complete for chart release')

        await self.middleware.call(
            'chart.release.chart_releases_update_checks_internal',
            [['id', '=', release_name]])

        if cp.returncode:
            # This means that helm partially rolled back k8s resources and recognizes the chart release as being
            # on the previous version, we should raise an appropriate exception explaining the behavior
            raise CallError(
                f'Failed to complete rollback of {release_name!r} chart release to {rollback_version!r}. The chart '
                f'release\'s datasets have been rolled back to the {rollback_version!r} version\'s snapshot. Errors '
                f'encountered during rollback were: {cp.stderr.decode()}')

        return await self.middleware.call('chart.release.get_instance',
                                          release_name)
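
    # Usage sketch (hedged; rollback is a job, so wait for it synchronously
    # via the middleware client; release name and version are illustrative):
    #
    #   from middlewared.client import Client
    #   with Client() as c:
    #       c.call(
    #           'chart.release.rollback', 'my-release',
    #           {'item_version': '1.0.1'},
    #           job=True,
    #       )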

    @private
    def remove_old_upgraded_chart_version_copies(self, charts_path, current_version):
        # Prune on-disk copies of chart versions newer than the version we just rolled back to
        c_v = parse_version(current_version)
        for v_path in filter(lambda p: p != current_version, os.listdir(charts_path)):
            if parse_version(v_path) > c_v:
                shutil.rmtree(path=os.path.join(charts_path, v_path), ignore_errors=True)
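
    # Illustration of the pruning rule above (hedged sketch; assumes
    # parse_version is pkg_resources.parse_version):
    #
    #   from pkg_resources import parse_version
    #   c_v = parse_version('1.7.2')
    #   [v for v in ('1.6.0', '1.7.2', '1.7.3') if parse_version(v) > c_v]
    #   # -> ['1.7.3']; only copies newer than the rollback target are removed,
    #   # so older versions remain available as future rollback targets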