Example #1
class DiskService(Service):

    @accepts(Ref('query-filters'), Ref('query-options'))
    def query(self, filters=None, options=None):
        if filters is None:
            filters = []
        if options is None:
            options = {}
        filters.append(('disk_enabled', '=', True))
        options['extend'] = 'disk.disk_extend'
        return self.middleware.call('datastore.query', 'storage.disk', filters, options)

    @private
    def disk_extend(self, disk):
        """
        This is a compatiblity method to remove superfluous "disk_" suffix from attributes
        from the Django datastore
        """
        # Iterate over a snapshot of the items: mutating a dict while
        # iterating over its live view raises RuntimeError in Python 3.
        for k, v in list(disk.items()):
            if k.startswith('disk_'):
                del disk[k]
                disk[k[5:]] = v
        # enabled is an internal attribute that does not need to be exposed
        disk.pop('enabled', None)
        return disk
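
A minimal usage sketch, assuming a running middleware instance and the standard middlewared.client API (the filter value is hypothetical):

from middlewared.client import Client

with Client() as c:
    # Only enabled disks are returned, and the 'disk_' prefix has already
    # been stripped by disk.disk_extend.
    disks = c.call('disk.query', [['name', '=', 'ada0']])  # hypothetical disk name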
Example #2
class KubernetesCronJobService(CRUDService):
    class Config:
        namespace = 'k8s.cronjob'
        private = True

    @filterable
    async def query(self, filters, options):
        async with api_client() as (api, context):
            return filter_list([
                d.to_dict() for d in (
                    await context['cronjob_batch_api'].list_cron_job_for_all_namespaces()
                ).items
            ], filters, options)

    @accepts(Ref('k8s_job_create'))
    async def do_create(self, data):
        async with api_client() as (api, context):
            try:
                await context['cronjob_batch_api'].create_namespaced_cron_job(
                    namespace=data['namespace'], body=data['body'])
            except client.exceptions.ApiException as e:
                raise CallError(f'Unable to create job: {e}')
            else:
                return await self.query([
                    # The job name is taken from the submitted body.
                    ['metadata.name', '=', data['body']['metadata']['name']],
                    ['metadata.namespace', '=', data['namespace']],
                ], {'get': True})

    @accepts(
        Str('name'),
        Ref('k8s_job_create'),
    )
    async def do_update(self, name, data):
        async with api_client() as (api, context):
            try:
                await context['cronjob_batch_api'].patch_namespaced_cron_job(
                    name, namespace=data['namespace'], body=data['body'])
            except client.exceptions.ApiException as e:
                raise CallError(f'Unable to patch {name} job: {e}')
            else:
                return await self.query([
                    ['metadata.name', '=', name],
                    ['metadata.namespace', '=', data['namespace']],
                ], {'get': True})

    @accepts(Str('name'),
             Dict(
                 'k8s_job_delete_options',
                 Str('namespace', required=True),
             ))
    async def do_delete(self, name, options):
        async with api_client() as (api, context):
            try:
                await context['cronjob_batch_api'].delete_namespaced_cron_job(
                    name, options['namespace'])
            except client.exceptions.ApiException as e:
                raise CallError(f'Unable to delete job: {e}')
            else:
                return True
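
A hedged sketch of how another middleware service might drive this private API (the namespace and body are illustrative, not a complete CronJob spec):

# Inside some other middleware service method:
cronjob = await self.middleware.call('k8s.cronjob.create', {
    'namespace': 'ix-myapp',  # hypothetical namespace
    'body': {'metadata': {'name': 'sync-job'}, 'spec': {}},  # minimal illustrative body
})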
Example #3
class CertificateauthorityService(CRUDService):
    @accepts(Ref('query-filters'), Ref('query-options'))
    def query(self, filters=None, options=None):
        if options is None:
            options = {}
        options['extend'] = 'certificate.cert_extend'
        return self.middleware.call('datastore.query',
                                    'system.certificateauthority', filters,
                                    options)
Example #4
class SMBService(Service):

    class Config:
        service = 'cifs'
        service_verb = 'restart'

    @accepts(
        Str('info_level', enum=[x.name for x in InfoLevel], default=InfoLevel.ALL.name),
        Ref('query-filters'),
        Ref('query-options'),
        Dict('status_options',
             Bool('verbose', default=True),
             Bool('fast', default=False),
             Str('restrict_user', default='')
             )
    )
    async def status(self, info_level, filters, options, status_options):
        """
        Returns SMB server status (sessions, open files, locks, notifications).

        `info_level` type of information requested. Defaults to ALL.

        `status_options` additional options to filter query results. Supported
        values are as follows: `verbose` gives more verbose status output.
        `fast` causes smbstatus to skip validating the status data by checking
        whether the processes it refers to still exist. This speeds up
        execution on busy systems and clusters, but may display stale data
        for processes that died without cleaning up properly.
        `restrict_user` limits results to the specified user.
        """
        flags = '-j'
        flags = flags + InfoLevel[info_level].value
        flags = flags + 'v' if status_options['verbose'] else flags
        flags = flags + 'f' if status_options['fast'] else flags

        statuscmd = [SMBCmd.STATUS.value, '-d0', flags]  # debug level 0

        if status_options['restrict_user']:
            statuscmd.extend(['-U', status_options['restrict_user']])

        smbstatus = await run(statuscmd, check=False)

        if smbstatus.returncode != 0:
            self.logger.debug('smbstatus [%s] failed with error: (%s)',
                              flags, smbstatus.stderr.decode().strip())

        return filter_list(json.loads(smbstatus.stdout.decode()), filters, options)
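
A hedged example invocation through the middleware client; the positional arguments mirror the @accepts signature above, and the user name (plus the assumption that InfoLevel defines a SESSIONS member) is illustrative:

from middlewared.client import Client

with Client() as c:
    # Fast session listing restricted to one (hypothetical) user.
    sessions = c.call('smb.status', 'SESSIONS', [], {},
                      {'verbose': False, 'fast': True, 'restrict_user': 'alice'})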
Example #5
class RsyncModService(CRUDService):
    class Config:
        datastore = 'services.rsyncmod'
        datastore_prefix = 'rsyncmod_'

    @accepts(
        Dict(
            'rsyncmod',
            Str('name', validators=[Match(r'[^/\]]')]),
            Str('comment'),
            Str('path'),
            Str('mode'),
            Int('maxconn'),
            Str('user'),
            Str('group'),
            List('hostsallow', items=[Str('hostsallow')]),
            List('hostsdeny', items=[Str('hostdeny')]),
            Str('auxiliary'),
            register=True,
        ))
    async def do_create(self, data):
        if data.get("hostsallow"):
            data["hostsallow"] = " ".join(data["hostsallow"])
        else:
            data["hostsallow"] = ''

        if data.get("hostsdeny"):
            data["hostsdeny"] = " ".join(data["hostsdeny"])
        else:
            data["hostsdeny"] = ''

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.reload', 'rsync')
        return data

    @accepts(Int('id'), Ref('rsyncmod'))
    async def do_update(self, id, data):
        module = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'prefix': self._config.datastore_prefix,
                'get': True
            })
        module.update(data)

        module["hostsallow"] = " ".join(module["hostsallow"])
        module["hostsdeny"] = " ".join(module["hostsdeny"])

        # Persist the merged record (with the space-joined host lists),
        # not the raw update payload.
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, module,
                                   {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.reload', 'rsync')

        return module

    @accepts(Int('id'))
    async def do_delete(self, id):
        return await self.middleware.call('datastore.delete',
                                          self._config.datastore, id)
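
A minimal creation sketch, assuming the default 'rsyncmod' namespace derived from the class name (all field values are hypothetical; the host lists are passed as lists and stored space-joined):

from middlewared.client import Client

with Client() as c:
    mod = c.call('rsyncmod.create', {
        'name': 'backups',                 # hypothetical module name
        'path': '/mnt/tank/backups',       # hypothetical path
        'hostsallow': ['192.168.0.0/24'],  # stored as a space-joined string
    })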
Example #6
class CredentialsService(CRUDService):
    class Config:
        namespace = "cloudsync.credentials"

        datastore = "system.cloudcredentials"

    @accepts(
        Dict(
            "cloud_sync_credentials",
            Str("name"),
            Str("provider"),
            Dict("attributes", additional_attrs=True),
            register=True,
        ))
    async def do_create(self, data):
        self._validate("cloud_sync_credentials", data)

        data["id"] = await self.middleware.call(
            "datastore.insert",
            "system.cloudcredentials",
            data,
        )
        return data

    @accepts(Int("id"), Ref("cloud_sync_credentials"))
    async def do_update(self, id, data):
        self._validate("cloud_sync_credentials", data)

        await self.middleware.call(
            "datastore.update",
            "system.cloudcredentials",
            id,
            data,
        )

        return data

    @accepts(Int("id"))
    async def do_delete(self, id):
        await self.middleware.call(
            "datastore.delete",
            "system.cloudcredentials",
            id,
        )

    def _validate(self, schema_name, data):
        verrors = ValidationErrors()

        if data["provider"] not in REMOTES:
            verrors.add(f"{schema_name}.provider", "Invalid provider")
        else:
            provider = REMOTES[data["provider"]]

            attributes_verrors = validate_attributes(
                provider.credentials_schema, data)
            verrors.add_child(f"{schema_name}.attributes", attributes_verrors)

        if verrors:
            raise verrors
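
A hedged creation sketch; the provider name must be a key in the REMOTES registry, and both the name and the attribute fields shown here are illustrative:

from middlewared.client import Client

with Client() as c:
    cred = c.call('cloudsync.credentials.create', {
        'name': 'my-s3',   # hypothetical
        'provider': 'S3',  # must exist in REMOTES
        'attributes': {'access_key_id': 'AKIA...', 'secret_access_key': '...'},
    })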
Example #7
class KubernetesStatefulsetService(CRUDService):
    class Config:
        namespace = 'k8s.statefulset'
        private = True

    @filterable
    async def query(self, filters, options):
        async with api_client() as (api, context):
            stateful_sets = [
                d.to_dict() for d in (
                    await context['apps_api'].list_stateful_set_for_all_namespaces()
                ).items
            ]
            events = await self.middleware.call(
                'kubernetes.get_events_of_resource_type', 'StatefulSet',
                [s['metadata']['uid'] for s in stateful_sets])
            for stateful_set in stateful_sets:
                stateful_set['events'] = events[stateful_set['metadata']['uid']]

        return filter_list(stateful_sets, filters, options)

    @accepts(
        Dict('statefulset_create',
             Str('namespace', required=True),
             Dict('body', additional_attrs=True, required=True),
             register=True))
    async def do_create(self, data):
        async with api_client() as (api, context):
            try:
                await context['apps_api'].create_namespaced_stateful_set(
                    namespace=data['namespace'], body=data['body'])
            except client.exceptions.ApiException as e:
                raise CallError(f'Unable to create statefulset: {e}')

    @accepts(
        Str('name'),
        Ref('statefulset_create'),
    )
    async def do_update(self, name, data):
        async with api_client() as (api, context):
            try:
                await context['apps_api'].patch_namespaced_stateful_set(
                    name, namespace=data['namespace'], body=data['body'])
            except client.exceptions.ApiException as e:
                raise CallError(f'Unable to patch {name} statefulset: {e}')

    @accepts(Str('name'),
             Dict(
                 'statefulset_delete_options',
                 Str('namespace', required=True),
             ))
    async def do_delete(self, name, options):
        async with api_client() as (api, context):
            try:
                await context['apps_api'].delete_namespaced_stateful_set(
                    name, options['namespace'])
            except client.exceptions.ApiException as e:
                raise CallError(f'Unable to delete statefulset: {e}')
Example #8
class DiskService(Service):

    @accepts(Ref('query-filters'), Ref('query-options'))
    def query(self, filters=None, options=None):
        if filters is None:
            filters = []
        if options is None:
            options = {}
        options['suffix'] = 'disk_'
        filters.append(('enabled', '=', True))
        options['extend'] = 'disk.disk_extend'
        return self.middleware.call('datastore.query', 'storage.disk', filters, options)

    @private
    def disk_extend(self, disk):
        disk.pop('enabled', None)
        return disk
Example #9
class BackupCredentialService(CRUDService):
    class Config:
        namespace = 'backup.credential'

    @accepts(Ref('query-filters'), Ref('query-options'))
    def query(self, filters=None, options=None):
        return self.middleware.call('datastore.query',
                                    'system.cloudcredentials', filters,
                                    options)

    @accepts(
        Dict(
            'backup-credential',
            Str('name'),
            Str('provider', enum=[
                'AMAZON',
            ]),
            Dict('attributes', additional_attrs=True),
            register=True,
        ))
    def do_create(self, data):
        return self.middleware.call(
            'datastore.insert',
            'system.cloudcredentials',
            data,
        )

    @accepts(Int('id'), Ref('backup-credential'))
    def do_update(self, id, data):
        return self.middleware.call(
            'datastore.update',
            'system.cloudcredentials',
            id,
            data,
        )

    @accepts(Int('id'))
    def do_delete(self, id):
        return self.middleware.call(
            'datastore.delete',
            'system.cloudcredentials',
            id,
        )
Example #10
class KubernetesDaemonsetService(CRUDService):

    class Config:
        namespace = 'k8s.daemonset'
        private = True

    @filterable
    async def query(self, filters, options):
        async with api_client() as (api, context):
            return filter_list(
                [d.to_dict() for d in (await context['apps_api'].list_daemon_set_for_all_namespaces()).items],
                filters, options
            )

    @accepts(
        Dict(
            'daemonset_create',
            Str('namespace', required=True),
            Dict('body', additional_attrs=True, required=True),
            register=True
        )
    )
    async def do_create(self, data):
        async with api_client() as (api, context):
            try:
                await context['apps_api'].create_namespaced_daemon_set(namespace=data['namespace'], body=data['body'])
            except client.exceptions.ApiException as e:
                raise CallError(f'Unable to create daemonset: {e}')

    @accepts(
        Str('name'),
        Ref('daemonset_create'),
    )
    async def do_update(self, name, data):
        async with api_client() as (api, context):
            try:
                await context['apps_api'].patch_namespaced_daemon_set(
                    name, namespace=data['namespace'], body=data['body']
                )
            except client.exceptions.ApiException as e:
                raise CallError(f'Unable to patch {name} daemonset: {e}')

    @accepts(
        Str('name'),
        Dict(
            'daemonset_delete_options',
            Str('namespace', required=True),
        )
    )
    async def do_delete(self, name, options):
        async with api_client() as (api, context):
            try:
                await context['apps_api'].delete_namespaced_daemon_set(name, options['namespace'])
            except client.exceptions.ApiException as e:
                raise CallError(f'Unable to delete daemonset: {e}')
Example #11
class KubernetesSecretService(CRUDService):
    class Config:
        namespace = 'k8s.secret'
        private = True

    @filterable
    async def query(self, filters=None, options=None):
        options = options or {}
        label_selector = options.get('extra', {}).get('label_selector')
        kwargs = {k: v for k, v in [('label_selector', label_selector)] if v}
        async with api_client() as (api, context):
            return filter_list([
                d.to_dict() for d in (
                    await context['core_api'].list_secret_for_all_namespaces(
                        **kwargs)).items
            ], filters, options)

    @accepts(
        Dict('secret_create',
             Str('namespace', required=True),
             Dict('body', additional_attrs=True, required=True),
             register=True))
    async def do_create(self, data):
        async with api_client() as (api, context):
            try:
                await context['core_api'].create_namespaced_secret(
                    namespace=data['namespace'], body=data['body'])
            except client.exceptions.ApiException as e:
                raise CallError(f'Unable to create secret: {e}')

    @accepts(
        Str('name'),
        Ref('secret_create'),
    )
    async def do_update(self, name, data):
        async with api_client() as (api, context):
            try:
                await context['core_api'].patch_namespaced_secret(
                    name, namespace=data['namespace'], body=data['body'])
            except client.exceptions.ApiException as e:
                raise CallError(f'Unable to patch {name} secret: {e}')

    @accepts(Str('name'),
             Dict(
                 'secret_delete_options',
                 Str('namespace', required=True),
             ))
    async def do_delete(self, name, options):
        async with api_client() as (api, context):
            try:
                await context['core_api'].delete_namespaced_secret(
                    name, options['namespace'])
            except client.exceptions.ApiException as e:
                raise CallError(f'Unable to delete secret: {e}')
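
The `extra.label_selector` option maps directly onto the Kubernetes list call; a hedged query sketch from another middleware service (the selector value is illustrative):

# Inside some other middleware service method:
secrets = await self.middleware.call('k8s.secret.query', [], {
    'extra': {'label_selector': 'app=myapp'},  # hypothetical selector
})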
Example #12
class LLDPService(SystemServiceService):
    class Config:
        service = 'lldp'
        datastore_prefix = 'lldp_'
        cli_namespace = 'service.lldp'

    ENTRY = Dict(
        'lldp_entry',
        Bool('intdesc', required=True),
        Str('country', max_length=2, required=True),
        Str('location', required=True),
        Int('id', required=True),
    )

    @accepts()
    @returns(Ref('country_choices'))
    async def country_choices(self):
        """
        Returns country choices for LLDP.
        """
        return await self.middleware.call('system.general.country_choices')

    async def do_update(self, data):
        """
        Update LLDP Service Configuration.

        `country` is a two-letter ISO 3166 country code required for LLDP location support.

        `location` is an optional attribute specifying the physical location of the host.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()
        if new['country'] not in await self.country_choices():
            verrors.add(
                'lldp_update.country',
                f'{new["country"]} not in countries recognized by the system.')
        verrors.check()

        await self._update_service(old, new)

        return await self.config()
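
A hedged update sketch; `country` must be one of the codes returned by lldp.country_choices, and the values here are illustrative:

from middlewared.client import Client

with Client() as c:
    config = c.call('lldp.update', {
        'country': 'US',            # must be a key of lldp.country_choices
        'location': 'Rack 4, DC1',  # hypothetical physical location
    })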
Example #13
class ChartReleaseService(Service):
    class Config:
        namespace = 'chart.release'

    @accepts(Str('release_name'))
    @returns(Ref('chart_release_entry'))
    @job(lock=lambda args: f'chart_release_redeploy_{args[0]}')
    async def redeploy(self, job, release_name):
        """
        Redeploy will initiate a new rollout of the Helm chart according to upgrade strategy defined by the chart release
        workloads. A good example for redeploying is updating kubernetes pods with an updated container image.
        """
        release = await self.middleware.call('chart.release.get_instance',
                                             release_name)
        chart_path = os.path.join(release['path'], 'charts',
                                  release['chart_metadata']['version'])
        if not os.path.exists(chart_path):
            raise CallError(
                f'Unable to locate {chart_path!r} chart version for redeploying {release!r} chart release',
                errno=errno.ENOENT)

        config = await add_context_to_configuration(
            release['config'], {
                CONTEXT_KEY_NAME: {
                    **get_action_context(release_name),
                    'operation': 'UPDATE',
                    'isUpdate': True,
                }
            }, self.middleware)
        await self.middleware.call('chart.release.helm_action', release_name,
                                   chart_path, config, 'update')

        job.set_progress(90, 'Syncing secrets for chart release')
        await self.middleware.call('chart.release.sync_secrets_for_release',
                                   release_name)
        await self.middleware.call('chart.release.refresh_events_state',
                                   release_name)
        job.set_progress(
            100, f'Successfully redeployed {release_name!r} chart release')

        return await self.middleware.call('chart.release.get_instance',
                                          release_name)
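
Because redeploy is decorated with @job, a client call can wait on the job by passing job=True (the release name is hypothetical):

from middlewared.client import Client

with Client() as c:
    release = c.call('chart.release.redeploy', 'plex', job=True)  # hypothetical release name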
Example #14
class CryptoKeyService(Service):
    class Config:
        private = True

    @accepts(Ref('cert_extensions'), Str('schema'))
    def validate_extensions(self, extensions_data, schema):
        # We do not need to validate some extensions like `AuthorityKeyIdentifier`.
        # They are generated from the cert/ca's public key contents. So we skip these.

        skip_extension = ['AuthorityKeyIdentifier']
        verrors = ValidationErrors()

        for name, ext in extensions_data.items():
            if not ext['enabled'] or name in skip_extension:
                continue
            klass = getattr(x509.extensions, name)
            try:
                klass(*get_extension_params((name, ext)))
            except Exception as e:
                verrors.add(
                    f'{schema}.{name}',
                    f'Please provide valid values for {name}: {e}')

        key_usage = extensions_data['KeyUsage']
        basic_constraints = extensions_data['BasicConstraints']
        if key_usage['enabled'] and key_usage['key_cert_sign']:
            if not basic_constraints['enabled'] or not basic_constraints['ca']:
                verrors.add(
                    f'{schema}.BasicConstraints',
                    'Please enable ca when key_cert_sign is set in KeyUsage as per RFC 5280.'
                )

        extended_key_usage = extensions_data['ExtendedKeyUsage']
        if extended_key_usage['enabled'] and not extended_key_usage['usages']:
            verrors.add(
                f'{schema}.ExtendedKeyUsage.usages',
                'Please specify at least one USAGE for this extension.')

        return verrors
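
The expected shape of `extensions_data`, reconstructed from the checks above (only the referenced fields are shown; the usage value is an assumption):

# Illustrative input for validate_extensions:
extensions_data = {
    'BasicConstraints': {'enabled': True, 'ca': True},
    'KeyUsage': {'enabled': True, 'key_cert_sign': True},
    'ExtendedKeyUsage': {'enabled': True, 'usages': ['SERVER_AUTH']},  # hypothetical usage
}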
Example #15
class ServiceService(CRUDService):

    SERVICE_DEFS = {
        's3': ServiceDefinition('minio', '/var/run/minio.pid'),
        'ssh': ServiceDefinition('sshd', '/var/run/sshd.pid'),
        'rsync': ServiceDefinition('rsync', '/var/run/rsyncd.pid'),
        'nfs': ServiceDefinition('nfsd', None),
        'afp': ServiceDefinition('netatalk', None),
        'cifs': ServiceDefinition('smbd', '/var/run/samba4/smbd.pid'),
        'dynamicdns': ServiceDefinition('inadyn', None),
        'snmp': ServiceDefinition('snmpd', '/var/run/net_snmpd.pid'),
        'ftp': ServiceDefinition('proftpd', '/var/run/proftpd.pid'),
        'tftp': ServiceDefinition('inetd', '/var/run/inetd.pid'),
        'iscsitarget': ServiceDefinition('ctld', '/var/run/ctld.pid'),
        'lldp': ServiceDefinition('ladvd', '/var/run/ladvd.pid'),
        'ups': ServiceDefinition('upsd', '/var/db/nut/upsd.pid'),
        'upsmon': ServiceDefinition('upsmon', '/var/db/nut/upsmon.pid'),
        'smartd': ServiceDefinition('smartd', 'smartd-daemon', '/var/run/smartd-daemon.pid'),
        'webshell': ServiceDefinition(None, '/var/run/webshell.pid'),
        'webdav': ServiceDefinition('httpd', '/var/run/httpd.pid'),
        'netdata': ServiceDefinition('netdata', '/var/db/netdata/netdata.pid'),
        'asigra': ServiceDefinition('asigra', '/var/run/dssystem.pid')
    }

    @filterable
    async def query(self, filters=None, options=None):
        """
        Query all system services with `query-filters` and `query-options`.
        """
        if options is None:
            options = {}
        options['prefix'] = 'srv_'

        services = await self.middleware.call('datastore.query',
                                              'services.services', filters,
                                              options)

        # In case a single service has been requested
        if not isinstance(services, list):
            services = [services]

        jobs = {
            asyncio.ensure_future(self._get_status(entry)): entry
            for entry in services
        }
        if jobs:
            done, pending = await asyncio.wait(list(jobs.keys()), timeout=15)

        def result(task):
            """
            Method to handle results of the coroutines.
            In case of error or timeout, provide UNKNOWN state.
            """
            result = None
            try:
                if task in done:
                    result = task.result()
            except Exception:
                pass
            if result is None:
                entry = jobs.get(task)
                self.logger.warning('Failed to get status for %s',
                                    entry['service'])
                entry['state'] = 'UNKNOWN'
                entry['pids'] = []
                return entry
            else:
                return result

        services = list(map(result, jobs))
        return filter_list(services, filters, options)

    @accepts(
        Str('id_or_name'),
        Dict(
            'service-update',
            Bool('enable', default=False),
        ),
    )
    async def do_update(self, id_or_name, data):
        """
        Update service entry of `id_or_name`.

        Currently it only accepts the `enable` option, which controls whether
        the service should start on boot.

        """
        if not id_or_name.isdigit():
            svc = await self.middleware.call(
                'datastore.query', 'services.services',
                [('srv_service', '=', id_or_name)])
            if not svc:
                raise CallError(f'Service {id_or_name} not found.',
                                errno.ENOENT)
            id_or_name = svc[0]['id']

        rv = await self.middleware.call('datastore.update',
                                        'services.services', id_or_name,
                                        {'srv_enable': data['enable']})
        await self.middleware.call('etc.generate', 'rc')
        return rv

    @accepts(
        Str('service'),
        Dict(
            'service-control',
            Bool('onetime', default=True),
            Bool('wait', default=None, null=True),
            Bool('sync', default=None, null=True),
            register=True,
        ),
    )
    async def start(self, service, options=None):
        """ Start the service specified by `service`.

        The helper will use method self._start_[service]() to start the service.
        If the method does not exist, it would fallback using service(8)."""
        await self.middleware.call_hook('service.pre_action', service, 'start',
                                        options)
        sn = self._started_notify("start", service)
        await self._simplecmd("start", service, options)
        return await self.started(service, sn)

    async def started(self, service, sn=None):
        """
        Test if service specified by `service` has been started.
        """
        if sn:
            await self.middleware.run_in_thread(sn.join)

        try:
            svc = await self.query([('service', '=', service)], {'get': True})
            self.middleware.send_event('service.query', 'CHANGED', fields=svc)
            return svc['state'] == 'RUNNING'
        except IndexError:
            f = getattr(self, '_started_' + service, None)
            if callable(f):
                if inspect.iscoroutinefunction(f):
                    return (await f())[0]
                else:
                    return f()[0]
            else:
                return (await self._started(service))[0]

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def stop(self, service, options=None):
        """ Stop the service specified by `service`.

        The helper will use method self._stop_[service]() to stop the service.
        If the method does not exist, it would fallback using service(8)."""
        await self.middleware.call_hook('service.pre_action', service, 'stop',
                                        options)
        sn = self._started_notify("stop", service)
        await self._simplecmd("stop", service, options)
        return await self.started(service, sn)

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def restart(self, service, options=None):
        """
        Restart the service specified by `service`.

        The helper will use method self._restart_[service]() to restart the service.
        If the method does not exist, it would fallback using service(8)."""
        await self.middleware.call_hook('service.pre_action', service,
                                        'restart', options)
        sn = self._started_notify("restart", service)
        await self._simplecmd("restart", service, options)
        return await self.started(service, sn)

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def reload(self, service, options=None):
        """
        Reload the service specified by `service`.

        The helper will use method self._reload_[service]() to reload the service.
        If the method does not exist, the helper will try self.restart of the
        service instead."""
        await self.middleware.call_hook('service.pre_action', service,
                                        'reload', options)
        try:
            await self._simplecmd("reload", service, options)
        except Exception:
            await self.restart(service, options)
        return await self.started(service)

    async def _get_status(self, service):
        f = getattr(self, '_started_' + service['service'], None)
        if callable(f):
            if inspect.iscoroutinefunction(f):
                running, pids = await f()
            else:
                running, pids = f()
        else:
            running, pids = await self._started(service['service'])

        if running:
            state = 'RUNNING'
        else:
            state = 'STOPPED'

        service['state'] = state
        service['pids'] = pids
        return service

    async def _simplecmd(self, action, what, options=None):
        self.logger.debug("Calling: %s(%s) ", action, what)
        f = getattr(self, '_' + action + '_' + what, None)
        if f is None:
            # Provide generic start/stop/restart verbs for rc.d scripts
            if what in self.SERVICE_DEFS:
                if self.SERVICE_DEFS[what].rc_script:
                    what = self.SERVICE_DEFS[what].rc_script
            if action in ("start", "stop", "restart", "reload"):
                if action == 'restart':
                    await self._system("/usr/sbin/service " + what +
                                       " forcestop ")
                # Guard against options being None when called internally.
                await self._service(what, action, **(options or {}))
            else:
                raise ValueError("Internal error: Unknown command")
        else:
            call = f(**(options or {}))
            if inspect.iscoroutinefunction(f):
                await call

    async def _system(self, cmd):
        proc = await Popen(cmd,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           shell=True,
                           close_fds=True)
        stdout = (await proc.communicate())[0]
        if proc.returncode != 0:
            self.logger.warning("Command %r failed with code %d:\n%s", cmd,
                                proc.returncode, stdout)
        return proc.returncode

    async def _service(self, service, verb, **options):
        onetime = options.pop('onetime', None)
        force = options.pop('force', None)
        quiet = options.pop('quiet', None)
        extra = options.pop('extra', '')

        # force takes precedence over onetime, which takes precedence
        # over quiet; they are mutually exclusive
        preverb = ''
        if force:
            preverb = 'force'
        elif onetime:
            preverb = 'one'
        elif quiet:
            preverb = 'quiet'

        return await self._system('/usr/sbin/service {} {}{} {}'.format(
            service,
            preverb,
            verb,
            extra,
        ))
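
    # For illustration (hypothetical service name): force + "restart" runs
    #   /usr/sbin/service smbd forcerestart
    # while onetime + "start" runs
    #   /usr/sbin/service smbd onestart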

    def _started_notify(self, verb, what):
        """
        The check for started [or not] processes is currently done in 2 steps
        This is the first step which involves a thread StartNotify that watch for event
        before actually start/stop rc.d scripts

        Returns:
            StartNotify object if the service is known or None otherwise
        """

        if what in self.SERVICE_DEFS:
            sn = StartNotify(verb=verb,
                             pidfile=self.SERVICE_DEFS[what].pidfile)
            sn.start()
            return sn
        else:
            return None

    async def _started(self, what, notify=None):
        """
        This is the second step::
        Wait for the StartNotify thread to finish and then check for the
        status of pidfile/procname using pgrep

        Returns:
            True whether the service is alive, False otherwise
        """

        if what in self.SERVICE_DEFS:
            if notify:
                await self.middleware.run_in_thread(notify.join)

            if self.SERVICE_DEFS[what].pidfile:
                pgrep = "/bin/pgrep -F {}{}".format(
                    self.SERVICE_DEFS[what].pidfile,
                    ' ' + self.SERVICE_DEFS[what].procname
                    if self.SERVICE_DEFS[what].procname else '',
                )
            else:
                pgrep = "/bin/pgrep {}".format(
                    self.SERVICE_DEFS[what].procname)
            proc = await Popen(pgrep,
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               close_fds=True)
            data = (await proc.communicate())[0].decode()

            if proc.returncode == 0:
                return True, [
                    int(i) for i in data.strip().split('\n') if i.isdigit()
                ]
        return False, []
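
    # For illustration, assuming ServiceDefinition carries the procname and
    # pidfile used above: for 'cifs' the check runs
    #   /bin/pgrep -F /var/run/samba4/smbd.pid smbd
    # while for 'nfs' (no pidfile) it runs
    #   /bin/pgrep nfsd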

    async def _start_asigra(self, **kwargs):
        await self.middleware.call('asigra.setup_filesystems')
        await self.middleware.call('asigra.setup_postgresql')
        await self._service("postgresql", "start", force=True, **kwargs)
        await self.middleware.call('asigra.setup_asigra')
        await self.middleware.call('etc.generate', 'asigra')
        await self._service("dssystem", "start", force=True, **kwargs)

    async def _stop_asigra(self, **kwargs):
        await self._service("dssystem", "stop", force=True, **kwargs)
        await self._service("postgresql", "stop", force=True, **kwargs)

    async def _restart_asigra(self, **kwargs):
        await self._stop_asigra(**kwargs)
        await self._start_asigra(**kwargs)

    async def _started_asigra(self, **kwargs):
        if await self._service("dssystem", "status", force=True, **kwargs) != 0:
            return False, []
        return True, []

    async def _start_webdav(self, **kwargs):
        await self.middleware.call('etc.generate', 'webdav')
        await self._service("apache24", "start", **kwargs)

    async def _stop_webdav(self, **kwargs):
        await self._service("apache24", "stop", **kwargs)

    async def _restart_webdav(self, **kwargs):
        await self._service("apache24", "stop", force=True, **kwargs)
        await self.middleware.call('etc.generate', 'webdav')
        await self._service("apache24", "restart", **kwargs)

    async def _reload_webdav(self, **kwargs):
        await self.middleware.call('etc.generate', 'webdav')
        await self._service("apache24", "reload", **kwargs)

    async def _restart_django(self, **kwargs):
        await self._service("django", "restart", **kwargs)

    async def _start_webshell(self, **kwargs):
        await self._system(
            "/usr/local/bin/python /usr/local/www/freenasUI/tools/webshell.py")

    async def _restart_webshell(self, **kwargs):
        try:
            with open('/var/run/webshell.pid', 'r') as f:
                pid = f.read()
                os.kill(int(pid), signal.SIGTERM)
                time.sleep(0.2)
                os.kill(int(pid), signal.SIGKILL)
        except Exception:
            pass
        await self._system(
            "ulimit -n 1024 && /usr/local/bin/python /usr/local/www/freenasUI/tools/webshell.py"
        )

    async def _restart_iscsitarget(self, **kwargs):
        await self.middleware.call("etc.generate", "ctld")
        await self._service("ctld", "stop", force=True, **kwargs)
        await self.middleware.call("etc.generate", "ctld")
        await self._service("ctld", "restart", **kwargs)

    async def _start_iscsitarget(self, **kwargs):
        await self.middleware.call("etc.generate", "ctld")
        await self._service("ctld", "start", **kwargs)

    async def _stop_iscsitarget(self, **kwargs):
        with contextlib.suppress(IndexError):
            sysctl.filter("kern.cam.ctl.ha_peer")[0].value = ""

        await self._service("ctld", "stop", force=True, **kwargs)

    async def _reload_iscsitarget(self, **kwargs):
        await self.middleware.call("etc.generate", "ctld")
        await self._service("ctld", "reload", **kwargs)

    async def _start_collectd(self, **kwargs):
        if not await self.started('rrdcached'):
            # Let's ensure that before we start collectd, rrdcached is always running
            await self.start('rrdcached')

        await self.middleware.call('etc.generate', 'collectd')
        await self._service("collectd-daemon", "restart", **kwargs)

    async def _stop_collectd(self, **kwargs):
        await self._service("collectd-daemon", "stop", **kwargs)

    async def _restart_collectd(self, **kwargs):
        await self._stop_collectd(**kwargs)
        await self._start_collectd(**kwargs)

    async def _started_collectd(self, **kwargs):
        if await self._service('collectd-daemon',
                               'status',
                               quiet=True,
                               **kwargs):
            return False, []
        else:
            return True, []

    async def _started_rrdcached(self, **kwargs):
        if await self._service('rrdcached', 'status', quiet=True, **kwargs):
            return False, []
        else:
            return True, []

    async def _stop_rrdcached(self, **kwargs):
        await self.stop('collectd')
        await self._service('rrdcached', 'stop', **kwargs)

    async def _restart_rrdcached(self, **kwargs):
        await self._stop_rrdcached(**kwargs)
        await self.start('rrdcached')
        await self.start('collectd')

    async def _reload_rc(self, **kwargs):
        await self.middleware.call('etc.generate', 'rc')

    async def _restart_powerd(self, **kwargs):
        await self.middleware.call('etc.generate', 'rc')
        await self._service('powerd', 'restart', **kwargs)

    async def _reload_sysctl(self, **kwargs):
        await self.middleware.call('etc.generate', 'sysctl')

    async def _start_network(self, **kwargs):
        await self.middleware.call('interface.sync')
        await self.middleware.call('route.sync')

    async def _reload_named(self, **kwargs):
        await self._service("named", "reload", **kwargs)

    async def _restart_syscons(self, **kwargs):
        await self.middleware.call('etc.generate', 'rc')
        await self._service('syscons', 'restart', **kwargs)

    async def _reload_hostname(self, **kwargs):
        await self._system('/bin/hostname ""')
        await self.middleware.call('etc.generate', 'hostname')
        await self.middleware.call('etc.generate', 'rc')
        await self._service("hostname", "start", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", quiet=True, **kwargs)
        await self._restart_collectd(**kwargs)

    async def _reload_resolvconf(self, **kwargs):
        await self._reload_hostname()
        await self.middleware.call('dns.sync')

    async def _reload_networkgeneral(self, **kwargs):
        await self._reload_resolvconf()
        await self._service("routing", "restart", **kwargs)

    async def _start_routing(self, **kwargs):
        await self.middleware.call('etc.generate', 'rc')
        await self._service('routing', 'start', **kwargs)

    async def _reload_timeservices(self, **kwargs):
        await self.middleware.call('etc.generate', 'localtime')
        await self.middleware.call('etc.generate', 'ntpd')
        await self._service("ntpd", "restart", **kwargs)
        settings = await self.middleware.call('datastore.query',
                                              'system.settings', [], {
                                                  'order_by': ['-id'],
                                                  'get': True
                                              })
        os.environ['TZ'] = settings['stg_timezone']
        time.tzset()

    async def _restart_ntpd(self, **kwargs):
        await self.middleware.call('etc.generate', 'ntpd')
        await self._service('ntpd', 'restart', **kwargs)

    async def _start_smartd(self, **kwargs):
        await self.middleware.call("etc.generate", "smartd")
        await self._service("smartd-daemon", "start", **kwargs)

    def _initializing_smartd_pid(self):
        """
        smartd initialization can take a long time if lots of disks are present
        It only writes pidfile at the end of the initialization but forks immediately
        This method returns PID of smartd process that is still initializing and has not written pidfile yet
        """
        if os.path.exists(self.SERVICE_DEFS["smartd"].pidfile):
            # Already started, no need for special handling
            return

        for process in psutil.process_iter(attrs=["cmdline", "create_time"]):
            if process.info["cmdline"][:1] == ["/usr/local/sbin/smartd"]:
                break
        else:
            # No smartd process present
            return

        lifetime = time.time() - process.info["create_time"]
        if lifetime < 300:
            # Looks like just the process we need
            return process.pid

        self.logger.warning(
            "Got an orphan smartd process: pid=%r, lifetime=%r", process.pid,
            lifetime)

    async def _started_smartd(self, **kwargs):
        result = await self._started("smartd")
        if result[0]:
            return result

        if await self.middleware.run_in_thread(self._initializing_smartd_pid) is not None:
            return True, []

        return False, []

    async def _reload_smartd(self, **kwargs):
        await self.middleware.call("etc.generate", "smartd")

        pid = await self.middleware.run_in_thread(self._initializing_smartd_pid)
        if pid is None:
            await self._service("smartd-daemon", "reload", **kwargs)
            return

        os.kill(pid, signal.SIGKILL)
        await self._service("smartd-daemon", "start", **kwargs)

    async def _restart_smartd(self, **kwargs):
        await self.middleware.call("etc.generate", "smartd")

        pid = await self.middleware.run_in_thread(self._initializing_smartd_pid)
        if pid is None:
            await self._service("smartd-daemon", "stop", force=True, **kwargs)
            await self._service("smartd-daemon", "restart", **kwargs)
            return

        os.kill(pid, signal.SIGKILL)
        await self._service("smartd-daemon", "start", **kwargs)

    async def _stop_smartd(self, **kwargs):
        pid = await self.middleware.run_in_thread(self._initializing_smartd_pid)
        if pid is None:
            await self._service("smartd-daemon", "stop", force=True, **kwargs)
            return

        os.kill(pid, signal.SIGKILL)

    async def _reload_ssh(self, **kwargs):
        await self.middleware.call('etc.generate', 'ssh')
        await self.middleware.call('mdnsadvertise.restart')
        await self._service("openssh", "reload", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _start_ssh(self, **kwargs):
        await self.middleware.call('etc.generate', 'ssh')
        await self.middleware.call('mdnsadvertise.restart')
        await self._service("openssh", "start", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _stop_ssh(self, **kwargs):
        await self._service("openssh", "stop", force=True, **kwargs)
        await self.middleware.call('mdnsadvertise.restart')

    async def _restart_ssh(self, **kwargs):
        await self.middleware.call('etc.generate', 'ssh')
        await self._service("openssh", "stop", force=True, **kwargs)
        await self.middleware.call('mdnsadvertise.restart')
        await self._service("openssh", "restart", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _start_ssl(self, **kwargs):
        await self.middleware.call('etc.generate', 'ssl')

    async def _start_s3(self, **kwargs):
        await self.middleware.call('etc.generate', 's3')
        await self._service("minio", "start", quiet=True, **kwargs)

    async def _reload_s3(self, **kwargs):
        await self.middleware.call('etc.generate', 's3')
        await self._service("minio", "restart", quiet=True, **kwargs)

    async def _reload_rsync(self, **kwargs):
        await self.middleware.call('etc.generate', 'rsync')
        await self._service("rsyncd", "restart", **kwargs)

    async def _restart_rsync(self, **kwargs):
        await self._stop_rsync()
        await self._start_rsync()

    async def _start_rsync(self, **kwargs):
        await self.middleware.call('etc.generate', 'rsync')
        await self._service("rsyncd", "start", **kwargs)

    async def _stop_rsync(self, **kwargs):
        await self._service("rsyncd", "stop", force=True, **kwargs)

    async def _started_nis(self, **kwargs):
        return (await self.middleware.call('nis.started')), []

    async def _start_nis(self, **kwargs):
        return (await self.middleware.call('nis.start')), []

    async def _restart_nis(self, **kwargs):
        await self.middleware.call('nis.stop')
        return (await self.middleware.call('nis.start')), []

    async def _stop_nis(self, **kwargs):
        return (await self.middleware.call('nis.stop')), []

    async def _started_ldap(self, **kwargs):
        return await self.middleware.call('ldap.started'), []

    async def _start_ldap(self, **kwargs):
        return await self.middleware.call('ldap.start'), []

    async def _stop_ldap(self, **kwargs):
        return await self.middleware.call('ldap.stop'), []

    async def _restart_ldap(self, **kwargs):
        await self.middleware.call('ldap.stop')
        return await self.middleware.call('ldap.start'), []

    async def _start_lldp(self, **kwargs):
        await self._service("ladvd", "start", **kwargs)

    async def _stop_lldp(self, **kwargs):
        await self._service("ladvd", "stop", force=True, **kwargs)

    async def _restart_lldp(self, **kwargs):
        await self._service("ladvd", "stop", force=True, **kwargs)
        await self._service("ladvd", "restart", **kwargs)

    async def _started_activedirectory(self, **kwargs):
        return await self.middleware.call('activedirectory.started'), []

    async def _start_activedirectory(self, **kwargs):
        return await self.middleware.call('activedirectory.start'), []

    async def _stop_activedirectory(self, **kwargs):
        return await self.middleware.call('activedirectory.stop'), []

    async def _restart_activedirectory(self, **kwargs):
        await self.middleware.call('kerberos.stop')
        return await self.middleware.call('activedirectory.start'), []

    async def _reload_activedirectory(self, **kwargs):
        await self._service("samba_server", "stop", force=True, **kwargs)
        await self._service("samba_server", "start", quiet=True, **kwargs)

    async def _restart_syslogd(self, **kwargs):
        await self.middleware.call("etc.generate", "syslogd")
        await self._system("/etc/local/rc.d/syslog-ng restart")

    async def _start_syslogd(self, **kwargs):
        await self.middleware.call("etc.generate", "syslogd")
        await self._system("/etc/local/rc.d/syslog-ng start")

    async def _stop_syslogd(self, **kwargs):
        await self._system("/etc/local/rc.d/syslog-ng stop")

    async def _reload_syslogd(self, **kwargs):
        await self.middleware.call("etc.generate", "syslogd")
        await self._system("/etc/local/rc.d/syslog-ng reload")

    async def _start_tftp(self, **kwargs):
        await self.middleware.call('etc.generate', 'inetd')
        await self._service("inetd", "start", **kwargs)

    async def _reload_tftp(self, **kwargs):
        await self.middleware.call('etc.generate', 'inetd')
        await self._service("inetd", "stop", force=True, **kwargs)
        await self._service("inetd", "restart", **kwargs)

    async def _restart_tftp(self, **kwargs):
        await self.middleware.call('etc.generate', 'inetd')
        await self._service("inetd", "stop", force=True, **kwargs)
        await self._service("inetd", "restart", **kwargs)

    async def _restart_cron(self, **kwargs):
        await self.middleware.call('etc.generate', 'cron')

    async def _start_motd(self, **kwargs):
        await self.middleware.call('etc.generate', 'motd')
        await self._service("motd", "start", quiet=True, **kwargs)

    async def _start_ttys(self, **kwargs):
        await self.middleware.call('etc.generate', 'ttys')

    async def _reload_ftp(self, **kwargs):
        await self.middleware.call("etc.generate", "ftp")
        await self._service("proftpd", "restart", **kwargs)

    async def _restart_ftp(self, **kwargs):
        await self._stop_ftp()
        await self._start_ftp()

    async def _start_ftp(self, **kwargs):
        await self.middleware.call("etc.generate", "ftp")
        await self._service("proftpd", "start", **kwargs)

    async def _stop_ftp(self, **kwargs):
        await self._service("proftpd", "stop", force=True, **kwargs)

    async def _start_ups(self, **kwargs):
        await self.middleware.call('ups.dismiss_alerts')
        await self.middleware.call('etc.generate', 'ups')
        await self._service("nut", "start", **kwargs)
        await self._service("nut_upsmon", "start", **kwargs)
        await self._service("nut_upslog", "start", **kwargs)
        if await self.started('collectd'):
            asyncio.ensure_future(self.restart('collectd'))

    async def _stop_ups(self, **kwargs):
        await self.middleware.call('ups.dismiss_alerts')
        await self._service("nut_upslog", "stop", force=True, **kwargs)
        await self._service("nut_upsmon", "stop", force=True, **kwargs)
        await self._service("nut", "stop", force=True, **kwargs)
        if await self.started('collectd'):
            asyncio.ensure_future(self.restart('collectd'))

    async def _restart_ups(self, **kwargs):
        await self.middleware.call('ups.dismiss_alerts')
        await self.middleware.call('etc.generate', 'ups')
        await self._service("nut", "stop", force=True, onetime=True)
        # We need to wait for the upsmon service to die properly: multiple
        # processes are associated with it, and in most cases they have not
        # exited yet when a restart is initiated, which then fails because
        # the older process is still running.
        await self._service("nut_upsmon", "stop", force=True, onetime=True)
        upsmon_processes = await run(['pgrep', '-x', 'upsmon'],
                                     encoding='utf8',
                                     check=False)
        if upsmon_processes.returncode == 0:
            gone, alive = await self.middleware.run_in_thread(
                psutil.wait_procs,
                map(lambda v: psutil.Process(int(v)),
                    upsmon_processes.stdout.split()),
                timeout=10)
            if alive:
                for pid in map(int, upsmon_processes.stdout.split()):
                    with contextlib.suppress(ProcessLookupError):
                        os.kill(pid, signal.SIGKILL)

        await self._service("nut_upslog", "stop", force=True, onetime=True)

        await self._service("nut", "restart", onetime=True)
        await self._service("nut_upsmon", "restart", onetime=True)
        await self._service("nut_upslog", "restart", onetime=True)
        if await self.started('collectd'):
            asyncio.ensure_future(self.restart('collectd'))

    async def _started_ups(self, **kwargs):
        return await self._started('upsmon')

    async def _start_afp(self, **kwargs):
        await self.middleware.call("etc.generate", "afpd")
        await self._service("netatalk", "start", **kwargs)

    async def _stop_afp(self, **kwargs):
        await self._service("netatalk", "stop", force=True, **kwargs)
        # When netatalk stops, a stuck afpd or cnid_metad process can get
        # left behind, which can cause issues when netatalk is restarted.
        await self._system("pkill -9 afpd")
        await self._system("pkill -9 cnid_metad")

    async def _restart_afp(self, **kwargs):
        await self._stop_afp()
        await self._start_afp()

    async def _reload_afp(self, **kwargs):
        await self.middleware.call("etc.generate", "afpd")
        await self._system("killall -1 netatalk")

    async def _reload_nfs(self, **kwargs):
        await self.middleware.call("etc.generate", "nfsd")
        await self.middleware.call("nfs.setup_v4")
        await self._service("mountd", "reload", force=True, **kwargs)

    async def _restart_nfs(self, **kwargs):
        await self._stop_nfs(**kwargs)
        await self._start_nfs(**kwargs)

    async def _stop_nfs(self, **kwargs):
        await self._service("lockd", "stop", force=True, **kwargs)
        await self._service("statd", "stop", force=True, **kwargs)
        await self._service("nfsd", "stop", force=True, **kwargs)
        await self._service("mountd", "stop", force=True, **kwargs)
        await self._service("nfsuserd", "stop", force=True, **kwargs)
        await self._service("gssd", "stop", force=True, **kwargs)
        await self._service("rpcbind", "stop", force=True, **kwargs)

    async def _start_nfs(self, **kwargs):
        await self.middleware.call("etc.generate", "nfsd")
        await self._service("rpcbind", "start", quiet=True, **kwargs)
        await self.middleware.call("nfs.setup_v4")
        await self._service("mountd", "start", quiet=True, **kwargs)
        await self._service("nfsd", "start", quiet=True, **kwargs)
        await self._service("statd", "start", quiet=True, **kwargs)
        await self._service("lockd", "start", quiet=True, **kwargs)

    async def _start_dynamicdns(self, **kwargs):
        await self.middleware.call('etc.generate', 'inadyn')
        await self._service("inadyn", "start", **kwargs)

    async def _restart_dynamicdns(self, **kwargs):
        await self.middleware.call('etc.generate', 'inadyn')
        await self._service("inadyn", "stop", force=True, **kwargs)
        await self._service("inadyn", "restart", **kwargs)

    async def _reload_dynamicdns(self, **kwargs):
        await self.middleware.call('etc.generate', 'inadyn')
        await self._service("inadyn", "stop", force=True, **kwargs)
        await self._service("inadyn", "restart", **kwargs)

    async def _restart_system(self, **kwargs):
        asyncio.ensure_future(
            self.middleware.call('system.reboot', {'delay': 3}))

    async def _stop_system(self, **kwargs):
        asyncio.ensure_future(
            self.middleware.call('system.shutdown', {'delay': 3}))

    async def _reload_cifs(self, **kwargs):
        await self.middleware.call("etc.generate", "smb_share")
        await self._service("samba_server", "reload", force=True, **kwargs)
        await self._service("mdnsd", "restart", **kwargs)
        # After mdns is restarted we need to reload netatalk to have it re-register
        # with mdns. Ticket #7133
        await self._service("netatalk", "reload", **kwargs)

    async def _restart_cifs(self, **kwargs):
        await self.middleware.call("etc.generate", "smb")
        await self.middleware.call("etc.generate", "smb_share")
        await self._service("samba_server", "stop", force=True, **kwargs)
        await self._service("samba_server", "restart", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", **kwargs)
        # After mdns is restarted we need to reload netatalk to have it re-register
        # with mdns. Ticket #7133
        await self._service("netatalk", "reload", **kwargs)

    async def _start_cifs(self, **kwargs):
        await self.middleware.call("etc.generate", "smb")
        await self.middleware.call("etc.generate", "smb_share")
        await self._service("samba_server", "start", quiet=True, **kwargs)
        try:
            await self.middleware.call("smb.add_admin_group", "", True)
        except Exception as e:
            raise CallError(e)

    async def _stop_cifs(self, **kwargs):
        await self._service("samba_server", "stop", force=True, **kwargs)

    async def _started_cifs(self, **kwargs):
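        # _service returns the exit code of the onetime "status" command here,
        # so a truthy (non-zero) value means samba_server is not running.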
        if await self._service("samba_server",
                               "status",
                               quiet=True,
                               onetime=True,
                               **kwargs):
            return False, []
        else:
            return True, []

    async def _start_snmp(self, **kwargs):
        await self.middleware.call("etc.generate", "snmpd")
        await self._service("snmpd", "start", quiet=True, **kwargs)
        await self._service("snmp-agent", "start", quiet=True, **kwargs)

    async def _stop_snmp(self, **kwargs):
        await self._service("snmp-agent", "stop", quiet=True, **kwargs)
        await self._service("snmpd", "stop", quiet=True, **kwargs)

    async def _restart_snmp(self, **kwargs):
        await self._service("snmp-agent", "stop", quiet=True, **kwargs)
        await self._service("snmpd", "stop", force=True, **kwargs)
        await self.middleware.call("etc.generate", "snmpd")
        await self._service("snmpd", "start", quiet=True, **kwargs)
        await self._service("snmp-agent", "start", quiet=True, **kwargs)

    async def _reload_snmp(self, **kwargs):
        await self._service("snmp-agent", "stop", quiet=True, **kwargs)
        await self._service("snmpd", "stop", force=True, **kwargs)
        await self.middleware.call("etc.generate", "snmpd")
        await self._service("snmpd", "start", quiet=True, **kwargs)
        await self._service("snmp-agent", "start", quiet=True, **kwargs)

    async def _restart_http(self, **kwargs):
        await self.middleware.call("etc.generate", "nginx")
        await self.middleware.call('mdnsadvertise.restart')
        await self._service("nginx", "restart", **kwargs)

    async def _reload_http(self, **kwargs):
        await self.middleware.call("etc.generate", "nginx")
        await self.middleware.call('mdnsadvertise.restart')
        await self._service("nginx", "reload", **kwargs)

    async def _reload_loader(self, **kwargs):
        await self.middleware.call("etc.generate", "loader")

    async def _restart_disk(self, **kwargs):
        await self._reload_disk(**kwargs)

    async def _reload_disk(self, **kwargs):
        await self.middleware.call('etc.generate', 'fstab')
        await self._service("mountlate", "start", quiet=True, **kwargs)
        # Restarting rrdcached can take a long time. There is no
        # benefit in waiting for it, since even if it fails it will not
        # tell the user anything useful.
        asyncio.ensure_future(self.restart("collectd", kwargs))

    async def _reload_user(self, **kwargs):
        await self.middleware.call("etc.generate", "user")
        await self.middleware.call('etc.generate', 'aliases')
        await self.middleware.call('etc.generate', 'sudoers')
        await self.reload("cifs", kwargs)

    async def _restart_system_datasets(self, **kwargs):
        systemdataset = await self.middleware.call('systemdataset.setup')
        if not systemdataset:
            return None
        if systemdataset['syslog']:
            await self.restart("syslogd", kwargs)
        await self.restart("cifs", kwargs)

        # Restarting rrdcached can take a long time. There is no
        # benefit in waiting for it, since even if it fails it will not
        # tell the user anything useful.
        # Restarting rrdcached will make sure that we start/restart collectd as well
        asyncio.ensure_future(self.restart("rrdcached", kwargs))

    async def _start_netdata(self, **kwargs):
        await self.middleware.call('etc.generate', 'netdata')
        await self._service('netdata', 'start', **kwargs)

    async def _restart_netdata(self, **kwargs):
        await self._service('netdata', 'stop')
        await self._start_netdata(**kwargs)

    @private
    async def identify_process(self, name):
        for service, definition in self.SERVICE_DEFS.items():
            if definition.procname == name:
                return service

    @accepts(Int("pid"), Int("timeout", default=10))
    def terminate_process(self, pid, timeout):
        """
        Terminate process by `pid`.

        First send the `TERM` signal; then, if the process was not terminated within `timeout` seconds, send the `KILL` signal.

        Returns `true` if the process was successfully terminated with `TERM` and `false` if we had to use `KILL`.
        """
        try:
            process = psutil.Process(pid)
        except psutil.NoSuchProcess:
            raise CallError("Process does not exist")

        process.terminate()

        gone, alive = psutil.wait_procs([process], timeout)
        if not alive:
            return True

        alive[0].kill()
        return False
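
The TERM-then-KILL escalation used by `terminate_process` above can be reproduced standalone with `psutil`; a minimal sketch (the `sleep` child is just an illustrative target):

import subprocess

import psutil

proc = subprocess.Popen(['sleep', '60'])         # throwaway process to terminate
p = psutil.Process(proc.pid)
p.terminate()                                    # polite SIGTERM first
gone, alive = psutil.wait_procs([p], timeout=5)
if alive:                                        # still running after 5 seconds
    alive[0].kill()                              # escalate to SIGKILL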
Example #16
class ChartReleaseService(Service):
    class Config:
        namespace = 'chart.release'

    @accepts(Str('release_name'),
             Dict(
                 'rollback_options',
                 Bool('force_rollback', default=False),
                 Bool('recreate_resources', default=False),
                 Bool('rollback_snapshot', default=True),
                 Str('item_version', required=True),
             ))
    @returns(Ref('chart_release_entry'))
    @job(lock=lambda args: f'chart_release_rollback_{args[0]}')
    async def rollback(self, job, release_name, options):
        """
        Rollback a chart release to a previous chart version.

        `item_version` is the version to which we want to roll back the chart release.

        `rollback_snapshot` is a boolean value which, when set, will roll back snapshots of any PVC's or ix volumes
        being consumed by the chart release.

        `force_rollback` is a boolean which, when set, will force the rollback operation to move forward even if no
        snapshots are found. This is only useful when `rollback_snapshot` is set.

        `recreate_resources` is a boolean which will delete and then recreate the kubernetes resources on rollback
        of the chart release. This should be used with caution because if the chart release is consuming immutable
        objects like a PVC, the rollback operation can't be performed and will fail as helm tries to do a 3-way
        patch for the rollback.

        Rollback restores the actual configuration of the release at the specified `item_version`, together with
        any associated `ix_volumes` and any PVC's which were consuming the chart release storage class.
        """
        await self.middleware.call('kubernetes.validate_k8s_setup')
        release = await self.middleware.call('chart.release.query',
                                             [['id', '=', release_name]], {
                                                 'extra': {
                                                     'history': True,
                                                     'retrieve_resources': True
                                                 },
                                                 'get': True,
                                             })
        rollback_version = options['item_version']
        if rollback_version not in release['history']:
            raise CallError(
                f'Unable to find {rollback_version!r} item version in {release_name!r} history',
                errno=errno.ENOENT)

        chart_path = os.path.join(release['path'], 'charts', rollback_version)
        if not await self.middleware.run_in_thread(
                lambda: os.path.exists(chart_path)):
            raise CallError(
                f'Unable to locate {chart_path!r} path for rolling back',
                errno=errno.ENOENT)

        chart_details = await self.middleware.call(
            'catalog.item_version_details', chart_path)
        await self.middleware.call('catalog.version_supported_error_check',
                                   chart_details)

        history_item = release['history'][rollback_version]
        history_ver = str(history_item['version'])
        force_rollback = options['force_rollback']
        helm_force_flag = options['recreate_resources']

        # If helm force flag is specified, we should see if the chart release is consuming any PVC's and if it is,
        # let's not initiate a rollback as it's destined to fail by helm
        if helm_force_flag and release['resources']['persistent_volume_claims']:
            raise CallError(
                f'Unable to rollback {release_name!r} as chart release is consuming PVC. '
                'Please unset recreate_resources to proceed with rollback.')

        # TODO: Remove the logic for ix_volumes as moving on we would be only snapshotting volumes and only rolling
        #  it back
        snap_data = {'volumes': False, 'volumes/ix_volumes': False}
        for snap in snap_data:
            volumes_ds = os.path.join(release['dataset'], snap)
            snap_name = f'{volumes_ds}@{history_ver}'
            if await self.middleware.call('zfs.snapshot.query',
                                          [['id', '=', snap_name]]):
                snap_data[snap] = snap_name

        if options['rollback_snapshot'] and not any(
                snap_data.values()) and not force_rollback:
            raise CallError(
                f'Unable to locate {", ".join(snap_data.keys())!r} snapshot(s) for {release_name!r} volumes',
                errno=errno.ENOENT)

        current_dataset_paths = {
            os.path.join('/mnt', d['id'])
            for d in await self.middleware.call('zfs.dataset.query', [[
                'id', '^',
                f'{os.path.join(release["dataset"], "volumes/ix_volumes")}/'
            ]])
        }
        history_datasets = {
            d['hostPath']
            for d in history_item['config'].get('ixVolumes', [])
        }
        if history_datasets - current_dataset_paths:
            raise CallError(
                'Please specify a rollback version where following iX Volumes are not being used as they don\'t '
                f'exist anymore: {", ".join(d.split("/")[-1] for d in history_datasets - current_dataset_paths)}'
            )

        job.set_progress(25, 'Initial validation complete')

        # TODO: Upstream helm does not have ability to force stop a release, until we have that ability
        #  let's just try to do a best effort to scale down scaleable workloads and then scale them back up
        job.set_progress(45, 'Scaling down workloads')
        scale_stats = await (await self.middleware.call(
            'chart.release.scale', release_name,
            {'replica_count': 0})).wait(raise_error=True)

        job.set_progress(50, 'Rolling back chart release')

        command = []
        if helm_force_flag:
            command.append('--force')

        cp = await run(
            [
                'helm', 'rollback', release_name, history_ver, '-n',
                get_namespace(release_name), '--recreate-pods'
            ] + command,
            check=False,
        )
        await self.middleware.call('chart.release.sync_secrets_for_release',
                                   release_name)
        await self.middleware.call('chart.release.refresh_events_state',
                                   release_name)

        # Helm rollback is a bit tricky: it utilizes the rollout functionality of kubernetes and rolls back the
        # resources to the specified version. However, if the rollback fails for any reason, it's possible that
        # some k8s resources got rolled back to the previous version whereas others did not. In that case, if
        # helm treats the chart release as being on the previous version, we should still roll back the snapshots
        # and raise the error afterwards. If helm does not recognize the chart release as being on a previous
        # version, we can just raise the error right away.
        current_version = (await self.middleware.call(
            'chart.release.get_instance',
            release_name))['chart_metadata']['version']
        if current_version != rollback_version and cp.returncode:
            raise CallError(
                f'Failed to rollback {release_name!r} chart release to {rollback_version!r}: {cp.stderr.decode()}'
            )

        # We are going to remove old chart version copies
        await self.middleware.call(
            'chart.release.remove_old_upgraded_chart_version_copies',
            os.path.join(release['path'], 'charts'),
            rollback_version,
        )

        if options['rollback_snapshot'] and any(snap_data.values()):
            for snap_name in filter(bool, snap_data.values()):
                await self.middleware.call(
                    'zfs.snapshot.rollback', snap_name, {
                        'force': True,
                        'recursive': True,
                        'recursive_clones': True,
                        'recursive_rollback': True,
                    })
                break

        await self.middleware.call(
            'chart.release.scale_release_internal',
            release['resources'],
            None,
            scale_stats['before_scale'],
            True,
        )
        await self.middleware.call(
            'chart.release.clear_chart_release_portal_cache', release_name)

        job.set_progress(100, 'Rollback complete for chart release')

        await self.middleware.call(
            'chart.release.chart_releases_update_checks_internal',
            [['id', '=', release_name]])

        if cp.returncode:
            # This means that helm partially rolled back k8s resources and recognizes the chart release as being
            # on the previous version, we should raise an appropriate exception explaining the behavior
            raise CallError(
                f'Failed to complete rollback {release_name!r} chart release to {rollback_version}. Chart release\'s '
                f'datasets have been rolled back to {rollback_version!r} version\'s snapshot. Errors encountered '
                f'during rollback were: {cp.stderr.decode()}')

        if await self.middleware.call(
                'chart.release.get_chart_releases_consuming_outdated_certs',
            [['id', '=', release_name]]):
            await self.middleware.call('chart.release.update', release_name,
                                       {'values': {}})

        return await self.middleware.call('chart.release.get_instance',
                                          release_name)

    @private
    def remove_old_upgraded_chart_version_copies(self, charts_path,
                                                 current_version):
        c_v = parse_version(current_version)
        for v_path in filter(lambda p: p != current_version,
                             os.listdir(charts_path)):
            if parse_version(v_path) > c_v:
                shutil.rmtree(path=os.path.join(charts_path, v_path),
                              ignore_errors=True)
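
The pruning rule in `remove_old_upgraded_chart_version_copies` keeps the version rolled back to and anything older, deleting only strictly newer copies; a minimal sketch, assuming `parse_version` is the `packaging` one (the middleware may import it from elsewhere):

from packaging.version import parse as parse_version

versions = ['1.0.0', '1.1.0', '1.2.3', '2.0.0']   # hypothetical on-disk chart copies
current = parse_version('1.1.0')                  # version we rolled back to
to_remove = [v for v in versions if v != '1.1.0' and parse_version(v) > current]
print(to_remove)                                  # ['1.2.3', '2.0.0']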
Example #17
class KubernetesDeploymentService(CRUDService):

    class Config:
        namespace = 'k8s.deployment'
        private = True

    @filterable
    async def query(self, filters, options):
        async with api_client() as (api, context):
            if len(filters) == 1 and len(filters[0]) == 3 and list(filters[0])[:2] == ['metadata.namespace', '=']:
                func = functools.partial(context['apps_api'].list_namespaced_deployment, namespace=filters[0][2])
            else:
                func = functools.partial(context['apps_api'].list_deployment_for_all_namespaces)

            deployments = [d.to_dict() for d in (await func()).items]
            if options['extra'].get('events'):
                events = await self.middleware.call(
                    'kubernetes.get_events_of_resource_type', 'Deployment', [d['metadata']['uid'] for d in deployments]
                )
                for deployment in deployments:
                    deployment['events'] = events[deployment['metadata']['uid']]

        return filter_list(deployments, filters, options)

    @accepts(
        Dict(
            'deployment_create',
            Str('namespace', required=True),
            Dict('body', additional_attrs=True, required=True),
            register=True
        )
    )
    async def do_create(self, data):
        async with api_client() as (api, context):
            try:
                await context['apps_api'].create_namespaced_deployment(namespace=data['namespace'], body=data['body'])
            except client.exceptions.ApiException as e:
                raise CallError(f'Unable to create deployment: {e}')
            else:
                return await self.query([
                    ['metadata.name', '=', data['body']['metadata']['name']],
                    ['metadata.namespace', '=', data['namespace']],
                ], {'get': True})

    @accepts(
        Str('name'),
        Ref('deployment_create'),
    )
    async def do_update(self, name, data):
        async with api_client() as (api, context):
            try:
                await context['apps_api'].patch_namespaced_deployment(
                    name, namespace=data['namespace'], body=data['body']
                )
            except client.exceptions.ApiException as e:
                raise CallError(f'Unable to patch {name} deployment: {e}')
            else:
                return await self.query([
                    ['metadata.name', '=', name],
                    ['metadata.namespace', '=', data['namespace']],
                ], {'get': True})

    @accepts(
        Str('name'),
        Dict(
            'deployment_delete_options',
            Str('namespace', required=True),
        )
    )
    async def do_delete(self, name, options):
        async with api_client() as (api, context):
            try:
                await context['apps_api'].delete_namespaced_deployment(name, options['namespace'])
            except client.exceptions.ApiException as e:
                raise CallError(f'Unable to delete deployment: {e}')
            else:
                return True
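
The `query` method above dispatches to the cheaper namespaced list call only when given exactly one `['metadata.namespace', '=', <ns>]` filter; a standalone sketch of that dispatch, with `api` standing in for the kubernetes apps API object:

import functools

def pick_lister(filters, api):
    # a single namespace-equality filter short-circuits to the namespaced call
    if len(filters) == 1 and len(filters[0]) == 3 and list(filters[0])[:2] == ['metadata.namespace', '=']:
        return functools.partial(api.list_namespaced_deployment, namespace=filters[0][2])
    # anything else lists across all namespaces and filters afterwards
    return functools.partial(api.list_deployment_for_all_namespaces)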
Example #18
class KerberosKeytabService(TDBWrapCRUDService):
    class Config:
        datastore = 'directoryservice.kerberoskeytab'
        datastore_prefix = 'keytab_'
        namespace = 'kerberos.keytab'
        cli_namespace = 'directory_service.kerberos.keytab'

    ENTRY = Patch(
        'kerberos_keytab_create',
        'kerberos_keytab_entry',
        ('add', Int('id')),
    )

    @accepts(
        Dict('kerberos_keytab_create',
             Str('file', max_length=None),
             Str('name'),
             register=True))
    async def do_create(self, data):
        """
        Create a kerberos keytab. Uploaded keytab files will be merged with the system
        keytab under /etc/krb5.keytab.

        `file` b64encoded kerberos keytab
        `name` name for kerberos keytab
        """
        verrors = ValidationErrors()

        verrors.add_child('kerberos_principal_create',
                          await self._validate(data))

        if verrors:
            raise verrors

        id = await super().do_create(data)
        await self.middleware.call('etc.generate', 'kerberos')

        return await self._get_instance(id)

    @accepts(Int('id', required=True),
             Patch(
                 'kerberos_keytab_create',
                 'kerberos_keytab_update',
             ))
    async def do_update(self, id, data):
        """
        Update kerberos keytab by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        verrors.add_child('kerberos_principal_update',
                          await self._validate(new))

        if verrors:
            raise verrors

        await super().do_update(id, new)
        await self.middleware.call('etc.generate', 'kerberos')

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete kerberos keytab by id, and force regeneration of
        system keytab.
        """
        await super().do_delete(id)
        if os.path.exists(keytab['SYSTEM'].value):
            os.remove(keytab['SYSTEM'].value)
        await self.middleware.call('etc.generate', 'kerberos')
        await self._cleanup_kerberos_principals()
        await self.middleware.call('kerberos.stop')
        try:
            await self.middleware.call('kerberos.start')
        except Exception as e:
            self.logger.debug(
                'Failed to start kerberos service after deleting keytab entry: %s',
                e)

    @accepts(Dict(
        'keytab_data',
        Str('name', required=True),
    ))
    @returns(Ref('kerberos_keytab_entry'))
    @job(lock='upload_keytab', pipes=['input'], check_pipes=True)
    async def upload_keytab(self, job, data):
        """
        Upload a keytab file. This method expects the keytab file to be uploaded using
        the /_upload/ endpoint.
        """
        ktmem = io.BytesIO()
        await self.middleware.run_in_thread(shutil.copyfileobj,
                                            job.pipes.input.r, ktmem)
        b64kt = base64.b64encode(ktmem.getvalue())
        return await self.middleware.call('kerberos.keytab.create', {
            'name': data['name'],
            'file': b64kt.decode()
        })

    @private
    async def legacy_validate(self, keytab):
        err = await self._validate({'file': keytab})
        try:
            err.check()
        except Exception as e:
            raise CallError(e)

    @private
    async def _cleanup_kerberos_principals(self):
        principal_choices = await self.middleware.call(
            'kerberos.keytab.kerberos_principal_choices')
        ad = await self.middleware.call('activedirectory.config')
        ldap = await self.middleware.call('ldap.config')
        if ad['kerberos_principal'] and ad[
                'kerberos_principal'] not in principal_choices:
            await self.middleware.call('activedirectory.update',
                                       {'kerberos_principal': ''})
        if ldap['kerberos_principal'] and ldap[
                'kerberos_principal'] not in principal_choices:
            await self.middleware.call('ldap.update',
                                       {'kerberos_principal': ''})

    @private
    async def do_ktutil_list(self, data):
        kt = data.get("kt_name", keytab.SYSTEM.value)
        ktutil = await run(["klist", "-tek", kt], check=False)
        if ktutil.returncode != 0:
            raise CallError(ktutil.stderr.decode())
        ret = ktutil.stdout.decode().splitlines()
        if len(ret) < 4:
            return []

        return '\n'.join(ret[3:])

    @private
    async def _validate(self, data):
        """
        For now validation is limited to checking if we can resolve the hostnames
        configured for the kdc, admin_server, and kpasswd_server can be resolved
        by DNS, and if the realm can be resolved by DNS.
        """
        verrors = ValidationErrors()
        try:
            decoded = base64.b64decode(data['file'])
        except Exception as e:
            verrors.add(
                "kerberos.keytab_create",
                f"Keytab is a not a properly base64-encoded string: [{e}]")
            return verrors

        with open(keytab['TEST'].value, "wb") as f:
            f.write(decoded)

        try:
            await self.do_ktutil_list({"kt_name": keytab['TEST'].value})
        except CallError as e:
            verrors.add("kerberos.keytab_create",
                        f"Failed to validate keytab: [{e.errmsg}]")

        os.unlink(keytab['TEST'].value)

        return verrors

    @private
    async def _ktutil_list(self, keytab_file=keytab['SYSTEM'].value):
        keytab_entries = []
        try:
            kt_list_output = await self.do_ktutil_list(
                {"kt_name": keytab_file})
        except Exception as e:
            self.logger.warning("Failed to list kerberos keytab [%s]: %s",
                                keytab_file, e)
            kt_list_output = None

        if not kt_list_output:
            return keytab_entries

        for idx, line in enumerate(kt_list_output.splitlines()):
            fields = line.split()
            keytab_entries.append({
                'slot': idx + 1,
                'kvno': int(fields[0]),
                'principal': fields[3],
                'etype': fields[4][1:-1].strip('DEPRECATED:'),
                'etype_deprecated': fields[4][1:].startswith('DEPRECATED'),
                'date': time.strptime(fields[1], '%m/%d/%y'),
            })

        return keytab_entries

    @accepts()
    @returns(
        List('system-keytab',
             items=[
                 Dict('keytab-entry', Int('slot'), Int('kvno'),
                      Str('principal'), Str('etype'), Bool('etype_deprecated'),
                      Datetime('date'))
             ]))
    async def system_keytab_list(self):
        """
        Returns content of system keytab (/etc/krb5.keytab).
        """
        kt_list = await self._ktutil_list()
        parsed = []
        for entry in kt_list:
            entry['date'] = time.mktime(entry['date'])
            parsed.append(entry)

        return parsed

    @private
    async def _get_nonsamba_principals(self, keytab_list):
        """
        Generate list of Kerberos principals that are not the AD machine account.
        """
        ad = await self.middleware.call('activedirectory.config')
        pruned_list = []
        for i in keytab_list:
            if ad['netbiosname'].casefold() not in i['principal'].casefold():
                pruned_list.append(i)

        return pruned_list

    @private
    async def _generate_tmp_keytab(self):
        """
        Generate a temporary keytab to separate out the machine account keytab principal.
        ktutil copy returns 1 even if copy succeeds.
        """
        with contextlib.suppress(OSError):
            os.remove(keytab['SAMBA'].value)

        kt_copy = await Popen(['ktutil'],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              stdin=subprocess.PIPE)
        output = await kt_copy.communicate(
            f'rkt {keytab.SYSTEM.value}\nwkt {keytab.SAMBA.value}\nq\n'.encode(
            ))
        if output[1]:
            raise CallError(
                f"failed to generate [{keytab['SAMBA'].value}]: {output[1].decode()}"
            )

    @private
    async def _prune_keytab_principals(self, to_delete=None):
        """
        Delete all keytab entries from the tmp keytab that are not samba entries.
        The pruned keytab must be written to a new file to avoid duplication of
        entries.
        """
        to_delete = to_delete or []
        rkt = f"rkt {keytab.SAMBA.value}"
        wkt = "wkt /var/db/system/samba4/samba_mit.keytab"
        delents = "\n".join(f"delent {x['slot']}" for x in reversed(to_delete))
        ktutil_remove = await Popen(['ktutil'],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    stdin=subprocess.PIPE)
        output = await ktutil_remove.communicate(
            f'{rkt}\n{delents}\n{wkt}\nq\n'.encode())
        if output[1]:
            raise CallError(output[1].decode())

        with contextlib.suppress(OSError):
            os.remove(keytab.SAMBA.value)

        os.rename("/var/db/system/samba4/samba_mit.keytab", keytab.SAMBA.value)

    @private
    async def kerberos_principal_choices(self):
        """
        Keytabs typically have multiple entries for same principal (differentiated by enc_type).
        Since the enctype isn't relevant in this situation, only show unique principal names.

        Return empty list if system keytab doesn't exist.
        """
        if not os.path.exists(keytab['SYSTEM'].value):
            return []

        try:
            keytab_list = await self._ktutil_list()
        except Exception as e:
            self.logger.trace(
                '"ktutil list" failed. Generating empty list of kerberos '
                'principal choices. Error: %s', e)
            return []

        kerberos_principals = []
        for entry in keytab_list:
            if entry['principal'] not in kerberos_principals:
                kerberos_principals.append(entry['principal'])

        return sorted(kerberos_principals)

    @private
    async def has_nfs_principal(self):
        """
        This method checks whether the kerberos keytab contains an nfs service principal
        """
        principals = await self.kerberos_principal_choices()
        for p in principals:
            if p.startswith("nfs/"):
                return True

        return False

    @private
    async def store_samba_keytab(self):
        """
        Samba will automatically generate system keytab entries for the AD machine account
        (netbios name with '$' appended), and maintain them through machine account password changes.

        Copy the system keytab, parse it, and update the corresponding keytab entry in the freenas configuration
        database.

        The current system kerberos keytab is also compared with a cached copy before it is overwritten when a
        new keytab is generated through middleware 'etc.generate kerberos'.
        """
        if not os.path.exists(keytab['SYSTEM'].value):
            return False

        encoded_keytab = None
        keytab_list = await self._ktutil_list()
        items_to_remove = await self._get_nonsamba_principals(keytab_list)
        await self._generate_tmp_keytab()
        await self._prune_keytab_principals(items_to_remove)
        with open(keytab['SAMBA'].value, 'rb') as f:
            encoded_keytab = base64.b64encode(f.read())

        if not encoded_keytab:
            self.logger.debug(
                f"Failed to generate b64encoded version of {keytab['SAMBA'].name}"
            )
            return False

        keytab_file = encoded_keytab.decode()
        entry = await self.query([('name', '=', 'AD_MACHINE_ACCOUNT')])
        if not entry:
            await self.middleware.call('kerberos.keytab.direct_create', {
                'name': 'AD_MACHINE_ACCOUNT',
                'file': keytab_file
            })
        else:
            id = entry[0]['id']
            updated_entry = {'name': 'AD_MACHINE_ACCOUNT', 'file': keytab_file}
            await self.middleware.call('kerberos.keytab.direct_update', id,
                                       updated_entry)

        sambakt = await self.query([('name', '=', 'AD_MACHINE_ACCOUNT')])
        if sambakt:
            return sambakt[0]['id']

    @periodic(3600)
    @private
    async def check_updated_keytab(self):
        """
        Check mtime of current kerberos keytab. If it has changed since last check,
        assume that samba has updated it behind the scenes and that the configuration
        database needs to be updated to reflect the change.
        """
        if not await self.middleware.call('system.ready'):
            return

        old_mtime = 0
        ad_state = await self.middleware.call('activedirectory.get_state')
        if ad_state == 'DISABLED' or not os.path.exists(
                keytab['SYSTEM'].value):
            return

        if await self.middleware.call("smb.get_smb_ha_mode") in ("LEGACY", "CLUSTERED"):
            return

        if await self.middleware.call('cache.has_key', 'KEYTAB_MTIME'):
            old_mtime = await self.middleware.call('cache.get', 'KEYTAB_MTIME')

        new_mtime = (os.stat(keytab['SYSTEM'].value)).st_mtime
        if old_mtime == new_mtime:
            return

        ts = await self.middleware.call(
            'directoryservices.get_last_password_change')
        if ts['dbconfig'] == ts['secrets']:
            return

        self.logger.debug(
            "Machine account password has changed. Stored copies of "
            "kerberos keytab and directory services secrets will now "
            "be updated.")

        await self.middleware.call('directoryservices.backup_secrets')
        await self.store_samba_keytab()
        self.logger.trace('Updating stored AD machine account kerberos keytab')
        await self.middleware.call('cache.put', 'KEYTAB_MTIME',
                                   (os.stat(keytab['SYSTEM'].value)).st_mtime)
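
The field positions assumed by `_ktutil_list` above correspond to `klist -tek` output of the form `kvno date time principal (etype)`; a minimal sketch over one illustrative line (not captured output):

import time

line = '3 08/11/21 10:14:22 host/nas.example.com@EXAMPLE.COM (aes256-cts-hmac-sha1-96)'
fields = line.split()
entry = {
    'kvno': int(fields[0]),                        # key version number
    'date': time.strptime(fields[1], '%m/%d/%y'),  # only the date portion is parsed
    'principal': fields[3],
    'etype': fields[4][1:-1],                      # strip the surrounding parentheses
}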
Example #19
class KerberosService(TDBWrapConfigService):
    tdb_defaults = {"id": 1, "appdefaults_aux": "", "libdefaults_aux": ""}

    class Config:
        service = "kerberos"
        datastore = 'directoryservice.kerberossettings'
        datastore_prefix = "ks_"
        cli_namespace = "directory_service.kerberos.settings"

    @accepts(
        Dict('kerberos_settings_update',
             Str('appdefaults_aux', max_length=None),
             Str('libdefaults_aux', max_length=None),
             update=True))
    async def do_update(self, data):
        """
        `appdefaults_aux` adds parameters to the "appdefaults" section of the krb5.conf file.

        `libdefaults_aux` adds parameters to the "libdefaults" section of the krb5.conf file.
        """
        verrors = ValidationErrors()

        old = await self.config()
        new = old.copy()
        new.update(data)
        verrors.add_child(
            'kerberos_settings_update',
            await self._validate_appdefaults(new['appdefaults_aux']))
        verrors.add_child(
            'kerberos_settings_update',
            await self._validate_libdefaults(new['libdefaults_aux']))
        verrors.check()

        await super().do_update(data)

        await self.middleware.call('etc.generate', 'kerberos')
        return await self.config()

    @private
    @accepts(
        Dict('kerberos-options',
             Str('ccache',
                 enum=[x.name for x in krb5ccache],
                 default=krb5ccache.SYSTEM.name),
             register=True))
    async def _klist_test(self, data):
        """
        Returns false if there is not a TGT or if the TGT has expired.
        """
        krb_ccache = krb5ccache[data['ccache']]
        klist = await run(['klist', '-s', '-c', krb_ccache.value], check=False)
        if klist.returncode != 0:
            return False

        return True

    @private
    async def check_ticket(self):
        valid_ticket = await self._klist_test()
        if not valid_ticket:
            raise CallError("Kerberos ticket is required.", errno.ENOKEY)

        return

    @private
    async def _validate_param_type(self, data):
        supported_validation_types = [
            'boolean',
            'cctype',
            'etypes',
            'keytab',
        ]
        if data['ptype'] not in supported_validation_types:
            return

        if data['ptype'] == 'boolean':
            if data['value'].upper() not in ['YES', 'TRUE', 'NO', 'FALSE']:
                raise CallError(f'[{data["value"]}] is not boolean')

        if data['ptype'] == 'etypes':
            for e in data['value'].split(' '):
                try:
                    KRB_ETYPE(e)
                except Exception:
                    raise CallError(
                        f'[{e}] is not a supported encryption type')

        if data['ptype'] == 'cctype':
            available_types = ['FILE', 'MEMORY', 'DIR']
            if data['value'] not in available_types:
                raise CallError(
                    f'[{data["value"]}] is an unsupported cctype. '
                    f'Available types are {", ".join(available_types)}. '
                    'This parameter is case-sensitive')

        if data['ptype'] == 'keytab':
            try:
                keytab(data['value'])
            except Exception:
                raise CallError(
                    f'{data["value"]} is an unsupported keytab path')

    @private
    async def _validate_appdefaults(self, appdefaults):
        verrors = ValidationErrors()
        for line in appdefaults.splitlines():
            param = line.split('=')
            if len(param) == 2 and (param[1].strip())[0] != '{':
                validated_param = list(
                    filter(lambda x: param[0].strip() in (x.value)[0],
                           KRB_AppDefaults))

                if not validated_param:
                    verrors.add(
                        'kerberos_appdefaults',
                        f'{param[0]} is an invalid appdefaults parameter.')
                    continue

                try:
                    await self._validate_param_type({
                        'ptype': (validated_param[0]).value[1],
                        'value':
                        param[1].strip()
                    })
                except Exception as e:
                    verrors.add('kerberos_appdefaults',
                                f'{param[0]} has invalid value: {e.errmsg}.')
                    continue

        return verrors

    @private
    async def _validate_libdefaults(self, libdefaults):
        verrors = ValidationErrors()
        for line in libdefaults.splitlines():
            param = line.split('=')
            if len(param) == 2:
                validated_param = list(
                    filter(lambda x: param[0].strip() in (x.value)[0],
                           KRB_LibDefaults))

                if not validated_param:
                    verrors.add(
                        'kerberos_libdefaults',
                        f'{param[0]} is an invalid libdefaults parameter.')
                    continue

                try:
                    await self._validate_param_type({
                        'ptype': (validated_param[0]).value[1],
                        'value':
                        param[1].strip()
                    })
                except Exception as e:
                    verrors.add('kerberos_libdefaults',
                                f'{param[0]} has invalid value: {e.errmsg}.')

            else:
                verrors.add('kerberos_libdefaults',
                            f'{line} is an invalid libdefaults parameter.')

        return verrors

    @private
    @accepts(
        Dict(
            "get-kerberos-creds",
            Str("dstype", required=True, enum=[x.name for x in DSType]),
            OROperator(Dict('ad_parameters', Str('bindname'), Str('bindpw'),
                            Str('domainname'), Str('kerberos_principal')),
                       Dict('ldap_parameters', Str('binddn'), Str('bindpw'),
                            Int('kerberos_realm'), Str('kerberos_principal')),
                       name='conf',
                       required=True)))
    async def get_cred(self, data):
        '''
        Get kerberos cred from directory services config to use for `do_kinit`.
        '''
        conf = data.get('conf', {})
        if conf.get('kerberos_principal'):
            return {'kerberos_principal': conf['kerberos_principal']}

        verrors = ValidationErrors()
        dstype = DSType[data['dstype']]
        if dstype is DSType.DS_TYPE_ACTIVEDIRECTORY:
            for k in ['bindname', 'bindpw', 'domainname']:
                if not conf.get(k):
                    verrors.add(f'conf.{k}', 'Parameter is required.')

            verrors.check()
            return {
                'username': f'{conf["bindname"]}@{conf["domainname"].upper()}',
                'password': conf['bindpw']
            }

        for k in ['binddn', 'bindpw', 'kerberos_realm']:
            if not conf.get(k):
                verrors.add(f'conf.{k}', 'Parameter is required.')

        verrors.check()
        krb_realm = await self.middleware.call(
            'kerberos.realm.query', [('id', '=', conf['kerberos_realm'])],
            {'get': True})
        bind_cn = (conf['binddn'].split(','))[0].split("=")
        return {
            'username': f'{bind_cn[1]}@{krb_realm["realm"]}',
            'password': conf['bindpw']
        }

    @private
    @accepts(
        Dict(
            'do_kinit',
            OROperator(
                Dict('kerberos_username_password',
                     Str('username', required=True),
                     Str('password', required=True, private=True),
                     register=True),
                Dict(
                    'kerberos_keytab',
                    Str('kerberos_principal', required=True),
                ),
                name='krb5_cred',
                required=True,
            ),
            Patch(
                'kerberos-options',
                'kinit-options',
                ('add', {
                    'name': 'renewal_period',
                    'type': 'int',
                    'default': 7
                }),
            )))
    async def do_kinit(self, data):
        ccache = krb5ccache[data['kinit-options']['ccache']]
        cmd = [
            'kinit', '-r',
            str(data['kinit-options']['renewal_period']), '-c', ccache.value
        ]
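        # '-r' requests a renewable ticket lifetime (renewal_period, presumably
        # days) and '-c' selects the credential cache chosen via kinit-options.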
        creds = data['krb5_cred']
        has_principal = 'kerberos_principal' in creds

        if has_principal:
            cmd.extend(['-k', creds['kerberos_principal']])
            kinit = await run(cmd, check=False)
            if kinit.returncode != 0:
                raise CallError(
                    f"kinit with principal [{creds['kerberos_principal']}] "
                    f"failed: {kinit.stderr.decode()}")
            return

        cmd.append(creds['username'])
        kinit = await Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            stdin=subprocess.PIPE)

        output = await kinit.communicate(input=creds['password'].encode())
        if kinit.returncode != 0:
            raise CallError(
                f"kinit with password failed: {output[1].decode()}")

        return True

    @private
    async def _kinit(self):
        """
        For now we only check for kerberos realms explicitly configured in AD and LDAP.
        """
        ad = await self.middleware.call('activedirectory.config')
        ldap = await self.middleware.call('ldap.config')
        await self.middleware.call('etc.generate', 'kerberos')
        payload = {}

        if ad['enable']:
            payload = {
                'dstype': DSType.DS_TYPE_ACTIVEDIRECTORY.name,
                'conf': {
                    'bindname': ad['bindname'],
                    'bindpw': ad['bindpw'],
                    'domainname': ad['domainname'],
                    'kerberos_principal': ad['kerberos_principal'],
                }
            }

        if ldap['enable'] and ldap['kerberos_realm']:
            payload = {
                'dstype': DSType.DS_TYPE_LDAP.name,
                'conf': {
                    'binddn': ldap['binddn'],
                    'bindpw': ldap['bindpw'],
                    'kerberos_realm': ldap['kerberos_realm'],
                    'kerberos_principal': ldap['kerberos_principal'],
                }
            }

        if not payload:
            return

        cred = await self.get_cred(payload)
        await self.do_kinit({'krb5_cred': cred})

    @private
    async def parse_klist(self, data):
        ad = data.get("ad")
        ldap = data.get("ldap")
        klistin = data.get("klistin")
        tickets = klistin.splitlines()
        default_principal = None
        tlen = len(tickets)

        if ad['enable']:
            dstype = DSType.DS_TYPE_ACTIVEDIRECTORY
        elif ldap['enable']:
            dstype = DSType.DS_TYPE_LDAP
        else:
            return {"ad_TGT": [], "ldap_TGT": []}

        parsed_klist = []
        for idx, e in enumerate(tickets):
            if e.startswith('Default'):
                default_principal = (e.split(':')[1]).strip()
            if e and e[0].isdigit():
                d = e.split("  ")
                issued = time.strptime(d[0], "%m/%d/%y %H:%M:%S")
                expires = time.strptime(d[1], "%m/%d/%y %H:%M:%S")
                client = default_principal
                server = d[2]
                flags = None
                etype = None
                next_two = [idx + 1, idx + 2]
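                # klist prints flags and etype on up to two indented
                # continuation lines after each ticket line; peek ahead at them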
                for i in next_two:
                    if i >= tlen:
                        break
                    if tickets[i][0].isdigit():
                        break
                    if tickets[i].startswith("\tEtype"):
                        etype = tickets[i].strip()
                        break
                    if tickets[i].startswith("\trenew"):
                        flags = tickets[i].split("Flags: ")[1]
                        continue

                    extra = tickets[i].split(", ", 1)
                    flags = extra[0].strip()
                    etype = extra[1].strip()

                parsed_klist.append({
                    'issued': issued,
                    'expires': expires,
                    'client': client,
                    'server': server,
                    'etype': etype,
                    'flags': flags,
                })

        return {
            "ad_TGT":
            parsed_klist if dstype == DSType.DS_TYPE_ACTIVEDIRECTORY else [],
            "ldap_TGT":
            parsed_klist if dstype == DSType.DS_TYPE_LDAP else [],
        }

    @private
    async def _get_cached_klist(self):
        """
        Try to retrieve cached kerberos TGT info. If it hasn't been cached,
        run klist, parse it, put it in the cache, then return it.
        """
        if await self.middleware.call('cache.has_key', 'KRB_TGT_INFO'):
            return (await self.middleware.call('cache.get', 'KRB_TGT_INFO'))
        ad = await self.middleware.call('activedirectory.config')
        ldap = await self.middleware.call('ldap.config')
        ad_TGT = []
        ldap_TGT = []
        parsed_klist = {}
        if not ad['enable'] and not ldap['enable']:
            return {'ad_TGT': ad_TGT, 'ldap_TGT': ldap_TGT}
        if not ad['enable'] and not ldap['kerberos_realm']:
            return {'ad_TGT': ad_TGT, 'ldap_TGT': ldap_TGT}

        if not await self.status():
            await self.start()
        try:
            klist = await asyncio.wait_for(run(['klist', '-ef'],
                                               check=False,
                                               stdout=subprocess.PIPE),
                                           timeout=10.0)
        except Exception as e:
            await self.stop()
            raise CallError(
                f"Attempt to list kerberos tickets failed with error: {e}")

        if klist.returncode != 0:
            await self.stop()
            raise CallError(
                f'klist failed with error: {klist.stderr.decode()}')

        klist_output = klist.stdout.decode()

        parsed_klist = await self.parse_klist({
            "klistin": klist_output,
            "ad": ad,
            "ldap": ldap,
        })

        if parsed_klist['ad_TGT'] or parsed_klist['ldap_TGT']:
            await self.middleware.call('cache.put', 'KRB_TGT_INFO',
                                       parsed_klist)

        return parsed_klist

    @private
    async def renew(self):
        """
        Compare timestamp of cached TGT info with current timestamp. If we're within 5 minutes
        of expire time, renew the TGT via 'kinit -R'.
        """
        tgt_info = await self._get_cached_klist()
        ret = True

        must_renew = False
        must_reinit = False
        if not tgt_info['ad_TGT'] and not tgt_info['ldap_TGT']:
            must_reinit = True

        if tgt_info['ad_TGT']:
            permitted_buffer = datetime.timedelta(minutes=5)
            current_time = datetime.datetime.now()
            for entry in tgt_info['ad_TGT']:
                tgt_expiry_time = datetime.datetime.fromtimestamp(
                    time.mktime(entry['expires']))
                delta = tgt_expiry_time - current_time
                if datetime.timedelta(minutes=0) > delta:
                    must_reinit = True
                    break
                if permitted_buffer > delta:
                    must_renew = True
                    break

        if tgt_info['ldap_TGT']:
            permitted_buffer = datetime.timedelta(minutes=5)
            current_time = datetime.datetime.now()
            for entry in tgt_info['ldap_TGT']:
                tgt_expiry_time = datetime.datetime.fromtimestamp(
                    time.mktime(entry['expires']))
                delta = tgt_expiry_time - current_time
                if datetime.timedelta(minutes=0) > delta:
                    must_reinit = True
                    break
                if permitted_buffer > delta:
                    must_renew = True
                    break

        if must_renew and not must_reinit:
            try:
                kinit = await asyncio.wait_for(run(['kinit', '-R'],
                                                   check=False),
                                               timeout=15)
                if kinit.returncode != 0:
                    raise CallError(
                        f'kinit -R failed with error: {kinit.stderr.decode()}')
                self.logger.debug('Successfully renewed kerberos TGT')
                await self.middleware.call('cache.pop', 'KRB_TGT_INFO')
            except asyncio.TimeoutError:
                self.logger.debug(
                    'Attempt to renew kerberos TGT failed after 15 seconds.')

        if must_reinit:
            ret = await self.start()
            await self.middleware.call('cache.pop', 'KRB_TGT_INFO')

        return ret

    @private
    async def status(self):
        """
        Experience in production environments has indicated that klist can hang
        indefinitely. Fail if we hang for more than 10 seconds. This should force
        a kdestroy and new attempt to kinit (depending on why we are checking status).
        _klist_test will return false if there is not a TGT or if the TGT has expired.
        """
        try:
            ret = await asyncio.wait_for(self._klist_test(), timeout=10.0)
            return ret
        except asyncio.TimeoutError:
            self.logger.debug(
                'kerberos ticket status check timed out after 10 seconds.')
            return False

    @private
    @accepts(Ref('kerberos-options'))
    async def kdestroy(self, data):
        kdestroy = await run(
            ['kdestroy', '-c', krb5ccache[data['ccache']].value], check=False)
        if kdestroy.returncode != 0:
            raise CallError(
                f'kdestroy failed with error: {kdestroy.stderr.decode()}')

        return

    @private
    async def stop(self):
        await self.middleware.call('cache.pop', 'KRB_TGT_INFO')
        await self.kdestroy()
        return True

    @private
    async def start(self, realm=None, kinit_timeout=30):
        """
        kinit can hang because it depends on DNS. If it has not returned within
        30 seconds, it is safe to say that it has failed.
        """
        await self.middleware.call('etc.generate', 'kerberos')
        try:
            await asyncio.wait_for(self._kinit(), timeout=kinit_timeout)
        except asyncio.TimeoutError:
            raise CallError(
                f'Timed out waiting for hung kinit after [{kinit_timeout}] seconds')
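
The renewal decision in `renew` above boils down to comparing ticket expiry against now with a 5-minute buffer; a minimal sketch using the same `time.strptime` representation the klist parser produces (the timestamp is illustrative):

import datetime
import time

expires = time.strptime('08/11/21 10:14:22', '%m/%d/%y %H:%M:%S')
expiry = datetime.datetime.fromtimestamp(time.mktime(expires))
delta = expiry - datetime.datetime.now()
must_reinit = delta < datetime.timedelta(minutes=0)                     # already expired
must_renew = not must_reinit and delta < datetime.timedelta(minutes=5)  # inside the buffer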
Example #20
class ChartReleaseService(Service):
    class Config:
        namespace = 'chart.release'

    @private
    async def retrieve_pod_with_containers(self, release_name):
        await self.middleware.call('kubernetes.validate_k8s_setup')
        release = await self.middleware.call('chart.release.query',
                                             [['id', '=', release_name]], {
                                                 'get': True,
                                                 'extra': {
                                                     'retrieve_resources': True
                                                 }
                                             })
        choices = {}
        for pod in release['resources']['pods']:
            choices[pod['metadata']['name']] = []
            for container in (pod['status']['container_statuses'] or []):
                choices[pod['metadata']['name']].append(container['name'])

        return choices

    @accepts(Str('release_name'))
    @returns(
        Dict(
            additional_attrs=True,
            example={'plex-d4559844b-zcgq9': ['plex']},
        ))
    async def pod_console_choices(self, release_name):
        """
        Returns choices for console access to a chart release.

        Output is a dictionary with pod names as keys; each value is the list of containers that make up the pod.
        """
        return await self.retrieve_pod_with_containers(release_name)

    @accepts(Str('release_name'))
    @returns(
        Dict(
            additional_attrs=True,
            example={'plex-d4559844b-zcgq9': ['plex']},
        ))
    async def pod_logs_choices(self, release_name):
        """
        Returns choices for accessing logs of any container in any pod in a chart release.
        """
        return await self.retrieve_pod_with_containers(release_name)

    @private
    async def validate_pod_log_args(self, release_name, pod_name,
                                    container_name):
        choices = await self.pod_logs_choices(release_name)
        if pod_name not in choices:
            raise CallError(f'Unable to locate {pod_name!r} pod.',
                            errno=errno.ENOENT)
        elif container_name not in choices[pod_name]:
            raise CallError(
                f'Unable to locate {container_name!r} container in {pod_name!r} pod.',
                errno=errno.ENOENT)

    @accepts(Str('release_name'),
             Dict(
                 'options',
                 Int('limit_bytes',
                     default=None,
                     null=True,
                     validators=[Range(min=1)]),
                 Int('tail_lines',
                     default=500,
                     validators=[Range(min=1)],
                     null=True),
                 Str('pod_name', required=True, empty=False),
                 Str('container_name', required=True, empty=False),
             ))
    @returns()
    @job(lock='chart_release_logs', pipes=['output'])
    def pod_logs(self, job, release_name, options):
        """
        Export logs of `options.container_name` container in `options.pod_name` pod in `release_name` chart release.

        `options.tail_lines` selects how many lines of logs to retrieve for the specified container. It defaults
        to 500. If set to `null`, the complete logs of the container are retrieved.

        `options.limit_bytes` selects how many bytes to retrieve from the selected tail lines. If set to null
        (which is the default), the bytes returned are not limited. To clarify, `options.tail_lines` is applied
        first to retrieve the required number of lines, and then `options.limit_bytes` is applied.

        Please refer to websocket documentation for downloading the file.
        """
        self.middleware.call_sync('chart.release.validate_pod_log_args',
                                  release_name, options['pod_name'],
                                  options['container_name'])

        logs = self.middleware.call_sync('k8s.pod.get_logs',
                                         options['pod_name'],
                                         options['container_name'],
                                         get_namespace(release_name),
                                         options['tail_lines'],
                                         options['limit_bytes'])
        job.pipes.output.w.write((logs or '').encode())

    @accepts()
    @returns(Dict(additional_attrs=True))
    async def nic_choices(self):
        """
        Available choices for NIC which can be added to a pod in a chart release.
        """
        return await self.middleware.call('interface.choices')

    @accepts()
    @returns(List(items=[Int('used_port')]))
    async def used_ports(self):
        """
        Returns ports in use by applications.
        """
        return sorted({
            port['port']
            for chart_release in await self.middleware.call('chart.release.query')
            for port in chart_release['used_ports']
        })

    @accepts()
    @returns(List(items=[Ref('certificate_entry')]))
    async def certificate_choices(self):
        """
        Returns certificates which can be used by applications.
        """
        return await self.middleware.call(
            'certificate.query',
            [['revoked', '=', False], ['cert_type_CSR', '=', False],
             ['parsed', '=', True]])

    @accepts()
    @returns(List(items=[Ref('certificateauthority_entry')]))
    async def certificate_authority_choices(self):
        """
        Returns certificate authorities which can be used by applications.
        """
        return await self.middleware.call(
            'certificateauthority.query',
            [['revoked', '=', False], ['parsed', '=', True]])

    @private
    async def retrieve_pv_pvc_mapping(self, release_name):
        chart_release = await self.middleware.call(
            'chart.release.query', [['id', '=', release_name]], {
                'get': True,
                'extra': {
                    'retrieve_resources': True
                }
            })
        return await self.retrieve_pv_pvc_mapping_internal(chart_release)

    @private
    async def retrieve_pv_pvc_mapping_internal(self, chart_release):
        mapping = {}
        release_vol_ds = os.path.join(chart_release['dataset'], 'volumes')
        zfs_volumes = {
            zv['metadata']['name']: zv
            for zv in await self.middleware.call(
                'k8s.zv.query', [['spec.poolName', '=', release_vol_ds]])
        }

        for pv in chart_release['resources']['persistent_volumes']:
            claim_name = pv['spec'].get('claim_ref', {}).get('name')
            if claim_name:
                csi_spec = pv['spec']['csi']
                volumes_ds = csi_spec['volume_attributes'][
                    'openebs.io/poolname']
                if (volumes_ds != release_vol_ds
                        or csi_spec['volume_handle'] not in zfs_volumes):
                    # We only back up/restore PVCs which consume the release's own
                    # storage class and have a corresponding ZFS volume present
                    continue

                pv_name = pv['metadata']['name']
                mapping[claim_name] = {
                    'name': pv_name,
                    'pv_details': pv,
                    'dataset': os.path.join(volumes_ds,
                                            csi_spec['volume_handle']),
                    'zv_details': zfs_volumes[csi_spec['volume_handle']],
                }
        return mapping

    @private
    async def create_update_storage_class_for_chart_release(
            self, release_name, volumes_path):
        storage_class_name = get_storage_class_name(release_name)
        storage_class = await self.middleware.call(
            'k8s.storage_class.retrieve_storage_class_manifest')
        storage_class['metadata']['name'] = storage_class_name
        storage_class['parameters']['poolname'] = volumes_path
        if await self.middleware.call(
                'k8s.storage_class.query',
            [['metadata.name', '=', storage_class_name]]):
            await self.middleware.call('k8s.storage_class.update',
                                       storage_class_name, storage_class)
        else:
            await self.middleware.call('k8s.storage_class.create',
                                       storage_class)

    @accepts(Str('release_name'))
    @returns(
        Dict(
            Int('available', required=True), Int('desired', required=True),
            Str('status',
                required=True,
                enum=['ACTIVE', 'DEPLOYING', 'STOPPED'])))
    async def pod_status(self, release_name):
        """
        Retrieve available/desired pods status for a chart release and its current state.
        """
        status = {'available': 0, 'desired': 0}
        for resource in (Resources.DEPLOYMENT, Resources.STATEFULSET):
            for r_data in await self.middleware.call(
                    f'k8s.{resource.name.lower()}.query',
                [['metadata.namespace', '=',
                  get_namespace(release_name)]],
            ):
                # Detail about ready_replicas/replicas:
                # https://stackoverflow.com/questions/66317251/couldnt-understand-availablereplicas-readyreplicas-unavailablereplicas-in-dep
                status.update({
                    'available': (r_data['status']['ready_replicas'] or 0),
                    'desired': (r_data['status']['replicas'] or 0),
                })
        pod_diff = status['available'] - status['desired']
        r_status = 'ACTIVE'
        if pod_diff == 0 and status['desired'] == 0:
            r_status = 'STOPPED'
        elif pod_diff < 0:
            r_status = 'DEPLOYING'
        return {
            'status': r_status,
            **status,
        }
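
A minimal usage sketch for the service above, assuming a running TrueNAS
middleware daemon and the `middlewared.client.Client` websocket client; the
release name 'plex' is illustrative:

from middlewared.client import Client

with Client() as c:
    # e.g. {'plex-d4559844b-zcgq9': ['plex']}, as in the @returns example
    choices = c.call('chart.release.pod_console_choices', 'plex')
    # 'status' is STOPPED when available == desired == 0, DEPLOYING while
    # available < desired, and ACTIVE otherwise
    status = c.call('chart.release.pod_status', 'plex')
    print(choices, status)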
Ejemplo n.º 21
0
class FilesystemService(Service):

    @accepts(Str('path', required=True), Ref('query-filters'), Ref('query-options'))
    def listdir(self, path, filters=None, options=None):
        """
        Get the contents of a directory.

        Each entry of the list consists of:
          name(str): name of the file
          path(str): absolute path of the entry
          realpath(str): absolute real path of the entry (if SYMLINK)
          type(str): DIRECTORY | FILE | SYMLINK | OTHER
          size(int): size of the entry
          mode(int): file mode/permission
          uid(int): user id of entry owner
          gid(int): group id of entry owner
        """
        if not os.path.exists(path):
            raise CallError(f'Directory {path} does not exist', errno.ENOENT)

        if not os.path.isdir(path):
            raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)

        rv = []
        for entry in os.scandir(path):
            if entry.is_dir():
                etype = 'DIRECTORY'
            elif entry.is_file():
                etype = 'FILE'
            elif entry.is_symlink():
                etype = 'SYMLINK'
            else:
                etype = 'OTHER'

            data = {
                'name': entry.name,
                'path': entry.path,
                'realpath': os.path.realpath(entry.path) if etype == 'SYMLINK' else entry.path,
                'type': etype,
            }
            try:
                stat = entry.stat()
                data.update({
                    'size': stat.st_size,
                    'mode': stat.st_mode,
                    'uid': stat.st_uid,
                    'gid': stat.st_gid,
                })
            except FileNotFoundError:
                data.update({'size': None, 'mode': None, 'uid': None, 'gid': None})
            rv.append(data)
        return filter_list(rv, filters=filters or [], options=options or {})
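
    # Illustrative caller-side sketch (the path and filter are hypothetical),
    # using the same query-filters convention as elsewhere in the middleware:
    #
    #   middleware.call_sync('filesystem.listdir', '/mnt/tank',
    #                        [['type', '=', 'DIRECTORY']], {'order_by': ['name']})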

    @accepts(Str('path'))
    def stat(self, path):
        """
        Return the filesystem stat(2) for a given `path`.
        """
        try:
            stat = os.stat(path, follow_symlinks=False)
        except FileNotFoundError:
            raise CallError(f'Path {path} not found', errno.ENOENT)

        stat = {
            'size': stat.st_size,
            'mode': stat.st_mode,
            'uid': stat.st_uid,
            'gid': stat.st_gid,
            'atime': stat.st_atime,
            'mtime': stat.st_mtime,
            'ctime': stat.st_ctime,
            'dev': stat.st_dev,
            'inode': stat.st_ino,
            'nlink': stat.st_nlink,
        }

        try:
            stat['user'] = pwd.getpwuid(stat['uid']).pw_name
        except KeyError:
            stat['user'] = None

        try:
            stat['group'] = grp.getgrgid(stat['gid']).gr_name
        except KeyError:
            stat['group'] = None

        stat['acl'] = not self.middleware.call_sync('filesystem.acl_is_trivial', path)

        return stat

    @private
    @accepts(
        Str('path'),
        Str('content'),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    def file_receive(self, path, content, options=None):
        """
        Simplified file receiving method for small files.

        `content` must be the base64-encoded file content.

        DISCLAIMER: DO NOT USE THIS FOR BIG FILES (> 500KB).
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'
        with open(path, openmode) as f:
            f.write(binascii.a2b_base64(content))
        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True
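
    # Hedged caller-side sketch (not part of this service; names are
    # illustrative): `content` must be base64-encoded, e.g.
    #
    #   import base64
    #   content = base64.b64encode(b'tunable=1\n').decode()
    #   middleware.call_sync('filesystem.file_receive',
    #                        '/tmp/example.conf', content, {'mode': 0o600})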

    @private
    @accepts(
        Str('path'),
        Dict(
            'options',
            Int('offset'),
            Int('maxlen'),
        ),
    )
    def file_get_contents(self, path, options=None):
        """
        Get contents of file `path`, base64-encoded.

        DISCLAIMER: DO NOT USE THIS FOR BIG FILES (> 500KB).
        """
        options = options or {}
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as f:
            if options.get('offset'):
                f.seek(options['offset'])
            data = binascii.b2a_base64(f.read(options.get('maxlen'))).decode().strip()
        return data

    @accepts(Str('path'))
    @job(pipes=["output"])
    async def get(self, job, path):
        """
        Job to get contents of `path`.
        """

        if not os.path.isfile(path):
            raise CallError(f'{path} is not a file')

        with open(path, 'rb') as f:
            await self.middleware.run_in_thread(shutil.copyfileobj, f, job.pipes.output.w)

    @accepts(
        Str('path'),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    @job(pipes=["input"])
    async def put(self, job, path, options=None):
        """
        Job to put contents to `path`.
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'

        with open(path, openmode) as f:
            await self.middleware.run_in_thread(shutil.copyfileobj, job.pipes.input.r, f)

        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @accepts(Str('path'))
    def statfs(self, path):
        """
        Return stats from the filesystem of a given path.

        Raises:
            CallError(ENOENT) - Path not found
        """
        try:
            statfs = bsd.statfs(path)
        except FileNotFoundError:
            raise CallError('Path not found.', errno.ENOENT)
        return {
            **statfs.__getstate__(),
            'total_bytes': statfs.total_blocks * statfs.blocksize,
            'free_bytes': statfs.free_blocks * statfs.blocksize,
            'avail_bytes': statfs.avail_blocks * statfs.blocksize,
        }

    def __convert_to_basic_permset(self, permset):
        """
        Convert "advanced" ACL permset format to basic format using
        bitwise operation and constants defined in py-bsd/bsd/acl.pyx,
        py-bsd/defs.pxd and acl.h.

        If the advanced ACL can't be converted without losing
        information, we return 'OTHER'.

        Reverse process converts the constant's value to a dictionary
        using a bitwise operation.
        """
        perm = 0
        for k, v in permset.items():
            if v:
                perm |= acl.NFS4Perm[k]

        try:
            SimplePerm = (acl.NFS4BasicPermset(perm)).name
        except Exception:
            SimplePerm = 'OTHER'

        return SimplePerm

    def __convert_to_basic_flagset(self, flagset):
        flags = 0
        for k, v in flagset.items():
            if k == "INHERITED":
                continue
            if v:
                flags |= acl.NFS4Flag[k]

        try:
            SimpleFlag = (acl.NFS4BasicFlagset(flags)).name
        except Exception:
            SimpleFlag = 'OTHER'

        return SimpleFlag

    def __convert_to_adv_permset(self, basic_perm):
        permset = {}
        perm_mask = acl.NFS4BasicPermset[basic_perm].value
        for name, member in acl.NFS4Perm.__members__.items():
            if perm_mask & member.value:
                permset.update({name: True})
            else:
                permset.update({name: False})

        return permset

    def __convert_to_adv_flagset(self, basic_flag):
        flagset = {}
        flag_mask = acl.NFS4BasicFlagset[basic_flag].value
        for name, member in acl.NFS4Flag.__members__.items():
            if flag_mask & member.value:
                flagset.update({name: True})
            else:
                flagset.update({name: False})

        return flagset

    @accepts(Str('path'))
    def acl_is_trivial(self, path):
        """
        ACL is trivial if it can be fully expressed as a file mode without losing
        any access rules. This is intended to be used as a check before allowing
        users to chmod() through the webui.
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)
        a = acl.ACL(file=path)
        return a.is_trivial

    @accepts(
        Dict(
            'filesystem_ownership',
            Str('path', required=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict(
                'options',
                Bool('recursive', default=False),
                Bool('traverse', default=False)
            )
        )
    )
    def chown(self, data):
        """
        Change owner or group of file at `path`.

        `uid` and `gid` specify new owner of the file. If either
        key is absent or None, then existing value on the file is not
        changed.

        `recursive` performs action recursively, but does
        not traverse filesystem mount points.

        If `traverse` and `recursive` are specified, then the chown
        operation will traverse filesystem mount points.
        """
        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']
        options = data['options']

        if not options['recursive']:
            os.chown(data['path'], uid, gid)
        else:
            winacl = subprocess.run([
                '/usr/local/bin/winacl',
                '-a', 'chown',
                '-O', str(uid), '-G', str(gid),
                '-rx' if options['traverse'] else '-r',
                '-p', data['path']], check=False, capture_output=True
            )
            if winacl.returncode != 0:
                raise CallError(f"Failed to recursively change ownership: {winacl.stderr.decode()}")

    @accepts(
        Dict(
            'filesystem_permission',
            Str('path', required=True),
            UnixPerm('mode', null=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
            )
        )
    )
    @job(lock=lambda args: f'setperm:{args[0]}')
    def setperm(self, job, data):
        """
        Remove extended ACL from specified path.

        If `mode` is specified then the mode will be applied to the
        path and files and subdirectories depending on which `options` are
        selected. Mode should be formatted as string representation of octal
        permissions bits.

        `stripacl`: setperm will fail if an extended ACL is present on `path`
        unless `stripacl` is set to True.

        `recursive` remove ACLs recursively, but do not traverse dataset
        boundaries.

        `traverse` remove ACLs from child datasets.

        If no `mode` is set, and `stripacl` is True, then non-trivial ACLs
        will be converted to trivial ACLs. An ACL is trivial if it can be
        expressed as a file mode without losing any access rules.

        """
        options = data['options']
        mode = data.get('mode', None)

        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']

        if not os.path.exists(data['path']):
            raise CallError('Path not found.', errno.ENOENT)

        acl_is_trivial = self.middleware.call_sync('filesystem.acl_is_trivial', data['path'])
        if not acl_is_trivial and not options['stripacl']:
            raise CallError(
                f'Non-trivial ACL present on [{data["path"]}]. Option "stripacl" required to change permission.'
            )

        if mode is not None:
            mode = int(mode, 8)

        a = acl.ACL(file=data['path'])
        a.strip()
        a.apply(data['path'])

        if mode:
            os.chmod(data['path'], mode)

        if uid != -1 or gid != -1:
            os.chown(data['path'], uid, gid)

        if not options['recursive']:
            return

        winacl = subprocess.run([
            '/usr/local/bin/winacl',
            '-a', 'clone' if mode else 'strip',
            '-O', str(uid), '-G', str(gid),
            '-rx' if options['traverse'] else '-r',
            '-p', data['path']], check=False, capture_output=True
        )
        if winacl.returncode != 0:
            raise CallError(f"Failed to recursively apply ACL: {winacl.stderr.decode()}")

    @accepts(
        Str('path'),
        Bool('simplified', default=True),
    )
    def getacl(self, path, simplified=True):
        """
        Return ACL of a given path.

        Simplified returns a shortened form of the ACL permset and flags.

        `TRAVERSE` sufficient rights to traverse a directory, but not read contents.

        `READ` sufficient rights to traverse a directory, and read file contents.

        `MODIFY` sufficient rights to traverse, read, write, and modify a file. Equivalent to modify_set.

        `FULL_CONTROL` all permissions.

        If the permissions do not fit within one of the pre-defined simplified permissions types, then
        the full ACL entry will be returned.

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of the aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled. We also remove it here to avoid confusion.
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)

        stat = os.stat(path)

        a = acl.ACL(file=path)
        fs_acl = a.__getstate__()

        if not simplified:
            advanced_acl = []
            for entry in fs_acl:
                ace = {
                    'tag': (acl.ACLWho[entry['tag']]).value,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': entry['perms'],
                    'flags': entry['flags'],
                }
                if ace['tag'] == 'everyone@' and self.__convert_to_basic_permset(ace['perms']) == 'NOPERMS':
                    continue

                advanced_acl.append(ace)

            return {'uid': stat.st_uid, 'gid': stat.st_gid, 'acl': advanced_acl}

        if simplified:
            simple_acl = []
            for entry in fs_acl:
                ace = {
                    'tag': (acl.ACLWho[entry['tag']]).value,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': {'BASIC': self.__convert_to_basic_permset(entry['perms'])},
                    'flags': {'BASIC': self.__convert_to_basic_flagset(entry['flags'])},
                }
                if ace['tag'] == 'everyone@' and ace['perms']['BASIC'] == 'NOPERMS':
                    continue

                for key in ['perms', 'flags']:
                    if ace[key]['BASIC'] == 'OTHER':
                        ace[key] = entry[key]

                simple_acl.append(ace)

            return {'uid': stat.st_uid, 'gid': stat.st_gid, 'acl': simple_acl}

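    # Illustrative simplified entry as returned above (shape taken from the
    # code; the values are hypothetical):
    #
    #   {'tag': 'owner@', 'id': None, 'type': 'ALLOW',
    #    'perms': {'BASIC': 'FULL_CONTROL'}, 'flags': {'BASIC': 'INHERIT'}}
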
    @accepts(
        Dict(
            'filesystem_acl',
            Str('path', required=True),
            List(
                'dacl',
                items=[
                    Dict(
                        'aclentry',
                        Str('tag', enum=['owner@', 'group@', 'everyone@', 'USER', 'GROUP']),
                        Int('id', null=True),
                        Str('type', enum=['ALLOW', 'DENY']),
                        Dict(
                            'perms',
                            Bool('READ_DATA'),
                            Bool('WRITE_DATA'),
                            Bool('APPEND_DATA'),
                            Bool('READ_NAMED_ATTRS'),
                            Bool('WRITE_NAMED_ATTRS'),
                            Bool('EXECUTE'),
                            Bool('DELETE_CHILD'),
                            Bool('READ_ATTRIBUTES'),
                            Bool('WRITE_ATTRIBUTES'),
                            Bool('DELETE'),
                            Bool('READ_ACL'),
                            Bool('WRITE_ACL'),
                            Bool('WRITE_OWNER'),
                            Bool('SYNCHRONIZE'),
                            Str('BASIC', enum=['FULL_CONTROL', 'MODIFY', 'READ', 'TRAVERSE']),
                        ),
                        Dict(
                            'flags',
                            Bool('FILE_INHERIT'),
                            Bool('DIRECTORY_INHERIT'),
                            Bool('NO_PROPAGATE_INHERIT'),
                            Bool('INHERIT_ONLY'),
                            Bool('INHERITED'),
                            Str('BASIC', enum=['INHERIT', 'NOINHERIT']),
                        ),
                    )
                ],
                default=[]
            ),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
            )
        )
    )
    @job(lock=lambda args: f'setacl:{args[0]}')
    def setacl(self, job, data):
        """
        Set ACL of a given path. Takes the following parameters:
        `path` full path to directory or file.

        `dacl` "simplified" ACL here or a full ACL.

        `uid` the desired UID of the file user. If set to -1, then UID is not changed.

        `gid` the desired GID of the file group. If set to -1 then GID is not changed.

        `recursive` apply the ACL recursively

        `traverse` traverse filesystem boundaries (ZFS datasets)

        `stripacl` convert ACL to trivial. ACL is trivial if it can be expressed as a file mode without
        losing any access rules.

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of the aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled.
        """
        options = data['options']
        dacl = data.get('dacl', [])
        if not os.path.exists(data['path']):
            raise CallError('Path not found.', errno.ENOENT)

        if dacl and options['stripacl']:
            raise CallError('Setting ACL and stripping ACL are not permitted simultaneously.', errno.EINVAL)

        uid = -1 if data.get('uid', None) is None else data['uid']
        gid = -1 if data.get('gid', None) is None else data['gid']
        if options['stripacl']:
            a = acl.ACL(file=data['path'])
            a.strip()
            a.apply(data['path'])
        else:
            cleaned_acl = []
            lockace_is_present = False
            for entry in dacl:
                if entry['perms'].get('BASIC') == 'OTHER' or entry['flags'].get('BASIC') == 'OTHER':
                    raise CallError('Unable to apply simplified ACL due to OTHER entry. Use full ACL.', errno.EINVAL)
                ace = {
                    'tag': (acl.ACLWho(entry['tag'])).name,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': self.__convert_to_adv_permset(entry['perms']['BASIC']) if 'BASIC' in entry['perms'] else entry['perms'],
                    'flags': self.__convert_to_adv_flagset(entry['flags']['BASIC']) if 'BASIC' in entry['flags'] else entry['flags'],
                }
                if ace['tag'] == 'EVERYONE' and self.__convert_to_basic_permset(ace['perms']) == 'NOPERMS':
                    lockace_is_present = True
                cleaned_acl.append(ace)
            if not lockace_is_present:
                locking_ace = {
                    'tag': 'EVERYONE',
                    'id': None,
                    'type': 'ALLOW',
                    'perms': self.__convert_to_adv_permset('NOPERMS'),
                    'flags': self.__convert_to_adv_flagset('INHERIT')
                }
                cleaned_acl.append(locking_ace)

            a = acl.ACL()
            a.__setstate__(cleaned_acl)
            a.apply(data['path'])

        if not options['recursive']:
            return True

        winacl = subprocess.run([
            '/usr/local/bin/winacl',
            '-a', 'clone', '-O', str(uid), '-G', str(gid),
            '-rx' if options['traverse'] else '-r',
            '-p', data['path']], check=False, capture_output=True
        )
        if winacl.returncode != 0:
            raise CallError(f"Failed to recursively apply ACL: {winacl.stderr.decode()}")
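
The basic/advanced converters above fold a dict of booleans into a bitmask and
back. A self-contained sketch of the same technique, using made-up flag values
instead of the real NFS4 constants from py-bsd:

from enum import IntFlag

class Perm(IntFlag):
    READ_DATA = 0x01
    WRITE_DATA = 0x02
    EXECUTE = 0x20

def to_mask(permset):
    # OR together the bit of every enabled permission
    mask = 0
    for name, enabled in permset.items():
        if enabled:
            mask |= Perm[name]
    return mask

def to_permset(mask):
    # reverse: test each flag bit against the mask
    return {name: bool(mask & member) for name, member in Perm.__members__.items()}

original = {'READ_DATA': True, 'WRITE_DATA': False, 'EXECUTE': True}
assert to_permset(to_mask(original)) == original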
Ejemplo n.º 22
0
class AlertServiceService(CRUDService):
    class Config:
        datastore = "system.alertservice"
        datastore_extend = "alertservice._extend"
        datastore_order_by = ["name"]

    @accepts()
    async def list_types(self):
        """
        List all types of supported Alert services which can be configured with the system.
        """
        return [{
            "name": name,
            "title": factory.title,
        } for name, factory in sorted(ALERT_SERVICES_FACTORIES.items(),
                                      key=lambda i: i[1].title.lower())]

    @private
    async def _extend(self, service):
        try:
            service["type__title"] = ALERT_SERVICES_FACTORIES[
                service["type"]].title
        except KeyError:
            service["type__title"] = "<Unknown>"

        return service

    @private
    async def _compress(self, service):
        service.pop("type__title")

        return service

    @private
    async def _validate(self, service, schema_name):
        verrors = ValidationErrors()

        factory = ALERT_SERVICES_FACTORIES.get(service["type"])
        if factory is None:
            verrors.add(f"{schema_name}.type", "This field has invalid value")
            raise verrors

        verrors.add_child(
            f"{schema_name}.attributes",
            validate_attributes(list(factory.schema.attrs.values()), service))

        if verrors:
            raise verrors

    @accepts(
        Dict(
            "alert_service_create",
            Str("name"),
            Str("type", required=True),
            Dict("attributes", additional_attrs=True),
            Str("level", enum=list(AlertLevel.__members__)),
            Bool("enabled"),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create an Alert Service of specified `type`.

        If `enabled`, it sends alerts to the configured `type` of Alert Service.

        .. examples(websocket)::

          Create an Alert Service of Mail `type`

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "alertservice.create",
                "params": [{
                    "name": "Test Email Alert",
                    "enabled": true,
                    "type": "Mail",
                    "attributes": {
                        "email": "*****@*****.**"
                    },
                    "settings": {
                        "VolumeVersion": "HOURLY"
                    }
                }]
            }
        """
        await self._validate(data, "alert_service_create")

        data["id"] = await self.middleware.call("datastore.insert",
                                                self._config.datastore, data)

        await self._extend(data)

        return data

    @accepts(Int("id"),
             Patch(
                 "alert_service_create",
                 "alert_service_update",
                 ("attr", {
                     "update": True
                 }),
             ))
    async def do_update(self, id, data):
        """
        Update Alert Service of `id`.
        """
        old = await self.middleware.call(
            "datastore.query", self._config.datastore, [("id", "=", id)], {
                "extend": self._config.datastore_extend,
                "get": True
            })

        new = old.copy()
        new.update(data)

        await self._validate(new, "alert_service_update")

        await self._compress(new)

        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, new)

        await self._extend(new)

        return new

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete Alert Service of `id`.
        """
        return await self.middleware.call("datastore.delete",
                                          self._config.datastore, id)

    @accepts(Ref('alert_service_create'))
    async def test(self, data):
        """
        Send a test alert using `type` of Alert Service.

        .. examples(websocket)::

          Send a test alert using Alert Service of Mail `type`.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "alertservice.test",
                "params": [{
                    "name": "Test Email Alert",
                    "enabled": true,
                    "type": "Mail",
                    "attributes": {
                        "email": "*****@*****.**"
                    },
                    "settings": {}
                }]
            }
        """
        await self._validate(data, "alert_service_test")

        factory = ALERT_SERVICES_FACTORIES.get(data["type"])
        if factory is None:
            self.logger.error("Alert service %r does not exist", data["type"])
            return False

        try:
            alert_service = factory(self.middleware, data["attributes"])
        except Exception:
            self.logger.error(
                "Error creating alert service %r with parameters=%r",
                data["type"],
                data["attributes"],
                exc_info=True)
            return False

        master_node = "A"
        if not await self.middleware.call("system.is_freenas"):
            if await self.middleware.call("failover.licensed"):
                master_node = await self.middleware.call("failover.node")

        test_alert = Alert(
            TestAlertClass,
            node=master_node,
            datetime=datetime.utcnow(),
            last_occurrence=datetime.utcnow(),
            _uuid="test",
        )

        try:
            await alert_service.send([test_alert], [], [test_alert])
        except Exception:
            self.logger.error("Error in alert service %r",
                              data["type"],
                              exc_info=True)
            return False

        return True
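
A hedged Python equivalent of the websocket call documented above, using the
middleware client; the attribute values are illustrative:

from middlewared.client import Client

with Client() as c:
    payload = {
        'name': 'Test Email Alert',
        'enabled': True,
        'type': 'Mail',
        'attributes': {'email': 'admin@example.com'},
        'level': 'WARNING',
    }
    # test() returns True on success, False otherwise
    if c.call('alertservice.test', payload):
        c.call('alertservice.create', payload)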
Ejemplo n.º 23
0
class ServiceService(CRUDService):

    SERVICE_DEFS = {
        's3': ServiceDefinition('minio', '/var/run/minio.pid'),
        'ssh': ServiceDefinition('sshd', '/var/run/sshd.pid'),
        'rsync': ServiceDefinition('rsync', '/var/run/rsyncd.pid'),
        'nfs': ServiceDefinition('nfsd', None),
        'afp': ServiceDefinition('netatalk', None),
        'cifs': ServiceDefinition('smbd', '/var/run/samba4/smbd.pid'),
        'dynamicdns': ServiceDefinition('inadyn', None),
        'snmp': ServiceDefinition('snmpd', '/var/run/net_snmpd.pid'),
        'ftp': ServiceDefinition('proftpd', '/var/run/proftpd.pid'),
        'tftp': ServiceDefinition('inetd', '/var/run/inetd.pid'),
        'iscsitarget': ServiceDefinition('ctld', '/var/run/ctld.pid'),
        'lldp': ServiceDefinition('ladvd', '/var/run/ladvd.pid'),
        'ups': ServiceDefinition('upsd', '/var/db/nut/upsd.pid'),
        'upsmon': ServiceDefinition('upsmon', '/var/db/nut/upsmon.pid'),
        'smartd': ServiceDefinition('smartd', 'smartd-daemon', '/var/run/smartd-daemon.pid'),
        'webshell': ServiceDefinition(None, '/var/run/webshell.pid'),
        'webdav': ServiceDefinition('httpd', '/var/run/httpd.pid'),
        'netdata': ServiceDefinition('netdata', '/var/db/netdata/netdata.pid')
    }

    @filterable
    async def query(self, filters=None, options=None):
        if options is None:
            options = {}
        options['prefix'] = 'srv_'

        services = await self.middleware.call('datastore.query', 'services.services', filters, options)

        # In case a single service has been requested
        if not isinstance(services, list):
            services = [services]

        jobs = {
            asyncio.ensure_future(self._get_status(entry)): entry
            for entry in services
        }
        if jobs:
            done, pending = await asyncio.wait(list(jobs.keys()), timeout=15)

        def result(task):
            """
            Method to handle results of the coroutines.
            In case of error or timeout, provide UNKNOWN state.
            """
            result = None
            try:
                if task in done:
                    result = task.result()
            except Exception:
                pass
            if result is None:
                entry = jobs.get(task)
                self.logger.warning('Failed to get status for %s', entry['service'])
                entry['state'] = 'UNKNOWN'
                entry['pids'] = []
                return entry
            else:
                return result

        services = list(map(result, jobs))
        return filter_list(services, filters, options)

    @accepts(
        Str('id_or_name'),
        Dict(
            'service-update',
            Bool('enable', default=False),
        ),
    )
    async def do_update(self, id_or_name, data):
        """
        Update service entry of `id_or_name`.

        Currently it only accepts the `enable` option, which controls whether the
        service should start on boot.

        """
        if not id_or_name.isdigit():
            svc = await self.middleware.call('datastore.query', 'services.services', [('srv_service', '=', id_or_name)])
            if not svc:
                raise CallError(f'Service {id_or_name} not found.', errno.ENOENT)
            id_or_name = svc[0]['id']

        return await self.middleware.call('datastore.update', 'services.services', id_or_name, {'srv_enable': data['enable']})

    @accepts(
        Str('service'),
        Dict(
            'service-control',
            Bool('onetime', default=True),
            Bool('wait', default=None, null=True),
            Bool('sync', default=None, null=True),
            register=True,
        ),
    )
    async def start(self, service, options=None):
        """ Start the service specified by `service`.

        The helper will use the method self._start_[service]() to start the service.
        If the method does not exist, it falls back to using service(8)."""
        await self.middleware.call_hook('service.pre_action', service, 'start', options)
        sn = self._started_notify("start", service)
        await self._simplecmd("start", service, options)
        return await self.started(service, sn)

    async def started(self, service, sn=None):
        """
        Test if service specified by `service` has been started.
        """
        if sn:
            await self.middleware.run_in_thread(sn.join)

        try:
            svc = await self.query([('service', '=', service)], {'get': True})
            self.middleware.send_event('service.query', 'CHANGED', fields=svc)
            return svc['state'] == 'RUNNING'
        except IndexError:
            f = getattr(self, '_started_' + service, None)
            if callable(f):
                if inspect.iscoroutinefunction(f):
                    return (await f())[0]
                else:
                    return f()[0]
            else:
                return (await self._started(service))[0]

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def stop(self, service, options=None):
        """ Stop the service specified by `service`.

        The helper will use the method self._stop_[service]() to stop the service.
        If the method does not exist, it falls back to using service(8)."""
        await self.middleware.call_hook('service.pre_action', service, 'stop', options)
        sn = self._started_notify("stop", service)
        await self._simplecmd("stop", service, options)
        return await self.started(service, sn)

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def restart(self, service, options=None):
        """
        Restart the service specified by `service`.

        The helper will use the method self._restart_[service]() to restart the service.
        If the method does not exist, it falls back to using service(8)."""
        await self.middleware.call_hook('service.pre_action', service, 'restart', options)
        sn = self._started_notify("restart", service)
        await self._simplecmd("restart", service, options)
        return await self.started(service, sn)

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def reload(self, service, options=None):
        """
        Reload the service specified by `service`.

        The helper will use the method self._reload_[service]() to reload the service.
        If the method does not exist, the helper will fall back to self.restart
        for the service instead."""
        await self.middleware.call_hook('service.pre_action', service, 'reload', options)
        try:
            await self._simplecmd("reload", service, options)
        except Exception:
            await self.restart(service, options)
        return await self.started(service)

    async def _get_status(self, service):
        f = getattr(self, '_started_' + service['service'], None)
        if callable(f):
            if inspect.iscoroutinefunction(f):
                running, pids = await f()
            else:
                running, pids = f()
        else:
            running, pids = await self._started(service['service'])

        if running:
            state = 'RUNNING'
        else:
            state = 'STOPPED'

        service['state'] = state
        service['pids'] = pids
        return service

    async def _simplecmd(self, action, what, options=None):
        self.logger.debug("Calling: %s(%s) ", action, what)
        f = getattr(self, '_' + action + '_' + what, None)
        if f is None:
            # Provide generic start/stop/restart verbs for rc.d scripts
            if what in self.SERVICE_DEFS:
                if self.SERVICE_DEFS[what].rc_script:
                    what = self.SERVICE_DEFS[what].rc_script
            if action in ("start", "stop", "restart", "reload"):
                if action == 'restart':
                    await self._system("/usr/sbin/service " + what + " forcestop ")
                await self._service(what, action, **(options or {}))
            else:
                raise ValueError("Internal error: Unknown command")
        else:
            call = f(**(options or {}))
            if inspect.iscoroutinefunction(f):
                await call

    async def _system(self, cmd, options=None):
        stdout = DEVNULL
        if options and 'stdout' in options:
            stdout = options['stdout']
        stderr = DEVNULL
        if options and 'stderr' in options:
            stderr = options['stderr']

        proc = await Popen(cmd, stdout=stdout, stderr=stderr, shell=True, close_fds=True)
        await proc.communicate()
        return proc.returncode

    async def _service(self, service, verb, **options):
        onetime = options.pop('onetime', None)
        force = options.pop('force', None)
        quiet = options.pop('quiet', None)
        extra = options.pop('extra', '')

        # force takes precedence over onetime, which takes precedence over
        # quiet; they are mutually exclusive
        preverb = ''
        if force:
            preverb = 'force'
        elif onetime:
            preverb = 'one'
        elif quiet:
            preverb = 'quiet'

        return await self._system('/usr/sbin/service {} {}{} {}'.format(
            service,
            preverb,
            verb,
            extra,
        ), options)
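
    # For example (illustrative), verb='start' for the 'sshd' rc script builds:
    #   /usr/sbin/service sshd onestart    (onetime=True, the default)
    #   /usr/sbin/service sshd forcestart  (force=True)
    #   /usr/sbin/service sshd quietstart  (quiet=True)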

    def _started_notify(self, verb, what):
        """
        The check for started [or not] processes is currently done in 2 steps.
        This is the first step, which involves a StartNotify thread that watches
        for events before the rc.d scripts are actually started/stopped.

        Returns:
            StartNotify object if the service is known, None otherwise
        """

        if what in self.SERVICE_DEFS:
            sn = StartNotify(verb=verb, pidfile=self.SERVICE_DEFS[what].pidfile)
            sn.start()
            return sn
        else:
            return None

    async def _started(self, what, notify=None):
        """
        This is the second step:
        Wait for the StartNotify thread to finish and then check the
        status of pidfile/procname using pgrep.

        Returns:
            True if the service is alive, False otherwise
        """

        if what in self.SERVICE_DEFS:
            if notify:
                await self.middleware.run_in_thread(notify.join)

            if self.SERVICE_DEFS[what].pidfile:
                pgrep = "/bin/pgrep -F {}{}".format(
                    self.SERVICE_DEFS[what].pidfile,
                    ' ' + self.SERVICE_DEFS[what].procname if self.SERVICE_DEFS[what].procname else '',
                )
            else:
                pgrep = "/bin/pgrep {}".format(self.SERVICE_DEFS[what].procname)
            proc = await Popen(pgrep, shell=True, stdout=PIPE, stderr=PIPE, close_fds=True)
            data = (await proc.communicate())[0].decode()

            if proc.returncode == 0:
                return True, [
                    int(i)
                    for i in data.strip().split('\n') if i.isdigit()
                ]
        return False, []
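
    # Illustrative commands built above (assuming the two-argument
    # ServiceDefinition('sshd', '/var/run/sshd.pid') sets both procname
    # and pidfile):
    #   /bin/pgrep -F /var/run/sshd.pid sshd   ('ssh': pidfile and procname)
    #   /bin/pgrep nfsd                        ('nfs': procname only)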

    async def _start_webdav(self, **kwargs):
        await self._service("ix-apache", "start", force=True, **kwargs)
        await self._service("apache24", "start", **kwargs)

    async def _stop_webdav(self, **kwargs):
        await self._service("apache24", "stop", **kwargs)

    async def _restart_webdav(self, **kwargs):
        await self._service("apache24", "stop", force=True, **kwargs)
        await self._service("ix-apache", "start", force=True, **kwargs)
        await self._service("apache24", "restart", **kwargs)

    async def _reload_webdav(self, **kwargs):
        await self._service("ix-apache", "start", force=True, **kwargs)
        await self._service("apache24", "reload", **kwargs)

    async def _restart_django(self, **kwargs):
        await self._service("django", "restart", **kwargs)

    async def _start_webshell(self, **kwargs):
        await self._system("/usr/local/bin/python /usr/local/www/freenasUI/tools/webshell.py")

    async def _restart_webshell(self, **kwargs):
        try:
            with open('/var/run/webshell.pid', 'r') as f:
                pid = f.read()
                os.kill(int(pid), signal.SIGTERM)
                time.sleep(0.2)
                os.kill(int(pid), signal.SIGKILL)
        except Exception:
            pass
        await self._system("ulimit -n 1024 && /usr/local/bin/python /usr/local/www/freenasUI/tools/webshell.py")

    async def _restart_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "start", force=True, **kwargs)
        await self._service("ctld", "stop", force=True, **kwargs)
        await self._service("ix-ctld", "start", quiet=True, **kwargs)
        await self._service("ctld", "restart", **kwargs)

    async def _start_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "start", quiet=True, **kwargs)
        await self._service("ctld", "start", **kwargs)

    async def _stop_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "stop", force=True, **kwargs)
        await self._service("ctld", "stop", force=True, **kwargs)

    async def _reload_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "start", quiet=True, **kwargs)
        await self._service("ctld", "reload", **kwargs)

    async def _start_collectd(self, **kwargs):
        await self._service("ix-collectd", "start", quiet=True, **kwargs)
        await self._service("collectd", "restart", **kwargs)

    async def _restart_collectd(self, **kwargs):
        await self._service("collectd", "stop", **kwargs)
        await self._service("ix-collectd", "start", quiet=True, **kwargs)
        await self._service("collectd", "start", **kwargs)

    async def _start_sysctl(self, **kwargs):
        await self._service("ix-sysctl", "start", quiet=True, **kwargs)

    async def _reload_sysctl(self, **kwargs):
        await self._service("ix-sysctl", "reload", **kwargs)

    async def _start_network(self, **kwargs):
        await self.middleware.call('interfaces.sync')
        await self.middleware.call('routes.sync')

    async def _stop_jails(self, **kwargs):
        for jail in await self.middleware.call('datastore.query', 'jails.jails'):
            try:
                await self.middleware.call('notifier.warden', 'stop', [], {'jail': jail['jail_host']})
            except Exception:
                self.logger.debug(f'Failed to stop jail {jail["jail_host"]}', exc_info=True)

    async def _start_jails(self, **kwargs):
        await self._service("ix-warden", "start", **kwargs)
        for jail in await self.middleware.call('datastore.query', 'jails.jails'):
            if jail['jail_autostart']:
                try:
                    await self.middleware.call('notifier.warden', 'start', [], {'jail': jail['jail_host']})
                except Exception:
                    self.logger.debug(f'Failed to start jail {jail["jail_host"]}', exc_info=True)
        await self._service("ix-plugins", "start", **kwargs)
        await self.reload("http", kwargs)

    async def _restart_jails(self, **kwargs):
        await self._stop_jails()
        await self._start_jails()

    async def _stop_pbid(self, **kwargs):
        await self._service("pbid", "stop", **kwargs)

    async def _start_pbid(self, **kwargs):
        await self._service("pbid", "start", **kwargs)

    async def _restart_pbid(self, **kwargs):
        await self._service("pbid", "restart", **kwargs)

    async def _reload_named(self, **kwargs):
        await self._service("named", "reload", **kwargs)

    async def _reload_hostname(self, **kwargs):
        await self._system('/bin/hostname ""')
        await self._service("ix-hostname", "start", quiet=True, **kwargs)
        await self._service("hostname", "start", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", quiet=True, **kwargs)
        await self._service("collectd", "stop", **kwargs)
        await self._service("ix-collectd", "start", quiet=True, **kwargs)
        await self._service("collectd", "start", **kwargs)

    async def _reload_resolvconf(self, **kwargs):
        await self._reload_hostname()
        await self._service("ix-resolv", "start", quiet=True, **kwargs)

    async def _reload_networkgeneral(self, **kwargs):
        await self._reload_resolvconf()
        await self._service("routing", "restart", **kwargs)

    async def _reload_timeservices(self, **kwargs):
        await self._service("ix-localtime", "start", quiet=True, **kwargs)
        await self._service("ix-ntpd", "start", quiet=True, **kwargs)
        await self._service("ntpd", "restart", **kwargs)
        settings = await self.middleware.call(
            'datastore.query',
            'system.settings',
            [],
            {'order_by': ['-id'], 'get': True}
        )
        os.environ['TZ'] = settings['stg_timezone']
        time.tzset()

    async def _reload_smartd(self, **kwargs):
        await self._service("ix-smartd", "start", quiet=True, **kwargs)
        await self._service("smartd-daemon", "reload", **kwargs)

    async def _restart_smartd(self, **kwargs):
        await self.middleware.call("etc.generate", "smartd")
        await self._service("smartd-daemon", "stop", force=True, **kwargs)
        await self._service("smartd-daemon", "restart", **kwargs)

    async def _reload_ssh(self, **kwargs):
        await self._service("ix-sshd", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("openssh", "reload", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _start_ssh(self, **kwargs):
        await self._service("ix-sshd", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("openssh", "start", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _stop_ssh(self, **kwargs):
        await self._service("openssh", "stop", force=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)

    async def _restart_ssh(self, **kwargs):
        await self._service("ix-sshd", "start", quiet=True, **kwargs)
        await self._service("openssh", "stop", force=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("openssh", "restart", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _start_ssl(self, what=None):
        if what is not None:
            await self._service("ix-ssl", "start", quiet=True, extra=what)
        else:
            await self._service("ix-ssl", "start", quiet=True)

    async def _start_s3(self, **kwargs):
        await self.middleware.call('etc.generate', 's3')
        await self._service("minio", "start", quiet=True, stdout=None, stderr=None, **kwargs)

    async def _reload_s3(self, **kwargs):
        await self.middleware.call('etc.generate', 's3')
        await self._service("minio", "restart", quiet=True, stdout=None, stderr=None, **kwargs)

    async def _reload_rsync(self, **kwargs):
        await self._service("ix-rsyncd", "start", quiet=True, **kwargs)
        await self._service("rsyncd", "restart", **kwargs)

    async def _restart_rsync(self, **kwargs):
        await self._stop_rsync()
        await self._start_rsync()

    async def _start_rsync(self, **kwargs):
        await self._service("ix-rsyncd", "start", quiet=True, **kwargs)
        await self._service("rsyncd", "start", **kwargs)

    async def _stop_rsync(self, **kwargs):
        await self._service("rsyncd", "stop", force=True, **kwargs)

    async def _started_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl status"):
            res = True
        return res, []

    async def _start_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl start"):
            res = True
        return res

    async def _restart_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl restart"):
            res = True
        return res

    async def _stop_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl stop"):
            res = True
        return res

    async def _started_ldap(self, **kwargs):
        if await self._system('/usr/sbin/service ix-ldap status') != 0:
            return False, []
        return await self.middleware.call('notifier.ldap_status'), []

    async def _start_ldap(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl start"):
            res = True
        return res

    async def _stop_ldap(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl stop"):
            res = True
        return res

    async def _restart_ldap(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl restart"):
            res = True
        return res

    async def _start_lldp(self, **kwargs):
        await self._service("ladvd", "start", **kwargs)

    async def _stop_lldp(self, **kwargs):
        await self._service("ladvd", "stop", force=True, **kwargs)

    async def _restart_lldp(self, **kwargs):
        await self._service("ladvd", "stop", force=True, **kwargs)
        await self._service("ladvd", "restart", **kwargs)

    async def _clear_activedirectory_config(self):
        await self._system("/bin/rm -f /etc/directoryservice/ActiveDirectory/config")

    async def _started_activedirectory(self, **kwargs):
        # Perform a wbinfo -t because it's the most accurate single test we have to
        # detect problems with AD join. The default winbind timeout is 60 seconds (as of Samba 4.7).
        # This can be controlled by the smb4.conf parameter "winbind request timeout = "
        if await self._system('/usr/local/bin/wbinfo -t') != 0:
            self.logger.debug('AD monitor: wbinfo -t failed')
            return False, []
        return True, []

    async def _start_activedirectory(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/ActiveDirectory/ctl start"):
            res = True
        return res

    async def _stop_activedirectory(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/ActiveDirectory/ctl stop"):
            res = True
        return res

    async def _restart_activedirectory(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/ActiveDirectory/ctl restart"):
            res = True
        return res

    async def _reload_activedirectory(self, **kwargs):
        # Steps required to force winbind to connect to a new DC if the DC it's connected to goes down.
        # We may need to expand the list of operations below to include a fresh kinit. Some
        # information about the winbind connection is stored in samba's gencache. In test cases, flushing
        # gencache (net cache flush) was not required for this to work.
        await self._service("samba_server", "stop", force=True, **kwargs)
        await self._service("samba_server", "start", quiet=True, **kwargs)

    async def _started_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl status"):
            res = True
        return res, []

    async def _start_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl start"):
            res = True
        return res

    async def _stop_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl stop"):
            res = True
        return res

    async def _restart_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl restart"):
            res = True
        return res

    async def _restart_syslogd(self, **kwargs):
        await self._service("ix-syslogd", "start", quiet=True, **kwargs)
        await self._system("/etc/local/rc.d/syslog-ng restart")

    async def _start_syslogd(self, **kwargs):
        await self._service("ix-syslogd", "start", quiet=True, **kwargs)
        await self._system("/etc/local/rc.d/syslog-ng start")

    async def _stop_syslogd(self, **kwargs):
        await self._system("/etc/local/rc.d/syslog-ng stop")

    async def _reload_syslogd(self, **kwargs):
        await self._service("ix-syslogd", "start", quiet=True, **kwargs)
        await self._system("/etc/local/rc.d/syslog-ng reload")

    async def _start_tftp(self, **kwargs):
        await self._service("ix-inetd", "start", quiet=True, **kwargs)
        await self._service("inetd", "start", **kwargs)

    async def _reload_tftp(self, **kwargs):
        await self._service("ix-inetd", "start", quiet=True, **kwargs)
        await self._service("inetd", "stop", force=True, **kwargs)
        await self._service("inetd", "restart", **kwargs)

    async def _restart_tftp(self, **kwargs):
        await self._service("ix-inetd", "start", quiet=True, **kwargs)
        await self._service("inetd", "stop", force=True, **kwargs)
        await self._service("inetd", "restart", **kwargs)

    async def _restart_cron(self, **kwargs):
        await self._service("ix-crontab", "start", quiet=True, **kwargs)

    async def _start_motd(self, **kwargs):
        await self._service("ix-motd", "start", quiet=True, **kwargs)
        await self._service("motd", "start", quiet=True, **kwargs)

    async def _start_ttys(self, **kwargs):
        await self._service("ix-ttys", "start", quiet=True, **kwargs)

    async def _reload_ftp(self, **kwargs):
        await self._service("ix-proftpd", "start", quiet=True, **kwargs)
        await self._service("proftpd", "restart", **kwargs)

    async def _restart_ftp(self, **kwargs):
        await self._stop_ftp()
        await self._start_ftp()

    async def _start_ftp(self, **kwargs):
        await self._service("ix-proftpd", "start", quiet=True, **kwargs)
        await self._service("proftpd", "start", **kwargs)

    async def _stop_ftp(self, **kwargs):
        await self._service("proftpd", "stop", force=True, **kwargs)

    async def _start_ups(self, **kwargs):
        await self._service("ix-ups", "start", quiet=True, **kwargs)
        await self._service("nut", "start", **kwargs)
        await self._service("nut_upsmon", "start", **kwargs)
        await self._service("nut_upslog", "start", **kwargs)

    async def _stop_ups(self, **kwargs):
        await self._service("nut_upslog", "stop", force=True, **kwargs)
        await self._service("nut_upsmon", "stop", force=True, **kwargs)
        await self._service("nut", "stop", force=True, **kwargs)

    async def _restart_ups(self, **kwargs):
        await self._service("ix-ups", "start", quiet=True, **kwargs)
        await self._service("nut", "stop", force=True, **kwargs)
        await self._service("nut_upsmon", "stop", force=True, **kwargs)
        await self._service("nut_upslog", "stop", force=True, **kwargs)
        await self._service("nut", "restart", **kwargs)
        await self._service("nut_upsmon", "restart", **kwargs)
        await self._service("nut_upslog", "restart", **kwargs)

    async def _started_ups(self, **kwargs):
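        # Which NUT daemon indicates "running" depends on the configured mode:
        # "master" checks the ups service itself, otherwise upsmon is checked.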
        mode = (await self.middleware.call('datastore.query', 'services.ups', [], {'order_by': ['-id'], 'get': True}))['ups_mode']
        if mode == "master":
            svc = "ups"
        else:
            svc = "upsmon"
        return await self._started(svc)

    async def _start_afp(self, **kwargs):
        await self._service("ix-afpd", "start", **kwargs)
        await self._service("netatalk", "start", **kwargs)

    async def _stop_afp(self, **kwargs):
        await self._service("netatalk", "stop", force=True, **kwargs)
        # When netatalk stops, a stuck afpd or cnid_metad can get
        # left behind, which can cause issues when netatalk is
        # restarted.
        await self._system("pkill -9 afpd")
        await self._system("pkill -9 cnid_metad")

    async def _restart_afp(self, **kwargs):
        await self._stop_afp()
        await self._start_afp()

    async def _reload_afp(self, **kwargs):
        await self._service("ix-afpd", "start", quiet=True, **kwargs)
        await self._system("killall -1 netatalk")

    async def _reload_nfs(self, **kwargs):
        await self.middleware.call("etc.generate", "nfsd")

    async def _restart_nfs(self, **kwargs):
        await self._stop_nfs(**kwargs)
        await self._start_nfs(**kwargs)

    async def _stop_nfs(self, **kwargs):
        await self._service("lockd", "stop", force=True, **kwargs)
        await self._service("statd", "stop", force=True, **kwargs)
        await self._service("nfsd", "stop", force=True, **kwargs)
        await self._service("mountd", "stop", force=True, **kwargs)
        await self._service("nfsuserd", "stop", force=True, **kwargs)
        await self._service("gssd", "stop", force=True, **kwargs)
        await self._service("rpcbind", "stop", force=True, **kwargs)

    async def _start_nfs(self, **kwargs):
        nfs = await self.middleware.call('datastore.config', 'services.nfs')
        await self.middleware.call("etc.generate", "nfsd")
        await self._service("rpcbind", "start", quiet=True, **kwargs)
        await self._service("gssd", "start", quiet=True, **kwargs)
        # Workaround to work with "onetime", since the rc scripts depend on rc flags.
        if nfs['nfs_srv_v4']:
            sysctl.filter('vfs.nfsd.server_max_nfsvers')[0].value = 4
            if nfs['nfs_srv_v4_v3owner']:
                # Per RFC7530, sending NFSv3 style UID/GIDs across the wire is now allowed
                # You must have both of these sysctl's set to allow the desired functionality
                sysctl.filter('vfs.nfsd.enable_stringtouid')[0].value = 1
                sysctl.filter('vfs.nfs.enable_uidtostring')[0].value = 1
                await self._service("nfsuserd", "stop", force=True, **kwargs)
            else:
                sysctl.filter('vfs.nfsd.enable_stringtouid')[0].value = 0
                sysctl.filter('vfs.nfs.enable_uidtostring')[0].value = 0
                await self._service("nfsuserd", "start", quiet=True, **kwargs)
        else:
            sysctl.filter('vfs.nfsd.server_max_nfsvers')[0].value = 3
            if nfs['nfs_srv_16']:
                await self._service("nfsuserd", "start", quiet=True, **kwargs)
        await self._service("mountd", "start", quiet=True, **kwargs)
        await self._service("nfsd", "start", quiet=True, **kwargs)
        await self._service("statd", "start", quiet=True, **kwargs)
        await self._service("lockd", "start", quiet=True, **kwargs)

    async def _force_stop_jail(self, **kwargs):
        await self._service("jail", "stop", force=True, **kwargs)

    async def _start_plugins(self, jail=None, plugin=None, **kwargs):
        if jail and plugin:
            await self._system("/usr/sbin/service ix-plugins forcestart %s:%s" % (jail, plugin))
        else:
            await self._service("ix-plugins", "start", force=True, **kwargs)

    async def _stop_plugins(self, jail=None, plugin=None, **kwargs):
        if jail and plugin:
            await self._system("/usr/sbin/service ix-plugins forcestop %s:%s" % (jail, plugin))
        else:
            await self._service("ix-plugins", "stop", force=True, **kwargs)

    async def _restart_plugins(self, jail=None, plugin=None):
        await self._stop_plugins(jail=jail, plugin=plugin)
        await self._start_plugins(jail=jail, plugin=plugin)

    async def _started_plugins(self, jail=None, plugin=None, **kwargs):
        res = False
        if jail and plugin:
            if await self._system("/usr/sbin/service ix-plugins status %s:%s" % (jail, plugin)) == 0:
                res = True
        else:
            if await self._service("ix-plugins", "status", **kwargs) == 0:
                res = True
        return res, []

    async def _start_dynamicdns(self, **kwargs):
        await self._service("ix-inadyn", "start", quiet=True, **kwargs)
        await self._service("inadyn", "start", **kwargs)

    async def _restart_dynamicdns(self, **kwargs):
        await self._service("ix-inadyn", "start", quiet=True, **kwargs)
        await self._service("inadyn", "stop", force=True, **kwargs)
        await self._service("inadyn", "restart", **kwargs)

    async def _reload_dynamicdns(self, **kwargs):
        await self._service("ix-inadyn", "start", quiet=True, **kwargs)
        await self._service("inadyn", "stop", force=True, **kwargs)
        await self._service("inadyn", "restart", **kwargs)

    async def _restart_system(self, **kwargs):
        asyncio.ensure_future(self._system("/bin/sleep 3 && /sbin/shutdown -r now"))

    async def _stop_system(self, **kwargs):
        asyncio.ensure_future(self._system("/bin/sleep 3 && /sbin/shutdown -p now"))

    async def _reload_cifs(self, **kwargs):
        await self._service("ix-pre-samba", "start", quiet=True, **kwargs)
        await self._service("samba_server", "reload", force=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", **kwargs)
        # After mdnsd is restarted we need to reload netatalk to have it re-register
        # with mDNS. Ticket #7133
        await self._service("netatalk", "reload", **kwargs)

    async def _restart_cifs(self, **kwargs):
        await self._service("ix-pre-samba", "start", quiet=True, **kwargs)
        await self._service("samba_server", "stop", force=True, **kwargs)
        await self._service("samba_server", "restart", quiet=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", **kwargs)
        # After mdnsd is restarted we need to reload netatalk to have it re-register
        # with mDNS. Ticket #7133
        await self._service("netatalk", "reload", **kwargs)

    async def _start_cifs(self, **kwargs):
        await self._service("ix-pre-samba", "start", quiet=True, **kwargs)
        await self._service("samba_server", "start", quiet=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)

    async def _stop_cifs(self, **kwargs):
        await self._service("samba_server", "stop", force=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)

    async def _started_cifs(self, **kwargs):
        if await self._service("samba_server", "status", quiet=True, onetime=True, **kwargs):
            return False, []
        else:
            return True, []

    async def _start_snmp(self, **kwargs):
        await self._service("ix-snmpd", "start", quiet=True, **kwargs)
        await self._service("snmpd", "start", quiet=True, **kwargs)
        await self._service("snmp-agent", "start", quiet=True, **kwargs)

    async def _stop_snmp(self, **kwargs):
        await self._service("snmp-agent", "stop", quiet=True, **kwargs)
        await self._service("snmpd", "stop", quiet=True, **kwargs)

    async def _restart_snmp(self, **kwargs):
        await self._service("snmp-agent", "stop", quiet=True, **kwargs)
        await self._service("snmpd", "stop", force=True, **kwargs)
        await self._service("ix-snmpd", "start", quiet=True, **kwargs)
        await self._service("snmpd", "start", quiet=True, **kwargs)
        await self._service("snmp-agent", "start", quiet=True, **kwargs)

    async def _reload_snmp(self, **kwargs):
        await self._service("snmp-agent", "stop", quiet=True, **kwargs)
        await self._service("snmpd", "stop", force=True, **kwargs)
        await self._service("ix-snmpd", "start", quiet=True, **kwargs)
        await self._service("snmpd", "start", quiet=True, **kwargs)
        await self._service("snmp-agent", "start", quiet=True, **kwargs)

    async def _restart_http(self, **kwargs):
        await self._service("ix-nginx", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("nginx", "restart", **kwargs)

    async def _reload_http(self, **kwargs):
        await self._service("ix-nginx", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("nginx", "reload", **kwargs)

    async def _reload_loader(self, **kwargs):
        await self._service("ix-loader", "reload", **kwargs)

    async def _start_loader(self, **kwargs):
        await self._service("ix-loader", "start", quiet=True, **kwargs)

    async def _restart_disk(self, **kwargs):
        await self._reload_disk(**kwargs)

    async def _reload_disk(self, **kwargs):
        await self._service("ix-fstab", "start", quiet=True, **kwargs)
        await self._service("ix-swap", "start", quiet=True, **kwargs)
        await self._service("swap", "start", quiet=True, **kwargs)
        await self._service("mountlate", "start", quiet=True, **kwargs)
        # Restarting collectd may take a long time and there is no
        # benefit in waiting for it since, even if it fails, it won't
        # tell the user anything useful.
        asyncio.ensure_future(self.restart("collectd", kwargs))

    async def _reload_user(self, **kwargs):
        await self._service("ix-passwd", "start", quiet=True, **kwargs)
        await self._service("ix-aliases", "start", quiet=True, **kwargs)
        await self._service("ix-sudoers", "start", quiet=True, **kwargs)
        await self.reload("cifs", kwargs)

    async def _restart_system_datasets(self, **kwargs):
        systemdataset = await self.middleware.call('systemdataset.setup')
        if not systemdataset:
            return None
        if systemdataset['syslog']:
            await self.restart("syslogd", kwargs)
        await self.restart("cifs", kwargs)
        if systemdataset['rrd']:
            # Restarting collectd may take a long time and there is no
            # benefit in waiting for it since, even if it fails, it won't
            # tell the user anything useful.
            asyncio.ensure_future(self.restart("collectd", kwargs))
Example no. 24
class MailService(ConfigService):

    oauth_access_token = None
    oauth_access_token_expires_at = None

    class Config:
        datastore = 'system.email'
        datastore_prefix = 'em_'
        datastore_extend = 'mail.mail_extend'
        cli_namespace = 'system.mail'

    ENTRY = Dict(
        'mail_entry',
        Str('fromemail', validators=[Email()], required=True),
        Str('fromname', required=True),
        Str('outgoingserver', required=True),
        Int('port', required=True),
        Str('security', enum=['PLAIN', 'SSL', 'TLS'], required=True),
        Bool('smtp', required=True),
        Str('user', null=True, required=True),
        Str('pass', private=True, null=True, required=True),
        Dict(
            'oauth',
            Str('client_id'),
            Str('client_secret'),
            Str('refresh_token'),
            null=True,
            private=True,
            required=True,
        ),
        Int('id', required=True),
    )
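
    # A hedged illustration (not from the source) of a config dict that satisfies the
    # ENTRY schema above; every value below is made up:
    #
    #     {
    #         'fromemail': 'nas@example.com', 'fromname': 'TrueNAS',
    #         'outgoingserver': 'smtp.example.com', 'port': 587,
    #         'security': 'TLS', 'smtp': True,
    #         'user': 'nas@example.com', 'pass': 'secret',
    #         'oauth': None, 'id': 1,
    #     }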

    @private
    async def mail_extend(self, cfg):
        if cfg['security']:
            cfg['security'] = cfg['security'].upper()
        return cfg

    @accepts(
        Patch('mail_entry',
              'mail_update', ('rm', {
                  'name': 'id'
              }), ('replace',
                   Dict(
                       'oauth',
                       Str('client_id', required=True),
                       Str('client_secret', required=True),
                       Str('refresh_token', required=True),
                       null=True,
                       private=True,
                   )), ('attr', {
                       'update': True
                   }),
              register=True))
    async def do_update(self, data):
        """
        Update Mail Service Configuration.

        `fromemail` is the sending address the mail server will use for outgoing emails.

        `outgoingserver` is the hostname or IP address of the SMTP server used for sending emails.

        `security` is the type of encryption desired.

        `smtp` is a boolean value which, when set, indicates that SMTP authentication is enabled and that
        `user`/`pass` become required attributes.
        """
        config = await self.config()

        new = config.copy()
        new.update(data)
        new['security'] = new['security'].lower()  # Django Model compatibility

        verrors = ValidationErrors()

        if new['smtp'] and new['user'] == '':
            verrors.add(
                'mail_update.user',
                'This field is required when SMTP authentication is enabled',
            )

        if new['oauth']:
            if new['fromemail']:
                verrors.add('mail_update.fromemail',
                            'This field cannot be used with GMail')
            if new['fromname']:
                verrors.add('mail_update.fromname',
                            'This field cannot be used with GMail')

        self.__password_verify(new['pass'], 'mail_update.pass', verrors)

        if verrors:
            raise verrors

        await self.middleware.call('datastore.update', 'system.email',
                                   config['id'], new, {'prefix': 'em_'})

        await self.middleware.call('mail.gmail_initialize')

        return await self.config()
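
    # A hedged usage sketch (not from the source): updating the mail configuration
    # through the middleware client; hostname and credentials are made-up values.
    #
    #     from middlewared.client import Client
    #
    #     with Client() as c:
    #         c.call('mail.update', {
    #             'fromemail': 'nas@example.com', 'fromname': 'TrueNAS',
    #             'outgoingserver': 'smtp.example.com', 'port': 587,
    #             'security': 'TLS', 'smtp': True,
    #             'user': 'nas@example.com', 'pass': 'secret',
    #         })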

    def __password_verify(self, password, schema, verrors=None):
        if not password:
            return
        if verrors is None:
            verrors = ValidationErrors()
        # FIXME: smtplib does not support non-ascii password yet
        # https://github.com/python/cpython/pull/8938
        try:
            password.encode('ascii')
        except UnicodeEncodeError:
            verrors.add(
                schema,
                'Only plain text characters (7-bit ASCII) are allowed in passwords. '
                'UTF or composed characters are not allowed.')
        return verrors

    @accepts(
        Dict(
            'mail_message',
            Str('subject', required=True),
            Str('text', max_length=None),
            Str('html', null=True, max_length=None),
            List('to', items=[Str('email')]),
            List('cc', items=[Str('email')]),
            Int('interval', null=True),
            Str('channel', null=True),
            Int('timeout', default=300),
            Bool('attachments', default=False),
            Bool('queue', default=True),
            Dict('extra_headers', additional_attrs=True),
            register=True,
        ), Ref('mail_update'))
    @returns(Bool('successfully_sent'))
    @job(pipes=['input'], check_pipes=False)
    def send(self, job, message, config):
        """
        Sends mail using configured mail settings.

        `text` will be formatted to HTML using Markdown and rendered using the default E-Mail template.
        You can supply your own HTML using `html`. If `html` is null, no HTML MIME part will be added to the E-Mail.

        If `attachments` is true, a list comprised of dicts with the following shape is required
        via HTTP upload:
          - headers(list)
            - name(str)
            - value(str)
            - params(dict)
          - content (str)

        [
         {
          "headers": [
           {
            "name": "Content-Transfer-Encoding",
            "value": "base64"
           },
           {
            "name": "Content-Type",
            "value": "application/octet-stream",
            "params": {
             "name": "test.txt"
            }
           }
          ],
          "content": "dGVzdAo="
         }
        ]
        """

        product_name = self.middleware.call_sync('system.product_name')

        gc = self.middleware.call_sync('datastore.config',
                                       'network.globalconfiguration')

        hostname = f'{gc["gc_hostname"]}.{gc["gc_domain"]}'

        message['subject'] = f'{product_name} {hostname}: {message["subject"]}'

        add_html = True
        if 'html' in message and message['html'] is None:
            message.pop('html')
            add_html = False

        if 'text' not in message:
            if 'html' not in message:
                verrors = ValidationErrors()
                verrors.add('mail_message.text',
                            'Text is required when HTML is not set')
                verrors.check()

            message['text'] = html2text.html2text(message['html'])

        if add_html and 'html' not in message:
            lookup = TemplateLookup(
                directories=[
                    os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 '../assets/templates')
                ],
                module_directory="/tmp/mako/templates")

            tmpl = lookup.get_template('mail.html')

            message['html'] = tmpl.render(
                body=html.escape(message['text']).replace('\n', '<br>\n'))

        return self.send_raw(job, message, config)
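
    # A hedged usage sketch (not from the source): sending a plain-text message as a
    # middleware job; `job=True` is assumed to block until the job completes, and the
    # addresses are made up.
    #
    #     from middlewared.client import Client
    #
    #     with Client() as c:
    #         c.call('mail.send', {
    #             'subject': 'Disk alert',
    #             'text': 'ada0 is failing',
    #             'to': ['admin@example.com'],
    #         }, job=True)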

    @accepts(Ref('mail_message'), Ref('mail_update'))
    @job(pipes=['input'], check_pipes=False)
    @private
    def send_raw(self, job, message, config):
        config = dict(self.middleware.call_sync('mail.config'), **config)

        if config['fromname']:
            from_addr = Header(config['fromname'], 'utf-8')
            try:
                config['fromemail'].encode('ascii')
            except UnicodeEncodeError:
                from_addr.append(f'<{config["fromemail"]}>', 'utf-8')
            else:
                from_addr.append(f'<{config["fromemail"]}>', 'ascii')
        else:
            try:
                config['fromemail'].encode('ascii')
            except UnicodeEncodeError:
                from_addr = Header(config['fromemail'], 'utf-8')
            else:
                from_addr = Header(config['fromemail'], 'ascii')

        interval = message.get('interval')
        if interval is None:
            interval = timedelta()
        else:
            interval = timedelta(seconds=interval)

        sw_name = self.middleware.call_sync('system.version').split('-', 1)[0]

        channel = message.get('channel')
        if not channel:
            channel = sw_name.lower()
        if interval > timedelta():
            channelfile = '/tmp/.msg.%s' % (channel)
            last_update = datetime.now() - interval
            try:
                last_update = datetime.fromtimestamp(
                    os.stat(channelfile).st_mtime)
            except OSError:
                pass
            timediff = datetime.now() - last_update
            if (timediff >= interval) or (timediff < timedelta()):
                # Make sure mtime is modified
                # We could use os.utime but this is simpler!
                with open(channelfile, 'w') as f:
                    f.write('!')
            else:
                raise CallError(
                    'This message was already sent in the given interval')

        verrors = self.__password_verify(config['pass'], 'mail-config.pass')
        if verrors:
            raise verrors
        to = message.get('to')
        if not to:
            to = [
                self.middleware.call_sync('user.query',
                                          [('username', '=', 'root')],
                                          {'get': True})['email']
            ]
            if not to[0]:
                raise CallError('Email address for root is not configured')

        if message.get('attachments'):
            job.check_pipe("input")

            def read_json():
                f = job.pipes.input.r
                data = b''
                i = 0
                while True:
                    read = f.read(1048576)  # 1MiB
                    if read == b'':
                        break
                    data += read
                    i += 1
                    if i > 50:
                        raise ValueError(
                            'Attachments bigger than 50MB not allowed yet')
                if data == b'':
                    return None
                return json.loads(data)

            attachments = read_json()
        else:
            attachments = None

        if 'html' in message or attachments:
            msg = MIMEMultipart()
            msg.preamble = 'This is a multi-part message in MIME format.'
            if 'html' in message:
                msg2 = MIMEMultipart('alternative')
                msg2.attach(
                    MIMEText(message['text'], 'plain', _charset='utf-8'))
                msg2.attach(MIMEText(message['html'], 'html',
                                     _charset='utf-8'))
                msg.attach(msg2)
            if attachments:
                for attachment in attachments:
                    m = Message()
                    m.set_payload(attachment['content'])
                    for header in attachment.get('headers'):
                        m.add_header(header['name'], header['value'],
                                     **(header.get('params') or {}))
                    msg.attach(m)
        else:
            msg = MIMEText(message['text'], _charset='utf-8')

        msg['Subject'] = message['subject']

        msg['From'] = from_addr
        msg['To'] = ', '.join(to)
        if message.get('cc'):
            msg['Cc'] = ', '.join(message.get('cc'))
        msg['Date'] = formatdate()

        local_hostname = self.middleware.call_sync('system.hostname')

        msg['Message-ID'] = "<%s-%s.%s@%s>" % (
            sw_name.lower(), datetime.utcnow().strftime("%Y%m%d.%H%M%S.%f"),
            base64.urlsafe_b64encode(os.urandom(3)).decode(), local_hostname)

        extra_headers = message.get('extra_headers') or {}
        for key, val in list(extra_headers.items()):
            # We already have "Content-Type: multipart/mixed" and setting "Content-Type: text/plain" like some scripts
            # do will break python e-mail module.
            if key.lower() == "content-type":
                continue

            if key in msg:
                msg.replace_header(key, val)
            else:
                msg[key] = val

        syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_MAIL)
        try:
            if config['oauth']:
                self.middleware.call_sync('mail.gmail_send', msg, config)
            else:
                server = self._get_smtp_server(config,
                                               message['timeout'],
                                               local_hostname=local_hostname)
                # NOTE: Don't do this.
                #
                # If smtplib.SMTP* tells you to run connect() first, it's because the
                # mailserver it tried connecting to via the outgoing server argument
                # was unreachable and it tried to connect to 'localhost' and barfed.
                # This is because FreeNAS doesn't run a full MTA.
                # else:
                #    server.connect()
                headers = '\n'.join([f'{k}: {v}' for k, v in msg._headers])
                syslog.syslog(f"sending mail to {', '.join(to)}\n{headers}")
                server.sendmail(from_addr.encode(), to, msg.as_string())
                server.quit()
        except Exception as e:
            # Don't spam syslog with these messages. They should only end up in the
            # test-email pane.
            # We are only interested in ValueError, not subclasses.
            if e.__class__ is ValueError:
                raise CallError(str(e))
            syslog.syslog(f'Failed to send email to {", ".join(to)}: {str(e)}')
            if isinstance(e, smtplib.SMTPAuthenticationError):
                raise CallError(
                    f'Authentication error ({e.smtp_code}): {e.smtp_error}',
                    errno.EAUTH if osc.IS_FREEBSD else errno.EPERM)
            self.logger.warning('Failed to send email: %s', str(e), exc_info=True)
            if message['queue']:
                with MailQueue() as mq:
                    mq.append(msg)
            raise CallError(f'Failed to send email: {e}')
        return True

    def _get_smtp_server(self, config, timeout=300, local_hostname=None):
        self.middleware.call_sync('network.general.will_perform_activity',
                                  'mail')

        if local_hostname is None:
            local_hostname = self.middleware.call_sync('system.hostname')

        if not config['outgoingserver'] or not config['port']:
            # See the NOTE in send_raw above.
            raise ValueError('you must provide an outgoing mailserver and mail'
                             ' server port when sending mail')
        if config['security'] == 'SSL':
            server = smtplib.SMTP_SSL(config['outgoingserver'],
                                      config['port'],
                                      timeout=timeout,
                                      local_hostname=local_hostname)
        else:
            server = smtplib.SMTP(config['outgoingserver'],
                                  config['port'],
                                  timeout=timeout,
                                  local_hostname=local_hostname)
            if config['security'] == 'TLS':
                server.starttls()
        if config['smtp']:
            server.login(config['user'], config['pass'])

        return server

    @periodic(600, run_on_start=False)
    @private
    def send_mail_queue(self):
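        # Retry every queued message; on success it is removed from the queue,
        # on failure its attempt counter is bumped and it is dropped once
        # MAX_ATTEMPTS is reached.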
        with MailQueue() as mq:
            for queue in list(mq.queue):
                try:
                    config = self.middleware.call_sync('mail.config')
                    if config['oauth']:
                        self.middleware.call_sync('mail.gmail_send',
                                                  queue.message, config)
                    else:
                        server = self._get_smtp_server(config)
                        server.sendmail(queue.message['From'].encode(),
                                        queue.message['To'].split(', '),
                                        queue.message.as_string())
                        server.quit()
                except Exception:
                    self.logger.debug('Sending message from queue failed',
                                      exc_info=True)
                    queue.attempts += 1
                    if queue.attempts >= mq.MAX_ATTEMPTS:
                        mq.queue.remove(queue)
                else:
                    mq.queue.remove(queue)
Example no. 25
class VCenterService(ConfigService):

    PRIVATE_GROUP_NAME = 'iXSystems'

    class Config:
        datastore = 'vcp.vcenterconfiguration'
        datastore_prefix = 'vc_'
        datastore_extend = 'vcenter.vcenter_extend'

    @private
    async def common_validation(self, data, schema_name):
        verrors = ValidationErrors()

        ip = data.get('ip')
        if ip:
            await resolve_hostname(self.middleware, verrors,
                                   f'{schema_name}.ip', ip)

        management_ip = data.get('management_ip')
        if management_ip and management_ip not in (
                await self.get_management_ip_choices()):
            verrors.add(f'{schema_name}.management_ip',
                        'Please select a valid IP for your TrueNAS system')

        action = data.get('action')
        if action and action != 'UNINSTALL':
            vcenteraux = await self.middleware.call('vcenteraux.config')
            general = await self.middleware.call('system.general.config')
            if not vcenteraux['enable_https'] and general['ui_httpsredirect']:
                verrors.add(f'{schema_name}.action',
                            'Please enable vCenter plugin over HTTPS')

        return verrors

    @private
    async def vcenter_extend(self, data):
        data['password'] = await self.middleware.call('notifier.pwenc_decrypt',
                                                      data['password'])
        data['port'] = int(data['port']) if data['port'] else 443  # Defaulting to 443
        return data

    @accepts(
        Dict(
            'vcenter_update_dict',
            Int('port', validators=[Port()]),
            Str('action',
                enum=['INSTALL', 'REPAIR', 'UNINSTALL', 'UPGRADE'],
                required=True),
            Str('management_ip'),
            Str('ip'),  # HOST IP
            Str('password', password=True),
            Str('username'),
        ))
    async def do_update(self, data):
        old = await self.config()
        new = old.copy()
        new.update(data)

        schema_name = 'vcenter_update'
        verrors = await self.common_validation(new, schema_name)
        if verrors:
            raise verrors

        action = new.pop('action')
        system_general = await self.middleware.call('system.general.config')
        ui_protocol = 'https' if system_general['ui_httpsredirect'] else 'http'
        ui_port = system_general['ui_port'] if ui_protocol != 'https' else system_general['ui_httpsport']
        fingerprint = await self.middleware.call(
            'certificate.get_host_certificates_thumbprint',
            new['management_ip'], new['port'])
        plugin_file_name = await self.middleware.run_in_thread(
            self.get_plugin_file_name)
        # TODO: URL will change once the plugin file's location is shifted
        management_addr = f'{ui_protocol}://{new["management_ip"]}:{ui_port}/legacy/static/{plugin_file_name}'

        install_dict = {
            'port': new['port'],
            'fingerprint': fingerprint,
            'management_ip': management_addr,
            'ip': new['ip'],
            'password': new['password'],
            'username': new['username']
        }

        if action == 'INSTALL':

            if new['installed']:
                verrors.add(f'{schema_name}.action',
                            'Plugin is already installed')
            else:

                for r_key in ('management_ip', 'ip', 'password', 'port',
                              'username'):
                    if not new[r_key]:
                        verrors.add(
                            f'{schema_name}.{r_key}',
                            'This field is required to install the plugin')

                if verrors:
                    raise verrors

                try:
                    await self.middleware.run_in_thread(
                        self.__install_vcenter_plugin, install_dict)
                except ValidationError as e:
                    verrors.add_validation_error(e)
                else:
                    new['version'] = await self.middleware.run_in_thread(
                        self.get_plugin_version)
                    new['installed'] = True

        elif action == 'REPAIR':

            if not new['installed']:
                verrors.add(
                    f'{schema_name}.action',
                    'Plugin is not installed. Please install it first')
            else:

                # From my understanding, REPAIR is called when the database reports that the
                # plugin is present but the system fails to recognize the plugin extension.

                try:
                    credential_dict = install_dict.copy()
                    credential_dict.pop('management_ip')
                    credential_dict.pop('fingerprint')

                    found_plugin = await self.middleware.run_in_thread(
                        self._find_plugin, credential_dict)
                    if found_plugin:
                        verrors.add(f'{schema_name}.action',
                                    'Plugin repair is not required')
                except ValidationError as e:
                    verrors.add_validation_error(e)
                else:

                    if verrors:
                        raise verrors

                    try:
                        repair_dict = install_dict.copy()
                        repair_dict['install_mode'] = 'REPAIR'
                        await self.middleware.run_in_thread(
                            self.__install_vcenter_plugin, repair_dict)
                    except ValidationError as e:
                        verrors.add_validation_error(e)

        elif action == 'UNINSTALL':

            if not new['installed']:
                verrors.add(f'{schema_name}.action',
                            'Plugin is not installed on the system')
            else:

                try:
                    uninstall_dict = install_dict.copy()
                    uninstall_dict.pop('management_ip')
                    uninstall_dict.pop('fingerprint')
                    await self.middleware.run_in_thread(
                        self.__uninstall_vcenter_plugin, uninstall_dict)
                except ValidationError as e:
                    verrors.add_validation_error(e)
                else:
                    new['installed'] = False
                    new['port'] = 443
                    for key in new:
                        # flushing existing object with empty values
                        if key not in ('installed', 'id', 'port'):
                            new[key] = ''

        else:

            if not new['installed']:
                verrors.add(f'{schema_name}.action', 'Plugin not installed')
            elif not (await self.is_update_available()):
                verrors.add(f'{schema_name}.action',
                            'No update is available for vCenter plugin')
            else:

                try:
                    await self.middleware.run_in_thread(
                        self.__upgrade_vcenter_plugin, install_dict)
                except ValidationError as e:
                    verrors.add_validation_error(e)
                else:
                    new['version'] = await self.middleware.run_in_thread(
                        self.get_plugin_version)

        if verrors:
            raise verrors

        new['password'] = await self.middleware.call('notifier.pwenc_encrypt',
                                                     new['password'])

        await self.middleware.call('datastore.update', self._config.datastore,
                                   new['id'], new,
                                   {'prefix': self._config.datastore_prefix})

        return await self.config()

    @private
    async def is_update_available(self):
        latest_version = await self.middleware.run_in_thread(
            self.get_plugin_version)
        current_version = (await self.config())['version']
        if current_version and parse_version(latest_version) > parse_version(current_version):
            return latest_version
        return None
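
    # A hedged aside (not from the source): parse_version gives a proper version
    # comparison here, e.g. parse_version('1.10') > parse_version('1.9') is True,
    # whereas the plain string comparison '1.10' > '1.9' is False.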

    @private
    async def plugin_root_path(self):
        return await self.middleware.call('notifier.gui_static_root')

    @private
    async def get_management_ip_choices(self):
        ip_list = await self.middleware.call('interfaces.ip_in_use',
                                             {'ipv4': True})

        return [ip_dict['address'] for ip_dict in ip_list]

    @private
    def get_plugin_file_name(self):
        # TODO: The path to the plugin should be moved over to middlewared from django
        root_path = self.middleware.call_sync('vcenter.plugin_root_path')
        return next(v for v in os.listdir(root_path)
                    if 'plugin' in v and '.zip' in v)

    @private
    def get_plugin_version(self):
        file_name = self.get_plugin_file_name()
        return file_name.split('_')[1]

    @private
    async def property_file_path(self):
        return os.path.join(
            (await self.middleware.call('notifier.gui_base_path')),
            'vcp/Extensionconfig.ini.dist')

    @private
    async def resource_folder_path(self):
        return os.path.join(
            (await self.middleware.call('notifier.gui_base_path')),
            'vcp/vcp_locales')

    @private
    def create_event_keyvalue_pairs(self):
        try:

            eri_list = []
            resource_folder_path = self.middleware.call_sync(
                'vcenter.resource_folder_path')
            for file in os.listdir(resource_folder_path):
                eri = vim.Extension.ResourceInfo()

                # Read locale file from vcp_locale
                eri.module = file.split("_")[0]
                with open(os.path.join(resource_folder_path, file),
                          'r') as locale_file:
                    for line in locale_file:
                        if len(line) > 2 and '=' in line:
                            if 'locale' in line:
                                eri.locale = line.split(
                                    '=')[1].lstrip().rstrip()
                            else:
                                prop = line.split('=')
                                key_val = vim.KeyValue()
                                key_val.key = prop[0].lstrip().rstrip()
                                key_val.value = prop[1].lstrip().rstrip()
                                eri.data.append(key_val)
                eri_list.append(eri)
            return eri_list
        except Exception as e:
            raise ValidationError('vcenter_update.create_event_keyvalue_pairs',
                                  f'Cannot read locales: {e}')

    @private
    def get_extension_key(self):
        cp = configparser.ConfigParser()
        cp.read(self.middleware.call_sync('vcenter.property_file_path'))
        return cp.get('RegisterParam', 'key')

    @accepts(
        Dict(
            'install_vcenter_plugin',
            Int('port', required=True),
            Str('fingerprint', required=True),
            Str('management_ip', required=True),
            Str('install_mode',
                enum=['NEW', 'REPAIR'],
                required=False,
                default='NEW'),
            Str('ip', required=True),  # HOST IP
            Str('password', password=True,
                required=True),  # Password should be decrypted
            Str('username', required=True),
            register=True))
    def __install_vcenter_plugin(self, data):

        encrypted_password = self.middleware.call_sync(
            'notifier.pwenc_encrypt', data['password'])

        update_zipfile_dict = data.copy()
        update_zipfile_dict.pop('management_ip')
        update_zipfile_dict.pop('fingerprint')
        update_zipfile_dict['password'] = encrypted_password
        update_zipfile_dict['plugin_version_old'] = 'null'
        update_zipfile_dict['plugin_version_new'] = self.get_plugin_version()
        self.__update_plugin_zipfile(update_zipfile_dict)

        data.pop('install_mode')

        try:
            ext = self.get_extension(data['management_ip'],
                                     data['fingerprint'])

            data.pop('fingerprint')
            data.pop('management_ip')
            si = self.__check_credentials(data)

            si.RetrieveServiceContent().extensionManager.RegisterExtension(ext)

        except vim.fault.NoPermission:
            raise ValidationError(
                'vcenter_update.username',
                'VCenter user has no permission to install the plugin')

    @accepts(
        Patch('install_vcenter_plugin',
              'uninstall_vcenter_plugin', ('rm', {
                  'name': 'fingerprint'
              }), ('rm', {
                  'name': 'install_mode'
              }), ('rm', {
                  'name': 'management_ip'
              }),
              register=True))
    def __uninstall_vcenter_plugin(self, data):
        try:
            extkey = self.get_extension_key()

            si = self.__check_credentials(data)
            si.RetrieveServiceContent().extensionManager.UnregisterExtension(
                extkey)

        except vim.fault.NoPermission:
            raise ValidationError(
                'vcenter_update.username',
                'VCenter user does not have necessary permission to uninstall the plugin'
            )

    @accepts(
        Patch('install_vcenter_plugin', 'upgrade_vcenter_plugin',
              ('rm', {
                  'name': 'install_mode'
              })))
    def __upgrade_vcenter_plugin(self, data):

        update_zipfile_dict = data.copy()
        update_zipfile_dict.pop('management_ip')
        update_zipfile_dict.pop('fingerprint')
        update_zipfile_dict['install_mode'] = 'UPGRADE'
        update_zipfile_dict['password'] = self.middleware.call_sync(
            'notifier.pwenc_encrypt', data['password'])
        update_zipfile_dict['plugin_version_old'] = str(
            (self.middleware.call_sync('vcenter.config'))['version'])
        update_zipfile_dict['plugin_version_new'] = self.middleware.call_sync(
            'vcenter.get_plugin_version')
        self.__update_plugin_zipfile(update_zipfile_dict)

        try:
            ext = self.get_extension(data['management_ip'],
                                     data['fingerprint'])

            data.pop('fingerprint')
            data.pop('management_ip')
            si = self.__check_credentials(data)

            si.RetrieveServiceContent().extensionManager.UpdateExtension(ext)

        except vim.fault.NoPermission:
            raise ValidationError(
                'vcenter_update.username',
                'VCenter user has no permission to upgrade the plugin')

    @accepts(Ref('uninstall_vcenter_plugin'))
    def _find_plugin(self, data):
        try:
            si = self.__check_credentials(data)

            extkey = self.get_extension_key()
            ext = si.RetrieveServiceContent().extensionManager.FindExtension(
                extkey)

            if ext is None:
                return False
            else:
                return f'TrueNAS System : {ext.client[0].url.split("/")[2]}'
        except vim.fault.NoPermission:
            raise ValidationError(
                'vcenter_update.username',
                'VCenter user has no permission to find the plugin on this system'
            )

    @accepts(Ref('uninstall_vcenter_plugin'))
    def __check_credentials(self, data):
        try:
            si = SmartConnect("https",
                              data['ip'],
                              data['port'],
                              data['username'],
                              data['password'],
                              sslContext=get_context_object())

            if si:
                return si

        except (socket.gaierror, TimeoutError):
            raise ValidationError(
                'vcenter_update.ip',
                'Provided vCenter Hostname/IP or port are not valid')
        except vim.fault.InvalidLogin:
            raise ValidationError(
                'vcenter_update.username',
                'Provided vCenter credentials are not valid ( username or password )'
            )
        except vim.fault.NoPermission:
            raise ValidationError(
                'vcenter_update.username',
                'vCenter user does not have permission to perform this operation'
            )
        except Exception as e:

            if 'not a vim server' in str(e).lower():
                # In case an IP is provided for a server which is not a VIM server - then Exception is raised with
                # following text
                # Exception: 10.XX.XX.XX:443 is not a VIM server

                raise ValidationError(
                    'vcenter_update.ip',
                    'Provided Hostname/IP is not a VIM server')

            else:
                raise e

    @private
    def get_extension(self, vcp_url, fingerprint):
        try:
            cp = configparser.ConfigParser()
            cp.read(self.middleware.call_sync('vcenter.property_file_path'))
            version = self.middleware.call_sync('vcenter.get_plugin_version')

            description = vim.Description()
            description.label = cp.get('RegisterParam', 'label')
            description.summary = cp.get('RegisterParam', 'description')

            ext = vim.Extension()
            ext.company = cp.get('RegisterParam', 'company')
            ext.version = version
            ext.key = cp.get('RegisterParam', 'key')
            ext.description = description
            ext.lastHeartbeatTime = datetime.now()

            server_info = vim.Extension.ServerInfo()
            server_info.serverThumbprint = fingerprint
            server_info.type = vcp_url.split(':')[0].upper()  # sysgui protocol
            server_info.url = vcp_url
            server_info.description = description
            server_info.company = cp.get('RegisterParam', 'company')
            server_info.adminEmail = ['ADMIN EMAIL']
            ext.server = [server_info]

            client = vim.Extension.ClientInfo()
            client.url = vcp_url
            client.company = cp.get('RegisterParam', 'company')
            client.version = version
            client.description = description
            client.type = "vsphere-client-serenity"
            ext.client = [client]

            event_info = []
            for e in cp.get('RegisterParam', 'events').split(","):
                ext_event_type_info = vim.Extension.EventTypeInfo()
                ext_event_type_info.eventID = e
                event_info.append(ext_event_type_info)

            task_info = []
            for t in cp.get('RegisterParam', 'tasks').split(","):
                ext_type_info = vim.Extension.TaskTypeInfo()
                ext_type_info.taskID = t
                task_info.append(ext_type_info)

            # Register custom privileges required for vcp RBAC
            priv_info = []
            for priv in cp.get('RegisterParam', 'auth').split(","):
                ext_type_info = vim.Extension.PrivilegeInfo()
                ext_type_info.privID = priv
                ext_type_info.privGroupName = self.PRIVATE_GROUP_NAME
                priv_info.append(ext_type_info)

            ext.taskList = task_info
            ext.eventList = event_info
            ext.privilegeList = priv_info

            resource_list = self.create_event_keyvalue_pairs()
            ext.resourceList = resource_list

            return ext
        except configparser.NoOptionError as e:
            raise ValidationError('vcenter_update.get_extension',
                                  f'Property Missing : {e}')

    @private
    def extract_zip(self, src_path, dest_path):
        if not os.path.exists(dest_path):
            os.makedirs(dest_path)
        with zipfile.ZipFile(src_path) as zip_f:
            zip_f.extractall(dest_path)

    @private
    def zipdir(self, src_path, dest_path):

        assert os.path.isdir(src_path)
        with closing(zipfile.ZipFile(dest_path, "w")) as z:

            for root, dirs, files in os.walk(src_path):
                for fn in files:
                    absfn = os.path.join(root, fn)
                    zfn = absfn[len(src_path) + len(os.sep):]
                    z.write(absfn, zfn)

    @private
    def remove_directory(self, dest_path):
        if os.path.exists(dest_path):
            shutil.rmtree(dest_path)

    @accepts(
        Dict(
            'update_vcp_plugin_zipfile',
            Int('port', required=True),
            Str('ip', required=True, validators=[IpAddress()]),
            Str('install_mode',
                enum=['NEW', 'REPAIR', 'UPGRADE'],
                required=True),
            Str('plugin_version_old', required=True),
            Str('plugin_version_new', required=True),
            Str('password', required=True,
                password=True),  # should be encrypted
            Str('username', required=True),
            register=True))
    def __update_plugin_zipfile(self, data):
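        # Flow: unpack the plugin zip, unpack the ixsystems-vcp-service.jar inside it,
        # write install.properties with the supplied parameters, re-create the jar,
        # re-create the plugin zip, then remove the working directories.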
        file_name = self.middleware.call_sync('vcenter.get_plugin_file_name')
        plugin_root_path = self.middleware.call_sync(
            'vcenter.plugin_root_path')

        self.extract_zip(os.path.join(plugin_root_path, file_name),
                         os.path.join(plugin_root_path, 'plugin'))
        self.extract_zip(
            os.path.join(plugin_root_path,
                         'plugin/plugins/ixsystems-vcp-service.jar'),
            os.path.join(plugin_root_path,
                         'plugin/plugins/ixsystems-vcp-service'))

        data['fpath'] = os.path.join(
            plugin_root_path,
            'plugin/plugins/ixsystems-vcp-service/META-INF/config/install.properties'
        )

        self.__create_property_file(data)
        self.zipdir(
            os.path.join(plugin_root_path,
                         'plugin/plugins/ixsystems-vcp-service'),
            os.path.join(plugin_root_path,
                         'plugin/plugins/ixsystems-vcp-service.jar'))
        self.remove_directory(
            os.path.join(plugin_root_path,
                         'plugin/plugins/ixsystems-vcp-service'))

        shutil.make_archive(os.path.join(plugin_root_path, file_name[0:-4]),
                            'zip', os.path.join(plugin_root_path, 'plugin'))

        self.remove_directory(os.path.join(plugin_root_path, 'plugin'))

    @accepts(
        Patch(
            'update_vcp_plugin_zipfile',
            '__create_property_file',
            ('add', {
                'name': 'fpath',
                'type': 'str'
            }),
        ))
    def __create_property_file(self, data):
        # Password encrypted using notifier.pwenc_encrypt

        config = configparser.ConfigParser()
        with open(data['fpath'], 'w') as config_file:
            config.add_section('installation_parameter')
            config.set('installation_parameter', 'ip', data['ip'])
            config.set('installation_parameter', 'username', data['username'])
            config.set('installation_parameter', 'port', str(data['port']))
            config.set('installation_parameter', 'password', data['password'])
            config.set('installation_parameter', 'install_mode',
                       data['install_mode'])
            config.set('installation_parameter', 'plugin_version_old',
                       data['plugin_version_old'])
            config.set('installation_parameter', 'plugin_version_new',
                       data['plugin_version_new'])
            config.write(config_file)
Example no. 26
class ReportingService(ConfigService):
    class Config:
        datastore = 'system.reporting'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__rrds = {}
        for name, klass in RRD_PLUGINS.items():
            self.__rrds[name] = klass(self.middleware)

    @accepts(
        Dict('reporting_update',
             Bool('cpu_in_percentage'),
             Str('graphite'),
             Int('graph_age', validators=[Range(min=1)]),
             Int('graph_points', validators=[Range(min=1)]),
             Bool('confirm_rrd_destroy'),
             update=True))
    async def do_update(self, data):
        """
        Configure Reporting Database settings.

        If `cpu_in_percentage` is `true`, collectd reports CPU usage in percentage instead of "jiffies".

        `graphite` specifies a destination hostname or IP for collectd data sent by the Graphite plugin.

        `graph_age` specifies the maximum age of stored graphs in months. `graph_points` is the number of points for
        each hourly, daily, weekly, etc. graph. Changing these requires destroying the current reporting database,
        so when these fields are changed, an additional `confirm_rrd_destroy: true` flag must be present.

        .. examples(websocket)::

          Update reporting settings

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "reporting.update",
                "params": [{
                    "cpu_in_percentage": false,
                    "graphite": "",
                }]
            }

          Recreate reporting database with new settings

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "reporting.update",
                "params": [{
                    "graph_age": 12,
                    "graph_points": 1200,
                    "confirm_rrd_destroy": true,
                }]
            }
        """

        confirm_rrd_destroy = data.pop('confirm_rrd_destroy', False)

        old = await self.config()

        new = copy.deepcopy(old)
        new.update(data)

        verrors = ValidationErrors()

        destroy_database = False
        for k in ['graph_age', 'graph_points']:
            if old[k] != new[k]:
                destroy_database = True

                if not confirm_rrd_destroy:
                    verrors.add(
                        f'reporting_update.{k}',
                        _('Changing this option requires destroying the reporting database. This action must be '
                          'confirmed by setting confirm_rrd_destroy flag'),
                    )

        if verrors:
            raise verrors

        await self.middleware.call('datastore.update', self._config.datastore,
                                   old['id'], new,
                                   {'prefix': self._config.datastore_prefix})

        if destroy_database:
            await self.middleware.call('service.stop', 'collectd')
            await self.middleware.call('service.stop', 'rrdcached')
            await run('sh',
                      '-c',
                      'rm -rfx /var/db/collectd/rrd/*',
                      check=False)
            await self.middleware.call('reporting.setup')
            await self.middleware.call('service.start', 'rrdcached')

        await self.middleware.call('service.restart', 'collectd')

        return await self.config()

    @private
    def setup(self):
        systemdatasetconfig = self.middleware.call_sync('systemdataset.config')
        if not systemdatasetconfig['path']:
            self.middleware.logger.error('System dataset is not mounted')
            return False

        rrd_mount = f'{systemdatasetconfig["path"]}/rrd-{systemdatasetconfig["uuid"]}'
        if not os.path.exists(rrd_mount):
            self.middleware.logger.error(
                f'{rrd_mount} does not exist or is not a directory')
            return False

        # Ensure that collectd working path is a symlink to system dataset
        pwd = '/var/db/collectd/rrd'
        if os.path.exists(pwd) and (not os.path.isdir(pwd)
                                    or not os.path.islink(pwd)):
            shutil.move(pwd, f'{pwd}.{time.strftime("%Y%m%d%H%M%S")}')
        if not os.path.exists(pwd):
            os.symlink(rrd_mount, pwd)

        # Migrate legacy RAMDisk
        persist_file = '/data/rrd_dir.tar.bz2'
        if os.path.isfile(persist_file):
            with tarfile.open(persist_file) as tar:
                if 'collectd/rrd' in tar.getnames():
                    tar.extractall(pwd, get_members(tar, 'collectd/rrd/'))

            os.unlink(persist_file)

        hostname = self.middleware.call_sync('system.info')['hostname']
        if not hostname:
            hostname = self.middleware.call_sync(
                'network.configuration.config')['hostname_local']

        # Migrate from old version, where `hostname` was a real directory and `localhost` was a symlink.
        # Skip the case where `hostname` is "localhost", so symlink was not (and is not) needed.
        if (hostname != 'localhost'
                and os.path.isdir(os.path.join(pwd, hostname))
                and not os.path.islink(os.path.join(pwd, hostname))):
            if os.path.exists(os.path.join(pwd, 'localhost')):
                if os.path.islink(os.path.join(pwd, 'localhost')):
                    os.unlink(os.path.join(pwd, 'localhost'))
                else:
                    # This should not happen, but just in case
                    shutil.move(
                        os.path.join(pwd, 'localhost'),
                        os.path.join(
                            pwd,
                            f'localhost.bak.{time.strftime("%Y%m%d%H%M%S")}'))
            shutil.move(os.path.join(pwd, hostname),
                        os.path.join(pwd, 'localhost'))

        # Remove all directories except "localhost" and its backups (that may be erroneously created by
        # running collectd before this script)
        to_remove_dirs = [
            os.path.join(pwd, d) for d in os.listdir(pwd)
            if not d.startswith('localhost')
            and os.path.isdir(os.path.join(pwd, d))
        ]
        for r_dir in to_remove_dirs:
            subprocess.run(['rm', '-rfx', r_dir])

        # Remove all symlinks (that are stale if hostname was changed).
        to_remove_symlinks = [
            os.path.join(pwd, l) for l in os.listdir(pwd)
            if os.path.islink(os.path.join(pwd, l))
        ]
        for r_symlink in to_remove_symlinks:
            os.unlink(r_symlink)

        # Create "localhost" directory if it does not exist
        if not os.path.exists(os.path.join(pwd, 'localhost')):
            os.makedirs(os.path.join(pwd, 'localhost'))

        # Create "${hostname}" -> "localhost" symlink if necessary
        if hostname != 'localhost':
            os.symlink(os.path.join(pwd, 'localhost'),
                       os.path.join(pwd, hostname))

        # Return True to indicate that the collectd setup completed successfully
        return True

    @filterable
    def graphs(self, filters, options):
        return filter_list(
            [i.__getstate__() for i in self.__rrds.values() if i.has_data()],
            filters, options)

    def __rquery_to_start_end(self, query):
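        # Illustrative mapping (only the first letter of 'unit' is used):
        #   {'unit': 'HOUR', 'page': 0} -> ('end-1h', 'now')
        #   {'unit': 'DAY', 'page': 2}  -> ('end-3d', 'now-2d')
        #   {'start': '-1h', 'end': 'now'} is passed through unchanged.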
        unit = query.get('unit')
        if unit:
            verrors = ValidationErrors()
            for i in ('start', 'end'):
                if i in query:
                    verrors.add(
                        f'reporting_query.{i}',
                        f'{i!r} should only be used if "unit" attribute is not provided.',
                    )
            verrors.check()
        else:
            if 'start' not in query:
                unit = 'HOUR'
            else:
                starttime = query['start']
                endtime = query.get('end') or 'now'

        if unit:
            unit = unit[0].lower()
            page = query['page']
            starttime = f'end-{page + 1}{unit}'
            if not page:
                endtime = 'now'
            else:
                endtime = f'now-{page}{unit}'
        return starttime, endtime

    @accepts(
        List('graphs',
             items=[
                 Dict(
                     'graph',
                     Str('name', required=True),
                     Str('identifier', default=None, null=True),
                 ),
             ],
             empty=False),
        Dict(
            'reporting_query',
            Str('unit', enum=['HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR']),
            Int('page', default=0),
            Str('start', empty=False),
            Str('end', empty=False),
            Bool('aggregate', default=True),
            register=True,
        ))
    def get_data(self, graphs, query):
        """
        Get reporting data for given graphs.

        List of possible graphs can be retrieved using `reporting.graphs` call.

        For the time period of the graph either `unit` and `page` OR `start` and `end` should be
        used, not both.

        `aggregate` will return aggregate available data for each graph (e.g. min, max, mean).

        .. examples(websocket)::

          Get graph data of "nfsstat" from the last hour.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "reporting.get_data",
                "params": [
                    [{"name": "nfsstat"}],
                    {"unit": "HOURLY"},
                ]
            }

        """
        starttime, endtime = self.__rquery_to_start_end(query)
        rv = []
        for i in graphs:
            try:
                rrd = self.__rrds[i['name']]
            except KeyError:
                raise CallError(f'Graph {i["name"]!r} not found.',
                                errno.ENOENT)
            rv.append(
                rrd.export(i['identifier'],
                           starttime,
                           endtime,
                           aggregate=query['aggregate']))
        return rv

    @private
    @accepts(Ref('reporting_query'))
    def get_all(self, query):
        starttime, endtime = self.__rquery_to_start_end(query)
        rv = []
        for rrd in self.__rrds.values():
            idents = rrd.get_identifiers()
            if idents is None:
                idents = [None]
            for ident in idents:
                rv.append(
                    rrd.export(ident,
                               starttime,
                               endtime,
                               aggregate=query['aggregate']))
        return rv
Example No. 27
0
def filterable(fn):
    fn._filterable = True
    return accepts(Ref('query-filters'), Ref('query-options'))(fn)
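
# A minimal usage sketch (hypothetical service; Service and filter_list are
# the same helpers used by the other examples here): the decorator attaches
# the shared 'query-filters'/'query-options' schemas, so the body only needs
# to apply them.
class ExampleService(Service):
    @filterable
    async def query(self, filters, options):
        items = [{'name': 'a'}, {'name': 'b'}]  # stand-in data
        return filter_list(items, filters, options)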
Example No. 28
0
class DatastoreService(Service, FilterMixin, SchemaMixin):

    class Config:
        private = True

    @accepts(
        Str('name'),
        List('query-filters', register=True),
        Dict(
            'query-options',
            Bool('relationships', default=True),
            Str('extend', default=None, null=True),
            Str('extend_context', default=None, null=True),
            Str('prefix', default=None, null=True),
            Dict('extra', additional_attrs=True),
            List('order_by'),
            List('select'),
            Bool('count', default=False),
            Bool('get', default=False),
            Int('offset', default=0),
            Int('limit', default=0),
            Bool('force_sql_filters', default=False),
            register=True,
        ),
    )
    async def query(self, name, filters, options):
        """
        Query for items in a given collection `name`.

        `filters` is a list in which each entry can be in one of the following formats:

            entry: simple_filter | conjunction
            simple_filter: '[' attribute_name, OPERATOR, value ']'
            conjunction: '[' CONJUNCTION, '[' simple_filter (',' simple_filter)* ']]'

            OPERATOR: ('=' | '!=' | '>' | '>=' | '<' | '<=' | '~' | 'in' | 'nin')
            CONJUNCTION: 'OR'

        e.g.

        `['OR', [ ['username', '=', 'root' ], ['uid', '=', 0] ] ]`

        `[ ['username', '=', 'root' ] ]`

        .. examples(websocket)::

          Querying for username "root" and returning a single item:

            :::javascript
            {
              "id": "d51da71b-bb48-4b8b-a8f7-6046fcc892b4",
              "msg": "method",
              "method": "datastore.query",
              "params": ["account.bsdusers", [ ["username", "=", "root" ] ], {"get": true}]
            }
        """
        table = self._get_table(name)

        # We do not want to make changes to original options
        # which might happen with "prefix"
        options = options.copy()

        aliases = {}
        if options['count']:
            qs = select([func.count(self._get_pk(table))])
        else:
            columns = list(table.c)
            from_ = table
            if options['relationships']:
                aliases = self._get_queryset_joins(table)
                for foreign_key, alias in aliases.items():
                    columns.extend(list(alias.c))
                    from_ = from_.outerjoin(alias, alias.c[foreign_key.column.name] == foreign_key.parent)

            qs = select(columns).select_from(from_)

        prefix = options['prefix']

        if filters:
            qs = qs.where(and_(*self._filters_to_queryset(filters, table, prefix, aliases)))

        if options['count']:
            return (await self.middleware.call("datastore.fetchall", qs))[0][0]

        order_by = options['order_by']
        if order_by:
            # Do not change original order_by
            order_by = order_by[:]
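            # Each entry may combine a 'nulls_first:'/'nulls_last:' prefix
            # with a leading '-' for descending order, e.g. 'nulls_last:-uid'.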
            for i, order in enumerate(order_by):
                if order.startswith('nulls_first:'):
                    wrapper = nullsfirst
                    order = order[len('nulls_first:'):]
                elif order.startswith('nulls_last:'):
                    wrapper = nullslast
                    order = order[len('nulls_last:'):]
                else:
                    wrapper = lambda x: x  # noqa

                if order.startswith('-'):
                    order_by[i] = self._get_col(table, order[1:], prefix).desc()
                else:
                    order_by[i] = self._get_col(table, order, prefix)

                order_by[i] = wrapper(order_by[i])

            # FIXME: remove this after switching to SQLite 3.30
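            # Older SQLite lacks NULLS FIRST/LAST support, so emulate it by
            # sorting on an "is (not) NULL" expression before the column.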
            changed = True
            while changed:
                changed = False
                for i, v in enumerate(order_by):
                    if isinstance(v, UnaryExpression) and v.modifier in (nullsfirst_op, nullslast_op):
                        if isinstance(v.element, UnaryExpression) and v.element.modifier == desc_op:
                            root_element = v.element.element
                        else:
                            root_element = v.element

                        order_by = order_by[:i] + [
                            {
                                nullsfirst_op: root_element != None,  # noqa
                                nullslast_op: root_element == None,  # noqa
                            }[v.modifier],
                            v.element,
                        ] + order_by[i + 1:]
                        changed = True
                        break

            qs = qs.order_by(*order_by)

        if options['offset']:
            qs = qs.offset(options['offset'])

        if options['limit']:
            qs = qs.limit(options['limit'])

        result = await self.middleware.call("datastore.fetchall", qs)

        relationships = [{} for row in result]
        if options['relationships']:
            # This will only fetch many-to-many relationships for primary table, not for joins, but that's enough
            relationships = await self._fetch_many_to_many(table, result)

        result = await self._queryset_serialize(
            result,
            table, aliases, relationships, options['extend'], options['extend_context'], options['prefix'],
            options['select'], options['extra'],
        )

        if options['get']:
            try:
                return result[0]
            except IndexError:
                raise MatchNotFound() from None

        return result

    @accepts(Str('name'), Ref('query-options'))
    async def config(self, name, options):
        """
        Get configuration settings object for a given `name`.

        This is a shortcut for `query(name, {"get": true})`.
        """
        options['get'] = True
        return await self.query(name, [], options)

    def _get_queryset_joins(self, table):
        result = {}
        for column in table.c:
            if column.foreign_keys:
                if len(column.foreign_keys) > 1:
                    raise RuntimeError('Multiple foreign keys are not supported')

                foreign_key = list(column.foreign_keys)[0]
                alias = foreign_key.column.table.alias(foreign_key.name)

                result[foreign_key] = alias
                if foreign_key.column.table != (table.original if isinstance(table, Alias) else table):
                    result.update(self._get_queryset_joins(alias))

        return result

    async def _queryset_serialize(
        self, qs, table, aliases, relationships, extend, extend_context, field_prefix, select, extra_options,
    ):
        rows = []
        for i, row in enumerate(qs):
            rows.append(self._serialize(row, table, aliases, relationships[i], field_prefix))

        if extend_context:
            extend_context_value = await self.middleware.call(extend_context, rows, extra_options)
        else:
            extend_context_value = None

        return [
            await self._extend(data, extend, extend_context, extend_context_value, select)
            for data in rows
        ]

    def _serialize(self, obj, table, aliases, relationships, field_prefix):
        data = self._serialize_row(obj, table, aliases)
        data.update(relationships)

        return {self._strip_prefix(k, field_prefix): v for k, v in data.items()}

    async def _extend(self, data, extend, extend_context, extend_context_value, select):
        if extend:
            if extend_context:
                data = await self.middleware.call(extend, data, extend_context_value)
            else:
                data = await self.middleware.call(extend, data)

        if not select:
            return data
        else:
            return {k: v for k, v in data.items() if k in select}

    def _strip_prefix(self, k, field_prefix):
        return k[len(field_prefix):] if field_prefix and k.startswith(field_prefix) else k

    def _serialize_row(self, obj, table, aliases):
        data = {}

        for column in table.c:
            # aliases == {} when we are loading without relationships, let's leave fk values in that case
            if not column.foreign_keys or not aliases:
                data[str(column.name)] = obj[column]

        for foreign_key, alias in aliases.items():
            column = foreign_key.parent

            if column.table != table:
                continue

            if not column.name.endswith('_id'):
                raise RuntimeError('Foreign key column must end with _id')

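            # e.g. a 'group_id' foreign-key column is serialized as a nested
            # 'group' object (or None when the reference is unset).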
            data[column.name[:-3]] = (
                self._serialize_row(obj, alias, aliases)
                if obj[column] is not None and obj[self._get_pk(alias)] is not None
                else None
            )

        return data

    async def _fetch_many_to_many(self, table, rows):
        pk = self._get_pk(table)
        pk_values = [row[pk] for row in rows]

        relationships = [{} for row in rows]
        if pk_values:
            for relationship_name, relationship in self._get_relationships(table).items():
                # We can only join by single primary key
                assert len(relationship.synchronize_pairs) == 1
                assert len(relationship.secondary_synchronize_pairs) == 1

                local_pk, relationship_local_pk = relationship.synchronize_pairs[0]
                remote_pk, relationship_remote_pk = relationship.secondary_synchronize_pairs[0]

                assert local_pk == pk

                all_children_ids = set()
                pk_to_children_ids = defaultdict(set)
                for connection in await self.query(
                    relationship.secondary.name.replace('_', '.', 1),
                    [[relationship_local_pk.name, 'in', pk_values]],
                    {'relationships': False}
                ):
                    child_id = connection[relationship_remote_pk.name]

                    all_children_ids.add(child_id)
                    pk_to_children_ids[connection[relationship_local_pk.name]].add(child_id)

                all_children = {}
                if all_children_ids:
                    for child in await self.query(
                        relationship.target.name.replace('_', '.', 1),
                        [[remote_pk.name, 'in', all_children_ids]],
                    ):
                        all_children[child[remote_pk.name]] = child

                for i, row in enumerate(rows):
                    relationships[i][relationship_name] = [
                        all_children[child_id]
                        for child_id in pk_to_children_ids[row[pk]]
                        if child_id in all_children
                    ]

        return relationships
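
# An illustrative query (hypothetical table and values) showing the
# 'order_by' prefixes handled above: a leading '-' sorts descending, and
# 'nulls_first:'/'nulls_last:' control NULL placement, e.g.:
#
#     await middleware.call(
#         'datastore.query', 'account.bsdusers',
#         [['username', '=', 'root']],
#         {'order_by': ['nulls_last:-uid'], 'get': True},
#     )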
Example No. 29
0
class DSCache(Service):
    class Config:
        private = True

    @accepts(
        Str('directory_service',
            required=True,
            enum=["ACTIVEDIRECTORY", "LDAP"]),
        Str('idtype', enum=['USER', 'GROUP'], required=True),
        Dict('cache_entry', additional_attrs=True),
    )
    async def insert(self, ds, idtype, entry):
        if idtype == "GROUP":
            id_key = "gid"
            name_key = "name"
        else:
            id_key = "uid"
            name_key = "username"

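        # Store each entry under two keys so it can be fetched either by
        # numeric id (e.g. 'ID_1000') or by name (e.g. 'NAME_alice').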
        ops = [{
            "action": "SET",
            "key": f'ID_{entry[id_key]}',
            "val": entry
        }, {
            "action": "SET",
            "key": f'NAME_{entry[name_key]}',
            "val": entry
        }]
        await self.middleware.call('tdb.batch_ops', {
            "name": f'{ds.lower()}_{idtype.lower()}',
            "ops": ops
        })
        return True

    @accepts(
        Str('directory_service',
            required=True,
            enum=["ACTIVEDIRECTORY", "LDAP"]),
        Dict(
            'principal_info',
            Str('idtype', enum=['USER', 'GROUP']),
            Str('who'),
            Int('id'),
        ), Dict('options', Bool('synthesize', default=False)))
    async def retrieve(self, ds, data, options):
        who_str = data.get('who')
        who_id = data.get('id')
        if who_str is None and who_id is None:
            raise CallError("`who` or `id` entry is required to uniquely "
                            "identify the entry to be retrieved.")

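        # Key layout mirrors insert(): 'NAME_<who>' when a name is given,
        # otherwise 'ID_<id>'.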
        tdb_name = f'{ds.lower()}_{data["idtype"].lower()}'
        prefix = "NAME" if who_str else "ID"
        tdb_key = f'{prefix}_{who_str if who_str else who_id}'

        try:
            entry = await self.middleware.call("tdb.fetch", {
                "name": tdb_name,
                "key": tdb_key
            })
        except MatchNotFound:
            entry = None

        if not entry and options['synthesize']:
            """
            if cache lacks entry, create one from passwd / grp info,
            insert into cache and return synthesized value.
            get_uncached_* will raise KeyError if NSS lookup fails.
            """
            try:
                if data['idtype'] == 'USER':
                    pwdobj = await self.middleware.call(
                        'dscache.get_uncached_user', who_str, who_id)
                    entry = await self.middleware.call('idmap.synthetic_user',
                                                       ds.lower(), pwdobj)
                else:
                    grpobj = await self.middleware.call(
                        'dscache.get_uncached_group', who_str, who_id)
                    entry = await self.middleware.call('idmap.synthetic_group',
                                                       ds.lower(), grpobj)
                await self.insert(ds, data['idtype'], entry)
            except KeyError:
                entry = None

        elif not entry:
            raise KeyError(who_str if who_str else who_id)

        return entry

    @accepts(
        Str('ds', required=True, enum=["ACTIVEDIRECTORY", "LDAP"]),
        Str('idtype', required=True, enum=["USER", "GROUP"]),
    )
    async def entries(self, ds, idtype):
        entries = await self.middleware.call(
            'tdb.entries', {
                'name': f'{ds.lower()}_{idtype.lower()}',
                'query-filters': [('key', '^', 'ID')]
            })
        return [x['val'] for x in entries]

    def get_uncached_user(self, username=None, uid=None):
        """
        Returns dictionary containing pwd_struct data for
        the specified user or uid. Will raise an exception
        if the user does not exist. This method is appropriate
        for user validation.
        """
        if username:
            u = pwd.getpwnam(username)
        elif uid is not None:
            u = pwd.getpwuid(uid)
        else:
            return {}
        return {
            'pw_name': u.pw_name,
            'pw_uid': u.pw_uid,
            'pw_gid': u.pw_gid,
            'pw_gecos': u.pw_gecos,
            'pw_dir': u.pw_dir,
            'pw_shell': u.pw_shell
        }

    def get_uncached_group(self, groupname=None, gid=None):
        """
        Returns dictionary containing grp_struct data for
        the specified group or gid. Will raise an exception
        if the group does not exist. This method is appropriate
        for group validation.
        """
        if groupname:
            g = grp.getgrnam(groupname)
        elif gid is not None:
            g = grp.getgrgid(gid)
        else:
            return {}
        return {'gr_name': g.gr_name, 'gr_gid': g.gr_gid, 'gr_mem': g.gr_mem}

    @accepts(
        Str('objtype', enum=['USERS', 'GROUPS'], default='USERS'),
        Ref('query-filters'),
        Ref('query-options'),
    )
    async def query(self, objtype, filters, options):
        """
        Query User / Group cache with `query-filters` and `query-options`.

        `objtype`: 'USERS' or 'GROUPS'
        """
        res = []
        ds_state = await self.middleware.call('directoryservices.get_state')
        enabled_ds = None

        is_name_check = bool(filters and len(filters) == 1
                             and filters[0][0] in ['username', 'name'])
        is_id_check = bool(filters and len(filters) == 1
                           and filters[0][0] in ['uid', 'gid'])

        res.extend((await self.middleware.call(f'{objtype.lower()[:-1]}.query',
                                               filters, options)))

        for dstype, state in ds_state.items():
            if state != 'DISABLED':
                enabled_ds = dstype
                break

        if not enabled_ds:
            return res

        if is_name_check and filters[0][1] == '=':
            # exists in local sqlite database, return results
            if res:
                return res

            entry = await self.retrieve(enabled_ds.upper(), {
                'idtype': objtype[:-1],
                'who': filters[0][2],
            }, {'synthesize': True})
            return [entry] if entry else []

        if is_id_check and filters[0][1] == '=':
            # exists in local sqlite database, return results
            if res:
                return res

            entry = await self.retrieve(enabled_ds.upper(), {
                'idtype': objtype[:-1],
                'id': filters[0][2],
            }, {'synthesize': True})
            return [entry] if entry else []

        entries = await self.entries(enabled_ds.upper(), objtype[:-1])
        entries_by_id = sorted(entries, key=lambda i: i['id'])
        res.extend(filter_list(entries_by_id, filters, options))
        return res

    @job(lock="dscache_refresh")
    async def refresh(self, job):
        """
        This is called from a cronjob every 24 hours and when a user clicks on the
        UI button to 'rebuild directory service cache'.
        """
        for ds in ['activedirectory', 'ldap']:
            await self.middleware.call('tdb.wipe', {'name': f'{ds}_user'})
            await self.middleware.call('tdb.wipe', {'name': f'{ds}_group'})

            ds_state = await self.middleware.call(f'{ds}.get_state')

            if ds_state == 'HEALTHY':
                await job.wrap(
                    await self.middleware.call(f'{ds}.fill_cache', True))
            elif ds_state != 'DISABLED':
                self.logger.debug(
                    'Unable to refresh [%s] cache, state is: %s',
                    ds, ds_state)
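
# A minimal sketch of exercising the cache from a middleware client
# (hypothetical names; note dscache is private in this example, so this
# assumes a privileged local session):
from middlewared.client import Client

with Client() as c:
    users = c.call('dscache.query', 'USERS', [['username', '=', 'jdoe']], {})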
Example No. 30
0
class TDBService(Service, TDBMixin, SchemaMixin):

    handles = {}

    class Config:
        private = True

    @private
    def validate_tdb_options(self, name, options):
        version = options['service_version']
        versioned = version['major'] > 0 or version['minor'] > 0
        if versioned and options['tdb_type'] == 'BASIC':
            raise CallError(
                f"{name}: BASIC tdb types do not support versioning",
                errno.EINVAL)

        if not options['cluster']:
            return

        healthy = self.middleware.call_sync('ctdb.general.healthy')
        if healthy:
            return

        raise CallError(f"{name}: ctdb must be enabled and healthy.",
                        errno.ENXIO)

    @private
    def _ctdb_get_dbid(self, name, options):
        dbmap = self.middleware.call_sync("ctdb.general.getdbmap",
                                          [("name", "=", f'{name}.tdb')])
        if dbmap:
            return dbmap[0]['dbid']

        cmd = ["ctdb", "attach", name, "persistent"]
        attach = run(cmd, check=False)
        if attach.returncode != 0:
            raise CallError("Failed to attach backend: %s",
                            attach.stderr.decode())

        dbmap = self.middleware.call_sync("ctdb.general.getdbmap",
                                          [("name", "=", f'{name}.tdb')])
        if not dbmap:
            raise CallError(f'{name}: failed to attach to database')

        return dbmap[0]['dbid']

    @private
    def get_connection(self, name, options):
        self.validate_tdb_options(name, options)

        existing = self.handles.get(name)

        if existing:
            if options != existing['options']:
                raise CallError(
                    f'{name}: Internal Error - tdb options mismatch',
                    errno.EINVAL)

            if existing['handle']:
                return existing['handle']

        else:
            self.handles[name] = {'name': name, 'options': options.copy()}

        if options['cluster']:
            dbid = self._ctdb_get_dbid(name, options)
            handle = self._get_handle(name, dbid, options)
        else:
            handle = self._get_handle(name, None, options)

        self.handles[name].update({'handle': handle})
        return handle

    @accepts(
        Dict(
            'tdb-store', Str('name', required=True), Str('key', required=True),
            Dict('value', required=True, additional_attrs=True),
            Dict('tdb-options',
                 Str('backend',
                     enum=['PERSISTENT', 'VOLATILE', 'CUSTOM'],
                     default='PERSISTENT'),
                 Str('tdb_type',
                     enum=['BASIC', 'CRUD', 'CONFIG'],
                     default='BASIC'),
                 Str('data_type',
                     enum=['JSON', 'STRING', 'BYTES'],
                     default='JSON'),
                 Bool('cluster', default=False),
                 Int('read_backoff', default=0),
                 Dict('service_version', Int('major', default=0),
                      Int('minor', default=0)),
                 register=True)))
    def store(self, data):
        handle = self.get_connection(data['name'], data['tdb-options'])

        if data['tdb-options']['data_type'] == 'JSON':
            tdb_val = json.dumps(data['value'])
        elif data['tdb-options']['data_type'] == 'STRING':
            tdb_val = data['value']['payload']
        elif data['tdb-options']['data_type'] == 'BYTES':
            tdb_val = b64decode(data['value']['payload'])

        with closing(handle) as tdb_handle:
            self._set(tdb_handle, data['key'], tdb_val)

    @accepts(
        Dict(
            'tdb-fetch',
            Str('name', required=True),
            Str('key', required=True),
            Ref('tdb-options'),
        ))
    def fetch(self, data):
        handle = self.get_connection(data['name'], data['tdb-options'])

        with closing(handle) as tdb_handle:
            tdb_val = self._get(tdb_handle, data['key'])

        if tdb_val is None:
            raise MatchNotFound(data['key'])

        if data['tdb-options']['data_type'] == 'JSON':
            data = json.loads(tdb_val)
        elif data['tdb-options']['data_type'] == 'STRING':
            data = tdb_val
        elif data['tdb-options']['data_type'] == 'BYTES':
            data = b64encode(tdb_val).decode()

        return data
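
    # Illustrative store/fetch round-trip with JSON data (hypothetical names):
    #   tdb.store({'name': 'demo', 'key': 'k1', 'value': {'a': 1}})
    #   tdb.fetch({'name': 'demo', 'key': 'k1'})  -> {'a': 1}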

    @accepts(
        Dict(
            'tdb-remove',
            Str('name', required=True),
            Str('key', required=True),
            Ref('tdb-options'),
        ))
    def remove(self, data):
        handle = self.get_connection(data['name'], data['tdb-options'])
        with closing(handle) as tdb_handle:
            self._rem(tdb_handle, data['key'])

    @accepts(
        Dict(
            'tdb-entries',
            Str('name', required=True),
            Ref('query-filters'),
            Ref('query-options'),
            Ref('tdb-options'),
        ))
    def entries(self, data):
        def append_entries(tdb_key, tdb_data, state):
            if tdb_data is None:
                return True

            if state['data_type'] == 'JSON':
                entry = json.loads(tdb_data)
            elif state['data_type'] == 'STRING':
                entry = tdb_data
            elif state['data_type'] == 'BYTES':
                entry = b64encode(tdb_data).decode()

            state['output'].append({"key": tdb_key, "val": entry})
            return True

        state = {'output': [], 'data_type': data['tdb-options']['data_type']}
        handle = self.get_connection(data['name'], data['tdb-options'])

        with closing(handle) as tdb_handle:
            self._traverse(tdb_handle, append_entries, state)

        return filter_list(state['output'], data['query-filters'],
                           data['query-options'])

    @accepts(
        Dict(
            'tdb-batch-ops',
            Str('name', required=True),
            List('ops', required=True),
            Ref('tdb-options'),
        ))
    def batch_ops(self, data):
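        # Each op follows the shape built in DSCache.insert above, e.g.
        # {"action": "SET", "key": "ID_1000", "val": {...}}.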
        handle = self.get_connection(data['name'], data['tdb-options'])

        with closing(handle) as tdb_handle:
            data = self._batch_ops(tdb_handle, data['ops'])

        return data

    @accepts(Dict(
        'tdb-wipe',
        Str('name', required=True),
        Ref('tdb-options'),
    ))
    def wipe(self, data):
        handle = self.get_connection(data['name'], data['tdb-options'])
        with closing(handle) as tdb_handle:
            self._wipe(tdb_handle)

    @accepts(
        Dict(
            'tdb-config-config',
            Str('name', required=True),
            Ref('tdb-options'),
        ))
    def config(self, data):
        handle = self.get_connection(data['name'], data['tdb-options'])
        with closing(handle) as tdb_handle:
            data = self._config_config(tdb_handle)

        return data

    @accepts(
        Dict(
            'tdb-config-update',
            Str('name', required=True),
            Dict('payload', additional_attrs=True),
            Ref('tdb-options'),
        ))
    def config_update(self, data):
        handle = self.get_connection(data['name'], data['tdb-options'])
        with closing(handle) as tdb_handle:
            self._config_update(tdb_handle, data['payload'])

        return

    @accepts(
        Dict(
            'tdb-crud-create',
            Str('name', required=True),
            Dict('payload', additional_attrs=True),
            Ref('tdb-options'),
        ))
    def create(self, data):
        handle = self.get_connection(data['name'], data['tdb-options'])
        with closing(handle) as tdb_handle:
            id = self._create(tdb_handle, data['payload'])

        return id

    @accepts(
        Dict(
            'tdb-crud-query',
            Str('name', required=True),
            Ref('query-filters'),
            Ref('query-options'),
            Ref('tdb-options'),
        ))
    def query(self, data):
        handle = self.get_connection(data['name'], data['tdb-options'])
        with closing(handle) as tdb_handle:
            data = self._query(tdb_handle, data['query-filters'],
                               data['query-options'])

        return data

    @accepts(
        Dict(
            'tdb-crud-update',
            Str('name', required=True),
            Int('id', required=True),
            Dict('payload', additional_attrs=True),
            Ref('tdb-options'),
        ))
    def update(self, data):
        handle = self.get_connection(data['name'], data['tdb-options'])
        with closing(handle) as tdb_handle:
            id = self._update(tdb_handle, data['id'], data['payload'])

        return id

    @accepts(
        Dict(
            'tdb-crud-delete',
            Str('name', required=True),
            Int('id', required=True),
            Ref('tdb-options'),
        ))
    def delete(self, data):
        handle = self.get_connection(data['name'], data['tdb-options'])
        with closing(handle) as tdb_handle:
            self._delete(tdb_handle, data['id'])

        return

    @accepts(
        Dict(
            'tdb-upgrade',
            Str('name', required=True),
            Ref('tdb-options'),
        ))
    def apply_upgrades(self, data):
        raise NotImplementedError

    @accepts()
    def show_handles(self):
        ret = {h['name']: h['options'] for h in self.handles.values()}
        return ret

    @private
    async def setup(self):
        for p in TDBPath:
            if p is TDBPath.CUSTOM:
                continue

            os.makedirs(p.value, mode=0o700, exist_ok=True)