Example #1
class BootEnvService(CRUDService):
    class Config:
        datastore_primary_key_type = 'string'
        cli_namespace = 'system.bootenv'

    BE_TOOL = 'zectl' if osc.IS_LINUX else 'beadm'
    ENTRY = Dict('bootenv_entry',
                 Str('id'),
                 Str('realname'),
                 Str('name'),
                 Str('active'),
                 Bool('activated'),
                 Bool('can_activate'),
                 Str('mountpoint'),
                 Str('space'),
                 Datetime('created'),
                 Bool('keep'),
                 Int('rawspace'),
                 additional_attrs=True)

    @filterable
    def query(self, filters, options):
        """
        Query all Boot Environments with `query-filters` and `query-options`.
        """
        results = []

        cp = subprocess.run([self.BE_TOOL, 'list', '-H'],
                            capture_output=True,
                            text=True)
        datasets_origins = [
            d['properties']['origin']['parsed']
            for d in self.middleware.call_sync(
                'zfs.dataset.query', [], {'extra': {
                    'properties': ['origin']
                }})
        ]
        boot_pool = self.middleware.call_sync('boot.pool_name')
        for line in cp.stdout.strip().split('\n'):
            fields = line.split('\t')
            name = fields[0]
            if len(fields) > 5 and fields[5] != '-':
                name = fields[5]
            be = {
                'id': name,
                'realname': fields[0],
                'name': name,
                'active': fields[1],
                'activated': 'n' in fields[1].lower(),
                'can_activate': False,
                'mountpoint': fields[2],
                'space': None if osc.IS_LINUX else fields[3],
                'created': datetime.strptime(fields[3 if osc.IS_LINUX else 4],
                                             '%Y-%m-%d %H:%M'),
                'keep': False,
                'rawspace': None,
            }

            ds = self.middleware.call_sync('zfs.dataset.query', [
                ('id', '=', rf'{boot_pool}/ROOT/{fields[0]}'),
            ], {'extra': {
                'snapshots': True
            }})
            if ds:
                ds = ds[0]
                snapshot = None
                origin = ds['properties']['origin']['parsed']
                if '@' in origin:
                    snapshot = self.middleware.call_sync(
                        'zfs.snapshot.query', [('id', '=', origin)])
                    if snapshot:
                        snapshot = snapshot[0]
                if f'{self.BE_TOOL}:keep' in ds['properties']:
                    if ds['properties'][f'{self.BE_TOOL}:keep'][
                            'value'] == 'True':
                        be['keep'] = True
                    elif ds['properties'][f'{self.BE_TOOL}:keep'][
                            'value'] == 'False':
                        be['keep'] = False

                # When a BE is deleted, the following actions happen:
                # 1) Its descendants (if any) are promoted once
                # 2) The BE is deleted
                # 3) Filesystems dependent on the BE's origin are promoted
                # 4) The origin is deleted
                #
                # We would like to find out how much space will be freed when a BE is removed.
                # We classify a BE as being one of two types:
                # 1) BE without descendants
                # 2) BE with descendants
                #
                # For (1), the space freed is the "usedbydataset" property plus the space freed by its "origin".
                # For (2), the space freed is the same in principle, but it cannot be actively determined because
                # all descendants are promoted once for this BE, and the origin of the current BE would only be
                # known after the last descendant has been promoted. So we ignore the origin for now and rely only
                # on the space the BE is currently consuming as a best-effort prediction.
                # There is also the "usedbysnapshots" property; for that we retrieve all snapshots of the dataset,
                # and any of them that do not have a cloned dataset will also be freed when we delete
                # this dataset. We also factor in the space consumed by children.

                be['rawspace'] = ds['properties']['usedbydataset'][
                    'parsed'] + ds['properties']['usedbychildren']['parsed']

                children = False
                for snap in ds['snapshots']:
                    if snap['name'] not in datasets_origins:
                        # zfs.snapshot.query returns a list; take the single match.
                        be['rawspace'] += self.middleware.call_sync(
                            'zfs.snapshot.query', [['id', '=', snap['name']]],
                            {'extra': {
                                'properties': ['used']
                            }})[0]['properties']['used']['parsed']
                    else:
                        children = True

                if snapshot and not children:
                    # This indicates the current BE is a leaf and it is safe to add the BE's origin
                    # space to the space freed when it is deleted.
                    be['rawspace'] += snapshot['properties']['used']['parsed']

                if be['rawspace'] < 1024:
                    be['space'] = f'{be["rawspace"]}B'
                elif 1024 <= be['rawspace'] < 1048576:
                    be['space'] = f'{be["rawspace"] / 1024}K'
                elif 1048576 <= be['rawspace'] < 1073741824:
                    be['space'] = f'{be["rawspace"] / 1048576}M'
                elif 1073741824 <= be['rawspace'] < 1099511627776:
                    be['space'] = f'{be["rawspace"] / 1073741824}G'
                elif 1099511627776 <= be['rawspace'] < 1125899906842624:
                    be['space'] = f'{be["rawspace"] / 1099511627776}T'
                elif 1125899906842624 <= be['rawspace'] < 1152921504606846976:
                    be['space'] = f'{be["rawspace"] / 1125899906842624}P'
                elif 1152921504606846976 <= be['rawspace'] < 1180591620717411303424:
                    be['space'] = f'{be["rawspace"] / 1152921504606846976}E'
                else:
                    be['space'] = f'{be["rawspace"] / 1180591620717411303424}Z'

                be['space'] = f'{round(float(be["space"][:-1]), 2)}{be["space"][-1]}'

                if osc.IS_FREEBSD:
                    be['can_activate'] = 'truenas:kernel_version' not in ds[
                        'properties']
                if osc.IS_LINUX:
                    be['can_activate'] = ('truenas:kernel_version'
                                          in ds['properties']
                                          or 'truenas:12' in ds['properties'])

            results.append(be)
        return filter_list(results, filters, options)

    @item_method
    @accepts(Str('id'))
    @returns(Bool('successfully_activated'))
    def activate(self, oid):
        """
        Activates boot environment `id`.
        """
        be = self.middleware.call_sync('bootenv.query', [['id', '=', oid]],
                                       {'get': True})
        if not be['can_activate']:
            raise CallError('This BE cannot be activated')

        try:
            subprocess.run([self.BE_TOOL, 'activate', oid],
                           capture_output=True,
                           text=True,
                           check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to activate BE: {cpe.stdout.strip()}')
        else:
            return True

    @item_method
    @accepts(Str('id'), Dict(
        'attributes',
        Bool('keep', default=False),
    ))
    @returns(Bool('successfully_set_attribute'))
    async def set_attribute(self, oid, attrs):
        """
        Sets attributes for boot environment `id`.

        Currently only the `keep` attribute is allowed.
        """
        boot_pool = await self.middleware.call('boot.pool_name')
        boot_env = await self.get_instance(oid)
        dsname = f'{boot_pool}/ROOT/{boot_env["realname"]}'
        ds = await self.middleware.call('zfs.dataset.query',
                                        [('id', '=', dsname)])
        if not ds:
            raise CallError(f'BE {oid!r} does not exist.', errno.ENOENT)
        await self.middleware.call(
            'zfs.dataset.update', dsname, {
                'properties': {
                    f'{self.BE_TOOL}:keep': {
                        'value': str(attrs['keep'])
                    }
                },
            })
        return True

    @accepts(
        Dict(
            'bootenv_create',
            Str('name', required=True, validators=[Match(RE_BE_NAME)]),
            Str('source'),
        ))
    @returns(Str('bootenv_name'))
    async def do_create(self, data):
        """
        Create a new boot environment using `name`.

        If the new boot environment should be a clone of another boot environment, pass `source`;
        a new boot environment named `name` is then created by cloning `source`.

        Ensure that `name` and `source` are valid boot environment names.
        """
        verrors = ValidationErrors()
        await self._clean_be_name(verrors, 'bootenv_create', data['name'])
        verrors.check()

        args = [self.BE_TOOL, 'create']
        source = data.get('source')
        if source:
            args += [
                '-e',
                os.path.join(await self.middleware.call('boot.pool_name'),
                             'ROOT', source) if osc.IS_LINUX else source
            ]
        args.append(data['name'])
        try:
            await run(args, encoding='utf8', check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to create boot environment: {cpe.stdout}')
        return data['name']

    @accepts(Str('id'),
             Dict(
                 'bootenv_update',
                 Str('name', required=True, validators=[Match(RE_BE_NAME)]),
             ))
    @returns(Str('bootenv_name'))
    async def do_update(self, oid, data):
        """
        Update `id` boot environment name with a new provided valid `name`.
        """
        await self._get_instance(oid)

        verrors = ValidationErrors()
        await self._clean_be_name(verrors, 'bootenv_update', data['name'])
        verrors.check()

        try:
            await run(self.BE_TOOL,
                      'rename',
                      oid,
                      data['name'],
                      encoding='utf8',
                      check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to update boot environment: {cpe.stdout}')
        return data['name']

    async def _clean_be_name(self, verrors, schema, name):
        beadm_names = (await (await Popen(
            f"{self.BE_TOOL} list -H | awk '{{print ${1 if osc.IS_LINUX else 7}}}'",
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )).communicate())[0].decode().split('\n')
        if name in filter(None, beadm_names):
            verrors.add(f'{schema}.name', f'The name "{name}" already exists',
                        errno.EEXIST)

    @accepts(Str('id'))
    @job(lock=lambda args: f'bootenv_delete_{args[0]}')
    async def do_delete(self, job, oid):
        """
        Delete `id` boot environment. This removes the clone from the system.
        """
        be = await self._get_instance(oid)
        try:
            await run(self.BE_TOOL,
                      'destroy',
                      '-F',
                      be['id'],
                      encoding='utf8',
                      check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to delete boot environment: {cpe.stdout}')
        return True
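
A minimal usage sketch, assuming the service above is registered under the `bootenv` namespace and the TrueNAS middleware client is importable:

from middlewared.client import Client

# List boot environments, then activate one; the BE name 'default' is illustrative.
with Client() as c:
    for be in c.call('bootenv.query'):
        print(be['name'], be['active'], be['space'])
    c.call('bootenv.activate', 'default')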
Example #2
class IdmapDomainService(CRUDService):
    class Config:
        datastore = 'directoryservice.idmap_domain'
        datastore_prefix = 'idmap_domain_'
        namespace = 'idmap'
        datastore_extend = f'{namespace}.idmap_extend'

    @private
    async def idmap_extend(self, data):
        if data.get('idmap_backend'):
            data['idmap_backend'] = data['idmap_backend'].upper()

        opt_enums = ['ssl', 'linked_service']
        if data.get('options'):
            for i in opt_enums:
                if data['options'].get(i):
                    data['options'][i] = data['options'][i].upper()

        return data

    @private
    async def idmap_compress(self, data):
        opt_enums = ['ssl', 'linked_service']
        if data.get('options'):
            for i in opt_enums:
                if data['options'].get(i):
                    data['options'][i] = data['options'][i].lower()

        data['idmap_backend'] = data['idmap_backend'].lower()

        return data

    @private
    async def get_next_idmap_range(self):
        """
        Increment the next high range by 100,000,000 ids. This number has
        to accommodate the highest available rid value for a domain.
        Configured idmap ranges _must_ not overlap.
        """
        domains = await self.query()
        sorted_idmaps = sorted(domains,
                               key=lambda domain: domain['range_high'])
        low_range = sorted_idmaps[-1]['range_high'] + 1
        high_range = sorted_idmaps[-1]['range_high'] + 100000000
        return (low_range, high_range)

    @private
    async def remove_winbind_idmap_tdb(self):
        sysdataset = (await
                      self.middleware.call('systemdataset.config'))['basename']
        ts = str(datetime.datetime.now(datetime.timezone.utc).timestamp())[:10]
        await self.middleware.call('zfs.snapshot.create', {
            'dataset': f'{sysdataset}/samba4',
            'name': f'wbc-{ts}'
        })
        try:
            os.remove('/var/db/system/samba4/winbindd_idmap.tdb')

        except FileNotFoundError:
            self.logger.trace(
                "winbindd_idmap.tdb does not exist. Skipping removal.")

        except Exception:
            self.logger.debug("Failed to remove winbindd_idmap.tdb.",
                              exc_info=True)

    @private
    async def domain_info(self, domain):
        ret = {}

        if domain == 'DS_TYPE_ACTIVEDIRECTORY':
            domain = (await self.middleware.call('smb.config'))['workgroup']

        wbinfo = await run(['wbinfo', '-D', domain], check=False)
        if wbinfo.returncode != 0:
            raise CallError(f'Failed to get domain info for {domain}: '
                            f'{wbinfo.stderr.decode().strip()}')

        for entry in wbinfo.stdout.splitlines():
            kv = entry.decode().split(':')
            ret.update({kv[0].strip(): kv[1].strip()})

        return ret

    @private
    async def get_sssd_low_range(self,
                                 domain,
                                 sssd_config=None,
                                 seed=0xdeadbeef):
        """
        This is a best-effort attempt at SSSD compatibility. It allocates the low
        range for the initial slice in the SSSD environment. The SSSD allocation algorithm
        is non-deterministic. The domain SID string is converted to a 32-bit hashed value
        using the murmurhash3 algorithm.

        The modulus of this value with the total number of available slices is used to
        pick the slice. This slice number is then used to calculate the low range for
        RID 0. With the default settings in SSSD this will be deterministic as long as
        the domain has less than 200,000 RIDs.
        """
        sid = (await self.domain_info(domain))['SID']
        sssd_config = {} if not sssd_config else sssd_config
        range_size = sssd_config.get('range_size', 200000)
        range_low = sssd_config.get('range_low', 10001)
        range_max = sssd_config.get('range_max', 2000200000)
        max_slices = int((range_max - range_low) / range_size)

        data = bytearray(sid.encode())
        datalen = len(data)
        hash = seed
        data_bytes = data

        c1 = 0xcc9e2d51
        c2 = 0x1b873593
        r1 = 15
        r2 = 13
        n = 0xe6546b64

        while datalen >= 4:
            k = int.from_bytes(data_bytes[:4], byteorder='little') & 0xFFFFFFFF
            self.logger.debug('%d', k)
            data_bytes = data_bytes[4:]
            datalen = datalen - 4
            k = (k * c1) & 0xFFFFFFFF
            k = (k << r1 | k >> 32 - r1) & 0xFFFFFFFF
            k = (k * c2) & 0xFFFFFFFF
            hash ^= k
            hash = (hash << r2 | hash >> 32 - r2) & 0xFFFFFFFF
            hash = (hash * 5 + n) & 0xFFFFFFFF

        if datalen > 0:
            k = 0
            if datalen >= 3:
                k = k | data_bytes[2] << 16
            if datalen >= 2:
                k = k | data_bytes[1] << 8
            if datalen >= 1:
                k = k | data_bytes[0]
                k = (k * c1) & 0xFFFFFFFF
                k = (k << r1 | k >> 32 - r1) & 0xFFFFFFFF
                k = (k * c2) & 0xFFFFFFFF
                hash ^= k

        hash = (hash ^ len(data)) & 0xFFFFFFFF
        hash ^= hash >> 16
        hash = (hash * 0x85ebca6b) & 0xFFFFFFFF
        hash ^= hash >> 13
        hash = (hash * 0xc2b2ae35) & 0xFFFFFFFF
        hash ^= hash >> 16

        return (hash % max_slices) * range_size + range_size

    @accepts()
    @job(lock='clear_idmap_cache')
    async def clear_idmap_cache(self, job):
        """
        Stop samba, remove the winbindd_cache.tdb file, start samba, flush samba's cache.
        This should be performed after finalizing idmap changes.
        """
        await self.middleware.call('service.stop', 'cifs')

        try:
            os.remove('/var/db/system/samba4/winbindd_cache.tdb')

        except FileNotFoundError:
            self.logger.debug(
                "Failed to remove winbindd_cache.tdb. File not found.")

        except Exception:
            self.logger.debug("Failed to remove winbindd_cache.tdb.",
                              exc_info=True)

        await self.middleware.call('etc.generate', 'smb')
        await self.middleware.call('service.start', 'cifs')
        gencache_flush = await run(['net', 'cache', 'flush'], check=False)
        if gencache_flush.returncode != 0:
            raise CallError(
                f'Attempt to flush gencache failed with error: {gencache_flush.stderr.decode().strip()}'
            )

    @private
    async def autodiscover_trusted_domains(self):
        smb = await self.middleware.call('smb.config')

        ad_idmap_backend = (await self.query(
            [('name', '=', 'DS_TYPE_ACTIVEDIRECTORY')],
            {'get': True}))['idmap_backend']
        if ad_idmap_backend == IdmapBackend.AUTORID.name:
            self.logger.trace(
                'Skipping auto-generation of trusted domains due to AutoRID being enabled.'
            )
            return

        wbinfo = await run(['wbinfo', '-m', '--verbose'], check=False)
        if wbinfo.returncode != 0:
            raise CallError(
                f'wbinfo -m failed with error: {wbinfo.stderr.decode().strip()}'
            )

        for entry in wbinfo.stdout.decode().splitlines():
            c = entry.split()
            range_low, range_high = await self.get_next_idmap_range()
            if len(c) == 6 and c[0] != smb['workgroup']:
                await self.middleware.call(
                    'idmap.create', {
                        'name': c[0],
                        'dns_domain_name': c[1],
                        'range_low': range_low,
                        'range_high': range_high,
                        'idmap_backend': 'RID'
                    })

    @accepts()
    async def backend_options(self):
        """
        This returns full information about idmap backend options. Not all
        `options` are valid for every backend.
        """
        return {x.name: x.value for x in IdmapBackend}

    @accepts(
        Str('idmap_backend', enum=[x.name for x in IdmapBackend]), )
    async def options_choices(self, backend):
        """
        Returns a list of supported keys for the specified idmap backend.
        """
        return IdmapBackend[backend].supported_keys()

    @accepts()
    async def backend_choices(self):
        """
        Returns array of valid idmap backend choices per directory service.
        """
        return IdmapBackend.ds_choices()

    @private
    async def validate(self, schema_name, data, verrors):
        if data['name'] in [
                DSType.DS_TYPE_LDAP.name, DSType.DS_TYPE_DEFAULT_DOMAIN.name
        ]:
            if data['idmap_backend'] not in (await
                                             self.backend_choices())['LDAP']:
                verrors.add(
                    f'{schema_name}.idmap_backend',
                    f'idmap backend [{data["idmap_backend"]}] is not appropriate '
                    f'for the system domain type {data["name"]}')

        if data['range_high'] < data['range_low']:
            """
            If we don't exit at this point further range() operations will raise an IndexError.
            """
            verrors.add(
                f'{schema_name}.range_low',
                'Idmap high range must be greater than idmap low range')
            return

        configured_domains = await self.query()
        ldap_enabled = False if await self.middleware.call(
            'ldap.get_state') == 'DISABLED' else True
        ad_enabled = False if await self.middleware.call(
            'activedirectory.get_state') == 'DISABLED' else True
        new_range = range(data['range_low'], data['range_high'])
        idmap_backend = data.get('idmap_backend')
        for i in configured_domains:
            # Do not generate validation error comparing to oneself.
            if i['name'] == data['name']:
                continue

            # Do not generate validation errors for overlapping with a disabled DS.
            if not ldap_enabled and i['name'] == 'DS_TYPE_LDAP':
                continue

            if not ad_enabled and i['name'] == 'DS_TYPE_ACTIVEDIRECTORY':
                continue

            # Idmap settings under Services->SMB are ignored when autorid is enabled.
            if idmap_backend == IdmapBackend.AUTORID.name and i[
                    'name'] == 'DS_TYPE_DEFAULT_DOMAIN':
                continue

            # Overlap between ranges defined for 'ad' backend are permitted.
            if idmap_backend == IdmapBackend.AD.name and i[
                    'idmap_backend'] == IdmapBackend.AD.name:
                continue

            existing_range = range(i['range_low'], i['range_high'])
            if range(max(existing_range[0], new_range[0]),
                     min(existing_range[-1], new_range[-1]) + 1):
                verrors.add(
                    f'{schema_name}.range_low',
                    'new idmap range conflicts with existing range for domain '
                    f'[{i["name"]}].')

    @private
    async def validate_options(self,
                               schema_name,
                               data,
                               verrors,
                               check=['MISSING', 'EXTRA']):
        supported_keys = set(
            IdmapBackend[data['idmap_backend']].supported_keys())
        required_keys = set(
            IdmapBackend[data['idmap_backend']].required_keys())
        provided_keys = set([str(x) for x in data['options'].keys()])

        missing_keys = required_keys - provided_keys
        extra_keys = provided_keys - supported_keys

        if 'MISSING' in check:
            for k in missing_keys:
                verrors.add(
                    f'{schema_name}.options.{k}',
                    f'[{k}] is a required parameter for the [{data["idmap_backend"]}] idmap backend.'
                )

        if 'EXTRA' in check:
            for k in extra_keys:
                verrors.add(
                    f'{schema_name}.options.{k}',
                    f'[{k}] is not a valid parameter for the [{data["idmap_backend"]}] idmap backend.'
                )

    @private
    async def prune_keys(self, data):
        supported_keys = set(
            IdmapBackend[data['idmap_backend']].supported_keys())
        provided_keys = set([str(x) for x in data['options'].keys()])

        for k in (provided_keys - supported_keys):
            data['options'].pop(k)

    @accepts(
        Dict('idmap_domain_create',
             Str('name', required=True),
             Str('dns_domain_name'),
             Int('range_low',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Int('range_high',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Str('idmap_backend', enum=[x.name for x in IdmapBackend]),
             Int('certificate', null=True),
             Dict(
                 'options',
                 Str('schema_mode', enum=['RFC2307', 'SFU', 'SFU20']),
                 Bool('unix_primary_group'),
                 Bool('unix_nss_info'),
                 Int('rangesize',
                     validators=[Range(min=10000, max=1000000000)]),
                 Bool('readonly'),
                 Bool('ignore_builtin'),
                 Str('ldap_base_dn'),
                 Str('ldap_user_dn'),
                 Str('ldap_user_dn_password', private=True),
                 Str('ldap_url'),
                 Str('ssl', enum=[x.value for x in SSL]),
                 Str('linked_service', enum=['LOCAL_ACCOUNT', 'LDAP', 'NIS']),
                 Str('ldap_server'),
                 Bool('ldap_realm'),
                 Str('bind_path_user'),
                 Str('bind_path_group'),
                 Bool('user_cn'),
                 Str('cn_realm'),
                 Str('ldap_domain'),
                 Str('ldap_url'),
                 Bool('sssd_compat'),
             ),
             register=True))
    async def do_create(self, data):
        """
        Create a new IDMAP domain. These domains must be unique. This table
        will be automatically populated after joining an Active Directory domain
        if "allow trusted domains" is set to True in the AD service configuration.
        There are three default system domains: DS_TYPE_ACTIVEDIRECTORY, DS_TYPE_LDAP, DS_TYPE_DEFAULT_DOMAIN.
        The system domains correspond with the idmap settings under Active Directory, LDAP, and SMB
        respectively.

        `name` is the pre-Windows 2000 domain name.

        `dns_domain_name` is the DNS name of the domain.

        `idmap_backend` provides a plugin interface for Winbind to use varying
        backends to store SID/uid/gid mapping tables. The correct setting
        depends on the environment in which the NAS is deployed.

        `range_low` and `range_high` specify the UID and GID range for which this backend is authoritative.

        `certificate_id` references the certificate ID of the SSL certificate to use for certificate-based
        authentication to a remote LDAP server. This parameter is not supported for all idmap backends as some
        backends will generate SID to ID mappings algorithmically without causing network traffic.

        `options` are additional parameters that are backend-dependent:

        `AD` idmap backend options:
        `unix_primary_group` If True, the primary group membership is fetched from the LDAP attributes (gidNumber).
        If False, the primary group membership is calculated via the "primaryGroupID" LDAP attribute.

        `unix_nss_info` if True winbind will retrieve the login shell and home directory from the LDAP attributes.
        If False or if the AD LDAP entry lacks the SFU attributes the smb4.conf parameters `template shell` and `template homedir` are used.

        `schema_mode` Defines the schema that idmap_ad should use when querying Active Directory regarding user and group information.
        This can be either the RFC2307 schema support included in Windows 2003 R2 or the Service for Unix (SFU) schema.
        For SFU 3.0 or 3.5 please choose "SFU", for SFU 2.0 please choose "SFU20". The behavior of primary group membership is
        controlled by the unix_primary_group option.

        `AUTORID` idmap backend options:
        `readonly` sets the module to read-only mode. No new ranges will be allocated and new mappings
        will not be created in the idmap pool.

        `ignore_builtin` ignores mapping requests for the BUILTIN domain.

        `LDAP` idmap backend options:
        `ldap_base_dn` defines the directory base suffix to use for SID/uid/gid mapping entries.

        `ldap_user_dn` defines the user DN to be used for authentication.

        `ldap_url` specifies the LDAP server to use for SID/uid/gid map entries.

        `ssl` specifies whether to encrypt the LDAP transport for the idmap backend.

        `NSS` idmap backend options:
        `linked_service` specifies the auxiliary directory service ID provider.

        `RFC2307` idmap backend options:
        `domain` specifies the domain for which the idmap backend is being created. Numeric id, short-form
        domain name, or long-form DNS domain name of the domain may be specified. Entry must be entered as
        it appears in `idmap.domain`.

        `range_low` and `range_high` specify the UID and GID range for which this backend is authoritative.

        `ldap_server` defines the type of LDAP server to use. This can either be an LDAP server provided
        by the Active Directory Domain (ad) or a stand-alone LDAP server.

        `bind_path_user` specifies the search base where user objects can be found in the LDAP server.

        `bind_path_group` specifies the search base where group objects can be found in the LDAP server.

        `user_cn` queries the cn attribute instead of the uid attribute for the user name in LDAP.

        `cn_realm` appends @realm to cn for groups (and users if user_cn is set) in LDAP queries.

        `ldap_domain` when using the LDAP server in the Active Directory server, this allows one to
        specify the domain in which to access the Active Directory server. This allows using trust relationships
        while keeping all RFC 2307 records in one place. This parameter is optional, the default is to access
        the AD server in the current domain to query LDAP records.

        `ldap_url` when using a stand-alone LDAP server, this parameter specifies the LDAP URL for accessing the LDAP server.

        `ldap_user_dn` defines the user DN to be used for authentication.

        `ldap_user_dn_password` is the password to be used for LDAP authentication.

        `ldap_realm` defines the realm to use in the user and group names. This is only required when using cn_realm together with
        a stand-alone LDAP server.

        `RID` backend options:
        `sssd_compat` generate idmap low range based on same algorithm that SSSD uses by default.
        """
        verrors = ValidationErrors()
        if data['name'] in [x['name'] for x in await self.query()]:
            verrors.add('idmap_domain_create.name',
                        'Domain names must be unique.')

        if data['options'].get('sssd_compat'):
            if await self.middleware.call('activedirectory.get_state'
                                          ) != 'HEALTHY':
                verrors.add(
                    'idmap_domain_create.options',
                    'AD service must be enabled and started to '
                    'generate an SSSD-compatible id range')
                verrors.check()

            data['range_low'] = await self.get_sssd_low_range(data['name'])
            data['range_high'] = data['range_low'] + 100000000

        await self.validate('idmap_domain_create', data, verrors)
        await self.validate_options('idmap_domain_create', data, verrors)
        if data.get('certificate_id') and not data['options'].get('ssl'):
            verrors.add(
                'idmap_domain_create.certificate_id',
                f'The {data["idmap_backend"]} idmap backend does not '
                'generate LDAP traffic. Certificates do not apply.')
        verrors.check()

        if data['options'].get('ldap_user_dn_password'):
            try:
                DSType[data["name"]]
                domain = (await
                          self.middleware.call("smb.config"))['workgroup']
            except KeyError:
                domain = data["name"]

            secret = data['options'].pop('ldap_user_dn_password')

            await self.middleware.call("directoryservices.set_ldap_secret",
                                       domain, secret)
            await self.middleware.call("directoryservices.backup_secrets")

        final_options = IdmapBackend[data['idmap_backend']].defaults()
        final_options.update(data['options'])
        data['options'] = final_options
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("idmap_domain_create", "idmap_domain_update",
                   ("attr", {
                       "update": True
                   })))
    async def do_update(self, id, data):
        """
        Update a domain by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        if data.get('idmap_backend'
                    ) and data['idmap_backend'] != old['idmap_backend']:
            """
            Remove options from previous backend because they are almost certainly
            not valid for the new backend.
            """
            new['options'] = {}

        new.update(data)
        tmp = data.copy()
        verrors = ValidationErrors()
        if old['name'] in [x.name
                           for x in DSType] and old['name'] != new['name']:
            verrors.add(
                'idmap_domain_update.name',
                f'Changing name of default domain {old["name"]} is not permitted'
            )

        if new['options'].get(
                'sssd_compat') and not old['options'].get('sssd_compat'):
            if await self.middleware.call('activedirectory.get_state'
                                          ) != 'HEALTHY':
                verrors.add(
                    'idmap_domain_update.options',
                    'AD service must be enabled and started to '
                    'generate an SSSD-compatible id range')
                verrors.check()

            new['range_low'] = await self.get_sssd_low_range(new['name'])
            new['range_high'] = new['range_low'] + 100000000

        if new['idmap_backend'] == 'AUTORID' and new[
                'name'] != 'DS_TYPE_ACTIVEDIRECTORY':
            verrors.add(
                "idmap_domain_update.idmap_backend",
                "AUTORID is only permitted for the default idmap backend for "
                "the active directory directory service (DS_TYPE_ACTIVEDIRECTORY)."
            )

        await self.validate('idmap_domain_update', new, verrors)
        await self.validate_options('idmap_domain_update', new, verrors,
                                    ['MISSING'])
        tmp['idmap_backend'] = new['idmap_backend']
        if data.get('options'):
            await self.validate_options('idmap_domain_update', tmp, verrors,
                                        ['EXTRA'])

        if data.get('certificate_id') and not data['options'].get('ssl'):
            verrors.add(
                'idmap_domain_update.certificate_id',
                f'The {new["idmap_backend"]} idmap backend does not '
                'generate LDAP traffic. Certificates do not apply.')
        verrors.check()
        await self.prune_keys(new)
        final_options = IdmapBackend[new['idmap_backend']].defaults()
        final_options.update(new['options'])
        new['options'] = final_options

        if new['options'].get('ldap_user_dn_password'):
            try:
                DSType[new["name"]]
                domain = (await
                          self.middleware.call("smb.config"))['workgroup']
            except KeyError:
                domain = new["name"]

            secret = new['options'].pop('ldap_user_dn_password')
            await self.middleware.call("directoryservices.set_ldap_secret",
                                       domain, secret)
            await self.middleware.call("directoryservices.backup_secrets")

        await self.idmap_compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        cache_job = await self.middleware.call('idmap.clear_idmap_cache')
        await cache_job.wait()
        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete a domain by id. Deletion of default system domains is not permitted.
        """
        if id <= 5:
            entry = await self._get_instance(id)
            raise CallError(
                f'Deleting system idmap domain [{entry["name"]}] is not permitted.',
                errno.EPERM)
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)

    @private
    async def name_to_sid(self, name):
        wb = await run([SMBCmd.WBINFO.value, '--name-to-sid', name],
                       check=False)
        if wb.returncode != 0:
            self.logger.debug("wbinfo failed with error: %s",
                              wb.stderr.decode().strip())

        return wb.stdout.decode().strip()

    @private
    async def sid_to_name(self, sid):
        """
        Last two characters of name string encode the account type.
        """
        wb = await run([SMBCmd.WBINFO.value, '--sid-to-name', sid],
                       check=False)
        if wb.returncode != 0:
            self.logger.debug("wbinfo failed with error: %s",
                              wb.stderr.decode().strip())

        return wb.stdout.decode().strip()[:-2]

    @private
    async def sid_to_unixid(self, sid_str):
        rv = None
        gid = None
        uid = None
        wb = await run([SMBCmd.WBINFO.value, '--sid-to-gid', sid_str],
                       check=False)
        if wb.returncode == 0:
            gid = int(wb.stdout.decode().strip())

        wb = await run([SMBCmd.WBINFO.value, '--sid-to-uid', sid_str],
                       check=False)
        if wb.returncode == 0:
            uid = int(wb.stdout.decode().strip())

        if gid and (gid == uid):
            rv = {"id_type": "BOTH", "id": gid}
        elif gid:
            rv = {"id_type": "GROUP", "id": gid}
        elif uid:
            rv = {"id_type": "USER", "id": uid}

        return rv

    @private
    async def unixid_to_sid(self, data):
        """
        Samba generates SIDs for local accounts that lack explicit mapping in
        passdb.tdb or group_mapping.tdb with a prefix of S-1-22-1 (users) and
        S-1-22-2 (groups). This is not returned by wbinfo, but for consistency
        with what appears when viewed over SMB protocol we'll do the same here.
        """
        unixid = data.get("id")
        id = IDType[data.get("id_type", "GROUP")]

        if id == IDType.USER:
            wb = await run([SMBCmd.WBINFO.value, '--uid-to-sid',
                            str(unixid)],
                           check=False)
        else:
            wb = await run([SMBCmd.WBINFO.value, '--gid-to-sid',
                            str(unixid)],
                           check=False)

        if wb.returncode != 0:
            self.logger.warning("Could not convert [%d] to SID: %s", unixid,
                                wb.stderr.decode().strip())
            if WBCErr.DOMAIN_NOT_FOUND.err() in wb.stderr.decode():
                is_local = await self.middleware.call(
                    f'{"user" if id == IDType.USER else "group"}.query',
                    [("uid" if id == IDType.USER else "gid", '=', unixid)],
                    {"count": True})
                if is_local:
                    return f'S-1-22-{1 if id == IDType.USER else 2}-{unixid}'

            return None

        return wb.stdout.decode().strip()
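
A minimal sketch, assuming the `idmap` namespace shown above and an accessible middleware socket, of creating a RID-backed entry for a trusted domain; all field values are illustrative and the ranges must not overlap existing domains:

from middlewared.client import Client

with Client() as c:
    print(c.call('idmap.backend_choices'))
    c.call('idmap.create', {
        'name': 'EXAMPLE',
        'dns_domain_name': 'example.internal',
        'range_low': 100000001,
        'range_high': 200000000,
        'idmap_backend': 'RID',
        'options': {},
    })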
Example #3
class UPSService(SystemServiceService):
    DRIVERS_AVAILABLE = set(os.listdir(DRIVER_BIN_DIR))

    class Config:
        datastore = 'services.ups'
        datastore_prefix = 'ups_'
        datastore_extend = 'ups.ups_config_extend'
        service = 'ups'
        service_verb = 'restart'

    @private
    async def ups_config_extend(self, data):
        data['mode'] = data['mode'].upper()
        data['shutdown'] = data['shutdown'].upper()
        data['toemail'] = [v for v in data['toemail'].split(';') if v]
        return data

    @accepts()
    async def port_choices(self):
        ports = [x for x in glob.glob('/dev/cua*') if x.find('.') == -1]
        ports.extend(glob.glob('/dev/ugen*'))
        ports.extend(glob.glob('/dev/uhid*'))
        return ports

    @accepts()
    def driver_choices(self):
        """
        Returns choices of UPS drivers supported by the system.
        """
        ups_choices = {}
        if os.path.exists("/conf/base/etc/local/nut/driver.list"):
            with open('/conf/base/etc/local/nut/driver.list', 'rb') as f:
                d = f.read().decode('utf8', 'ignore')
            r = io.StringIO()
            for line in re.sub(r'[ \t]+', ' ', d, flags=re.M).split('\n'):
                r.write(line.strip() + '\n')
            r.seek(0)
            reader = csv.reader(r, delimiter=' ', quotechar='"')
            for row in reader:
                if len(row) == 0 or row[0].startswith('#'):
                    continue
                if row[-2] == '#':
                    last = -3
                else:
                    last = -1
                driver_str = row[last]
                driver_annotation = ''
                m = re.match(r'(.+) \((.+)\)',
                             driver_str)  # "blazer_usb (USB ID 0665:5161)"
                if m:
                    driver_str, driver_annotation = m.group(1), m.group(2)
                for driver in driver_str.split(
                        ' or '):  # can be "blazer_ser or blazer_usb"
                    driver = driver.strip()
                    if driver not in self.DRIVERS_AVAILABLE:
                        continue
                    for i, field in enumerate(list(row)):
                        row[i] = field
                    key = '$'.join([driver, row[3]])
                    ups_choices[key] = '%s (%s)' % (
                        ' '.join(filter(None, row[0:last])),
                        ', '.join(filter(None, [driver, driver_annotation])),
                    )
        return ups_choices

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        driver = data.get('driver')
        if driver:
            if driver not in (
                    await self.middleware.call('ups.driver_choices')).keys():
                verrors.add(
                    f'{schema}.driver',
                    'Driver selected does not match local machine\'s driver list'
                )

        identifier = data['identifier']
        if identifier:
            if not re.search(r'^[a-z0-9\.\-_]+$', identifier, re.I):
                verrors.add(f'{schema}.identifier',
                            'Use alphanumeric characters, ".", "-" and "_"')

        for field in [
                field for field in ['monpwd', 'monuser'] if data.get(field)
        ]:
            if re.search(r'[ #]', data[field], re.I):
                verrors.add(f'{schema}.{field}',
                            'Spaces or number signs are not allowed')

        mode = data.get('mode')
        if mode == 'MASTER':
            for field in filter(lambda f: not data[f], ['port', 'driver']):
                verrors.add(f'{schema}.{field}', 'This field is required')
        else:
            if not data.get('remotehost'):
                verrors.add(f'{schema}.remotehost', 'This field is required')

        to_emails = data.get('toemail')
        if to_emails:
            data['toemail'] = ';'.join(to_emails)
        else:
            data['toemail'] = ''

        data['mode'] = data['mode'].lower()
        data['shutdown'] = data['shutdown'].lower()

        return verrors, data

    @accepts(
        Dict('ups_update',
             Bool('emailnotify'),
             Bool('powerdown'),
             Bool('rmonitor'),
             Int('nocommwarntime', null=True),
             Int('remoteport'),
             Int('shutdowntimer'),
             Int('hostsync', validators=[Range(min=0)]),
             Str('description'),
             Str('driver'),
             Str('extrausers', max_length=None),
             Str('identifier', empty=False),
             Str('mode', enum=['MASTER', 'SLAVE']),
             Str('monpwd', empty=False),
             Str('monuser', empty=False),
             Str('options', max_length=None),
             Str('optionsupsd', max_length=None),
             Str('port'),
             Str('remotehost'),
             Str('shutdown', enum=['LOWBATT', 'BATT']),
             Str('shutdowncmd', empty=False),
             Str('subject'),
             List('toemail', items=[Str('email', validators=[Email()])]),
             update=True))
    async def do_update(self, data):
        """
        Update UPS Service Configuration.

        `emailnotify` when enabled, sends out notifications of different UPS events via email.

        `powerdown` when enabled, sets UPS to power off after shutting down the system.

        `nocommwarntime` is a value in seconds which makes the UPS Service wait the specified number of seconds
        before alerting that it cannot reach the configured UPS.

        `shutdowntimer` is a value in seconds which tells the Service to wait specified seconds for the UPS before
        initiating a shutdown. This only applies when `shutdown` is set to "BATT".

        `shutdowncmd` is the command which is executed to initiate a shutdown. It defaults to "poweroff".

        `toemail` is a list of valid email addresses to which notification emails are sent.
        """
        config = await self.config()
        old_config = config.copy()
        config.update(data)
        verrors, config = await self.validate_data(config, 'ups_update')
        if verrors:
            raise verrors

        old_config['mode'] = old_config['mode'].lower()
        old_config['shutdown'] = old_config['shutdown'].lower()
        old_config['toemail'] = ';'.join(
            old_config['toemail']) if old_config['toemail'] else ''

        if len(set(old_config.items()) ^ set(config.items())) > 0:
            if config['identifier'] != old_config['identifier']:
                await self.dismiss_alerts()

            await self._update_service(old_config, config)

        return await self.config()

    @private
    async def alerts_mapping(self):
        return {
            'LOWBATT': 'UPSBatteryLow',
            'COMMBAD': 'UPSCommbad',
            'COMMOK': 'UPSCommok',
            'ONBATT': 'UPSOnBattery',
            'ONLINE': 'UPSOnline',
            'REPLBATT': 'UPSReplbatt'
        }

    @private
    async def dismiss_alerts(self):
        config = await self.config()

        for alert in (await self.alerts_mapping()).values():
            await self.middleware.call('alert.oneshot_delete', alert,
                                       {'ups': config['identifier']})

    @private
    @accepts(Str('notify_type'))
    async def upssched_event(self, notify_type):
        config = await self.config()

        if config['mode'] == 'MASTER':
            upsc_identifier = f'{config["identifier"]}@localhost:{config["remoteport"]}'
        else:
            upsc_identifier = f'{config["identifier"]}@{config["remotehost"]}:{config["remoteport"]}'

        if notify_type.lower() == 'shutdown':
            # Before we start FSD with upsmon, let's ensure that the UPS is not ONLINE (OL).
            # There are cases where battery/charger issues can result in ups.status being "OL LB" at the
            # same time. This ensures that we don't initiate a shutdown if the UPS is OL.
            stats_output = (await run('/usr/local/bin/upsc',
                                      upsc_identifier,
                                      check=False)).stdout

            ups_status = re.findall(
                fr'ups.status: (.*)',
                '' if not stats_output else stats_output.decode())
            if ups_status and 'ol' in ups_status[0].lower():
                self.middleware.logger.debug(
                    f'Shutdown not initiated as ups.status ({ups_status[0]}) indicates '
                    f'{config["identifier"]} is ONLINE (OL).')
            else:
                syslog.syslog(syslog.LOG_NOTICE,
                              'upssched-cmd "issuing shutdown"')
                await run('/usr/local/sbin/upsmon', '-c', 'fsd', check=False)
        elif 'notify' in notify_type.lower():
            # notify_type is expected to be of the following format
            # NOTIFY-EVENT i.e NOTIFY-LOWBATT
            notify_type = notify_type.split('-')[-1]

            # We would like to send alerts for the following events
            alert_mapping = await self.alerts_mapping()

            await self.dismiss_alerts()

            if notify_type in alert_mapping:
                await self.middleware.call('alert.oneshot_create',
                                           alert_mapping[notify_type],
                                           {'ups': config['identifier']})

            if config['emailnotify']:
                # Email user with the notification event and details
                # We send the email in the following format ( inclusive line breaks )

                # NOTIFICATION: 'LOWBATT'
                # UPS: 'ups'
                #
                # Statistics recovered:
                #
                # 1) Battery charge (percent)
                # battery.charge: 5
                #
                # 2) Remaining battery level when UPS switches to LB (percent)
                # battery.charge.low: 10
                #
                # 3) Battery runtime (seconds)
                # battery.runtime: 1860
                #
                # 4) Remaining battery runtime when UPS switches to LB (seconds)
                # battery.runtime.low: 900

                ups_name = config['identifier']
                hostname = (await self.middleware.call('system.info'))['hostname']
                current_time = datetime.datetime.now(
                    tz=dateutil.tz.tzlocal()).strftime('%a %b %d %H:%M:%S %Z %Y')
                ups_subject = config['subject'].replace(
                    '%d', current_time).replace('%h', hostname)
                body = f'NOTIFICATION: {notify_type!r}<br>UPS: {ups_name!r}<br><br>'

                # Let's gather following stats
                data_points = {
                    'battery.charge': 'Battery charge (percent)',
                    'battery.charge.low': 'Battery level remaining (percent) when UPS switches to Low Battery (LB)',
                    'battery.charge.status': 'Battery charge status',
                    'battery.runtime': 'Battery runtime (seconds)',
                    'battery.runtime.low': 'Battery runtime remaining (seconds) when UPS switches to Low Battery (LB)',
                    'battery.runtime.restart': 'Minimum battery runtime (seconds) to allow UPS restart after power-off',
                }

                stats_output = (await run('/usr/local/bin/upsc',
                                          upsc_identifier,
                                          check=False)).stdout
                recovered_stats = re.findall(
                    fr'({"|".join(data_points)}): (.*)',
                    '' if not stats_output else stats_output.decode())

                if recovered_stats:
                    body += 'Statistics recovered:<br><br>'
                    # recovered_stats is expected to be a list in this format
                    # [('battery.charge', '5'), ('battery.charge.low', '10'), ('battery.runtime', '1860')]
                    for index, stat in enumerate(recovered_stats):
                        body += f'{index + 1}) {data_points[stat[0]]}<br>  {stat[0]}: {stat[1]}<br><br>'

                else:
                    body += 'Statistics could not be recovered<br>'

                # Subject and body defined, send email
                job = await self.middleware.call('mail.send', {
                    'subject': ups_subject,
                    'text': body,
                    'to': config['toemail']
                })

                await job.wait()
                if job.error:
                    self.middleware.logger.debug(
                        f'Failed to send UPS status email: {job.error}')

        else:
            self.middleware.logger.debug(
                f'Unrecognized UPS notification event: {notify_type}')
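
A minimal sketch (the `ups.*` method names come from the service above; the driver, port, and identifier values are illustrative) of configuring the UPS service through the middleware client:

from middlewared.client import Client

with Client() as c:
    drivers = c.call('ups.driver_choices')   # {'<driver>$<model>': 'description', ...}
    ports = c.call('ups.port_choices')
    c.call('ups.update', {
        'mode': 'MASTER',
        'identifier': 'ups',
        'driver': next(iter(drivers)),
        'port': ports[0] if ports else '/dev/uhid0',
        'shutdown': 'BATT',
    })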
Example #4
class ServiceService(CRUDService):

    SERVICE_DEFS = {
        's3': ('minio', '/var/run/minio.pid'),
        'ssh': ('sshd', '/var/run/sshd.pid'),
        'rsync': ('rsync', '/var/run/rsyncd.pid'),
        'nfs': ('nfsd', None),
        'afp': ('netatalk', None),
        'cifs': ('smbd', '/var/run/samba4/smbd.pid'),
        'dynamicdns': ('inadyn-mt', None),
        'snmp': ('snmpd', '/var/run/net_snmpd.pid'),
        'ftp': ('proftpd', '/var/run/proftpd.pid'),
        'tftp': ('inetd', '/var/run/inetd.pid'),
        'iscsitarget': ('ctld', '/var/run/ctld.pid'),
        'lldp': ('ladvd', '/var/run/ladvd.pid'),
        'ups': ('upsd', '/var/db/nut/upsd.pid'),
        'upsmon': ('upsmon', '/var/db/nut/upsmon.pid'),
        'smartd': ('smartd', '/var/run/smartd.pid'),
        'webshell': (None, '/var/run/webshell.pid'),
        'webdav': ('httpd', '/var/run/httpd.pid'),
        'netdata': ('netdata', '/var/db/netdata/netdata.pid')
    }

    @filterable
    async def query(self, filters=None, options=None):
        if options is None:
            options = {}
        options['prefix'] = 'srv_'

        services = await self.middleware.call('datastore.query', 'services.services', filters, options)

        # In case a single service has been requested
        if not isinstance(services, list):
            services = [services]

        jobs = {
            asyncio.ensure_future(self._get_status(entry)): entry
            for entry in services
        }
        await asyncio.wait(list(jobs.keys()), timeout=15)

        def result(task):
            """
            Method to handle results of the greenlets.
            In case a greenlet has timed out, provide UNKNOWN state
            """
            try:
                result = task.result()
            except Exception:
                result = None
                self.logger.warn('Failed to get status', exc_info=True)
            if result is None:
                entry = jobs.get(task)
                entry['state'] = 'UNKNOWN'
                entry['pids'] = []
                return entry
            else:
                return result

        services = list(map(result, jobs))
        return filter_list(services, filters, options)

    @accepts(
        Int('id'),
        Dict(
            'service-update',
            Bool('enable'),
        ),
    )
    async def do_update(self, id, data):
        """
        Update service entry of `id`.

        Currently it only accepts the `enable` option, which determines whether the
        service should start on boot.

        """
        return await self.middleware.call('datastore.update', 'services.services', id, {'srv_enable': data['enable']})

    @accepts(
        Str('service'),
        Dict(
            'service-control',
            Bool('onetime'),
        ),
    )
    async def start(self, service, options=None):
        """ Start the service specified by `service`.

        The helper will use the method self._start_[service]() to start the service.
        If the method does not exist, it falls back to service(8)."""
        if options is None:
            options = {
                'onetime': True,
            }
        await self.middleware.call_hook('service.pre_start', service)
        sn = self._started_notify("start", service)
        await self._simplecmd("start", service, options)
        return await self.started(service, sn)

    async def started(self, service, sn=None):
        """
        Test if service specified by `service` has been started.
        """
        if sn:
            await self.middleware.threaded(sn.join)

        try:
            svc = await self.query([('service', '=', service)], {'get': True})
            self.middleware.send_event('service.query', 'CHANGED', fields=svc)
            return svc['state'] == 'RUNNING'
        except IndexError:
            f = getattr(self, '_started_' + service, None)
            if callable(f):
                if inspect.iscoroutinefunction(f):
                    return (await f())[0]
                else:
                    return f()[0]
            else:
                return (await self._started(service))[0]

    @accepts(
        Str('service'),
        Dict(
            'service-control',
            Bool('onetime'),
        ),
    )
    async def stop(self, service, options=None):
        """ Stop the service specified by `service`.

        The helper will use the method self._stop_[service]() to stop the service.
        If the method does not exist, it falls back to service(8)."""
        if options is None:
            options = {
                'onetime': True,
            }
        await self.middleware.call_hook('service.pre_stop', service)
        sn = self._started_notify("stop", service)
        await self._simplecmd("stop", service, options)
        return await self.started(service, sn)

    @accepts(
        Str('service'),
        Dict(
            'service-control',
            Bool('onetime'),
        ),
    )
    async def restart(self, service, options=None):
        """
        Restart the service specified by `service`.

        The helper will use the method self._restart_[service]() to restart the service.
        If the method does not exist, it falls back to service(8)."""
        if options is None:
            options = {
                'onetime': True,
            }
        await self.middleware.call_hook('service.pre_restart', service)
        sn = self._started_notify("restart", service)
        await self._simplecmd("restart", service, options)
        return await self.started(service, sn)

    @accepts(
        Str('service'),
        Dict(
            'service-control',
            Bool('onetime'),
        ),
    )
    async def reload(self, service, options=None):
        """
        Reload the service specified by `service`.

        The helper will use the method self._reload_[service]() to reload the service.
        If the method does not exist, the helper will try to restart the
        service instead."""
        if options is None:
            options = {
                'onetime': True,
            }
        await self.middleware.call_hook('service.pre_reload', service)
        try:
            await self._simplecmd("reload", service, options)
        except Exception:
            await self.restart(service, options)
        return await self.started(service)

    async def _get_status(self, service):
        f = getattr(self, '_started_' + service['service'], None)
        if callable(f):
            if inspect.iscoroutinefunction(f):
                running, pids = await f()
            else:
                running, pids = f()
        else:
            running, pids = await self._started(service['service'])

        if running:
            state = 'RUNNING'
        else:
            if service['enable']:
                state = 'CRASHED'
            else:
                state = 'STOPPED'

        service['state'] = state
        service['pids'] = pids
        return service

    async def _simplecmd(self, action, what, options=None):
        self.logger.debug("Calling: %s(%s) ", action, what)
        f = getattr(self, '_' + action + '_' + what, None)
        if f is None:
            # Provide generic start/stop/restart verbs for rc.d scripts
            if what in self.SERVICE_DEFS:
                procname, pidfile = self.SERVICE_DEFS[what]
                if procname:
                    what = procname
            if action in ("start", "stop", "restart", "reload"):
                if action == 'restart':
                    await self._system("/usr/sbin/service " + what + " forcestop ")
                await self._service(what, action, **options)
            else:
                raise ValueError("Internal error: Unknown command")
        else:
            call = f(**(options or {}))
            if inspect.iscoroutinefunction(f):
                await call

    async def _system(self, cmd, options=None):
        stdout = DEVNULL
        if options and 'stdout' in options:
            stdout = options['stdout']
        stderr = DEVNULL
        if options and 'stderr' in options:
            stderr = options['stderr']

        proc = await Popen(cmd, stdout=stdout, stderr=stderr, shell=True, close_fds=True)
        await proc.communicate()
        return proc.returncode

    async def _service(self, service, verb, **options):
        onetime = options.get('onetime')
        force = options.get('force')
        quiet = options.get('quiet')

        # `force` takes precedence over `onetime`, which takes precedence over
        # `quiet`; the prefixes are mutually exclusive and only one is applied.
        preverb = ''
        if force:
            preverb = 'force'
        elif onetime:
            preverb = 'one'
        elif quiet:
            preverb = 'quiet'

        return await self._system('/usr/sbin/service {} {}{}'.format(
            service,
            preverb,
            verb,
        ), options)
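
    # For illustration (not in the original code): given the precedence above, a
    # call such as
    #
    #   await self._service('proftpd', 'start', onetime=True)
    #
    # ends up executing `/usr/sbin/service proftpd onestart`, while passing
    # force=True would execute `/usr/sbin/service proftpd forcestart` instead.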

    def _started_notify(self, verb, what):
        """
        The check for started (or stopped) processes is currently done in two steps.
        This is the first step: it spawns a StartNotify thread that watches for
        events before the rc.d scripts are actually started/stopped.

        Returns:
            StartNotify object if the service is known or None otherwise
        """

        if what in self.SERVICE_DEFS:
            procname, pidfile = self.SERVICE_DEFS[what]
            sn = StartNotify(verb=verb, pidfile=pidfile)
            sn.start()
            return sn
        else:
            return None

    async def _started(self, what, notify=None):
        """
        This is the second step: wait for the StartNotify thread to finish and
        then check the status of pidfile/procname using pgrep.

        Returns:
            Tuple (alive, pids): True and a list of PIDs if the service is
            alive, False and an empty list otherwise.
        """

        if what in self.SERVICE_DEFS:
            procname, pidfile = self.SERVICE_DEFS[what]
            if notify:
                await self.middleware.threaded(notify.join)

            if pidfile:
                pgrep = "/bin/pgrep -F {}{}".format(
                    pidfile,
                    ' ' + procname if procname else '',
                )
            else:
                pgrep = "/bin/pgrep {}".format(procname)
            proc = await Popen(pgrep, shell=True, stdout=PIPE, stderr=PIPE, close_fds=True)
            data = (await proc.communicate())[0].decode()

            if proc.returncode == 0:
                return True, [
                    int(i)
                    for i in data.strip().split('\n') if i.isdigit()
                ]
        return False, []
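
    # A usage sketch (not part of the original code): for an entry such as
    # SERVICE_DEFS['ssh'] above, ('sshd', '/var/run/sshd.pid'), the check runs
    #
    #   /bin/pgrep -F /var/run/sshd.pid sshd
    #
    # while entries without a pidfile (e.g. 'nfs') fall back to `/bin/pgrep nfsd`.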

    async def _start_webdav(self, **kwargs):
        await self._service("ix-apache", "start", force=True, **kwargs)
        await self._service("apache24", "start", **kwargs)

    async def _stop_webdav(self, **kwargs):
        await self._service("apache24", "stop", **kwargs)

    async def _restart_webdav(self, **kwargs):
        await self._service("apache24", "stop", force=True, **kwargs)
        await self._service("ix-apache", "start", force=True, **kwargs)
        await self._service("apache24", "restart", **kwargs)

    async def _reload_webdav(self, **kwargs):
        await self._service("ix-apache", "start", force=True, **kwargs)
        await self._service("apache24", "reload", **kwargs)

    async def _restart_django(self, **kwargs):
        await self._service("django", "restart", **kwargs)

    async def _start_webshell(self, **kwargs):
        await self._system("/usr/local/bin/python /usr/local/www/freenasUI/tools/webshell.py")

    async def _restart_webshell(self, **kwargs):
        try:
            with open('/var/run/webshell.pid', 'r') as f:
                pid = f.read()
                os.kill(int(pid), signal.SIGTERM)
                time.sleep(0.2)
                os.kill(int(pid), signal.SIGKILL)
        except Exception:
            pass
        await self._system("ulimit -n 1024 && /usr/local/bin/python /usr/local/www/freenasUI/tools/webshell.py")

    async def _restart_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "start", force=True, **kwargs)
        await self._service("ctld", "stop", force=True, **kwargs)
        await self._service("ix-ctld", "start", quiet=True, **kwargs)
        await self._service("ctld", "restart", **kwargs)

    async def _start_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "start", quiet=True, **kwargs)
        await self._service("ctld", "start", **kwargs)

    async def _stop_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "stop", force=True, **kwargs)
        await self._service("ctld", "stop", force=True, **kwargs)

    async def _reload_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "start", quiet=True, **kwargs)
        await self._service("ctld", "reload", **kwargs)

    async def _start_collectd(self, **kwargs):
        await self._service("ix-collectd", "start", quiet=True, **kwargs)
        await self._service("collectd", "restart", **kwargs)

    async def _restart_collectd(self, **kwargs):
        await self._service("collectd", "stop", **kwargs)
        await self._service("ix-collectd", "start", quiet=True, **kwargs)
        await self._service("collectd", "start", **kwargs)

    async def _start_sysctl(self, **kwargs):
        await self._service("sysctl", "start", **kwargs)
        await self._service("ix-sysctl", "start", quiet=True, **kwargs)

    async def _reload_sysctl(self, **kwargs):
        await self._service("sysctl", "start", **kwargs)
        await self._service("ix-sysctl", "reload", **kwargs)

    async def _start_network(self, **kwargs):
        await self.middleware.call('interfaces.sync')
        await self.middleware.call('routes.sync')

    async def _stop_jails(self, **kwargs):
        for jail in await self.middleware.call('datastore.query', 'jails.jails'):
            await self.middleware.call('notifier.warden', 'stop', [], {'jail': jail['jail_host']})

    async def _start_jails(self, **kwargs):
        await self._service("ix-warden", "start", **kwargs)
        for jail in await self.middleware.call('datastore.query', 'jails.jails'):
            if jail['jail_autostart']:
                await self.middleware.call('notifier.warden', 'start', [], {'jail': jail['jail_host']})
        await self._service("ix-plugins", "start", **kwargs)
        await self.reload("http", kwargs)

    async def _restart_jails(self, **kwargs):
        await self._stop_jails()
        await self._start_jails()

    async def _stop_pbid(self, **kwargs):
        await self._service("pbid", "stop", **kwargs)

    async def _start_pbid(self, **kwargs):
        await self._service("pbid", "start", **kwargs)

    async def _restart_pbid(self, **kwargs):
        await self._service("pbid", "restart", **kwargs)

    async def _reload_named(self, **kwargs):
        await self._service("named", "reload", **kwargs)

    async def _reload_hostname(self, **kwargs):
        await self._system('/bin/hostname ""')
        await self._service("ix-hostname", "start", quiet=True, **kwargs)
        await self._service("hostname", "start", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", quiet=True, **kwargs)
        await self._service("collectd", "stop", **kwargs)
        await self._service("ix-collectd", "start", quiet=True, **kwargs)
        await self._service("collectd", "start", **kwargs)

    async def _reload_resolvconf(self, **kwargs):
        await self._reload_hostname()
        await self._service("ix-resolv", "start", quiet=True, **kwargs)

    async def _reload_networkgeneral(self, **kwargs):
        await self._reload_resolvconf()
        await self._service("routing", "restart", **kwargs)

    async def _reload_timeservices(self, **kwargs):
        await self._service("ix-localtime", "start", quiet=True, **kwargs)
        await self._service("ix-ntpd", "start", quiet=True, **kwargs)
        await self._service("ntpd", "restart", **kwargs)
        settings = await self.middleware.call(
            'datastore.query', 'system.settings', [], {'order_by': ['-id'], 'get': True})
        os.environ['TZ'] = settings['stg_timezone']
        time.tzset()

    async def _restart_smartd(self, **kwargs):
        await self._service("ix-smartd", "start", quiet=True, **kwargs)
        await self._service("smartd", "stop", force=True, **kwargs)
        await self._service("smartd", "restart", **kwargs)

    async def _reload_ssh(self, **kwargs):
        await self._service("ix-sshd", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("openssh", "reload", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _start_ssh(self, **kwargs):
        await self._service("ix-sshd", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("openssh", "start", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _stop_ssh(self, **kwargs):
        await self._service("openssh", "stop", force=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)

    async def _restart_ssh(self, **kwargs):
        await self._service("ix-sshd", "start", quiet=True, **kwargs)
        await self._service("openssh", "stop", force=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("openssh", "restart", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _start_s3(self, **kwargs):
        await self.middleware.call('etc.generate', 's3')
        await self._service("minio", "start", quiet=True, stdout=None, stderr=None, **kwargs)

    async def _reload_rsync(self, **kwargs):
        await self._service("ix-rsyncd", "start", quiet=True, **kwargs)
        await self._service("rsyncd", "restart", **kwargs)

    async def _restart_rsync(self, **kwargs):
        await self._stop_rsync()
        await self._start_rsync()

    async def _start_rsync(self, **kwargs):
        await self._service("ix-rsyncd", "start", quiet=True, **kwargs)
        await self._service("rsyncd", "start", **kwargs)

    async def _stop_rsync(self, **kwargs):
        await self._service("rsyncd", "stop", force=True, **kwargs)

    async def _started_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl status"):
            res = True
        return res, []

    async def _start_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl start"):
            res = True
        return res

    async def _restart_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl restart"):
            res = True
        return res

    async def _stop_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl stop"):
            res = True
        return res

    async def _started_ldap(self, **kwargs):
        if (await self._system('/usr/sbin/service ix-ldap status') != 0):
            return False, []
        return await self.middleware.call('notifier.ldap_status'), []

    async def _start_ldap(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl start"):
            res = True
        return res

    async def _stop_ldap(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl stop"):
            res = True
        return res

    async def _restart_ldap(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl restart"):
            res = True
        return res

    async def _start_lldp(self, **kwargs):
        await self._service("ladvd", "start", **kwargs)

    async def _stop_lldp(self, **kwargs):
        await self._service("ladvd", "stop", force=True, **kwargs)

    async def _restart_lldp(self, **kwargs):
        await self._service("ladvd", "stop", force=True, **kwargs)
        await self._service("ladvd", "restart", **kwargs)

    async def _clear_activedirectory_config(self):
        await self._system("/bin/rm -f /etc/directoryservice/ActiveDirectory/config")

    async def _started_activedirectory(self, **kwargs):
        for srv in ('kinit', 'activedirectory', ):
            if await self._system('/usr/sbin/service ix-%s status' % (srv, )) != 0:
                return False, []
        if await self._system('/usr/local/bin/wbinfo -p') != 0:
            return False, []
        if await self._system('/usr/local/bin/wbinfo -t') != 0:
            return False, []
        return True, []

    async def _start_activedirectory(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/ActiveDirectory/ctl start"):
            res = True
        return res

    async def _stop_activedirectory(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/ActiveDirectory/ctl stop"):
            res = True
        return res

    async def _restart_activedirectory(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/ActiveDirectory/ctl restart"):
            res = True
        return res

    async def _started_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl status"):
            res = True
        return res, []

    async def _start_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl start"):
            res = True
        return res

    async def _stop_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl stop"):
            res = True
        return res

    async def _restart_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl restart"):
            res = True
        return res

    async def _restart_syslogd(self, **kwargs):
        await self._service("ix-syslogd", "start", quiet=True, **kwargs)
        await self._system("/etc/local/rc.d/syslog-ng restart")

    async def _start_syslogd(self, **kwargs):
        await self._service("ix-syslogd", "start", quiet=True, **kwargs)
        await self._system("/etc/local/rc.d/syslog-ng start")

    async def _stop_syslogd(self, **kwargs):
        await self._system("/etc/local/rc.d/syslog-ng stop")

    async def _reload_syslogd(self, **kwargs):
        await self._service("ix-syslogd", "start", quiet=True, **kwargs)
        await self._system("/etc/local/rc.d/syslog-ng reload")

    async def _start_tftp(self, **kwargs):
        await self._service("ix-inetd", "start", quiet=True, **kwargs)
        await self._service("inetd", "start", **kwargs)

    async def _reload_tftp(self, **kwargs):
        await self._service("ix-inetd", "start", quiet=True, **kwargs)
        await self._service("inetd", "stop", force=True, **kwargs)
        await self._service("inetd", "restart", **kwargs)

    async def _restart_tftp(self, **kwargs):
        await self._service("ix-inetd", "start", quiet=True, **kwargs)
        await self._service("inetd", "stop", force=True, **kwargs)
        await self._service("inetd", "restart", **kwargs)

    async def _restart_cron(self, **kwargs):
        await self._service("ix-crontab", "start", quiet=True, **kwargs)

    async def _start_motd(self, **kwargs):
        await self._service("ix-motd", "start", quiet=True, **kwargs)
        await self._service("motd", "start", quiet=True, **kwargs)

    async def _start_ttys(self, **kwargs):
        await self._service("ix-ttys", "start", quiet=True, **kwargs)

    async def _reload_ftp(self, **kwargs):
        await self._service("ix-proftpd", "start", quiet=True, **kwargs)
        await self._service("proftpd", "restart", **kwargs)

    async def _restart_ftp(self, **kwargs):
        await self._stop_ftp()
        await self._start_ftp()

    async def _start_ftp(self, **kwargs):
        await self._service("ix-proftpd", "start", quiet=True, **kwargs)
        await self._service("proftpd", "start", **kwargs)

    async def _stop_ftp(self, **kwargs):
        await self._service("proftpd", "stop", force=True, **kwargs)

    async def _start_ups(self, **kwargs):
        await self._service("ix-ups", "start", quiet=True, **kwargs)
        await self._service("nut", "start", **kwargs)
        await self._service("nut_upsmon", "start", **kwargs)
        await self._service("nut_upslog", "start", **kwargs)

    async def _stop_ups(self, **kwargs):
        await self._service("nut_upslog", "stop", force=True, **kwargs)
        await self._service("nut_upsmon", "stop", force=True, **kwargs)
        await self._service("nut", "stop", force=True, **kwargs)

    async def _restart_ups(self, **kwargs):
        await self._service("ix-ups", "start", quiet=True, **kwargs)
        await self._service("nut", "stop", force=True, **kwargs)
        await self._service("nut_upsmon", "stop", force=True, **kwargs)
        await self._service("nut_upslog", "stop", force=True, **kwargs)
        await self._service("nut", "restart", **kwargs)
        await self._service("nut_upsmon", "restart", **kwargs)
        await self._service("nut_upslog", "restart", **kwargs)

    async def _started_ups(self, **kwargs):
        mode = (await self.middleware.call('datastore.query', 'services.ups', [], {'order_by': ['-id'], 'get': True}))['ups_mode']
        if mode == "master":
            svc = "ups"
        else:
            svc = "upsmon"
        return await self._started(svc)

    async def _start_afp(self, **kwargs):
        await self._service("ix-afpd", "start", **kwargs)
        await self._service("netatalk", "start", **kwargs)

    async def _stop_afp(self, **kwargs):
        await self._service("netatalk", "stop", force=True, **kwargs)
        # When netatalk stops, a stuck afpd or cnid_metad can get left behind,
        # which can cause issues when netatalk is restarted.
        await self._system("pkill -9 afpd")
        await self._system("pkill -9 cnid_metad")

    async def _restart_afp(self, **kwargs):
        await self._stop_afp()
        await self._start_afp()

    async def _reload_afp(self, **kwargs):
        await self._service("ix-afpd", "start", quiet=True, **kwargs)
        await self._system("killall -1 netatalk")

    async def _reload_nfs(self, **kwargs):
        await self._service("ix-nfsd", "start", quiet=True, **kwargs)

    async def _restart_nfs(self, **kwargs):
        await self._stop_nfs(**kwargs)
        await self._start_nfs(**kwargs)

    async def _stop_nfs(self, **kwargs):
        await self._service("lockd", "stop", force=True, **kwargs)
        await self._service("statd", "stop", force=True, **kwargs)
        await self._service("nfsd", "stop", force=True, **kwargs)
        await self._service("mountd", "stop", force=True, **kwargs)
        await self._service("nfsuserd", "stop", force=True, **kwargs)
        await self._service("gssd", "stop", force=True, **kwargs)
        await self._service("rpcbind", "stop", force=True, **kwargs)
        if not await self.middleware.call('system.is_freenas'):
            await self._service("vaaiserver", "stop", force=True, **kwargs)

    async def _start_nfs(self, **kwargs):
        await self._service("ix-nfsd", "start", quiet=True, **kwargs)
        await self._service("rpcbind", "start", quiet=True, **kwargs)
        await self._service("gssd", "start", quiet=True, **kwargs)
        await self._service("nfsuserd", "start", quiet=True, **kwargs)
        await self._service("mountd", "start", quiet=True, **kwargs)
        await self._service("nfsd", "start", quiet=True, **kwargs)
        await self._service("statd", "start", quiet=True, **kwargs)
        await self._service("lockd", "start", quiet=True, **kwargs)
        if not await self.middleware.call('system.is_freenas'):
            await self._service("vaaiserver", "start", quiet=True, **kwargs)

    async def _force_stop_jail(self, **kwargs):
        await self._service("jail", "stop", force=True, **kwargs)

    async def _start_plugins(self, jail=None, plugin=None, **kwargs):
        if jail and plugin:
            await self._system("/usr/sbin/service ix-plugins forcestart %s:%s" % (jail, plugin))
        else:
            await self._service("ix-plugins", "start", force=True, **kwargs)

    async def _stop_plugins(self, jail=None, plugin=None, **kwargs):
        if jail and plugin:
            await self._system("/usr/sbin/service ix-plugins forcestop %s:%s" % (jail, plugin))
        else:
            await self._service("ix-plugins", "stop", force=True, **kwargs)

    async def _restart_plugins(self, jail=None, plugin=None):
        await self._stop_plugins(jail=jail, plugin=plugin)
        await self._start_plugins(jail=jail, plugin=plugin)

    async def _started_plugins(self, jail=None, plugin=None, **kwargs):
        res = False
        if jail and plugin:
            if await self._system("/usr/sbin/service ix-plugins status %s:%s" % (jail, plugin)) == 0:
                res = True
        else:
            if await self._service("ix-plugins", "status", **kwargs) == 0:
                res = True
        return res, []

    async def _restart_dynamicdns(self, **kwargs):
        await self._service("ix-inadyn", "start", quiet=True, **kwargs)
        await self._service("inadyn-mt", "stop", force=True, **kwargs)
        await self._service("inadyn-mt", "restart", **kwargs)

    async def _restart_system(self, **kwargs):
        asyncio.ensure_future(self._system("/bin/sleep 3 && /sbin/shutdown -r now"))

    async def _stop_system(self, **kwargs):
        asyncio.ensure_future(self._system("/bin/sleep 3 && /sbin/shutdown -p now"))

    async def _reload_cifs(self, **kwargs):
        await self._service("ix-pre-samba", "start", quiet=True, **kwargs)
        await self._service("samba_server", "reload", force=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", **kwargs)
        # After mdnsd is restarted we need to reload netatalk so it re-registers
        # with mDNS. Ticket #7133
        await self._service("netatalk", "reload", **kwargs)

    async def _restart_cifs(self, **kwargs):
        await self._service("ix-pre-samba", "start", quiet=True, **kwargs)
        await self._service("samba_server", "stop", force=True, **kwargs)
        await self._service("samba_server", "restart", quiet=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", **kwargs)
        # After mdnsd is restarted we need to reload netatalk so it re-registers
        # with mDNS. Ticket #7133
        await self._service("netatalk", "reload", **kwargs)

    async def _start_cifs(self, **kwargs):
        await self._service("ix-pre-samba", "start", quiet=True, **kwargs)
        await self._service("samba_server", "start", quiet=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)

    async def _stop_cifs(self, **kwargs):
        await self._service("samba_server", "stop", force=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)

    async def _start_snmp(self, **kwargs):
        await self._service("ix-snmpd", "start", quiet=True, **kwargs)
        await self._service("snmpd", "start", quiet=True, **kwargs)

    async def _stop_snmp(self, **kwargs):
        await self._service("snmpd", "stop", quiet=True, **kwargs)
        # The following is required in addition to just `snmpd`
        # to kill the `freenas-snmpd.py` daemon
        await self._service("ix-snmpd", "stop", quiet=True, **kwargs)

    async def _restart_snmp(self, **kwargs):
        await self._service("ix-snmpd", "start", quiet=True, **kwargs)
        await self._service("snmpd", "stop", force=True, **kwargs)
        await self._service("snmpd", "start", quiet=True, **kwargs)

    async def _restart_http(self, **kwargs):
        await self._service("ix-nginx", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("nginx", "restart", **kwargs)

    async def _reload_http(self, **kwargs):
        await self._service("ix-nginx", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("nginx", "reload", **kwargs)

    async def _reload_loader(self, **kwargs):
        await self._service("ix-loader", "reload", **kwargs)

    async def _start_loader(self, **kwargs):
        await self._service("ix-loader", "start", quiet=True, **kwargs)

    def __saver_loaded(self):
        # Synchronous helper (it is called without await below): check whether
        # the daemon_saver kernel module is loaded.
        pipe = os.popen("kldstat|grep daemon_saver")
        out = pipe.read().strip('\n')
        pipe.close()
        return len(out) > 0

    async def _start_saver(self, **kwargs):
        if not self.__saver_loaded():
            await self._system("kldload daemon_saver")

    async def _stop_saver(self, **kwargs):
        if self.__saver_loaded():
            await self._system("kldunload daemon_saver")

    async def _restart_saver(self, **kwargs):
        await self._stop_saver()
        await self._start_saver()

    async def _reload_disk(self, **kwargs):
        await self._service("ix-fstab", "start", quiet=True, **kwargs)
        await self._service("ix-swap", "start", quiet=True, **kwargs)
        await self._service("swap", "start", quiet=True, **kwargs)
        await self._service("mountlate", "start", quiet=True, **kwargs)
        # Restarting collectd may take a long time and there is no
        # benefit in waiting for it since even if it fails it won't
        # tell the user anything useful.
        asyncio.ensure_future(self.restart("collectd", kwargs))

    async def _reload_user(self, **kwargs):
        await self._service("ix-passwd", "start", quiet=True, **kwargs)
        await self._service("ix-aliases", "start", quiet=True, **kwargs)
        await self._service("ix-sudoers", "start", quiet=True, **kwargs)
        await self.reload("cifs", kwargs)

    async def _restart_system_datasets(self, **kwargs):
        systemdataset = await self.middleware.call('notifier.system_dataset_create')
        if not systemdataset:
            return None
        systemdataset = await self.middleware.call('datastore.query', 'system.systemdataset', [], {'get': True})
        if systemdataset['sys_syslog_usedataset']:
            await self.restart("syslogd", kwargs)
        await self.restart("cifs", kwargs)
        if systemdataset['sys_rrd_usedataset']:
            # Restarting collectd may take a long time and there is no
            # benefit in waiting for it since even if it fails it won't
            # tell the user anything useful.
            asyncio.ensure_future(self.restart("collectd", kwargs))
Example no. 5
0
class CoreService(Service):
    @filterable
    def sessions(self, filters=None, options=None):
        """
        Get currently open websocket sessions.
        """
        return filter_list([{
            'id': i.session_id,
            'socket_type': socket.AddressFamily(
                i.request.transport.get_extra_info('socket').family
            ).name,
            'address': i.request.transport.get_extra_info('sockname'),
            'authenticated': i.authenticated,
            'call_count': i._softhardsemaphore.counter,
        } for i in self.middleware.get_wsclients().values()], filters, options)

    @filterable
    def get_jobs(self, filters=None, options=None):
        """Get the long running jobs."""
        jobs = filter_list([
            i.__encode__() for i in list(self.middleware.jobs.all().values())
        ], filters, options)
        return jobs

    @accepts(Int('id'))
    @job()
    def job_wait(self, job, id):
        target_job = self.middleware.jobs.get(id)
        target_job.wait_sync()
        if target_job.error:
            raise CallError(target_job.error)
        else:
            return target_job.result

    @accepts(Int('id'),
             Dict(
                 'job-update',
                 Dict('progress', additional_attrs=True),
             ))
    def job_update(self, id, data):
        job = self.middleware.jobs.all()[id]
        progress = data.get('progress')
        if progress:
            job.set_progress(
                progress['percent'],
                description=progress.get('description'),
                extra=progress.get('extra'),
            )

    @private
    def notify_postinit(self):
        self.middleware.call_sync('migration.run')

        # Sentinel file to tell we have gone far enough in the boot process.
        # See #17508
        open('/tmp/.bootready', 'w').close()

        # Send event to middlewared saying we are late enough in the process to call it ready
        self.middleware.call_sync('core.event_send', 'system', 'ADDED',
                                  {'id': 'ready'})

        # Let's setup periodic tasks now
        self.middleware._setup_periodic_tasks()

    @accepts(Int('id'))
    def job_abort(self, id):
        job = self.middleware.jobs.all()[id]
        return job.abort()

    @accepts()
    def get_services(self):
        """Returns a list of all registered services."""
        services = {}
        for k, v in list(self.middleware.get_services().items()):
            if v._config.private is True:
                continue
            if isinstance(v, CRUDService):
                _typ = 'crud'
            elif isinstance(v, ConfigService):
                _typ = 'config'
            else:
                _typ = 'service'
            services[k] = {
                'config': {
                    key: val
                    for key, val in list(v._config.__dict__.items())
                    if not key.startswith(('_', 'process_pool', 'thread_pool'))
                },
                'type': _typ,
            }
        return services

    @accepts(Str('service', default=None, null=True))
    def get_methods(self, service=None):
        """Return methods metadata of every available service.

        `service` parameter is optional and filters the result for a single service."""
        data = {}
        for name, svc in list(self.middleware.get_services().items()):
            if service is not None and name != service:
                continue

            # Skip private services
            if svc._config.private:
                continue

            for attr in dir(svc):

                if attr.startswith('_'):
                    continue

                method = None
                # CRUD do_update/do_delete need to be accounted for as
                # "item_method", since they are just wrapped.
                item_method = None
                if isinstance(svc, CRUDService):
                    """
                    For CRUD the create/update/delete are special.
                    The real implementation happens in do_create/do_update/do_delete
                    so that's where we actually extract pertinent information.
                    """
                    if attr in ('create', 'update', 'delete'):
                        method = getattr(svc, 'do_{}'.format(attr), None)
                        if method is None:
                            continue
                        if attr in ('update', 'delete'):
                            item_method = True
                    elif attr in ('do_create', 'do_update', 'do_delete'):
                        continue
                elif isinstance(svc, ConfigService):
                    """
                    For Config the update is special.
                    The real implementation happens in do_update
                    so that's where we actually extract pertinent information.
                    """
                    if attr == 'update':
                        original_name = 'do_{}'.format(attr)
                        if hasattr(svc, original_name):
                            method = getattr(svc, original_name, None)
                        else:
                            method = getattr(svc, attr)
                        if method is None:
                            continue
                    elif attr == 'do_update':
                        continue

                if method is None:
                    method = getattr(svc, attr, None)

                if method is None or not callable(method):
                    continue

                # Skip private methods
                if hasattr(method, '_private'):
                    continue

                # terminate is a private method used to clean up a service on shutdown
                if attr == 'terminate':
                    continue

                examples = defaultdict(list)
                doc = inspect.getdoc(method)
                if doc:
                    """
                    Allow method docstring to have sections in the format of:

                      .. section_name::

                    Currently the following sections are available:

                      .. examples:: - goes into `__all__` list in examples
                      .. examples(rest):: - goes into `rest` list in examples
                      .. examples(websocket):: - goes into `websocket` list in examples
                    """
                    sections = re.split(r'^.. (.+?)::$', doc, flags=re.M)
                    doc = sections[0]
                    for i in range(int((len(sections) - 1) / 2)):
                        idx = (i + 1) * 2 - 1
                        reg = re.search(r'examples(?:\((.+)\))?',
                                        sections[idx])
                        if reg is None:
                            continue
                        exname = reg.groups()[0]
                        if exname is None:
                            exname = '__all__'
                        examples[exname].append(sections[idx + 1])

                accepts = getattr(method, 'accepts', None)
                if accepts:
                    accepts = [
                        i.to_json_schema() for i in accepts
                        if not getattr(i, 'hidden', False)
                    ]

                data['{0}.{1}'.format(name, attr)] = {
                    'description': doc,
                    'examples': examples,
                    'accepts': accepts,
                    'item_method': True if item_method else hasattr(method, '_item_method'),
                    'no_auth_required': hasattr(method, '_no_auth_required'),
                    'filterable': hasattr(method, '_filterable'),
                    'require_websocket': hasattr(method, '_pass_app'),
                    'job': hasattr(method, '_job'),
                    'downloadable': hasattr(method, '_job') and 'output' in method._job['pipes'],
                    'uploadable': hasattr(method, '_job') and 'input' in method._job['pipes'],
                    'require_pipes': (
                        hasattr(method, '_job') and method._job['check_pipes'] and
                        any(i in method._job['pipes'] for i in ('input', 'output'))
                    ),
                }
        return data

    @accepts()
    def get_events(self):
        """
        Returns metadata for every possible event emitted from websocket server.
        """
        events = {}
        for name, attrs in self.middleware.get_events():
            events[name] = {
                'description': attrs['description'],
                'wildcard_subscription': attrs['wildcard_subscription'],
            }
        return events

    @private
    async def call_hook(self, name, args, kwargs=None):
        kwargs = kwargs or {}
        await self.middleware.call_hook(name, *args, **kwargs)

    @private
    async def event_send(self, name, event_type, kwargs):
        self.middleware.send_event(name, event_type, **kwargs)

    @accepts()
    def ping(self):
        """
        Utility method which just returns "pong".
        Can be used to keep connection/authtoken alive instead of using
        "ping" protocol message.
        """
        return 'pong'

    @accepts(
        Str('method'),
        List('args', default=[]),
        Str('filename'),
    )
    async def download(self, method, args, filename):
        """
        Core helper to call a job marked for download.

        Returns the job id and the URL for download.
        """
        job = await self.middleware.call(
            method, *args, pipes=Pipes(output=self.middleware.pipe()))
        token = await self.middleware.call('auth.generate_token', 300, {
            'filename': filename,
            'job': job.id
        })
        self.middleware.fileapp.register_job(job.id)
        return job.id, f'/_download/{job.id}?auth_token={token}'
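
    # A hypothetical usage sketch (the job method name is only a placeholder):
    #
    #   job_id, url = await self.middleware.call(
    #       'core.download', 'some.download_job', [], 'output.tar')
    #
    # The returned `url` embeds a short-lived auth token and can be fetched over HTTP.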

    @private
    def reconfigure_logging(self):
        """
        When /var/log gets moved because of the system dataset, we need to make
        sure the log file is reopened at its new location.
        """
        handler = logging._handlers.get('file')
        if handler:
            stream = handler.stream
            handler.stream = handler._open()
            if sys.stdout is stream:
                sys.stdout = handler.stream
                sys.stderr = handler.stream
            try:
                stream.close()
            except Exception:
                pass

    @private
    @accepts(Dict(
        'core-job',
        Int('sleep'),
    ))
    @job()
    def job_test(self, job, data=None):
        """
        Private no-op method to test a job, simply returning `true`.
        """
        if data is None:
            data = {}

        sleep = data.get('sleep')
        if sleep is not None:

            def sleep_fn():
                i = 0
                while i < sleep:
                    job.set_progress((i / sleep) * 100)
                    time.sleep(1)
                    i += 1
                job.set_progress(100)

            t = threading.Thread(target=sleep_fn, daemon=True)
            t.start()
            t.join()
        return True

    @accepts(
        Str('engine', enum=['PTVS', 'PYDEV']),
        Dict(
            'options',
            Str('secret'),
            Str('bind_address', default='0.0.0.0'),
            Int('bind_port', default=3000),
            Str('host'),
            Bool('wait_attach', default=False),
            Str('local_path'),
        ),
    )
    async def debug(self, engine, options):
        """
        Setup middlewared for remote debugging.

        engines:
          - PTVS: Python Visual Studio
          - PYDEV: Python Dev (Eclipse/PyCharm)

        options:
          - secret: password for PTVS
          - host: required for PYDEV, hostname of local computer (developer workstation)
          - local_path: required for PYDEV, path to the middlewared source on the local computer (e.g. /home/user/freenas/src/middlewared/middlewared)
        """
        if engine == 'PTVS':
            import ptvsd
            if 'secret' not in options:
                raise ValidationError('secret', 'secret is required for PTVS')
            ptvsd.enable_attach(
                options['secret'],
                address=(options['bind_address'], options['bind_port']),
            )
            if options['wait_attach']:
                ptvsd.wait_for_attach()
        elif engine == 'PYDEV':
            for i in ('host', 'local_path'):
                if i not in options:
                    raise ValidationError(i, f'{i} is required for PYDEV')
            os.environ['PATHS_FROM_ECLIPSE_TO_PYTHON'] = json.dumps([
                [
                    options['local_path'],
                    '/usr/local/lib/python3.7/site-packages/middlewared'
                ],
            ])
            import pydevd
            pydevd.stoptrace()
            pydevd.settrace(host=options['host'])
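
    # For illustration (host and path are placeholders): attaching a
    # PyCharm/Eclipse debugger could look like
    #
    #   await self.middleware.call('core.debug', 'PYDEV', {
    #       'host': '192.168.0.10',
    #       'local_path': '/home/user/freenas/src/middlewared/middlewared',
    #   })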

    @private
    async def profile(self, method, params=None):
        return await self.middleware.call(method,
                                          *(params or []),
                                          profile=True)

    @private
    def threads_stacks(self):
        return get_threads_stacks()

    @accepts(Str("method"), List("params", default=[]))
    @job(lock=lambda args: f"bulk:{args[0]}")
    async def bulk(self, job, method, params):
        """
        Loop over a list of items for the given method, returning a list of
        dicts containing `result` and `error` keys.

        `result` holds the value returned by the method being called; if the
        call fails, `result` is None and `error` holds the exception message.
        """
        statuses = []
        progress_step = 100 / len(params)
        current_progress = 0

        for p in params:
            try:
                msg = await self.middleware.call(method, *p)
                error = None

                if isinstance(msg, Job):
                    # Don't shadow the outer `job` (the bulk job itself), which
                    # is still needed for progress reporting below.
                    inner_job = msg
                    msg = await inner_job.wait()

                    if inner_job.error:
                        error = inner_job.error

                statuses.append({"result": msg, "error": error})
            except Exception as e:
                statuses.append({"result": None, "error": str(e)})

            current_progress += progress_step
            job.set_progress(current_progress)

        return statuses
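
    # A usage sketch (the target method is only an example): restarting two
    # services in one bulk call could look like
    #
    #   statuses = await self.middleware.call(
    #       'core.bulk', 'service.restart', [['cifs'], ['nfs']])
    #   # -> [{'result': ..., 'error': None}, {'result': ..., 'error': None}]
    #
    # Each inner list is unpacked as the positional arguments of one call.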
Example no. 6
0
class SupportService(Service):

    @accepts(
        Str('username'),
        Str('password'),
    )
    def fetch_categories(self, username, password):
        """
        Fetch all the categories available for `username` using `password`.
        Returns a dict with the category name as a key and id as value.
        """

        sw_name = 'freenas' if self.middleware.call_sync('system.is_freenas') else 'truenas'
        try:
            r = requests.post(
                f'https://{ADDRESS}/{sw_name}/api/v1.0/categories',
                data=json.dumps({
                    'user': username,
                    'password': password,
                }),
                headers={'Content-Type': 'application/json'},
                timeout=10,
            )
            data = r.json()
        except simplejson.JSONDecodeError:
            self.logger.debug(f'Failed to decode categories response: {r.text}')
            raise CallError('Invalid proxy server response', errno.EBADMSG)
        except requests.ConnectionError as e:
            raise CallError(f'Connection error {e}', errno.EBADF)
        except requests.Timeout:
            raise CallError('Connection time out', errno.ETIMEDOUT)

        if 'error' in data:
            raise CallError(data['message'], errno.EINVAL)

        return data
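
    # For illustration: on success the proxy returns a mapping of category names
    # to ids, e.g. something like {'Bug reports': 101, 'Feature requests': 102}
    # (the actual names and ids depend on the account).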

    @accepts(Dict(
        'new_ticket',
        Str('title', required=True),
        Str('body', required=True),
        Str('category', required=True),
        Bool('attach_debug', default=False),
        Str('username'),
        Str('password'),
        Str('type', enum=['BUG', 'FEATURE']),
        Str('criticality'),
        Str('environment'),
        Str('phone'),
        Str('name'),
        Str('email'),
    ))
    @job()
    async def new_ticket(self, job, data):
        """
        Creates a new ticket for support.
        This is done using the support proxy API.
        For FreeNAS it will be created on Redmine and for TrueNAS on SupportSuite.

        For FreeNAS `criticality`, `environment`, `phone`, `name` and `email` attributes are not required.
        For TrueNAS `username`, `password` and `type` attributes are not required.
        """

        job.set_progress(1, 'Gathering data')

        sw_name = 'freenas' if await self.middleware.call('system.is_freenas') else 'truenas'

        if sw_name == 'freenas':
            required_attrs = ('type', 'username', 'password')
        else:
            required_attrs = ('phone', 'name', 'email', 'criticality', 'environment')
            data['serial'] = (await (await Popen(['/usr/local/sbin/dmidecode', '-s', 'system-serial-number'], stdout=subprocess.PIPE)).communicate())[0].decode().split('\n')[0].upper()
            license = get_license()[0]
            if license:
                data['company'] = license.customer_name
            else:
                data['company'] = 'Unknown'

        for i in required_attrs:
            if i not in data:
                raise CallError(f'{i} is required', errno.EINVAL)

        data['version'] = (await self.middleware.call('system.version')).split('-', 1)[-1]
        if 'username' in data:
            data['user'] = data.pop('username')
        debug = data.pop('attach_debug')

        type_ = data.get('type')
        if type_:
            data['type'] = type_.lower()

        job.set_progress(20, 'Submitting ticket')

        try:
            r = await self.middleware.run_in_thread(lambda: requests.post(
                f'https://{ADDRESS}/{sw_name}/api/v1.0/ticket',
                data=json.dumps(data),
                headers={'Content-Type': 'application/json'},
                timeout=10,
            ))
            result = r.json()
        except simplejson.JSONDecodeError:
            self.logger.debug(f'Failed to decode ticket creation response: {r.text}')
            raise CallError('Invalid proxy server response', errno.EBADMSG)
        except requests.ConnectionError as e:
            raise CallError(f'Connection error {e}', errno.EBADF)
        except requests.Timeout:
            raise CallError('Connection time out', errno.ETIMEDOUT)

        if r.status_code != 200:
            self.logger.debug(f'Support Ticket failed ({r.status_code}): {r.text}')
            raise CallError('Ticket creation failed, try again later.', errno.EINVAL)

        if result['error']:
            raise CallError(result['message'], errno.EINVAL)

        ticket = result.get('ticketnum')
        url = result.get('message')
        if not ticket:
            raise CallError('New ticket number was not returned by the server', errno.EINVAL)
        job.set_progress(50, f'Ticket created: {ticket}', extra={'ticket': ticket})

        if debug:
            # FIXME: generate debug from middleware
            mntpt, direc, dump = await self.middleware.run_in_thread(debug_get_settings)

            job.set_progress(60, 'Generating debug file')
            await self.middleware.run_in_thread(debug_generate)

            not_freenas = not (await self.middleware.call('system.is_freenas'))
            if not_freenas:
                not_freenas &= await self.middleware.call('notifier.failover_licensed')
            if not_freenas:
                debug_file = f'{direc}/debug.tar'
                debug_name = 'debug-{}.tar'.format(time.strftime('%Y%m%d%H%M%S'))
            else:
                debug_file = dump
                debug_name = 'debug-{}-{}.txz'.format(
                    socket.gethostname().split('.')[0],
                    time.strftime('%Y%m%d%H%M%S'),
                )

            job.set_progress(80, 'Attaching debug file')

            t = {
                'ticket': ticket,
                'filename': debug_name,
            }
            if 'user' in data:
                t['username'] = data['user']
            if 'password' in data:
                t['password'] = data['password']
            tjob = await self.middleware.call('support.attach_ticket', t, pipes=Pipes(input=self.middleware.pipe()))

            with open(debug_file, 'rb') as f:
                await self.middleware.run_in_io_thread(shutil.copyfileobj, f, tjob.pipes.input.w)
                await self.middleware.run_in_io_thread(tjob.pipes.input.w.close)

            await tjob.wait()
        else:
            job.set_progress(100)

        return {
            'ticket': ticket,
            'url': url,
        }
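
    # A hypothetical payload sketch: on FreeNAS a minimal ticket could look like
    #
    #   {
    #       'title': 'Short summary',
    #       'body': 'Detailed description',
    #       'category': '<category id from support.fetch_categories>',
    #       'type': 'BUG',
    #       'username': 'user@example.org',
    #       'password': '...',
    #   }
    #
    # The job returns a dict with the new `ticket` number and its `url`.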

    @accepts(Dict(
        'attach_ticket',
        Int('ticket', required=True),
        Str('filename', required=True),
        Str('username'),
        Str('password'),
    ))
    @job(pipes=["input"])
    async def attach_ticket(self, job, data):
        """
        Method to attach a file to an existing ticket.
        """

        sw_name = 'freenas' if await self.middleware.call('system.is_freenas') else 'truenas'

        if 'username' in data:
            data['user'] = data.pop('username')
        data['ticketnum'] = data.pop('ticket')
        filename = data.pop('filename')

        try:
            r = await self.middleware.run_in_io_thread(lambda: requests.post(
                f'https://{ADDRESS}/{sw_name}/api/v1.0/ticket/attachment',
                data=data,
                timeout=10,
                files={'file': (filename, job.pipes.input.r)},
            ))
            data = r.json()
        except simplejson.JSONDecodeError:
            self.logger.debug(f'Failed to decode ticket attachment response: {r.text}')
            raise CallError('Invalid proxy server response', errno.EBADMSG)
        except requests.ConnectionError as e:
            raise CallError(f'Connection error {e}', errno.EBADF)
        except requests.Timeout:
            raise CallError('Connection time out', errno.ETIMEDOUT)

        if data['error']:
            raise CallError(data['message'], errno.EINVAL)
Example no. 7
0
class NFSService(SystemServiceService):
    class Config:
        service = "nfs"
        datastore_prefix = "nfs_srv_"
        datastore_extend = 'nfs.nfs_extend'

    @private
    def nfs_extend(self, nfs):
        nfs["userd_manage_gids"] = nfs.pop("16")
        return nfs

    @private
    def nfs_compress(self, nfs):
        nfs["16"] = nfs.pop("userd_manage_gids")
        return nfs

    @accepts(
        Dict(
            'nfs_update',
            Int('servers', validators=[Range(min=1, max=256)]),
            Bool('udp'),
            Bool('allow_nonroot'),
            Bool('v4'),
            Bool('v4_v3owner'),
            Bool('v4_krb'),
            List('bindip', items=[IPAddr('ip')]),
            Int('mountd_port',
                required=False,
                validators=[Range(min=1, max=65535)]),
            Int('rpcstatd_port',
                required=False,
                validators=[Range(min=1, max=65535)]),
            Int('rpclockd_port',
                required=False,
                validators=[Range(min=1, max=65535)]),
            Bool('userd_manage_gids'),
            Bool('mountd_log'),
            Bool('statd_lockd_log'),
        ))
    async def do_update(self, data):
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if not new["v4"] and new["v4_v3owner"]:
            verrors.add("nfs_update.v4_v3owner",
                        "This option requires enabling NFSv4")

        if new["v4_v3owner"] and new["userd_manage_gids"]:
            verrors.add(
                "nfs_update.userd_manage_gids",
                "This option is incompatible with NFSv3 ownership model for NFSv4"
            )

        if verrors:
            raise verrors

        self.nfs_compress(new)

        await self._update_service(old, new)

        self.nfs_extend(new)

        return new
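
do_update above enforces two cross-field rules: `v4_v3owner` requires `v4`, and `v4_v3owner` cannot be combined with
`userd_manage_gids`. A minimal standalone sketch of just those checks (plain dict in, list of errors out, instead of
the middleware's ValidationErrors machinery):

def check_nfs_update(new):
    """Sketch of the two cross-field rules enforced by the NFS do_update above."""
    errors = []
    if not new.get("v4") and new.get("v4_v3owner"):
        errors.append(("nfs_update.v4_v3owner",
                       "This option requires enabling NFSv4"))
    if new.get("v4_v3owner") and new.get("userd_manage_gids"):
        errors.append(("nfs_update.userd_manage_gids",
                       "This option is incompatible with NFSv3 ownership model for NFSv4"))
    return errors


assert check_nfs_update({"v4": False, "v4_v3owner": True})
assert not check_nfs_update({"v4": True, "v4_v3owner": True, "userd_manage_gids": False})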
Ejemplo n.º 8
0
class KeychainCredentialService(CRUDService):
    class Config:
        datastore = "system.keychaincredential"

    @accepts(
        Dict(
            "keychain_credential_create",
            Str("name", required=True),
            Str("type", required=True),
            Dict("attributes",
                 additional_attrs=True,
                 required=True,
                 private=True),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a Keychain Credential

        Create a Keychain Credential of any type.
        Every Keychain Credential has a `name` which is used to distinguish it from others.
        The following `type`s are supported:
         * `SSH_KEY_PAIR`
           Which `attributes` are:
           * `private_key`
           * `public_key` (if omitted, it is derived automatically from the private key)
           At least one attribute is required.

         * `SSH_CREDENTIALS`
           Which `attributes` are:
           * `host`
           * `port` (default 22)
           * `username` (default root)
           * `private_key` (Keychain Credential ID)
           * `remote_host_key` (you can use `keychaincredential.remote_ssh_host_key_scan` to discover it)
           * `cipher`: one of `STANDARD`, `FAST`, or `DISABLED` (last requires special support from both SSH server and
             client)
           * `connect_timeout` (default 10)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "keychaincredential.create",
                "params": [{
                    "name": "Work SSH connection",
                    "type": "SSH_CREDENTIALS",
                    "attributes": {
                        "host": "work.freenas.org",
                        "private_key": 12,
                        "remote_host_key": "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMn1VjdSMatGnxbOsrneKyai+dh6d4Hm"
                    }
                }]
            }
        """

        await self._validate("keychain_credential_create", data)

        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
        )
        return data

    @accepts(Int("id"),
             Patch(
                 "keychain_credential_create",
                 "keychain_credential_update",
                 ("attr", {
                     "update": True
                 }),
                 ("rm", {
                     "name": "type"
                 }),
             ))
    async def do_update(self, id, data):
        """
        Update a Keychain Credential with specific `id`

        Please note that you can't change `type`

        You must also specify the full `attributes` value

        See the documentation for the `create` method for information on payload contents

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "keychaincredential.update",
                "params": [
                    13,
                    {
                        "name": "Work SSH connection",
                        "attributes": {
                            "host": "work.ixsystems.com",
                            "private_key": 12,
                            "remote_host_key": "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMn1VjdSMatGnxbOsrneKyai+dh6d4Hm"
                        }
                    }
                ]
            }
        """

        old = await self._get_instance(id)

        new = old.copy()
        new.update(data)

        await self._validate("keychain_credentials_update", new, id)

        await self.middleware.call(
            "datastore.update",
            self._config.datastore,
            id,
            new,
        )

        if new["type"] in ["SSH_KEY_PAIR", "SSH_CREDENTIALS"]:
            await self.middleware.call("zettarepl.update_tasks")

        return new

    @accepts(Int("id"), Dict("options", Bool("cascade", default=False)))
    async def do_delete(self, id, options):
        """
        Delete Keychain Credential with specific `id`

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "keychaincredential.delete",
                "params": [
                    13
                ]
            }
        """

        instance = await self._get_instance(id)

        for delegate in TYPES[instance["type"]].used_by_delegates:
            delegate = delegate(self.middleware)
            for row in await delegate.query(instance["id"]):
                if not options["cascade"]:
                    raise CallError(
                        "This credential is used and no cascade option is specified"
                    )

                await delegate.unbind(row)

        await self.middleware.call(
            "datastore.delete",
            self._config.datastore,
            id,
        )

    @accepts(Int("id"))
    async def used_by(self, id):
        """
        Returns list of objects that use this credential.
        """
        instance = await self._get_instance(id)

        result = []
        for delegate in TYPES[instance["type"]].used_by_delegates:
            delegate = delegate(self.middleware)
            for row in await delegate.query(instance["id"]):
                result.append({
                    "title": await delegate.get_title(row),
                    "unbind_method": delegate.unbind_method.value,
                })
                if isinstance(
                        delegate,
                        OtherKeychainCredentialKeychainCredentialUsedByDelegate
                ):
                    result.extend(await self.middleware.call(
                        "keychaincredential.used_by", row["id"]))
        return result

    async def _validate(self, schema_name, data, id=None):
        verrors = ValidationErrors()

        await self._ensure_unique(verrors, schema_name, "name", data["name"],
                                  id)

        if data["type"] not in TYPES:
            verrors.add(f"{schema_name}.type", "Invalid type")
            raise verrors
        else:
            type = TYPES[data["type"]]

            attributes_verrors = validate_attributes(type.credentials_schema,
                                                     data)
            verrors.add_child(f"{schema_name}.attributes", attributes_verrors)

        if verrors:
            raise verrors

        await type.validate_and_pre_save(self.middleware, verrors,
                                         f"{schema_name}.attributes",
                                         data["attributes"])

        if verrors:
            raise verrors

    @private
    @accepts(Int("id"), Str("type"))
    async def get_of_type(self, id, type):
        try:
            credential = await self.middleware.call("keychaincredential.query",
                                                    [["id", "=", id]],
                                                    {"get": True})
        except IndexError:
            raise CallError("Credential does not exist", errno.ENOENT)
        else:
            if credential["type"] != type:
                raise CallError(f"Credential is not of type {type}",
                                errno.EINVAL)

            if not credential["attributes"]:
                raise CallError(
                    f"Decrypting credential {credential['name']} failed",
                    errno.EFAULT)

            return credential

    @accepts()
    def generate_ssh_key_pair(self):
        """
        Generate a public/private key pair

        Generate a public/private key pair (useful for `SSH_KEY_PAIR` type)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "keychaincredential.generate_ssh_key_pair",
                "params": []
            }
        """

        key = os.path.join(
            "/tmp",
            "".join(random.choice(string.ascii_letters) for _ in range(32)))
        if os.path.exists(key):
            os.unlink(key)
        if os.path.exists(f"{key}.pub"):
            os.unlink(f"{key}.pub")
        try:
            subprocess.check_call(
                ["ssh-keygen", "-t", "rsa", "-f", key, "-N", "", "-q"])
            with open(key) as f:
                private_key = f.read()
            with open(f"{key}.pub") as f:
                public_key = f.read()
        finally:
            if os.path.exists(key):
                os.unlink(key)
            if os.path.exists(f"{key}.pub"):
                os.unlink(f"{key}.pub")

        return {
            "private_key": private_key,
            "public_key": public_key,
        }
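
    # A sketch of the same ssh-keygen invocation using a private temporary directory instead of
    # predictable names under /tmp (illustrative only, not how the service is implemented):
    #
    #     import os, subprocess, tempfile
    #
    #     def generate_ssh_key_pair_sketch():
    #         with tempfile.TemporaryDirectory() as tmp:
    #             key = os.path.join(tmp, "id_rsa")
    #             subprocess.check_call(["ssh-keygen", "-t", "rsa", "-f", key, "-N", "", "-q"])
    #             with open(key) as priv, open(f"{key}.pub") as pub:
    #                 return {"private_key": priv.read(), "public_key": pub.read()}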

    @accepts(
        Dict(
            "keychain_remote_ssh_host_key_scan",
            Str("host", required=True, empty=False),
            Str("port", default=22),
            Int("connect_timeout", default=10),
        ))
    async def remote_ssh_host_key_scan(self, data):
        """
        Discover a remote host key

        Discover a remote host key (useful for `SSH_CREDENTIALS`)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "keychaincredential.delete",
                "params": [{
                    "host": "work.freenas.org"
                }]
            }
        """

        proc = await run([
            "ssh-keyscan", "-p",
            str(data["port"]), "-T",
            str(data["connect_timeout"]), data["host"]
        ],
                         check=False,
                         encoding="utf8")
        if proc.returncode == 0:
            if proc.stdout:
                try:
                    return process_ssh_keyscan_output(proc.stdout)
                except Exception:
                    raise CallError(
                        f"ssh-keyscan failed: {proc.stdout + proc.stderr}"
                    ) from None
            elif proc.stderr:
                raise CallError(f"ssh-keyscan failed: {proc.stderr}")
            else:
                raise CallError("SSH timeout")
        else:
            raise CallError(f"ssh-keyscan failed: {proc.stdout + proc.stderr}")

    @accepts(
        Dict(
            "keychain_remote_ssh_semiautomatic_setup",
            Str("name", required=True),
            Str("url", required=True, validators=[URL()]),
            Str("token", private=True),
            Str("password", private=True),
            Str("username", default="root"),
            Int("private_key", required=True),
            Str("cipher",
                enum=["STANDARD", "FAST", "DISABLED"],
                default="STANDARD"),
            Int("connect_timeout", default=10),
        ))
    def remote_ssh_semiautomatic_setup(self, data):
        """
        Perform semi-automatic SSH connection setup with another FreeNAS machine

        Perform semi-automatic SSH connection setup with another FreeNAS machine. It creates an `SSH_CREDENTIALS`
        credential with the specified `name` that can be used to connect to the FreeNAS machine at the specified
        `url`, authenticating with a temporary auth `token` or `password`. The other FreeNAS machine adds the public
        key of `private_key` to the allowed keys of `username`. Other `SSH_CREDENTIALS` attributes such as `cipher`
        and `connect_timeout` can be specified as well.

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "keychaincredential.keychain_remote_ssh_semiautomatic_setup",
                "params": [{
                    "name": "Work SSH connection",
                    "url": "https://work.freenas.org",
                    "token": "8c8d5fd1-f749-4429-b379-9c186db4f834",
                    "private_key": 12
                }]
            }
        """

        replication_key = self.middleware.run_coroutine(
            get_ssh_key_pair_with_private_key(self.middleware,
                                              data["private_key"]))

        try:
            client = Client(
                os.path.join(re.sub("^http", "ws", data["url"]), "websocket"))
        except Exception as e:
            raise CallError(f"Unable to connect to remote system: {e}")

        with client as c:
            if data.get("token"):
                if not c.call("auth.token", data["token"]):
                    raise CallError("Invalid token")
            elif data.get("password"):
                if not c.call("auth.login", "root", data["password"]):
                    raise CallError("Invalid password")
            else:
                raise CallError(
                    "You should specify either remote system password or temporary authentication token"
                )

            try:
                response = c.call(
                    "replication.pair", {
                        "hostname": "any-host",
                        "public-key":
                        replication_key["attributes"]["public_key"],
                        "user": data["username"],
                    })
            except Exception as e:
                raise CallError(
                    f"Semi-automatic SSH connection setup failed: {e!r}")

        return self.middleware.call_sync(
            "keychaincredential.create", {
                "name": data["name"],
                "type": "SSH_CREDENTIALS",
                "attributes": {
                    "host":
                    urllib.parse.urlparse(data["url"]).hostname,
                    "port":
                    response["ssh_port"],
                    "username":
                    data["username"],
                    "private_key":
                    replication_key["id"],
                    "remote_host_key":
                    process_ssh_keyscan_output(response["ssh_hostkey"]),
                    "cipher":
                    data["cipher"],
                    "connect_timeout":
                    data["connect_timeout"],
                }
            })

    @private
    @accepts(
        Dict(
            "keychain_ssh_pair",
            Str("remote_hostname", required=True),
            Str("username", default="root"),
            Str("public_key", required=True),
        ))
    async def ssh_pair(self, data):
        """
        Receive a public key and store it so that SSH connections from the peer are accepted, then
        return the pertinent SSH data of this machine.
        """
        service = await self.middleware.call("service.query",
                                             [("service", "=", "ssh")],
                                             {"get": True})
        ssh = await self.middleware.call("ssh.config")
        try:
            user = await self.middleware.call(
                "user.query", [("username", "=", data["username"])],
                {"get": True})
        except IndexError:
            raise CallError(f"User {data['username']} does not exist")

        if user["home"].startswith("/nonexistent") or not os.path.exists(
                user["home"]):
            raise CallError(f"Home directory {user['home']} does not exist",
                            errno.ENOENT)

        # Make sure SSH is enabled
        if not service["enable"]:
            await self.middleware.call("service.update", "ssh",
                                       {"enable": True})
            await self.middleware.call("service.start", "ssh")

            # This might be the first time of the service being enabled
            # which will then result in new host keys we need to grab
            ssh = await self.middleware.call("ssh.config")

        # If .ssh dir does not exist, create it
        dotsshdir = os.path.join(user["home"], ".ssh")
        if not os.path.exists(dotsshdir):
            os.mkdir(dotsshdir)
            os.chown(dotsshdir, user["uid"], user["group"]["bsdgrp_gid"])

        # Write public key in user authorized_keys for SSH
        authorized_keys_file = f"{dotsshdir}/authorized_keys"
        with open(authorized_keys_file, "a+") as f:
            f.seek(0)
            if data["public_key"] not in f.read():
                f.write("\n" + data["public_key"] + "\n")

        ssh_hostkey = "{0} {1}\n{0} {2}\n{0} {3}\n".format(
            data["remote_hostname"],
            base64.b64decode(ssh["host_rsa_key_pub"].encode()).decode(),
            base64.b64decode(ssh["host_ecdsa_key_pub"].encode()).decode(),
            base64.b64decode(ssh["host_ed25519_key_pub"].encode()).decode(),
        )

        return {
            "port": ssh["tcpport"],
            "host_key": ssh_hostkey,
        }
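
The ssh_pair helper above appends the peer's public key to the user's authorized_keys only when it is not already
present. The same idempotent-append pattern in isolation (hypothetical paths, no middleware involved):

import os


def authorize_key(home, public_key):
    """Append `public_key` to <home>/.ssh/authorized_keys exactly once (sketch)."""
    dotssh = os.path.join(home, ".ssh")
    os.makedirs(dotssh, mode=0o700, exist_ok=True)
    path = os.path.join(dotssh, "authorized_keys")
    with open(path, "a+") as f:
        f.seek(0)
        if public_key not in f.read():
            f.write("\n" + public_key + "\n")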
Ejemplo n.º 9
0
class VMDeviceService(CRUDService):

    DEVICE_ATTRS = {
        'CDROM': CDROM.schema,
        'RAW': RAW.schema,
        'DISK': DISK.schema,
        'NIC': NIC.schema,
        'PCI': PCI.schema,
        'DISPLAY': DISPLAY.schema,
    }

    class Config:
        namespace = 'vm.device'
        datastore = 'vm.device'
        datastore_extend = 'vm.device.extend_device'
        cli_namespace = 'service.vm.device'

    @private
    async def create_resource(self, device, old=None):
        return ((device['dtype'] == 'DISK'
                 and device['attributes'].get('create_zvol'))
                or (device['dtype'] == 'RAW' and
                    (not device['attributes'].get('exists', True) or
                     (old and old['attributes'].get('size') !=
                      device['attributes'].get('size')))))

    @private
    async def extend_device(self, device):
        if device['vm']:
            device['vm'] = device['vm']['id']
        if not device['order']:
            if device['dtype'] == 'CDROM':
                device['order'] = 1000
            elif device['dtype'] in ('DISK', 'RAW'):
                device['order'] = 1001
            else:
                device['order'] = 1002
        return device

    @accepts()
    def nic_attach_choices(self):
        """
        Available choices for NIC Attach attribute.
        """
        return self.middleware.call_sync('interface.choices',
                                         {'exclude': ['epair', 'tap', 'vnet']})

    @accepts()
    async def bind_choices(self):
        """
        Available choices for Bind attribute.
        """
        return {
            d['address']: d['address']
            for d in await self.middleware.call('interface.ip_in_use', {
                'static': True,
                'any': True
            })
        }

    @private
    async def update_device(self, data, old=None):
        if data['dtype'] == 'DISK':
            create_zvol = data['attributes'].pop('create_zvol', False)

            if create_zvol:
                ds_options = {
                    'name': data['attributes'].pop('zvol_name'),
                    'type': 'VOLUME',
                    'volsize': data['attributes'].pop('zvol_volsize'),
                }

                self.logger.debug(
                    f'Creating ZVOL {ds_options["name"]} with volsize {ds_options["volsize"]}'
                )

                zvol_blocksize = await self.middleware.call(
                    'pool.dataset.recommended_zvol_blocksize',
                    ds_options['name'].split('/', 1)[0])
                ds_options['volblocksize'] = zvol_blocksize

                new_zvol = (await self.middleware.call('pool.dataset.create',
                                                       ds_options))['id']
                data['attributes']['path'] = f'/dev/zvol/{new_zvol}'
        elif data['dtype'] == 'RAW' and (
                not data['attributes'].pop('exists', True) or
            (old and old['attributes']['size'] != data['attributes']['size'])):
            path = data['attributes']['path']
            cp = await run(
                ['truncate', '-s',
                 str(data['attributes']['size']), path],
                check=False)
            if cp.returncode:
                raise CallError(
                    f'Failed to create or update raw file {path}: {cp.stderr}')

        return data

    @accepts(
        Dict(
            'vmdevice_create',
            Str('dtype',
                enum=['NIC', 'DISK', 'CDROM', 'PCI', 'DISPLAY', 'RAW'],
                required=True),
            Int('vm', required=True),
            Dict('attributes', additional_attrs=True, default=None),
            Int('order', default=None, null=True),
            register=True,
        ), )
    async def do_create(self, data):
        """
        Create a new device for the VM of id `vm`.

        If `dtype` is the `RAW` type and a new raw file is to be created, `attributes.exists` will be passed as false.
        This means the API handles creating the raw file and raises the appropriate exception if file creation fails.

        If `dtype` is of `DISK` type and a new Zvol is to be created, `attributes.create_zvol` will be passed as
        true with valid `attributes.zvol_name` and `attributes.zvol_volsize` values.
        """
        data = await self.validate_device(data, update=False)
        data = await self.update_device(data)

        id = await self.middleware.call('datastore.insert',
                                        self._config.datastore, data)
        await self.__reorder_devices(id, data['vm'], data['order'])

        return await self.get_instance(id)

    @accepts(Int('id'),
             Patch(
                 'vmdevice_create',
                 'vmdevice_update',
                 ('attr', {
                     'update': True
                 }),
             ))
    async def do_update(self, id, data):
        """
        Update a VM device of `id`.

        Pass `attributes.size` to resize a `dtype` `RAW` device. The raw file will be resized.
        """
        device = await self.get_instance(id)
        new = device.copy()
        new.update(data)

        new = await self.validate_device(new, device)
        new = await self.update_device(new, device)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new)
        await self.__reorder_devices(id, device['vm'], new['order'])

        return await self.get_instance(id)

    @private
    async def delete_resource(self, options, device):
        if options['zvol']:
            if device['dtype'] != 'DISK':
                raise CallError(
                    'The device is not a disk and has no zvol to destroy.')
            zvol_id = device['attributes'].get('path',
                                               '').rsplit('/dev/zvol/')[-1]
            if await self.middleware.call('pool.dataset.query',
                                          [['id', '=', zvol_id]]):
                # FIXME: We should use pool.dataset.delete but right now FS attachments will consider
                # the current device as a valid reference. Also, should we only stop the VM when deleting an
                # attachment?
                await self.middleware.call('zfs.dataset.delete', zvol_id)
        if options['raw_file']:
            if device['dtype'] != 'RAW':
                raise CallError('Device is not of RAW type.')
            try:
                os.unlink(device['attributes']['path'])
            except OSError:
                raise CallError(
                    f'Failed to destroy {device["attributes"]["path"]}')

    @accepts(Int('id'),
             Dict(
                 'vm_device_delete',
                 Bool('zvol', default=False),
                 Bool('raw_file', default=False),
                 Bool('force', default=False),
             ))
    async def do_delete(self, id, options):
        """
        Delete a VM device of `id`.
        """
        device = await self.get_instance(id)
        status = await self.middleware.call('vm.status', device['vm'])
        if status.get('state') == 'RUNNING':
            raise CallError(
                'Please stop associated VM before deleting VM device.')

        try:
            await self.delete_resource(options, device)
        except CallError:
            if not options['force']:
                raise

        if device['dtype'] == 'PCI':
            device_obj = PCI(device, middleware=self.middleware)
            if await self.middleware.run_in_thread(device_obj.safe_to_reattach
                                                   ):
                try:
                    await self.middleware.run_in_thread(
                        device_obj.reattach_device)
                except CallError:
                    if not options['force']:
                        raise

        return await self.middleware.call('datastore.delete',
                                          self._config.datastore, id)

    async def __reorder_devices(self, id, vm_id, order):
        if order is None:
            return
        filters = [('vm', '=', vm_id), ('id', '!=', id)]
        if await self.middleware.call('vm.device.query',
                                      filters + [('order', '=', order)]):
            used_order = [order]
            for device in await self.middleware.call('vm.device.query',
                                                     filters,
                                                     {'order_by': ['order']}):
                if not device['order']:
                    continue

                if device['order'] not in used_order:
                    used_order.append(device['order'])
                    continue

                device['order'] = min(used_order) + 1
                while device['order'] in used_order:
                    device['order'] += 1
                used_order.append(device['order'])
                await self.middleware.call('datastore.update',
                                           self._config.datastore,
                                           device['id'], device)
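
    # A small pure-Python illustration of the collision handling above: once the requested order is taken,
    # every later device that collides is bumped to the next free slot (hypothetical orders, no datastore):
    #
    #     used_order = [1001]                   # order requested for the new device
    #     for order in [1000, 1001, 1002]:      # existing devices of the VM, sorted by order
    #         if order not in used_order:
    #             used_order.append(order)
    #             continue
    #         order = min(used_order) + 1
    #         while order in used_order:
    #             order += 1
    #         used_order.append(order)
    #     # used_order -> [1001, 1000, 1002, 1003]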

    @private
    async def disk_uniqueness_integrity_check(self, device, vm):
        # This ensures that the disk is not already present for `vm`
        def translate_device(dev):
            # A disk should have a path configured at all times; when it does not, `dtype` is DISK
            # and the end user wants to create a new zvol.
            return dev['attributes'].get(
                'path') or f'/dev/zvol/{dev["attributes"]["zvol_name"]}'

        disks = [
            d for d in vm['devices'] if d['dtype'] in ('DISK', 'RAW', 'CDROM')
            and translate_device(d) == translate_device(device)
        ]
        if not disks:
            # We don't have that disk path in vm devices, we are good to go
            return True
        elif len(disks) > 1:
            # VM is mis-configured
            return False
        elif not device.get('id') and disks:
            # A new device is being created, but it already exists in the VM. This can also happen while the VM
            # instance itself is being created, in which case it is okay. The key is that the id field is absent.
            return not bool(disks[0].get('id'))
        elif device.get('id'):
            # The device is being updated; if it is the same one we have in the db, we are okay
            return device['id'] == disks[0].get('id')
        else:
            return False

    @private
    async def validate_device(self,
                              device,
                              old=None,
                              vm_instance=None,
                              update=True):
        # We allow vm_instance to be passed for cases where VM devices are being updated via the VM, so that
        # the device checks are performed against the modified vm_instance object rather than the one the db holds.
        # vm_instance should always be provided when VMService is the caller; if VMDeviceService is interacting,
        # the device is already configured with a VM and we can retrieve the VM's data from the db.
        if not vm_instance:
            vm_instance = await self.middleware.call('vm.get_instance',
                                                     device['vm'])

        verrors = ValidationErrors()
        schema = self.DEVICE_ATTRS.get(device['dtype'])
        if schema:
            try:
                device['attributes'] = schema.clean(device['attributes'])
            except Error as e:
                verrors.add(f'attributes.{e.attribute}', e.errmsg, e.errno)

            try:
                schema.validate(device['attributes'])
            except ValidationErrors as e:
                verrors.extend(e)

            if verrors:
                raise verrors

        # vm_instance usages SHOULD NOT rely on the device `id` field to uniquely identify objects, as it is
        # possible that VMService is creating a new VM with devices and the ids don't exist yet

        if device.get('dtype') == 'DISK':
            create_zvol = device['attributes'].get('create_zvol')
            path = device['attributes'].get('path')
            if create_zvol:
                for attr in ('zvol_name', 'zvol_volsize'):
                    if not device['attributes'].get(attr):
                        verrors.add(f'attributes.{attr}',
                                    'This field is required.')
                parentzvol = (device['attributes'].get('zvol_name')
                              or '').rsplit('/', 1)[0]
                if parentzvol and not await self.middleware.call(
                        'pool.dataset.query', [('id', '=', parentzvol)]):
                    verrors.add(
                        'attributes.zvol_name',
                        f'Parent dataset {parentzvol} does not exist.',
                        errno.ENOENT)
                zvol = await self.middleware.call(
                    'pool.dataset.query',
                    [['id', '=', device['attributes'].get('zvol_name')]])
                if not verrors and create_zvol and zvol:
                    verrors.add(
                        'attributes.zvol_name',
                        f'{device["attributes"]["zvol_name"]} already exists.')
                elif zvol and zvol[0]['locked']:
                    verrors.add('attributes.zvol_name',
                                f'{zvol[0]["id"]} is locked.')
            elif not path:
                verrors.add('attributes.path', 'Disk path is required.')
            elif path and not os.path.exists(path):
                verrors.add('attributes.path',
                            f'Disk path {path} does not exist.', errno.ENOENT)

            if path and len(path) > 63:
                # SPECNAMELEN is not long enough (63) in 12, 13 will be 255
                verrors.add(
                    'attributes.path',
                    f'Disk path {path} is too long, reduce to less than 63 characters',
                    errno.ENAMETOOLONG)
            if not await self.disk_uniqueness_integrity_check(
                    device, vm_instance):
                verrors.add(
                    'attributes.path',
                    f'{vm_instance["name"]} has "{path}" already configured')
        elif device.get('dtype') == 'RAW':
            path = device['attributes'].get('path')
            exists = device['attributes'].get('exists', True)
            if not path:
                verrors.add('attributes.path', 'Path is required.')
            else:
                if exists and not os.path.exists(path):
                    verrors.add('attributes.path', 'Path must exist.')
                if not exists:
                    if os.path.exists(path):
                        verrors.add('attributes.path', 'Path must not exist.')
                    elif not device['attributes'].get('size'):
                        verrors.add(
                            'attributes.size',
                            'Please provide a valid size for the raw file.')
                if (old and old['attributes'].get('size') !=
                        device['attributes'].get('size')
                        and not device['attributes'].get('size')):
                    verrors.add(
                        'attributes.size',
                        'Please provide a valid size for the raw file.')
                await check_path_resides_within_volume(
                    verrors,
                    self.middleware,
                    'attributes.path',
                    path,
                )
                if not await self.disk_uniqueness_integrity_check(
                        device, vm_instance):
                    verrors.add(
                        'attributes.path',
                        f'{vm_instance["name"]} has "{path}" already configured'
                    )
        elif device.get('dtype') == 'CDROM':
            path = device['attributes'].get('path')
            if not path:
                verrors.add('attributes.path', 'Path is required.')
            elif not os.path.exists(path):
                verrors.add('attributes.path',
                            f'Unable to locate CDROM device at {path}')
            elif not await self.disk_uniqueness_integrity_check(
                    device, vm_instance):
                verrors.add(
                    'attributes.path',
                    f'{vm_instance["name"]} has "{path}" already configured')
        elif device.get('dtype') == 'NIC':
            nic = device['attributes'].get('nic_attach')
            if nic:
                nic_choices = await self.middleware.call(
                    'vm.device.nic_attach_choices')
                if nic not in nic_choices:
                    verrors.add('attributes.nic_attach', 'Not a valid choice.')
            await self.failover_nic_check(device, verrors, 'attributes')
        elif device.get('dtype') == 'PCI':
            pptdev = device['attributes'].get('pptdev')
            if osc.IS_FREEBSD and not RE_PPTDEV_NAME.findall(pptdev):
                verrors.add('attribute.pptdev',
                            'Please specify correct PCI device for passthru.')
            device_details = await self.middleware.call(
                'vm.device.passthrough_device', pptdev)
            if device_details.get('error'):
                verrors.add(
                    'attribute.pptdev',
                    f'Not a valid choice. The PCI device is not available for passthru: {device_details["error"]}'
                )
            if not await self.middleware.call('vm.device.iommu_enabled'):
                verrors.add('attribute.pptdev', 'IOMMU support is required.')
        elif device.get('dtype') == 'DISPLAY':
            if vm_instance:
                if osc.IS_FREEBSD and vm_instance['bootloader'] != 'UEFI':
                    verrors.add('dtype',
                                'Display only works with UEFI bootloader.')

                if not update:
                    vm_instance['devices'].append(device)

                await self.validate_display_devices(verrors, vm_instance)

            if osc.IS_FREEBSD and device['attributes']['type'] != 'VNC':
                verrors.add(
                    'attributes.type',
                    'Only VNC Display device is supported for this platform.')

            all_ports = [
                d['attributes'].get('port')
                for d in (await self.middleware.call(
                    'vm.device.query', [['dtype', '=', 'DISPLAY']]))
                if d['id'] != device.get('id')
            ]
            if device['attributes'].get('port'):
                if device['attributes']['port'] in all_ports:
                    verrors.add('attributes.port',
                                'Specified display port is already in use')
            else:
                device['attributes']['port'] = (
                    await self.middleware.call('vm.port_wizard'))['port']

        if device['dtype'] in ('RAW', 'DISK') and device['attributes'].get('physical_sectorsize')\
                and not device['attributes'].get('logical_sectorsize'):
            verrors.add(
                'attributes.logical_sectorsize',
                'This field must be provided when physical_sectorsize is specified.'
            )

        if verrors:
            raise verrors

        return device

    @private
    async def validate_display_devices(self, verrors, vm_instance):
        devs = await self.get_display_devices(vm_instance)
        if len(devs['vnc']) > 1:
            verrors.add('attributes.type',
                        'Only one VNC Display device is supported')
        if len(devs['spice']) > 1:
            verrors.add('attributes.type',
                        'Only one SPICE Display device is supported')

    @private
    async def get_display_devices(self, vm_instance):
        devs = {'vnc': [], 'spice': []}
        for dev in filter(lambda d: d['dtype'] == 'DISPLAY',
                          vm_instance['devices']):
            if dev['attributes']['type'] == 'SPICE':
                devs['spice'].append(dev)
            else:
                devs['vnc'].append(dev)
        return devs

    @private
    async def failover_nic_check(self, vm_device, verrors, schema):
        if await self.middleware.call('failover.licensed'):
            nics = await self.middleware.call(
                'vm.device.nic_capability_checks', [vm_device])
            if nics:
                verrors.add(
                    f'{schema}.nic_attach',
                    f'Capabilities must be disabled for {",".join(nics)} interface '
                    'in Network->Interfaces section before using this device with VM.'
                )
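
do_create above documents two special attribute flows: `attributes.exists = false` for a RAW file that should be
created, and `attributes.create_zvol = true` (with `zvol_name` and `zvol_volsize`) for a DISK backed by a new zvol.
A hypothetical payload for the latter path, using the attribute names consumed by update_device and validate_device
(the pool name, VM id and size are made up):

disk_with_new_zvol = {
    "dtype": "DISK",
    "vm": 1,                                      # hypothetical VM id
    "attributes": {
        "create_zvol": True,
        "zvol_name": "tank/vm-disks/win10-boot",  # parent dataset must already exist
        "zvol_volsize": 20 * 1024 ** 3,           # 20 GiB
    },
}
# e.g. middleware.call('vm.device.create', disk_with_new_zvol)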
Ejemplo n.º 10
0
class KubernetesService(ConfigService):
    class Config:
        datastore = 'services.kubernetes'
        datastore_extend = 'kubernetes.k8s_extend'
        cli_namespace = 'app.kubernetes'

    ENTRY = Dict(
        'kubernetes_entry',
        Bool('configure_gpus', required=True),
        Str('pool', required=True, null=True),
        IPAddr('cluster_cidr', required=True, cidr=True, empty=True),
        IPAddr('service_cidr', required=True, cidr=True, empty=True),
        IPAddr('cluster_dns_ip', required=True, empty=True),
        IPAddr('node_ip', required=True),
        Str('route_v4_interface', required=True, null=True),
        IPAddr('route_v4_gateway', required=True, null=True, v6=False),
        Str('route_v6_interface', required=True, null=True),
        IPAddr('route_v6_gateway', required=True, null=True, v4=False),
        Str('dataset', required=True, null=True),
        Int('id', required=True),
        update=True,
    )

    @private
    async def k8s_extend(self, data):
        data['dataset'] = applications_ds_name(
            data['pool']) if data['pool'] else None
        data.pop('cni_config')
        return data

    @private
    async def unused_cidrs(self, network_cidrs):
        return [
            str(network) for network in itertools.chain(
                ipaddress.ip_network('172.16.0.0/12', False).subnets(4),
                ipaddress.ip_network('10.0.0.0/8', False).subnets(8),
                ipaddress.ip_network('192.168.0.0/16', False).subnets(1),
            ) if not any(
                network.overlaps(used_network)
                for used_network in network_cidrs)
        ]
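
    # Example of what the selection above produces: candidate subnets are drawn from 172.16.0.0/12 (as /16s),
    # 10.0.0.0/8 (as /16s) and 192.168.0.0/16 (as /17s), keeping only those that do not overlap any network
    # already in use. Self-contained illustration with one hypothetical in-use network:
    #
    #     import ipaddress, itertools
    #     used = {ipaddress.ip_network('172.16.0.0/16')}
    #     candidates = itertools.chain(
    #         ipaddress.ip_network('172.16.0.0/12', False).subnets(4),
    #         ipaddress.ip_network('10.0.0.0/8', False).subnets(8),
    #         ipaddress.ip_network('192.168.0.0/16', False).subnets(1),
    #     )
    #     free = [str(n) for n in candidates if not any(n.overlaps(u) for u in used)]
    #     # free[0] == '172.17.0.0/16'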

    @private
    async def validate_data(self, data, schema, old_data):
        verrors = ValidationErrors()

        if data.pop('migrate_applications', False):
            if data['pool'] == old_data['pool']:
                verrors.add(
                    f'{schema}.migrate_applications',
                    'Migration of applications dataset only happens when a new pool is configured.'
                )
            elif not data['pool']:
                verrors.add(
                    f'{schema}.migrate_applications',
                    'Pool must be specified when migration of ix-application dataset is desired.'
                )
            elif not old_data['pool']:
                verrors.add(
                    f'{schema}.migrate_applications',
                    'A pool must have been configured previously for ix-application dataset migration.'
                )
            else:
                if await self.middleware.call(
                        'zfs.dataset.query',
                    [['id', '=', applications_ds_name(data['pool'])]], {
                        'extra': {
                            'retrieve_children': False,
                            'retrieve_properties': False
                        }
                    }):
                    verrors.add(
                        f'{schema}.migrate_applications',
                        f'Migration of {applications_ds_name(old_data["pool"])!r} to {data["pool"]!r} not '
                        f'possible as {applications_ds_name(data["pool"])} already exists.'
                    )

                if not await self.middleware.call(
                        'zfs.dataset.query',
                    [['id', '=',
                      applications_ds_name(old_data['pool'])]], {
                          'extra': {
                              'retrieve_children': False,
                              'retrieve_properties': False
                          }
                      }):
                    # Edge case but handled just to be sure
                    verrors.add(
                        f'{schema}.migrate_applications',
                        f'{applications_ds_name(old_data["pool"])!r} does not exist, migration not possible.'
                    )

        network_cidrs = set([
            ipaddress.ip_network(
                f'{ip_config["address"]}/{ip_config["netmask"]}', False)
            for interface in await self.middleware.call('interface.query')
            for ip_config in itertools.chain(interface['aliases'],
                                             interface['state']['aliases'])
            if ip_config['type'] != 'LINK'
        ])

        unused_cidrs = []
        if not data['cluster_cidr'] or not data['service_cidr']:
            unused_cidrs = await self.unused_cidrs(network_cidrs)
            # If the candidates at index 0 and 1 belong to different address classes, drop the first so that
            # both picks come from the same class
            if len(unused_cidrs) > 2 and unused_cidrs[0].split(
                    '.')[0] != unused_cidrs[1].split('.')[0]:
                unused_cidrs.pop(0)

        if unused_cidrs and not data['cluster_cidr']:
            data['cluster_cidr'] = unused_cidrs.pop(0)

        if unused_cidrs and not data['service_cidr']:
            data['service_cidr'] = unused_cidrs.pop(0)

        if not data['cluster_dns_ip']:
            if data['service_cidr']:
                # Pick the 10th IP (the usual default) from the service CIDR
                data['cluster_dns_ip'] = str(
                    list(
                        ipaddress.ip_network(data['service_cidr'],
                                             False).hosts())[9])
            else:
                verrors.add(f'{schema}.cluster_dns_ip',
                            'Please specify cluster_dns_ip.')

        if data['pool'] and not await self.middleware.call(
                'pool.query', [['name', '=', data['pool']]]):
            verrors.add(
                f'{schema}.pool',
                'Please provide a valid pool configured in the system.')

        for k in ('cluster_cidr', 'service_cidr'):
            if not data[k]:
                verrors.add(f'{schema}.{k}',
                            f'Please specify a {k.split("_")[0]} CIDR.')
            elif any(
                    ipaddress.ip_network(data[k], False).overlaps(cidr)
                    for cidr in network_cidrs):
                verrors.add(f'{schema}.{k}',
                            'Requested CIDR is already in use.')

        if data['cluster_cidr'] and data[
                'service_cidr'] and ipaddress.ip_network(
                    data['cluster_cidr'], False).overlaps(
                        ipaddress.ip_network(data['service_cidr'], False)):
            verrors.add(f'{schema}.cluster_cidr',
                        'Must not overlap with service CIDR.')

        if data['service_cidr'] and data[
                'cluster_dns_ip'] and ipaddress.ip_address(
                    data['cluster_dns_ip']) not in ipaddress.ip_network(
                        data['service_cidr']):
            verrors.add(f'{schema}.cluster_dns_ip',
                        'Must be in range of "service_cidr".')

        if data['node_ip'] not in await self.bindip_choices():
            verrors.add(f'{schema}.node_ip',
                        'Please provide a valid IP address.')

        if not await self.middleware.call('route.configured_default_ipv4_route'
                                          ):
            verrors.add(
                f'{schema}.route_v4_interface',
                'Please, set IPv4 Default Gateway (it can be fake) in Network → Global Configuration and then '
                'update Kubernetes settings. Currently, k3s cannot be used without a default route.'
            )

        valid_choices = await self.route_interface_choices()
        for k, _ in await self.validate_interfaces(data):
            verrors.add(
                f'{schema}.{k}',
                f'Please specify a valid interface (i.e {", ".join(valid_choices)!r}).'
            )

        for k in ('route_v4', 'route_v6'):
            gateway = data[f'{k}_gateway']
            interface = data[f'{k}_interface']
            if (not gateway and not interface) or (gateway and interface):
                continue
            for k2 in ('gateway', 'interface'):
                verrors.add(
                    f'{schema}.{k}_{k2}',
                    f'{k}_gateway and {k}_interface must be specified together.'
                )

        if data['route_v4_gateway']:
            gateway = ipaddress.ip_address(data['route_v4_gateway'])
            if not any(gateway in network_cidr
                       for network_cidr in network_cidrs):
                verrors.add(
                    f'{schema}.route_v4_gateway',
                    'Specified value is not present on any network cidr in use by the system'
                )

        verrors.check()

    @private
    async def validate_interfaces(self, data):
        errors = []
        interfaces = await self.route_interface_choices()
        for k in filter(lambda k: data[k] and data[k] not in interfaces,
                        ('route_v4_interface', 'route_v6_interface')):
            errors.append((k, data[k]))
        return errors

    @private
    async def validate_config(self):
        data = await self.middleware.call('kubernetes.config')
        data.pop('id')
        data.pop('dataset')

        try:
            await self.validate_data(data, 'kubernetes', data)
        except ValidationErrors as e:
            return e

    @accepts(
        Patch(
            'kubernetes_entry',
            'kubernetes_update',
            ('add', Bool('migrate_applications')),
            ('rm', {
                'name': 'id'
            }),
            ('rm', {
                'name': 'dataset'
            }),
            ('attr', {
                'update': True
            }),
        ))
    @job(lock='kubernetes_update')
    async def do_update(self, job, data):
        """
        `pool` must be a valid ZFS pool configured in the system. Kubernetes service will initialise the pool by
        creating datasets under `pool_name/ix-applications`.

        `cluster_cidr` is the CIDR to be used for default NAT network between workloads.

        `service_cidr` is the CIDR to be used for kubernetes services, which are an abstraction referring to a
        logical set of kubernetes pods.

        `cluster_dns_ip` is the IP of the DNS server running for the kubernetes cluster. It must be in the range
        of `service_cidr`.

        The values specified for `cluster_cidr`, `service_cidr` and `cluster_dns_ip` are permanent, and a subsequent
        change requires re-initialisation of the applications. To clarify, the system will destroy the old
        `ix-applications` dataset and any data within it when any of the above values change.

        `node_ip` is the IP address which the kubernetes cluster will assign to the TrueNAS node. It defaults to
        0.0.0.0 and the cluster in this case will automatically manage which IP address to use for managing traffic
        for default NAT network.

        By default, kubernetes pods use the system's default gateway for outbound traffic. This might not be
        desirable for users who want to separate NAT traffic over a specific interface / route. The system will
        create an L3 network which routes the NAT traffic towards the default gateway.

        If users want to restrict traffic to a certain gateway / interface, they can specify a default route
        for the NAT traffic. `route_v4_interface` and `route_v4_gateway` set a default route for the kubernetes
        cluster's IPv4 traffic. Similarly, `route_v6_interface` and `route_v6_gateway` can be used to specify a
        default route for IPv6 traffic.

        If the user is switching pools and the new pool has not been configured for kubernetes before, data can be
        replicated from the old pool to the new one by setting the `migrate_applications` attribute. This will
        replicate the contents of the old pool's ix-applications dataset to the new pool.
        """
        old_config = await self.config()
        old_config.pop('dataset')
        config = old_config.copy()
        config.update(data)
        migrate = config.get('migrate_applications')

        await self.validate_data(config, 'kubernetes_update', old_config)

        if migrate and config['pool'] != old_config['pool']:
            job.set_progress(
                25,
                f'Migrating {applications_ds_name(old_config["pool"])} to {applications_ds_name(config["pool"])}'
            )
            await self.middleware.call(
                'kubernetes.migrate_ix_applications_dataset', config['pool'],
                old_config['pool'])
            job.set_progress(40,
                             'Migration complete for ix-applications dataset')

        if len(set(old_config.items()) ^ set(config.items())) > 0:
            await self.middleware.call(
                'chart.release.clear_update_alerts_for_all_chart_releases')
            config['cni_config'] = {}
            await self.middleware.call('datastore.update',
                                       self._config.datastore,
                                       old_config['id'], config)
            await self.middleware.call('kubernetes.status_change')
            if not config['pool'] and config['pool'] != old_config['pool']:
                # We only want to do this when we don't have any pool configured and would like to use
                # host catalog repos temporarily. Otherwise, we should call this after k8s datasets have
                # been initialised
                await self.middleware.call('catalog.sync_all')

        return await self.config()

    @accepts()
    @returns(Dict(
        'kubernetes_bind_ip_choices',
        additional_attrs=True,
    ))
    async def bindip_choices(self):
        """
        Returns IP choices for the Kubernetes service to use.
        """
        return {
            d['address']: d['address']
            for d in await self.middleware.call('interface.ip_in_use', {
                'static': True,
                'any': True
            })
        }

    @accepts()
    @returns(Dict(additional_attrs=True))
    async def route_interface_choices(self):
        """
        Returns interface choices for the Kubernetes service to use for IPv4 connections.
        """
        return await self.middleware.call('interface.choices', {
            'bridge_members': False,
            'lag_ports': False
        })

    @private
    async def validate_k8s_setup(self):
        k8s_config = await self.middleware.call('kubernetes.config')
        if not k8s_config['dataset']:
            raise CallError('Please configure kubernetes pool.')
        if not await self.middleware.call('service.started', 'kubernetes'):
            raise CallError('Kubernetes service is not running.')

    @accepts()
    @returns(Str('kubernetes_node_ip', null=True))
    async def node_ip(self):
        """
        Returns the IP address which kubernetes uses to allow incoming connections.
        """
        node_ip = None
        if await self.middleware.call('service.started', 'kubernetes'):
            k8s_node_config = await self.middleware.call('k8s.node.config')
            if k8s_node_config['node_configured']:
                node_ip = next(
                    (addr['address']
                     for addr in k8s_node_config['status']['addresses']
                     if addr['type'] == 'InternalIP'), None)
        if not node_ip:
            node_ip = (await
                       self.middleware.call('kubernetes.config'))['node_ip']

        return node_ip
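
Tying the do_update docstring together, a hypothetical kubernetes.update payload that switches pools, migrates the
existing ix-applications dataset, and pins NAT traffic to a dedicated interface (pool, interface and gateway values
are made up; note that `route_v4_interface` and `route_v4_gateway` must be given together, per validate_data):

kubernetes_update_payload = {
    "pool": "apps-pool",              # hypothetical pool name
    "migrate_applications": True,     # replicate ix-applications from the previously configured pool
    "node_ip": "0.0.0.0",
    "route_v4_interface": "enp3s0",   # hypothetical interface
    "route_v4_gateway": "192.168.0.1",
}
# e.g. middleware.call('kubernetes.update', kubernetes_update_payload)  -- runs as a job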
Ejemplo n.º 11
0
class SharingSMBService(SharingService):

    share_task_type = 'SMB'

    class Config:
        namespace = 'sharing.smb'
        datastore = 'sharing.cifs_share'
        datastore_prefix = 'cifs_'
        datastore_extend = 'sharing.smb.extend'

    @private
    async def strip_comments(self, data):
        parsed_config = ""
        for entry in data['auxsmbconf'].splitlines():
            if entry == "" or entry.startswith(('#', ';')):
                continue
            parsed_config += entry if len(parsed_config) == 0 else f'\n{entry}'

        data['auxsmbconf'] = parsed_config
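
    # strip_comments above drops blank lines and lines starting with '#' or ';' from the auxiliary
    # smb4.conf text. Illustrative input/output:
    #
    #     before = "# local tweaks\nveto files = /Thumbs.db/\n\n; disabled\nea support = no"
    #     after  = "veto files = /Thumbs.db/\nea support = no"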

    @accepts(
        Dict(
            'sharingsmb_create',
            Str('purpose',
                enum=[x.name for x in SMBSharePreset],
                default=SMBSharePreset.DEFAULT_SHARE.name),
            Str('path', required=True),
            Str('path_suffix', default=''),
            Bool('home', default=False),
            Str('name', max_length=80),
            Str('comment', default=''),
            Bool('ro', default=False),
            Bool('browsable', default=True),
            Bool('timemachine', default=False),
            Bool('recyclebin', default=False),
            Bool('guestok', default=False),
            Bool('abe', default=False),
            List('hostsallow', default=[]),
            List('hostsdeny', default=[]),
            Bool('aapl_name_mangling', default=False),
            Bool('acl', default=True),
            Bool('durablehandle', default=True),
            # shadowcopy only available for FreeBSD (for now)
            Bool('shadowcopy', default=osc.IS_FREEBSD),
            Bool('streams', default=True),
            Bool('fsrvp', default=False),
            Str('auxsmbconf', max_length=None, default=''),
            Bool('enabled', default=True),
            register=True))
    async def do_create(self, data):
        """
        Create a SMB Share.

        `purpose` applies common configuration presets depending on intended purpose.

        `timemachine`, when set, enables Time Machine backups for this share.

        `ro`, when enabled, prohibits write access to the share.

        `guestok`, when enabled, allows access to this share without a password.

        `hostsallow` is a list of hostnames / IP addresses which have access to this share.

        `hostsdeny` is a list of hostnames / IP addresses which are not allowed access to this share. If only a
        handful of hostnames should be allowed access, `hostsdeny` can be set to "ALL", which denies access to all
        hostnames except the ones listed in `hostsallow`.

        `acl` enables support for storing the SMB Security Descriptor as a Filesystem ACL.

        `streams` enables support for storing alternate datastreams as filesystem extended attributes.

        `fsrvp` enables support for the filesystem remote VSS protocol. This allows clients to create
        ZFS snapshots through RPC.

        `shadowcopy` enables support for the volume shadow copy service.

        `auxsmbconf` is a string of additional smb4.conf parameters not covered by the system's API.
        """
        verrors = ValidationErrors()
        path = data['path']

        await self.clean(data, 'sharingsmb_create', verrors)
        await self.validate(data, 'sharingsmb_create', verrors)

        verrors.check()

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.apply_presets(data)
        await self.compress(data)
        vuid = await self.generate_vuid(data['timemachine'])
        data.update({'vuid': vuid})
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.strip_comments(data)
        await self.middleware.call('sharing.smb.reg_addshare', data)
        enable_aapl = await self.check_aapl(data)

        if enable_aapl:
            await self._service_change('cifs', 'restart')
        else:
            await self._service_change('cifs', 'reload')

        return await self.get_instance(data['id'])

    @accepts(Int('id'),
             Patch('sharingsmb_create', 'sharingsmb_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update SMB Share of `id`.
        """
        verrors = ValidationErrors()
        path = data.get('path')

        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })

        new = old.copy()
        new.update(data)

        oldname = 'homes' if old['home'] else old['name']
        newname = 'homes' if new['home'] else new['name']

        new['vuid'] = await self.generate_vuid(new['timemachine'], new['vuid'])
        await self.clean(new, 'sharingsmb_update', verrors, id=id)
        await self.validate(new, 'sharingsmb_update', verrors, old=old)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        if old['purpose'] != new['purpose']:
            await self.apply_presets(new)

        old_is_locked = (await self.get_instance(id))['locked']
        if old['path'] != new['path']:
            new_is_locked = await self.middleware.call(
                'pool.dataset.path_in_locked_datasets', new['path'])
        else:
            new_is_locked = old_is_locked

        await self.compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self.strip_comments(new)
        if not new_is_locked:
            """
            Enabling AAPL SMB2 extensions globally affects SMB shares. If this
            happens, the SMB service _must_ be restarted. Skip this step if the
            dataset underlying the new path is locked.
            """
            enable_aapl = await self.check_aapl(new)
        else:
            enable_aapl = False
        """
        OLD    NEW   = dataset path is encrypted
         ----------
         -      -    = pre-12 behavior. Remove and replace if name changed, else update.
         -      X    = Delete share from running configuration
         X      -    = Add share to running configuration
         X      X    = no-op
        """
        if old_is_locked and new_is_locked:
            """
            Configuration change only impacts a locked SMB share. From standpoint of
            running config, this is a no-op. No need to restart or reload service.
            """
            return await self.get_instance(id)

        elif not old_is_locked and not new_is_locked:
            """
            Default behavior before changes for locked datasets.
            """
            if newname != oldname:
                # This is a disruptive change. The share is actually being removed and replaced.
                # Forcibly closes any existing SMB sessions.
                await self.close_share(oldname)
                try:
                    await self.middleware.call('sharing.smb.reg_delshare',
                                               oldname)
                except Exception:
                    self.logger.warning('Failed to remove stale share [%s]',
                                        old['name'],
                                        exc_info=True)
                await self.middleware.call('sharing.smb.reg_addshare', new)
            else:
                diff = await self.middleware.call(
                    'sharing.smb.diff_middleware_and_registry', new['name'],
                    new)
                if diff is None:
                    await self.middleware.call('sharing.smb.reg_addshare', new)
                else:
                    share_name = new['name'] if not new['home'] else 'homes'
                    await self.middleware.call('sharing.smb.apply_conf_diff',
                                               'REGISTRY', share_name, diff)

        elif old_is_locked and not new_is_locked:
            """
            Since the old share was not in our running configuration, we need
            to add it.
            """
            await self.middleware.call('sharing.smb.reg_addshare', new)

        elif not old_is_locked and new_is_locked:
            try:
                await self.middleware.call('sharing.smb.reg_delshare', oldname)
            except Exception:
                self.logger.warning('Failed to remove locked share [%s]',
                                    old['name'],
                                    exc_info=True)

        if enable_aapl:
            await self._service_change('cifs', 'restart')
        else:
            await self._service_change('cifs', 'reload')

        return await self.get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete SMB Share of `id`. This will forcibly disconnect SMB clients
        that are accessing the share.
        """
        share = await self._get_instance(id)
        result = await self.middleware.call('datastore.delete',
                                            self._config.datastore, id)
        await self.close_share(share['name'])
        try:
            await self.middleware.call(
                'smb.sharesec._delete',
                share['name'] if not share['home'] else 'homes')
        except Exception:
            self.logger.debug('Failed to delete share ACL for [%s].',
                              share['name'],
                              exc_info=True)

        try:
            await self.middleware.call(
                'sharing.smb.reg_delshare',
                share['name'] if not share['home'] else 'homes')
        except Exception:
            self.logger.warning('Failed to remove registry entry for [%s].',
                                share['name'],
                                exc_info=True)

        if share['timemachine']:
            await self.middleware.call('service.restart', 'mdns')

        return result

    @private
    async def check_aapl(self, data):
        """
        Returns whether we changed the global aapl support settings.
        """
        aapl_extensions = (
            await self.middleware.call('smb.config'))['aapl_extensions']

        if not aapl_extensions and data['timemachine']:
            await self.middleware.call('datastore.update', 'services_cifs', 1,
                                       {'cifs_srv_aapl_extensions': True})
            return True

        return False

    @private
    async def close_share(self, share_name):
        c = await run(
            [SMBCmd.SMBCONTROL.value, 'smbd', 'close-share', share_name],
            check=False)
        if c.returncode != 0:
            self.logger.warning('Failed to close smb share [%s]: [%s]',
                                share_name,
                                c.stderr.decode().strip())

    @private
    async def clean(self, data, schema_name, verrors, id=None):
        data['name'] = await self.name_exists(data, schema_name, verrors, id)

    @private
    async def validate_aux_params(self, data, schema_name):
        """
        libsmbconf expects to be provided with key-value pairs.
        """
        verrors = ValidationErrors()
        for entry in data.splitlines():
            if entry == '' or entry.startswith(('#', ';')):
                continue

            kv = entry.split('=', 1)
            if len(kv) != 2:
                verrors.add(
                    f'{schema_name}.auxsmbconf',
                    f'Auxiliary parameters must be in the format of "key = value": {entry}'
                )
                continue

        verrors.check()
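
    # For illustration only (these option names are examples of valid smb.conf
    # settings, not values required by this service), well-formed auxiliary
    # parameters are newline-separated "key = value" pairs, e.g.:
    #
    #   veto files = /.DS_Store/
    #   hide dot files = yes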

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        home_result = await self.home_exists(data['home'], schema_name,
                                             verrors, old)

        if home_result:
            verrors.add(f'{schema_name}.home',
                        'Only one share is allowed to be a home share.')
        elif not home_result and not data['path']:
            verrors.add(f'{schema_name}.path', 'This field is required.')

        if data['path']:
            await self.validate_path_field(data, schema_name, verrors)

        if data['auxsmbconf']:
            try:
                await self.validate_aux_params(data['auxsmbconf'],
                                               f'{schema_name}.auxsmbconf')
            except ValidationErrors as errs:
                verrors.add_child(f'{schema_name}.auxsmbconf', errs)

        if not data['acl'] and not await self.middleware.call(
                'filesystem.acl_is_trivial', data['path']):
            verrors.add(
                f'{schema_name}.acl',
                f'ACL detected on {data["path"]}. ACLs must be stripped prior to creation '
                'of SMB share.')

        if data.get('name') and data['name'].lower() in [
                'global', 'homes', 'printers'
        ]:
            verrors.add(
                f'{schema_name}.name',
                f'{data["name"]} is a reserved section name; please select another one.'
            )

        if osc.IS_LINUX:
            if data['shadowcopy']:
                verrors.add(
                    f'{schema_name}.shadowcopy',
                    'ZFS shadow copy support is not yet implemented in TrueNAS SCALE'
                )
            if data['fsrvp']:
                verrors.add(
                    f'{schema_name}.fsrvp',
                    'ZFS fsrvp support is not yet implemented in TrueNAS SCALE'
                )

        if data.get('path_suffix') and len(data['path_suffix'].split('/')) > 2:
            verrors.add(
                f'{schema_name}.path_suffix',
                'Path suffix may not contain more than two components.')

    @private
    async def home_exists(self, home, schema_name, verrors, old=None):
        home_filters = [('home', '=', True)]
        home_result = None

        if home:
            if old and old['id'] is not None:
                id = old['id']

                if not old['home']:
                    home_filters.append(('id', '!=', id))
                    # The user already had this set as the home share
                    home_result = await self.middleware.call(
                        'datastore.query', self._config.datastore,
                        home_filters,
                        {'prefix': self._config.datastore_prefix})

        return home_result

    @private
    async def auxsmbconf_dict(self, aux, direction="TO"):
        ret = None
        if direction == 'TO':
            ret = {}
            for entry in aux.splitlines():
                if entry == '':
                    continue

                if entry.startswith(('#', ';')):
                    # Special handling for comments
                    ret[entry] = None
                    continue

                kv = entry.split('=', 1)
                ret[kv[0].strip()] = kv[1].strip()

            return ret

        if direction == 'FROM':
            return '\n'.join(
                [f'{k}={v}' if v is not None else k for k, v in aux.items()])

    @private
    async def name_exists(self, data, schema_name, verrors, id=None):
        name = data['name']
        path = data['path']

        if path and not name:
            name = path.rsplit('/', 1)[-1]

        name_filters = [('name', '=', name)]

        if id is not None:
            name_filters.append(('id', '!=', id))

        name_result = await self.middleware.call(
            'datastore.query', self._config.datastore, name_filters,
            {'prefix': self._config.datastore_prefix})

        if name_result:
            verrors.add(f'{schema_name}.name',
                        'A share with this name already exists.')

        return name

    @private
    async def extend(self, data):
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()
        if data['fsrvp']:
            data['shadowcopy'] = True

        if 'share_acl' in data:
            data.pop('share_acl')

        return data

    @private
    async def compress(self, data):
        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])
        data.pop(self.locked_field, None)

        return data

    @private
    async def generate_vuid(self, timemachine, vuid=""):
        try:
            if timemachine and vuid:
                uuid.UUID(vuid, version=4)
        except ValueError:
            self.logger.debug(
                f"Time machine VUID string ({vuid}) is invalid. Regenerating.")
            vuid = ""

        if timemachine and not vuid:
            vuid = str(uuid.uuid4())

        return vuid

    @private
    async def apply_presets(self, data):
        """
        Apply settings from presets. Auxiliary parameters from the preset are only
        merged in if user-defined aux parameters already exist; in that case the
        user-defined values take precedence.
        """
        params = (SMBSharePreset[data["purpose"]].value)["params"].copy()
        aux = params.pop("auxsmbconf")
        data.update(params)
        if data["auxsmbconf"]:
            preset_aux = await self.auxsmbconf_dict(aux, direction="TO")
            data_aux = await self.auxsmbconf_dict(data["auxsmbconf"],
                                                  direction="TO")
            preset_aux.update(data_aux)
            data["auxsmbconf"] = await self.auxsmbconf_dict(preset_aux,
                                                            direction="FROM")

        return data
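
    # For example (illustrative values only): if the selected preset supplied
    # "aio write size = 0" in its aux parameters and the user already had
    # "aio write size = 1" in auxsmbconf, the user's value would win after the
    # merge above; if the user has no aux parameters at all, the preset's aux
    # parameters are not merged in.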

    @accepts()
    async def presets(self):
        """
        Retrieve pre-defined configuration sets for specific use-cases. These parameter
        combinations are often non-obvious, but beneficial in these scenarios.
        """
        return {x.name: x.value for x in SMBSharePreset}

    @private
    async def sync_registry(self):
        """
        Synchronize registry config with the share configuration in the truenas config
        file. This method simply reconciles lists of shares, removing from and adding to
        the registry as-needed.
        """
        if not os.path.exists(SMBPath.GLOBALCONF.platform()):
            self.logger.warning(
                "smb.conf does not exist. Skipping registry synchronization. "
                "This may indicate that the SMB service has not completed initialization."
            )
            return

        active_shares = await self.query([('locked', '=', False),
                                          ('enabled', '=', True)])
        registry_shares = await self.middleware.call(
            'sharing.smb.reg_listshares')
        cf_active = set([x['name'].casefold() for x in active_shares])
        cf_reg = set([x.casefold() for x in registry_shares])
        to_add = cf_active - cf_reg
        to_del = cf_reg - cf_active

        for share in to_add:
            share_conf = list(
                filter(lambda x: x['name'].casefold() == share.casefold(),
                       active_shares))
            if not os.path.exists(share_conf[0]['path']):
                self.logger.warning(
                    "Path [%s] for share [%s] does not exist. "
                    "Refusing to add share to SMB configuration.",
                    share_conf[0]['path'], share_conf[0]['name'])
                continue

            try:
                await self.middleware.call('sharing.smb.reg_addshare',
                                           share_conf[0])
            except Exception:
                self.logger.warning(
                    "Failed to add SMB share [%s] while synchronizing registry config",
                    share,
                    exc_info=True)

        for share in to_del:
            await self.middleware.call('sharing.smb.close_share', share)
            try:
                await self.middleware.call('sharing.smb.reg_delshare', share)
            except Exception:
                self.middleware.logger.warning(
                    'Failed to remove stale share [%s]', share, exc_info=True)
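
# A minimal usage sketch, not part of the service above: invoking the documented
# sharing.smb.create / sharing.smb.update methods through the middlewared websocket
# client. The path, subnet and share names below are illustrative assumptions.
from middlewared.client import Client

with Client() as c:
    share = c.call('sharing.smb.create', {
        'path': '/mnt/tank/smbshare',      # assumed existing dataset mountpoint
        'name': 'smbshare',
        'comment': 'example share',
        'hostsallow': ['192.168.0.0/24'],
        'hostsdeny': ['ALL'],              # deny everyone not listed in hostsallow
    })
    # Renaming a share later removes and re-adds it in the running configuration.
    c.call('sharing.smb.update', share['id'], {'name': 'renamed'})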
Example #12
class MailService(ConfigService):

    oauth_access_token = None
    oauth_access_token_expires_at = None

    class Config:
        datastore = 'system.email'
        datastore_prefix = 'em_'
        datastore_extend = 'mail.mail_extend'

    @private
    async def mail_extend(self, cfg):
        if cfg['security']:
            cfg['security'] = cfg['security'].upper()
        return cfg

    @accepts(Dict(
        'mail_update',
        Str('fromemail', validators=[Email()]),
        Str('fromname'),
        Str('outgoingserver'),
        Int('port'),
        Str('security', enum=['PLAIN', 'SSL', 'TLS']),
        Bool('smtp'),
        Str('user'),
        Str('pass', private=True),
        Dict('oauth',
             Str('client_id', required=True),
             Str('client_secret', required=True),
             Str('refresh_token', required=True),
             null=True,
             private=True),
        register=True,
        update=True,
    ))
    async def do_update(self, data):
        """
        Update Mail Service Configuration.

        `fromemail` is the sending address which the mail server will use for outgoing emails.

        `outgoingserver` is the hostname or IP address of the SMTP server used for sending email.

        `security` is the type of encryption desired.

        `smtp` is a boolean value which, when set, indicates that SMTP authentication has been enabled and that
        `user`/`pass` are then required attributes.
        """
        config = await self.config()

        new = config.copy()
        new.update(data)
        new['security'] = new['security'].lower()  # Django Model compatibility

        verrors = ValidationErrors()

        if new['smtp'] and new['user'] == '':
            verrors.add(
                'mail_update.user',
                'This field is required when SMTP authentication is enabled',
            )

        self.__password_verify(new['pass'], 'mail_update.pass', verrors)

        if verrors:
            raise verrors

        await self.middleware.call('datastore.update', 'system.email', config['id'], new, {'prefix': 'em_'})

        await self.middleware.call('mail.gmail_initialize')

        return await self.config()

    def __password_verify(self, password, schema, verrors=None):
        if not password:
            return
        if verrors is None:
            verrors = ValidationErrors()
        # FIXME: smtplib does not support non-ascii password yet
        # https://github.com/python/cpython/pull/8938
        try:
            password.encode('ascii')
        except UnicodeEncodeError:
            verrors.add(
                schema,
                'Only plain text characters (7-bit ASCII) are allowed in passwords. '
                'UTF or composed characters are not allowed.'
            )
        return verrors

    @accepts(Dict(
        'mail_message',
        Str('subject', required=True),
        Str('text', required=True, max_length=None),
        Str('html', null=True, max_length=None),
        List('to', items=[Str('email')]),
        List('cc', items=[Str('email')]),
        Int('interval', null=True),
        Str('channel', null=True),
        Int('timeout', default=300),
        Bool('attachments', default=False),
        Bool('queue', default=True),
        Dict('extra_headers', additional_attrs=True),
        register=True,
    ), Ref('mail_update'))
    @job(pipes=['input'], check_pipes=False)
    def send(self, job, message, config):
        """
        Sends mail using configured mail settings.

        `text` will be formatted to HTML using Markdown and rendered using default E-Mail template.
        You can put your own HTML using `html`. If `html` is null, no HTML MIME part will be added to E-Mail.

        If `attachments` is true, a list composed of dicts with the following structure is required
        via HTTP upload:
          - headers(list)
            - name(str)
            - value(str)
            - params(dict)
          - content (str)

        [
         {
          "headers": [
           {
            "name": "Content-Transfer-Encoding",
            "value": "base64"
           },
           {
            "name": "Content-Type",
            "value": "application/octet-stream",
            "params": {
             "name": "test.txt"
            }
           }
          ],
          "content": "dGVzdAo="
         }
        ]
        """

        product_name = self.middleware.call_sync('system.product_name')

        gc = self.middleware.call_sync('datastore.config', 'network.globalconfiguration')

        hostname = f'{gc["gc_hostname"]}.{gc["gc_domain"]}'

        message['subject'] = f'{product_name} {hostname}: {message["subject"]}'

        if 'html' in message and message['html'] is None:
            message.pop('html')
        elif 'html' not in message:
            lookup = TemplateLookup(
                directories=[os.path.join(os.path.dirname(os.path.realpath(__file__)), '../assets/templates')],
                module_directory="/tmp/mako/templates")

            tmpl = lookup.get_template('mail.html')

            message['html'] = tmpl.render(body=html.escape(message['text']).replace('\n', '<br>\n'))

        return self.send_raw(job, message, config)

    @accepts(Ref('mail_message'), Ref('mail_update'))
    @job(pipes=['input'], check_pipes=False)
    @private
    def send_raw(self, job, message, config):
        config = dict(self.middleware.call_sync('mail.config'), **config)

        if config['fromname']:
            from_addr = Header(config['fromname'], 'utf-8')
            try:
                config['fromemail'].encode('ascii')
            except UnicodeEncodeError:
                from_addr.append(f'<{config["fromemail"]}>', 'utf-8')
            else:
                from_addr.append(f'<{config["fromemail"]}>', 'ascii')
        else:
            try:
                config['fromemail'].encode('ascii')
            except UnicodeEncodeError:
                from_addr = Header(config['fromemail'], 'utf-8')
            else:
                from_addr = Header(config['fromemail'], 'ascii')

        interval = message.get('interval')
        if interval is None:
            interval = timedelta()
        else:
            interval = timedelta(seconds=interval)

        sw_name = self.middleware.call_sync('system.info')['version'].split('-', 1)[0]

        channel = message.get('channel')
        if not channel:
            channel = sw_name.lower()
        if interval > timedelta():
            channelfile = '/tmp/.msg.%s' % (channel)
            last_update = datetime.now() - interval
            try:
                last_update = datetime.fromtimestamp(os.stat(channelfile).st_mtime)
            except OSError:
                pass
            timediff = datetime.now() - last_update
            if (timediff >= interval) or (timediff < timedelta()):
                # Make sure mtime is modified
                # We could use os.utime but this is simpler!
                with open(channelfile, 'w') as f:
                    f.write('!')
            else:
                raise CallError('This message was already sent in the given interval')

        verrors = self.__password_verify(config['pass'], 'mail-config.pass')
        if verrors:
            raise verrors
        to = message.get('to')
        if not to:
            to = [
                self.middleware.call_sync(
                    'user.query', [('username', '=', 'root')], {'get': True}
                )['email']
            ]
            if not to[0]:
                raise CallError('Email address for root is not configured')

        if message.get('attachments'):
            job.check_pipe("input")

            def read_json():
                f = job.pipes.input.r
                data = b''
                i = 0
                while True:
                    read = f.read(1048576)  # 1MiB
                    if read == b'':
                        break
                    data += read
                    i += 1
                    if i > 50:
                        raise ValueError('Attachments bigger than 50MB are not allowed yet')
                if data == b'':
                    return None
                return json.loads(data)

            attachments = read_json()
        else:
            attachments = None

        if 'html' in message or attachments:
            msg = MIMEMultipart()
            msg.preamble = 'This is a multi-part message in MIME format.'
            if 'html' in message:
                msg2 = MIMEMultipart('alternative')
                msg2.attach(MIMEText(message['text'], 'plain', _charset='utf-8'))
                msg2.attach(MIMEText(message['html'], 'html', _charset='utf-8'))
                msg.attach(msg2)
            if attachments:
                for attachment in attachments:
                    m = Message()
                    m.set_payload(attachment['content'])
                    for header in attachment.get('headers'):
                        m.add_header(header['name'], header['value'], **(header.get('params') or {}))
                    msg.attach(m)
        else:
            msg = MIMEText(message['text'], _charset='utf-8')

        msg['Subject'] = message['subject']

        msg['From'] = from_addr
        msg['To'] = ', '.join(to)
        if message.get('cc'):
            msg['Cc'] = ', '.join(message.get('cc'))
        msg['Date'] = formatdate()

        local_hostname = socket.gethostname()

        msg['Message-ID'] = "<%s-%s.%s@%s>" % (sw_name.lower(), datetime.utcnow().strftime("%Y%m%d.%H%M%S.%f"), base64.urlsafe_b64encode(os.urandom(3)), local_hostname)

        extra_headers = message.get('extra_headers') or {}
        for key, val in list(extra_headers.items()):
            # We already have "Content-Type: multipart/mixed" and setting "Content-Type: text/plain" like some scripts
            # do will break the python e-mail module.
            if key.lower() == "content-type":
                continue

            if key in msg:
                msg.replace_header(key, val)
            else:
                msg[key] = val

        syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_MAIL)
        try:
            if config['oauth']:
                self.middleware.call_sync('mail.gmail_send', msg, config)
            else:
                server = self._get_smtp_server(config, message['timeout'], local_hostname=local_hostname)
                # NOTE: Don't do this.
                #
                # If smtplib.SMTP* tells you to run connect() first, it's because the
                # mailserver it tried connecting to via the outgoing server argument
                # was unreachable and it tried to connect to 'localhost' and barfed.
                # This is because FreeNAS doesn't run a full MTA.
                # else:
                #    server.connect()
                headers = '\n'.join([f'{k}: {v}' for k, v in msg._headers])
                syslog.syslog(f"sending mail to {', '.join(to)}\n{headers}")
                server.sendmail(from_addr.encode(), to, msg.as_string())
                server.quit()
        except Exception as e:
            # Don't spam syslog with these messages. They should only end up in the
            # test-email pane.
            # We are only interested in ValueError, not subclasses.
            if e.__class__ is ValueError:
                raise CallError(str(e))
            syslog.syslog(f'Failed to send email to {", ".join(to)}: {str(e)}')
            if isinstance(e, smtplib.SMTPAuthenticationError):
                raise CallError(
                    f'Authentication error ({e.smtp_code}): {e.smtp_error}',
                    errno.EAUTH if osc.IS_FREEBSD else errno.EPERM
                )
            self.logger.warning('Failed to send email: %s', str(e), exc_info=True)
            if message['queue']:
                with MailQueue() as mq:
                    mq.append(msg)
            raise CallError(f'Failed to send email: {e}')
        return True

    def _get_smtp_server(self, config, timeout=300, local_hostname=None):
        self.middleware.call_sync('network.general.will_perform_activity', 'mail')

        if local_hostname is None:
            local_hostname = socket.gethostname()

        if not config['outgoingserver'] or not config['port']:
            # See NOTE below.
            raise ValueError('you must provide an outgoing mailserver and mail'
                             ' server port when sending mail')
        if config['security'] == 'SSL':
            server = smtplib.SMTP_SSL(
                config['outgoingserver'],
                config['port'],
                timeout=timeout,
                local_hostname=local_hostname)
        else:
            server = smtplib.SMTP(
                config['outgoingserver'],
                config['port'],
                timeout=timeout,
                local_hostname=local_hostname)
            if config['security'] == 'TLS':
                server.starttls()
        if config['smtp']:
            server.login(config['user'], config['pass'])

        return server

    @periodic(600, run_on_start=False)
    @private
    def send_mail_queue(self):

        with MailQueue() as mq:
            for queue in list(mq.queue):
                try:
                    config = self.middleware.call_sync('mail.config')
                    server = self._get_smtp_server(config)
                    server.sendmail(queue.message['From'].encode(), queue.message['To'].split(', '), queue.message.as_string())
                    server.quit()
                except Exception:
                    self.logger.debug('Sending message from queue failed', exc_info=True)
                    queue.attempts += 1
                    if queue.attempts >= mq.MAX_ATTEMPTS:
                        mq.queue.remove(queue)
                else:
                    mq.queue.remove(queue)
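
# A minimal usage sketch, not part of the service above: configuring the mail
# service and sending a plain-text message through the middlewared client. The
# SMTP host and addresses are placeholders, and smtp=False assumes no authentication.
from middlewared.client import Client

with Client() as c:
    c.call('mail.update', {
        'fromemail': 'nas@example.com',
        'outgoingserver': 'smtp.example.com',
        'port': 587,
        'security': 'TLS',
        'smtp': False,
    })
    # mail.send runs as a job; job=True waits for it and returns the result.
    c.call('mail.send', {
        'subject': 'Test message',
        'text': 'Hello from the mail service.',
        'to': ['admin@example.com'],
    }, job=True)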
Example #13
class CloudSyncService(CRUDService):

    local_fs_lock_manager = FsLockManager()
    remote_fs_lock_manager = FsLockManager()

    class Config:
        datastore = "tasks.cloudsync"
        datastore_extend = "cloudsync._extend"

    @filterable
    async def query(self, filters=None, options=None):
        """
        Query all Cloud Sync Tasks with `query-filters` and `query-options`.
        """
        tasks_or_task = await super().query(filters, options)

        jobs = {}
        for j in await self.middleware.call("core.get_jobs", [("method", "=", "cloudsync.sync")],
                                            {"order_by": ["id"]}):
            try:
                task_id = int(j["arguments"][0])
            except (IndexError, ValueError):
                continue

            if task_id in jobs and jobs[task_id]["state"] == "RUNNING":
                continue

            jobs[task_id] = j

        if isinstance(tasks_or_task, list):
            for task in tasks_or_task:
                task["job"] = jobs.get(task["id"])
        else:
            tasks_or_task["job"] = jobs.get(tasks_or_task["id"])

        return tasks_or_task

    @private
    async def _extend(self, cloud_sync):
        cloud_sync["credentials"] = cloud_sync.pop("credential")

        cloud_sync["encryption_password"] = await self.middleware.call(
            "pwenc.decrypt", cloud_sync["encryption_password"]
        )
        cloud_sync["encryption_salt"] = await self.middleware.call(
            "pwenc.decrypt", cloud_sync["encryption_salt"]
        )

        Cron.convert_db_format_to_schedule(cloud_sync)

        return cloud_sync

    @private
    async def _compress(self, cloud_sync):
        cloud_sync["credential"] = cloud_sync.pop("credentials")

        cloud_sync["encryption_password"] = await self.middleware.call(
            "pwenc.encrypt", cloud_sync["encryption_password"]
        )
        cloud_sync["encryption_salt"] = await self.middleware.call(
            "pwenc.encrypt", cloud_sync["encryption_salt"]
        )

        Cron.convert_schedule_to_db_format(cloud_sync)

        cloud_sync.pop('job', None)

        return cloud_sync

    @private
    async def _get_credentials(self, credentials_id):
        try:
            return await self.middleware.call("datastore.query", "system.cloudcredentials",
                                              [("id", "=", credentials_id)], {"get": True})
        except IndexError:
            return None

    @private
    async def _basic_validate(self, verrors, name, data):
        if data["encryption"]:
            if not data["encryption_password"]:
                verrors.add(f"{name}.encryption_password", "This field is required when encryption is enabled")

        credentials = await self._get_credentials(data["credentials"])
        if not credentials:
            verrors.add(f"{name}.credentials", "Invalid credentials")

        try:
            shlex.split(data["args"])
        except ValueError as e:
            verrors.add(f"{name}.args", f"Parse error: {e.args[0]}")

        if verrors:
            raise verrors

        provider = REMOTES[credentials["provider"]]

        schema = []

        if provider.buckets:
            schema.append(Str("bucket", required=True, empty=False))

        schema.append(Str("folder", required=True))

        schema.extend(provider.task_schema)

        schema.extend(self.common_task_schema(provider))

        attributes_verrors = validate_attributes(schema, data, additional_attrs=True)

        if not attributes_verrors:
            await provider.pre_save_task(data, credentials, verrors)

        verrors.add_child(f"{name}.attributes", attributes_verrors)

    @private
    async def _validate(self, verrors, name, data):
        await self._basic_validate(verrors, name, data)

        for i, (limit1, limit2) in enumerate(zip(data["bwlimit"], data["bwlimit"][1:])):
            if limit1["time"] >= limit2["time"]:
                verrors.add(f"{name}.bwlimit.{i + 1}.time", f"Invalid time order: {limit1['time']}, {limit2['time']}")

        if data["snapshot"]:
            if data["direction"] != "PUSH":
                verrors.add(f"{name}.snapshot", "This option can only be enabled for PUSH tasks")

    @private
    async def _validate_folder(self, verrors, name, data):
        if data["direction"] == "PULL":
            folder = data["attributes"]["folder"].rstrip("/")
            if folder:
                folder_parent = os.path.normpath(os.path.join(folder, ".."))
                if folder_parent == ".":
                    folder_parent = ""
                folder_basename = os.path.basename(folder)
                ls = await self.list_directory(dict(
                    credentials=data["credentials"],
                    encryption=data["encryption"],
                    filename_encryption=data["filename_encryption"],
                    encryption_password=data["encryption_password"],
                    encryption_salt=data["encryption_salt"],
                    attributes=dict(data["attributes"], folder=folder_parent),
                    args=data["args"],
                ))
                for item in ls:
                    if item["Name"] == folder_basename:
                        if not item["IsDir"]:
                            verrors.add(f"{name}.attributes.folder", "This is not a directory")
                        break
                else:
                    verrors.add(f"{name}.attributes.folder", "Directory does not exist")

        if data["direction"] == "PUSH":
            credentials = await self._get_credentials(data["credentials"])

            provider = REMOTES[credentials["provider"]]

            if provider.readonly:
                verrors.add(f"{name}.direction", "This remote is read-only")

    @accepts(Dict(
        "cloud_sync_create",
        Str("description", default=""),
        Str("direction", enum=["PUSH", "PULL"], required=True),
        Str("transfer_mode", enum=["SYNC", "COPY", "MOVE"], required=True),
        Str("path", required=True),
        Int("credentials", required=True),
        Bool("encryption", default=False),
        Bool("filename_encryption", default=False),
        Str("encryption_password", default=""),
        Str("encryption_salt", default=""),
        Cron(
            "schedule",
            defaults={"minute": "00"},
            required=True
        ),
        Bool("follow_symlinks", default=False),
        Int("transfers", null=True, default=None, validators=[Range(min=1)]),
        List("bwlimit", default=[], items=[Dict("cloud_sync_bwlimit",
                                                Str("time", validators=[Time()]),
                                                Int("bandwidth", validators=[Range(min=1)], null=True))]),
        List("exclude", default=[], items=[Str("path", empty=False)]),
        Dict("attributes", additional_attrs=True, required=True),
        Bool("snapshot", default=False),
        Str("pre_script", default=""),
        Str("post_script", default=""),
        Str("args", default=""),
        Bool("enabled", default=True),
        register=True,
    ))
    async def do_create(self, cloud_sync):
        """
        Creates a new cloud_sync entry.

        .. examples(websocket)::

          Create a new cloud_sync using Amazon S3 attributes, which is supposed to run every hour.

            :::javascript
            {
              "id": "6841f242-840a-11e6-a437-00e04d680384",
              "msg": "method",
              "method": "cloudsync.create",
              "params": [{
                "description": "s3 sync",
                "path": "/mnt/tank",
                "credentials": 1,
                "minute": "00",
                "hour": "*",
                "daymonth": "*",
                "month": "*",
                "attributes": {
                  "bucket": "mybucket",
                  "folder": ""
                },
                "enabled": true
              }]
            }
        """

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        cloud_sync["id"] = await self.middleware.call("datastore.insert", "tasks.cloudsync", cloud_sync)
        await self.middleware.call("service.restart", "cron")

        cloud_sync = await self._extend(cloud_sync)
        return cloud_sync

    @accepts(Int("id"), Patch("cloud_sync_create", "cloud_sync_update", ("attr", {"update": True})))
    async def do_update(self, id, data):
        """
        Updates the cloud_sync entry `id` with `data`.
        """
        cloud_sync = await self._get_instance(id)

        # credentials is a foreign key for now
        if cloud_sync["credentials"]:
            cloud_sync["credentials"] = cloud_sync["credentials"]["id"]

        cloud_sync.update(data)

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        await self.middleware.call("datastore.update", "tasks.cloudsync", id, cloud_sync)
        await self.middleware.call("service.restart", "cron")

        cloud_sync = await self._extend(cloud_sync)
        return cloud_sync

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Deletes cloud_sync entry `id`.
        """
        await self.middleware.call("datastore.delete", "tasks.cloudsync", id)
        await self.middleware.call("alert.oneshot_delete", "CloudSyncTaskFailed", id)
        await self.middleware.call("service.restart", "cron")

    @accepts(Int("credentials_id"))
    async def list_buckets(self, credentials_id):
        credentials = await self._get_credentials(credentials_id)
        if not credentials:
            raise CallError("Invalid credentials")

        provider = REMOTES[credentials["provider"]]

        if not provider.buckets:
            raise CallError("This provider does not use buckets")

        return await self.ls({"credentials": credentials}, "")

    @accepts(Dict(
        "cloud_sync_ls",
        Int("credentials", required=True),
        Bool("encryption", default=False),
        Bool("filename_encryption", default=False),
        Str("encryption_password", default=""),
        Str("encryption_salt", default=""),
        Dict("attributes", required=True, additional_attrs=True),
        Str("args", default=""),
    ))
    async def list_directory(self, cloud_sync):
        """
        List contents of a remote bucket / directory.

        If the remote supports buckets, the path is constructed from the "bucket" and "folder" keys in `attributes`.
        If the remote does not support buckets, the path is constructed from the "folder" key only in `attributes`.
        "folder" is the directory name and "bucket" is the bucket name for the remote.

        Path examples:

        S3 Service
        `bucketname/directory/name`

        Dropbox Service
        `directory/name`


        `credentials` is a valid id of a Cloud Sync Credential which will be used to connect to the provider.
        """
        verrors = ValidationErrors()

        await self._basic_validate(verrors, "cloud_sync", dict(cloud_sync))

        if verrors:
            raise verrors

        credentials = await self._get_credentials(cloud_sync["credentials"])

        path = get_remote_path(REMOTES[credentials["provider"]], cloud_sync["attributes"])

        return await self.ls(dict(cloud_sync, credentials=credentials), path)

    @private
    async def ls(self, config, path):
        async with RcloneConfig(config) as config:
            proc = await run(["rclone", "--config", config.config_path, "lsjson", "remote:" + path],
                             check=False, encoding="utf8")
            if proc.returncode == 0:
                return json.loads(proc.stdout)
            else:
                raise CallError(proc.stderr)

    @item_method
    @accepts(Int("id"))
    @job(lock=lambda args: "cloud_sync:{}".format(args[-1]), lock_queue_size=1, logs=True)
    async def sync(self, job, id):
        """
        Run the cloud_sync job `id`, syncing the local data to remote.
        """

        cloud_sync = await self._get_instance(id)

        credentials = cloud_sync["credentials"]

        local_path = cloud_sync["path"]
        local_direction = FsLockDirection.READ if cloud_sync["direction"] == "PUSH" else FsLockDirection.WRITE

        remote_path = get_remote_path(REMOTES[credentials["provider"]], cloud_sync["attributes"])
        remote_direction = FsLockDirection.READ if cloud_sync["direction"] == "PULL" else FsLockDirection.WRITE

        directions = {
            FsLockDirection.READ: "reading",
            FsLockDirection.WRITE: "writing",
        }

        job.set_progress(0, f"Locking local path {local_path!r} for {directions[local_direction]}")
        async with self.local_fs_lock_manager.lock(local_path, local_direction):
            job.set_progress(0, f"Locking remote path {remote_path!r} for {directions[remote_direction]}")
            async with self.remote_fs_lock_manager.lock(f"{credentials['id']}/{remote_path}", remote_direction):
                job.set_progress(0, "Starting")
                try:
                    await rclone(self.middleware, job, cloud_sync)
                    await self.middleware.call("alert.oneshot_delete", "CloudSyncTaskFailed", cloud_sync["id"])
                except Exception:
                    await self.middleware.call("alert.oneshot_create", "CloudSyncTaskFailed", {
                        "id": cloud_sync["id"],
                        "name": cloud_sync["description"],
                    })
                    raise

    @accepts()
    async def providers(self):
        """
        Returns a list of dictionaries of supported providers for Cloud Sync Tasks.

        `credentials_schema` is JSON schema for credentials attributes.

        `task_schema` is JSON schema for task attributes.

        `buckets` is a boolean value which is set to "true" if the provider supports buckets.

        Example of a single provider:

        [
            {
                "name": "AMAZON_CLOUD_DRIVE",
                "title": "Amazon Cloud Drive",
                "credentials_schema": [
                    {
                        "property": "client_id",
                        "schema": {
                            "title": "Amazon Application Client ID",
                            "_required_": true,
                            "type": "string"
                        }
                    },
                    {
                        "property": "client_secret",
                        "schema": {
                            "title": "Application Key",
                            "_required_": true,
                            "type": "string"
                        }
                    }
                ],
                "credentials_oauth": null,
                "buckets": false,
                "bucket_title": "Bucket",
                "task_schema": []
            }
        ]
        """
        return sorted(
            [
                {
                    "name": provider.name,
                    "title": provider.title,
                    "credentials_schema": [
                        {
                            "property": field.name,
                            "schema": field.to_json_schema()
                        }
                        for field in provider.credentials_schema
                    ],
                    "credentials_oauth": f"{OAUTH_URL}/{provider.name.lower()}" if provider.credentials_oauth else None,
                    "buckets": provider.buckets,
                    "bucket_title": provider.bucket_title,
                    "task_schema": [
                        {
                            "property": field.name,
                            "schema": field.to_json_schema()
                        }
                        for field in provider.task_schema + self.common_task_schema(provider)
                    ],
                }
                for provider in REMOTES.values()
            ],
            key=lambda provider: provider["title"].lower()
        )

    def common_task_schema(self, provider):
        schema = []

        if provider.fast_list:
            schema.append(Bool("fast_list", default=False, title="Use --fast-list", description=textwrap.dedent("""\
                Use fewer transactions in exchange for more RAM. This may also speed up or slow down your
                transfer. See [rclone documentation](https://rclone.org/docs/#fast-list) for more details.
            """).rstrip()))

        return schema
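
# A minimal usage sketch, not part of the service above: creating and running a
# push task with the schema documented in do_create. It assumes credential id 1
# exists for an S3-compatible provider and that /mnt/tank is a valid local path.
from middlewared.client import Client

with Client() as c:
    task = c.call('cloudsync.create', {
        'description': 's3 sync',
        'direction': 'PUSH',
        'transfer_mode': 'SYNC',
        'path': '/mnt/tank',
        'credentials': 1,
        'schedule': {'minute': '00', 'hour': '*'},
        'attributes': {'bucket': 'mybucket', 'folder': ''},
        'enabled': True,
    })
    # cloudsync.sync is a job; job=True blocks until the transfer finishes.
    c.call('cloudsync.sync', task['id'], job=True)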
Example #14
class CredentialsService(CRUDService):

    class Config:
        namespace = "cloudsync.credentials"

        datastore = "system.cloudcredentials"

    @accepts(Dict(
        "cloud_sync_credentials_verify",
        Str("provider", required=True),
        Dict("attributes", additional_attrs=True, required=True),
    ))
    async def verify(self, data):
        """
        Verify if `attributes` provided for `provider` are authorized by the `provider`.
        """
        data = dict(data, name="")
        await self._validate("cloud_sync_credentials_create", data)

        async with RcloneConfig({"credentials": data}) as config:
            proc = await run(["rclone", "--config", config.config_path, "lsjson", "remote:"],
                             check=False, encoding="utf8")
            if proc.returncode == 0:
                return {"valid": True}
            else:
                return {"valid": False, "error": proc.stderr}

    @accepts(Dict(
        "cloud_sync_credentials_create",
        Str("name", required=True),
        Str("provider", required=True),
        Dict("attributes", additional_attrs=True, required=True),
        register=True,
    ))
    async def do_create(self, data):
        """
        Create Cloud Sync Credentials.

        `attributes` is a dictionary of valid values which will be used to authorize with the `provider`.
        """
        await self._validate("cloud_sync_credentials_create", data)

        data["id"] = await self.middleware.call(
            "datastore.insert",
            "system.cloudcredentials",
            data,
        )
        return data

    @accepts(
        Int("id"),
        Patch(
            "cloud_sync_credentials_create",
            "cloud_sync_credentials_update",
            ("attr", {"update": True})
        )
    )
    async def do_update(self, id, data):
        """
        Update Cloud Sync Credentials of `id`.
        """
        old = await self._get_instance(id)

        new = old.copy()
        new.update(data)

        await self._validate("cloud_sync_credentials_update", new, id)

        await self.middleware.call(
            "datastore.update",
            "system.cloudcredentials",
            id,
            new,
        )

        data["id"] = id

        return data

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete Cloud Sync Credentials of `id`.
        """
        await self.middleware.call(
            "datastore.delete",
            "system.cloudcredentials",
            id,
        )

    async def _validate(self, schema_name, data, id=None):
        verrors = ValidationErrors()

        await self._ensure_unique(verrors, schema_name, "name", data["name"], id)

        if data["provider"] not in REMOTES:
            verrors.add(f"{schema_name}.provider", "Invalid provider")
        else:
            provider = REMOTES[data["provider"]]

            attributes_verrors = validate_attributes(provider.credentials_schema, data)
            verrors.add_child(f"{schema_name}.attributes", attributes_verrors)

        if verrors:
            raise verrors
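
# A minimal usage sketch, not part of the service above: verifying credentials
# before saving them. The provider name and attribute keys are assumptions for
# illustration; the authoritative keys come from each provider's credentials_schema
# as reported by cloudsync.providers.
from middlewared.client import Client

with Client() as c:
    attrs = {'access_key_id': 'AKIA...', 'secret_access_key': 'secret'}
    check = c.call('cloudsync.credentials.verify',
                   {'provider': 'S3', 'attributes': attrs})
    if check['valid']:
        c.call('cloudsync.credentials.create',
               {'name': 'my-s3', 'provider': 'S3', 'attributes': attrs})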
Example #15
class ZFSDatasetService(CRUDService):
    class Config:
        namespace = 'zfs.dataset'
        private = True
        process_pool = True

    def locked_datasets(self):
        try:
            about_to_lock_dataset = self.middleware.call_sync(
                'cache.get', 'about_to_lock_dataset')
        except KeyError:
            about_to_lock_dataset = None

        or_filters = [
            'OR', [['key_loaded', '=', False]] +
            ([['id', '=', about_to_lock_dataset],
              ['id', '^', f'{about_to_lock_dataset}/']]
             if about_to_lock_dataset else [])
        ]
        return self.query(
            [['encrypted', '=', True], or_filters], {
                'extra': {
                    'properties': ['encryption', 'keystatus', 'mountpoint']
                },
                'select': ['id', 'mountpoint']
            })

    def flatten_datasets(self, datasets):
        return sum([[deepcopy(ds)] + self.flatten_datasets(ds['children'])
                    for ds in datasets], [])

    @filterable
    def query(self, filters=None, options=None):
        """
        In `query-options` we can provide `extra` arguments which control which data should be retrieved
        for a dataset.

        `query-options.extra.snapshots` is a boolean which, when set, retrieves snapshots for the dataset in
        question by adding a snapshots key to the dataset data.

        `query-options.extra.top_level_properties` is a list of properties which we would like to include in the
        top level dict of the dataset. It defaults to adding only the mountpoint key, keeping legacy behavior. If
        none are desired in the top level dataset, an empty list should be passed; if null is specified, the
        mountpoint key is added to the top level dict whenever it is present in `query-options.extra.properties`
        or that is null as well.

        `query-options.extra.properties` is a list of properties which should be retrieved. If null (the default),
        all properties are retrieved; if empty, no properties are retrieved (`mountpoint` is special in this case
        and is controlled by the `query-options.extra.mountpoint` attribute).

        We provide two ways in which zfs.dataset.query returns dataset data. The first is a flat structure
        (the default), meaning that all the datasets in the system are returned as separate objects which also
        contain all the data there is for their children. This retrieval type is slightly slower because of the
        duplicates which exist in each object.
        The second type is hierarchical, where only top level datasets are returned in the list and they contain
        all of their children in the `children` key. This retrieval type is slightly faster.
        These options are controlled by the `query-options.extra.flat` attribute which defaults to true.

        `query-options.extra.user_properties` controls whether user-defined properties of datasets should be
        retrieved or not.

        While we provide a way to exclude all properties from data retrieval, we introduce a single attribute,
        `query-options.extra.retrieve_properties`, which if set to false ensures that no property is retrieved
        whatsoever and overrides any other property retrieval attribute.
        """
        options = options or {}
        extra = options.get('extra', {}).copy()
        top_level_props = None if extra.get(
            'top_level_properties'
        ) is None else extra['top_level_properties'].copy()
        props = extra.get('properties', None)
        flat = extra.get('flat', True)
        user_properties = extra.get('user_properties', True)
        retrieve_properties = extra.get('retrieve_properties', True)
        snapshots = extra.get('snapshots')
        if not retrieve_properties:
            # This is a shorthand version where the consumer can specify that they don't want any property to
            # be retrieved
            user_properties = False
            props = []

        with libzfs.ZFS() as zfs:
            # Handle `id` filter specially to avoiding getting all datasets
            kwargs = dict(
                props=props,
                top_level_props=top_level_props,
                user_props=user_properties,
                snapshots=snapshots,
            )
            if filters and len(filters) == 1 and list(
                    filters[0][:2]) == ['id', '=']:
                kwargs['datasets'] = [filters[0][2]]

            datasets = zfs.datasets_serialized(**kwargs)
            if flat:
                datasets = self.flatten_datasets(datasets)
            else:
                datasets = list(datasets)

        return filter_list(datasets, filters, options)
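
    # A usage sketch of the `extra` options documented above (the call site and
    # dataset name are assumptions for illustration):
    #
    #   middleware.call_sync('zfs.dataset.query',
    #                        [['id', '=', 'tank/data']],
    #                        {'extra': {'flat': False,
    #                                   'snapshots': True,
    #                                   'properties': ['mountpoint', 'origin']}})
    #
    # retrieves only the named properties (plus snapshots) and returns the matching
    # dataset hierarchically, with descendants nested under the `children` key.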

    def query_for_quota_alert(self):
        return [{
            k: v
            for k, v in dataset['properties'].items() if k in [
                "name", "quota", "available", "refquota", "usedbydataset",
                "mounted", "mountpoint", "org.freenas:quota_warning",
                "org.freenas:quota_critical", "org.freenas:refquota_warning",
                "org.freenas:refquota_critical"
            ]
        } for dataset in self.query()]

    def common_load_dataset_checks(self, ds):
        self.common_encryption_checks(ds)
        if ds.key_loaded:
            raise CallError(f'{ds.name} key is already loaded')

    def common_encryption_checks(self, ds):
        if not ds.encrypted:
            raise CallError(f'{ds.name} is not encrypted')

    def path_to_dataset(self, path):
        with libzfs.ZFS() as zfs:
            try:
                zh = zfs.get_dataset_by_path(path)
                ds_name = zh.name
            except libzfs.ZFSException:
                ds_name = None

        return ds_name

    def get_quota(self, ds, quota_type):
        if quota_type == 'dataset':
            dataset = self.query([('id', '=', ds)], {'get': True})
            return [{
                'quota_type': 'DATASET',
                'id': ds,
                'name': ds,
                'quota': int(dataset['properties']['quota']['rawvalue']),
                'refquota': int(dataset['properties']['refquota']['rawvalue']),
                'used_bytes': int(dataset['properties']['used']['rawvalue']),
            }]

        quota_list = []
        quota_get = subprocess.run(
            [
                'zfs', f'{quota_type}space', '-H', '-n', '-p', '-o',
                'name,used,quota,objquota,objused', ds
            ],
            capture_output=True,
            check=False,
        )
        if quota_get.returncode != 0:
            raise CallError(
                f'Failed to get {quota_type} quota for {ds}: [{quota_get.stderr.decode()}]'
            )

        for quota in quota_get.stdout.decode().splitlines():
            m = quota.split('\t')
            if len(m) != 5:
                self.logger.debug('Invalid %s quota: %s', quota_type.lower(),
                                  quota)
                continue

            entry = {
                'quota_type': quota_type.upper(),
                'id': int(m[0]),
                'name': None,
                'quota': int(m[2]),
                'used_bytes': int(m[1]),
                'used_percent': 0,
                'obj_quota': int(m[3]) if m[3] != '-' else 0,
                'obj_used': int(m[4]) if m[4] != '-' else 0,
                'obj_used_percent': 0,
            }
            if entry['quota'] > 0:
                entry['used_percent'] = entry['used_bytes'] / entry[
                    'quota'] * 100

            if entry['obj_quota'] > 0:
                entry['obj_used_percent'] = entry['obj_used'] / entry[
                    'obj_quota'] * 100

            try:
                if entry['quota_type'] == 'USER':
                    entry['name'] = (self.middleware.call_sync(
                        'user.get_user_obj', {'uid': entry['id']}))['pw_name']
                else:
                    entry['name'] = (self.middleware.call_sync(
                        'group.get_group_obj',
                        {'gid': entry['id']}))['gr_name']

            except Exception:
                self.logger.debug('Unable to resolve %s id %d to name',
                                  quota_type.lower(), entry['id'])
                pass

            quota_list.append(entry)

        return quota_list

    def set_quota(self, ds, quota_list):
        cmd = ['zfs', 'set']
        cmd.extend(quota_list)
        cmd.append(ds)
        quota_set = subprocess.run(cmd, capture_output=True, check=False)
        if quota_set.returncode != 0:
            raise CallError(
                f'Failed to set userspace quota on {ds}: [{quota_set.stderr.decode()}]'
            )

    @accepts(
        Str('id'),
        Dict(
            'load_key_options',
            Bool('mount', default=True),
            Bool('recursive', default=False),
            Any('key', default=None, null=True),
            Str('key_location', default=None, null=True),
        ),
    )
    def load_key(self, id, options):
        mount_ds = options.pop('mount')
        recursive = options.pop('recursive')
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_load_dataset_checks(ds)
                ds.load_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to load key for {id}', exc_info=True)
            raise CallError(f'Failed to load key for {id}: {e}')
        else:
            if mount_ds:
                self.mount(id, {'recursive': recursive})

    @accepts(Str('name'), List('params', default=[], private=True))
    @job()
    def bulk_process(self, job, name, params):
        f = getattr(self, name, None)
        if not f:
            raise CallError(f'{name} method not found in zfs.dataset')

        statuses = []
        for i in params:
            result = error = None
            try:
                result = f(*i)
            except Exception as e:
                error = str(e)
            finally:
                statuses.append({'result': result, 'error': error})

        return statuses

    @accepts(Str('id'),
             Dict(
                 'check_key',
                 Any('key', default=None, null=True),
                 Str('key_location', default=None, null=True),
             ))
    def check_key(self, id, options):
        """
        Returns `true` if the `key` is valid, `false` otherwise.
        """
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_encryption_checks(ds)
                return ds.check_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to check key for {id}', exc_info=True)
            raise CallError(f'Failed to check key for {id}: {e}')

    @accepts(Str('id'),
             Dict(
                 'unload_key_options',
                 Bool('recursive', default=False),
                 Bool('force_umount', default=False),
                 Bool('umount', default=False),
             ))
    def unload_key(self, id, options):
        force = options.pop('force_umount')
        if options.pop('umount') and self.middleware.call_sync(
                'zfs.dataset.get_instance', id)['mountpoint']:
            self.umount(id, {'force': force})
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_encryption_checks(ds)
                if not ds.key_loaded:
                    raise CallError(f'{id}\'s key is not loaded')
                ds.unload_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to unload key for {id}', exc_info=True)
            raise CallError(f'Failed to unload key for {id}: {e}')

    @accepts(
        Str('id'),
        Dict(
            'change_key_options',
            Dict('encryption_properties', Str('keyformat'), Str('keylocation'),
                 Int('pbkdf2iters')),
            Bool('load_key', default=True),
            Any('key', default=None, null=True),
        ),
    )
    def change_key(self, id, options):
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_encryption_checks(ds)
                ds.change_key(props=options['encryption_properties'],
                              load_key=options['load_key'],
                              key=options['key'])
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to change key for {id}', exc_info=True)
            raise CallError(f'Failed to change key for {id}: {e}')

    @accepts(Str('id'),
             Dict(
                 'change_encryption_root_options',
                 Bool('load_key', default=True),
             ))
    def change_encryption_root(self, id, options):
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                ds.change_key(load_key=options['load_key'], inherit=True)
        except libzfs.ZFSException as e:
            raise CallError(f'Failed to change encryption root for {id}: {e}')

    @accepts(
        Dict(
            'dataset_create',
            Str('name', required=True),
            Str('type', enum=['FILESYSTEM', 'VOLUME'], default='FILESYSTEM'),
            Dict(
                'properties',
                Bool('sparse'),
                additional_attrs=True,
            ),
        ))
    def do_create(self, data):
        """
        Creates a ZFS dataset.
        """

        verrors = ValidationErrors()

        if '/' not in data['name']:
            verrors.add('name', 'You need a full name, e.g. pool/newdataset')

        if verrors:
            raise verrors

        properties = data.get('properties') or {}
        sparse = properties.pop('sparse', False)
        params = {}

        for k, v in data['properties'].items():
            params[k] = v

        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(data['name'].split('/')[0])
                pool.create(data['name'],
                            params,
                            fstype=getattr(libzfs.DatasetType, data['type']),
                            sparse_vol=sparse)
        except libzfs.ZFSException as e:
            self.logger.error('Failed to create dataset', exc_info=True)
            raise CallError(f'Failed to create dataset: {e}')

    @accepts(
        Str('id'),
        Dict(
            'dataset_update',
            Dict(
                'properties',
                additional_attrs=True,
            ),
        ),
    )
    def do_update(self, id, data):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(id)

                if 'properties' in data:
                    properties = data['properties'].copy()
                    # Set these after reservations
                    for k in ['quota', 'refquota']:
                        if k in properties:
                            properties[k] = properties.pop(k)  # Set them last
                    for k, v in properties.items():

                        # If prop already exists we just update it,
                        # otherwise create a user property
                        prop = dataset.properties.get(k)
                        try:
                            if prop:
                                if v.get('source') == 'INHERIT':
                                    prop.inherit(
                                        recursive=v.get('recursive', False))
                                elif 'value' in v and (prop.value != v['value']
                                                       or prop.source.name
                                                       == 'INHERITED'):
                                    prop.value = v['value']
                                elif 'parsed' in v and (
                                        prop.parsed != v['parsed']
                                        or prop.source.name == 'INHERITED'):
                                    prop.parsed = v['parsed']
                            else:
                                if v.get('source') == 'INHERIT':
                                    pass
                                else:
                                    if 'value' not in v:
                                        raise ValidationError(
                                            'properties',
                                            f'properties.{k} needs a "value" attribute'
                                        )
                                    if ':' not in k:
                                        raise ValidationError(
                                            'properties',
                                            'User property needs a colon (:) in its name'
                                        )
                                    prop = libzfs.ZFSUserProperty(v['value'])
                                    dataset.properties[k] = prop
                        except libzfs.ZFSException as e:
                            raise ZFSSetPropertyError(k, str(e))

        except libzfs.ZFSException as e:
            self.logger.error('Failed to update dataset', exc_info=True)
            raise CallError(f'Failed to update dataset: {e}')

    def do_delete(self, id, options=None):
        options = options or {}
        force = options.get('force', False)
        recursive = options.get('recursive', False)

        args = []
        if force:
            args += ['-f']
        if recursive:
            args += ['-r']

        # If the dataset is mounted and has a receive_resume_token, abort the interrupted receive first
        # (`zfs recv -A`) or ZFS will say "cannot destroy 'pool/dataset': dataset already exists"
        recv_run = subprocess.run(['zfs', 'recv', '-A', id],
                                  stdout=subprocess.DEVNULL,
                                  stderr=subprocess.DEVNULL)
        # Destroying may take a long time; let's not use py-libzfs as it would block
        # other ZFS operations.
        try:
            subprocess.run(
                ['zfs', 'destroy'] + args + [id],
                text=True,
                capture_output=True,
                check=True,
            )
        except subprocess.CalledProcessError as e:
            if recv_run.returncode == 0 and e.stderr.strip().endswith(
                    'dataset does not exist'):
                # The `zfs recv -A` above might have already deleted this dataset if it was created by
                # an interrupted `zfs recv` operation
                return
            self.logger.error('Failed to delete dataset', exc_info=True)
            error = e.stderr.strip()
            errno_ = errno.EFAULT
            if "Device busy" in error:
                errno_ = errno.EBUSY
            raise CallError(f'Failed to delete dataset: {error}', errno_)

    @accepts(Str('name'), Dict('options', Bool('recursive', default=False)))
    def mount(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                if options['recursive']:
                    dataset.mount_recursive()
                else:
                    dataset.mount()
        except libzfs.ZFSException as e:
            self.logger.error('Failed to mount dataset', exc_info=True)
            raise CallError(f'Failed to mount dataset: {e}')

    @accepts(Str('name'), Dict('options', Bool('force', default=False)))
    def umount(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.umount(force=options['force'])
        except libzfs.ZFSException as e:
            self.logger.error('Failed to umount dataset', exc_info=True)
            raise CallError(f'Failed to umount dataset: {e}')

    @accepts(Str('dataset'),
             Dict('options', Str('new_name', required=True, empty=False),
                  Bool('recursive', default=False)))
    def rename(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.rename(options['new_name'],
                               recursive=options['recursive'])
        except libzfs.ZFSException as e:
            self.logger.error('Failed to rename dataset', exc_info=True)
            raise CallError(f'Failed to rename dataset: {e}')

    def promote(self, name):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.promote()
        except libzfs.ZFSException as e:
            self.logger.error('Failed to promote dataset', exc_info=True)
            raise CallError(f'Failed to promote dataset: {e}')

    def inherit(self, name, prop, recursive=False):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                zprop = dataset.properties.get(prop)
                if not zprop:
                    raise CallError(f'Property {prop!r} not found.',
                                    errno.ENOENT)
                zprop.inherit(recursive=recursive)
        except libzfs.ZFSException as e:
            raise CallError(str(e))
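
# A minimal, self-contained sketch of the `flat` retrieval mode described in the
# zfs.dataset.query docstring above: hierarchical output nests children under a
# `children` key, while flat output returns every dataset as its own top-level object.
# The real flatten_datasets() helper is not shown in this excerpt, so this recursive
# walk is an illustrative assumption rather than the actual implementation.
def flatten_datasets_example(datasets):
    flat = []
    for dataset in datasets:
        flat.append(dataset)
        flat.extend(flatten_datasets_example(dataset.get('children', [])))
    return flat


if __name__ == '__main__':
    hierarchical = [{
        'id': 'boot-pool/ROOT',
        'children': [{'id': 'boot-pool/ROOT/default', 'children': []}],
    }]
    # Flat mode yields two entries here: the parent and its child.
    print([ds['id'] for ds in flatten_datasets_example(hierarchical)])
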
Ejemplo n.º 16
0
class SNMPService(SystemServiceService):
    class Config:
        service = 'snmp'
        datastore_prefix = 'snmp_'

    @accepts(
        Dict('snmp_update',
             Str('location'),
             Str('contact',
                 validators=[Or(Email(), Match(r'^[-_a-zA-Z0-9\s]*$'))]),
             Bool('traps'),
             Bool('v3'),
             Str('community',
                 validators=[Match(r'^[-_.a-zA-Z0-9\s]*$')],
                 default='public'),
             Str('v3_username', max_length=20),
             Str('v3_authtype', enum=['', 'MD5', 'SHA']),
             Str('v3_password'),
             Str('v3_privproto', enum=[None, 'AES', 'DES'], null=True),
             Str('v3_privpassphrase'),
             Int('loglevel', validators=[Range(min=0, max=7)]),
             Str('options', max_length=None),
             Bool('zilstat'),
             update=True))
    async def do_update(self, data):
        """
        Update SNMP Service Configuration.

        `v3` when set enables SNMP version 3.

        `v3_username`, `v3_authtype`, `v3_password`, `v3_privproto` and `v3_privpassphrase` are only used when `v3`
        is enabled.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if not new['v3'] and not new['community']:
            verrors.add('snmp_update.community',
                        'This field is required when SNMPv3 is disabled')

        if new['v3_authtype'] and not new['v3_password']:
            verrors.add(
                'snmp_update.v3_password',
                'This field is required when an SNMPv3 auth type is specified',
            )

        if new['v3_password'] and len(new['v3_password']) < 8:
            verrors.add('snmp_update.v3_password',
                        'Password must contain at least 8 characters')

        if new['v3_privproto'] and not new['v3_privpassphrase']:
            verrors.add(
                'snmp_update.v3_privpassphrase',
                'This field is required when an SNMPv3 privacy protocol is specified',
            )

        if verrors:
            raise verrors

        await self._update_service(old, new)

        return await self.config()
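
# A standalone sketch (an assumption, not middleware code) of the validation rules
# SNMPService.do_update() enforces above: a community string is required unless SNMPv3
# is enabled, an auth type requires a password of at least 8 characters, and a privacy
# protocol requires a passphrase.
def validate_snmp_example(config):
    errors = []
    if not config.get('v3') and not config.get('community'):
        errors.append('community: required when SNMPv3 is disabled')
    if config.get('v3_authtype') and not config.get('v3_password'):
        errors.append('v3_password: required when an auth type is specified')
    if config.get('v3_password') and len(config['v3_password']) < 8:
        errors.append('v3_password: must contain at least 8 characters')
    if config.get('v3_privproto') and not config.get('v3_privpassphrase'):
        errors.append('v3_privpassphrase: required when a privacy protocol is specified')
    return errors


# Example: community missing while v3 is disabled, plus a too-short password.
print(validate_snmp_example({'v3': False, 'community': '',
                             'v3_authtype': 'SHA', 'v3_password': 'short'}))
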
Ejemplo n.º 17
0
class ServiceService(CRUDService):
    class Config:
        cli_namespace = "service"

    @filterable
    async def query(self, filters, options):
        """
        Query all system services with `query-filters` and `query-options`.
        """
        if options is None:
            options = {}
        options['prefix'] = 'srv_'

        services = await self.middleware.call('datastore.query',
                                              'services.services', filters,
                                              options)

        # In case a single service has been requested
        if not isinstance(services, list):
            services = [services]

        jobs = {
            asyncio.ensure_future(
                (await self.middleware.call('service.object',
                                            service['service'])).get_state()):
            service
            for service in services
        }
        if jobs:
            done, pending = await asyncio.wait(list(jobs.keys()), timeout=15)

        def result(task):
            """
            Method to handle results of the coroutines.
            In case of error or timeout, provide UNKNOWN state.
            """
            entry = jobs.get(task)

            result = None
            if task in done:
                try:
                    result = task.result()
                except Exception:
                    self.logger.warning('Task %r failed', task, exc_info=True)

            if result is None:
                entry['state'] = 'UNKNOWN'
                entry['pids'] = []
            else:
                entry['state'] = 'RUNNING' if result.running else 'STOPPED'
                entry['pids'] = result.pids

            return entry

        services = list(map(result, jobs))
        return filter_list(services, filters, options)

    @accepts(
        Str('id_or_name'),
        Dict(
            'service-update',
            Bool('enable', default=False),
        ),
    )
    async def do_update(self, id_or_name, data):
        """
        Update service entry of `id_or_name`.

        Currently it only accepts the `enable` option, which controls whether the
        service should start on boot.

        """
        if not id_or_name.isdigit():
            svc = await self.middleware.call(
                'datastore.query', 'services.services',
                [('srv_service', '=', id_or_name)])
            if not svc:
                raise CallError(f'Service {id_or_name} not found.',
                                errno.ENOENT)
            id_or_name = svc[0]['id']

        rv = await self.middleware.call('datastore.update',
                                        'services.services', id_or_name,
                                        {'srv_enable': data['enable']})
        await self.middleware.call('etc.generate', 'rc')
        return rv

    @accepts(
        Str('service'),
        Dict(
            'service-control',
            Bool('ha_propagate', default=True),
            register=True,
        ),
    )
    async def start(self, service, options):
        """
        Start the service specified by `service`.
        """
        service_object = await self.middleware.call('service.object', service)

        await self.middleware.call_hook('service.pre_action', service, 'start',
                                        options)

        await self.middleware.call('service.generate_etc', service_object)

        await service_object.before_start()
        await service_object.start()
        state = await service_object.get_state()
        if state.running:
            await service_object.after_start()
            await self.middleware.call('service.notify_running', service)
            return True
        else:
            self.logger.error("Service %r not running after start", service)
            await self.middleware.call('service.notify_running', service)
            return False

    async def started(self, service):
        """
        Test if service specified by `service` has been started.
        """
        service_object = await self.middleware.call('service.object', service)

        state = await service_object.get_state()
        return state.running

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def stop(self, service, options):
        """
        Stop the service specified by `service`.
        """
        service_object = await self.middleware.call('service.object', service)

        await self.middleware.call_hook('service.pre_action', service, 'stop',
                                        options)

        try:
            await service_object.before_stop()
        except Exception:
            self.logger.error("Failed before stop action for %r service",
                              service)
        await service_object.stop()
        state = await service_object.get_state()
        if not state.running:
            await service_object.after_stop()
            await self.middleware.call('service.notify_running', service)
            return False
        else:
            self.logger.error("Service %r running after stop", service)
            await self.middleware.call('service.notify_running', service)
            return True

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def restart(self, service, options):
        """
        Restart the service specified by `service`.
        """
        service_object = await self.middleware.call('service.object', service)

        await self.middleware.call_hook('service.pre_action', service,
                                        'restart', options)

        await self.middleware.call('service.generate_etc', service_object)

        return await self._restart(service, service_object)

    async def _restart(self, service, service_object):
        if service_object.restartable:
            await service_object.before_restart()
            await service_object.restart()
            await service_object.after_restart()

            state = await service_object.get_state()
            if state.running:
                await self.middleware.call('service.notify_running', service)
                return True
            else:
                await self.middleware.call('service.notify_running', service)
                self.logger.error("Service %r not running after restart",
                                  service)
                return False
        else:
            try:
                await service_object.before_stop()
            except Exception:
                self.logger.error("Failed before stop action for %r service",
                                  service)
            await service_object.stop()
            state = await service_object.get_state()
            if not state.running:
                await service_object.after_stop()
            else:
                self.logger.error(
                    "Service %r running after restart-caused stop", service)

            await service_object.before_start()
            await service_object.start()
            state = await service_object.get_state()
            if state.running:
                await service_object.after_start()
                await self.middleware.call('service.notify_running', service)
                return True
            else:
                await self.middleware.call('service.notify_running', service)
                self.logger.error(
                    "Service %r not running after restart-caused start",
                    service)
                return False

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def reload(self, service, options):
        """
        Reload the service specified by `service`.
        """
        service_object = await self.middleware.call('service.object', service)

        await self.middleware.call_hook('service.pre_action', service,
                                        'reload', options)

        await self.middleware.call('service.generate_etc', service_object)

        if service_object.reloadable:
            await service_object.before_reload()
            await service_object.reload()
            await service_object.after_reload()

            state = await service_object.get_state()
            if state.running:
                return True
            else:
                self.logger.error("Service %r not running after reload",
                                  service)
                return False
        else:
            return await self._restart(service, service_object)

    SERVICES = {}

    @private
    async def register_object(self, object):
        if object.name in self.SERVICES:
            raise CallError(
                f"Service object {object.name} is already registered")

        self.SERVICES[object.name] = object

    @private
    async def object(self, name):
        return self.SERVICES[name]

    @private
    async def generate_etc(self, object):
        for etc in object.etc:
            await self.middleware.call("etc.generate", etc)

    @private
    async def notify_running(self, service):
        try:
            svc = await self.middleware.call('service.query',
                                             [('service', '=', service)],
                                             {'get': True})
        except MatchNotFound:
            return

        self.middleware.send_event('service.query', 'CHANGED', fields=svc)

    @private
    async def identify_process(self, procname):
        for service_name, service in self.SERVICES.items():
            if isinstance(service, IdentifiableServiceInterface):
                if await service.identify(procname):
                    return service_name

    @accepts(Int("pid"), Int("timeout", default=10))
    def terminate_process(self, pid, timeout):
        """
        Terminate process by `pid`.

        First send the `TERM` signal, then, if the process was not terminated within `timeout` seconds, send the
        `KILL` signal.

        Returns `true` if the process was successfully terminated with `TERM` and `false` if we had to use `KILL`.
        """
        try:
            process = psutil.Process(pid)
        except psutil.NoSuchProcess:
            raise CallError("Process does not exist")

        process.terminate()

        gone, alive = psutil.wait_procs([process], timeout)
        if not alive:
            return True

        alive[0].kill()
        return False
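
# A self-contained sketch of the pattern ServiceService.query() uses above: launch one
# state-probe task per service, wait with a timeout, and report UNKNOWN for anything
# that did not complete in time. The service names and the fake probe are assumptions.
import asyncio


async def fake_get_state(delay):
    await asyncio.sleep(delay)
    return 'RUNNING'


async def main():
    tasks = {
        asyncio.ensure_future(fake_get_state(delay)): name
        for name, delay in [('ssh', 0.1), ('nfs', 5.0)]
    }
    done, pending = await asyncio.wait(list(tasks), timeout=1)
    for task, name in tasks.items():
        print(name, task.result() if task in done else 'UNKNOWN')
    for task in pending:
        task.cancel()


asyncio.run(main())
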
Ejemplo n.º 18
0
class AuthService(Service):

    def __init__(self, *args, **kwargs):
        super(AuthService, self).__init__(*args, **kwargs)
        self.authtokens = AuthTokens()

    @accepts(Str('username'), Str('password'))
    async def check_user(self, username, password):
        """
        Verify username and password
        """
        if username != 'root':
            return False
        try:
            user = await self.middleware.call('datastore.query', 'account.bsdusers', [('bsdusr_username', '=', username)], {'get': True})
        except IndexError:
            return False
        if user['bsdusr_unixhash'] in ('x', '*'):
            return False
        return crypt.crypt(password, user['bsdusr_unixhash']) == user['bsdusr_unixhash']

    @accepts(Int('ttl', required=False), Dict('attrs', additional_attrs=True))
    def generate_token(self, ttl=None, attrs=None):
        """Generate a token to be used for authentication."""
        if ttl is None:
            ttl = 600
        return self.authtokens.new(ttl, attrs=attrs)['id']

    @private
    def get_token(self, token_id):
        return self.authtokens.get_token(token_id)

    @no_auth_required
    @accepts(Str('username'), Str('password'))
    @pass_app
    async def login(self, app, username, password):
        """Authenticate session using username and password.
        Currently only root user is allowed.
        """
        valid = await self.check_user(username, password)
        if valid:
            app.authenticated = True
        return valid

    @no_auth_required
    @accepts(Str('token'))
    @pass_app
    def token(self, app, token):
        """Authenticate using a given `token` id."""

        def update_token(app, message):
            """
            On every new message from the registered connection
            make sure the token is still valid, updating last time or
            removing authentication
            """
            token = self.authtokens.get_token_by_sessionid(app.sessionid)
            if token is None:
                return
            if int(time.time()) - token['ttl'] < token['last']:
                token['last'] = int(time.time())
            else:
                self.authtokens.pop_token(token['id'])
                app.authenticated = False

        def remove_session(app):
            """
            On connection close, remove session id from token
            """
            self.authtokens.remove_session(app.sessionid)

        token = self.authtokens.get_token(token)
        if token is None:
            return False

        """
        If token exists and is still valid (TTL) do the following:
          - authenticate the connection
          - add the session id to token
          - register connection callbacks to update/remove token
        """
        if int(time.time()) - token['ttl'] < token['last']:
            token['last'] = int(time.time())
            self.authtokens.add_session(app.sessionid, token)
            app.register_callback('on_message', update_token)
            app.register_callback('on_close', remove_session)
            app.authenticated = True
            return True
        else:
            self.authtokens.pop_token(token['id'])
            return False
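
# A minimal sketch of the token expiry rule AuthService.token() applies above: a token
# stays valid while (now - last) < ttl, and `last` is refreshed on every use. The
# AuthTokens store itself is not shown in this excerpt, so the plain dict below is an
# illustrative assumption.
import time


def touch_token_example(token):
    """Return True and refresh `last` if the token is still inside its TTL."""
    now = int(time.time())
    if now - token['last'] < token['ttl']:
        token['last'] = now
        return True
    return False


token = {'id': 'abc', 'ttl': 600, 'last': int(time.time())}
print(touch_token_example(token))  # True while used within `ttl` seconds of `last`
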
Ejemplo n.º 19
0
class ZFSSnapshot(CRUDService):
    class Config:
        namespace = 'zfs.snapshot'

    @filterable
    def query(self, filters, options):
        with libzfs.ZFS() as zfs:
            snapshots = [i.__getstate__() for i in list(zfs.snapshots)]
        # FIXME: awful performance with hundreds/thousands of snapshots
        return filter_list(snapshots, filters, options)

    @accepts(
        Dict('snapshot_create', Str('dataset'), Str('name'), Bool('recursive'),
             Int('vmsnaps_count'), Dict('properties', additional_attrs=True)))
    async def do_create(self, data):
        """
        Take a snapshot from a given dataset.

        Returns:
            bool: True on success, otherwise False.
        """

        dataset = data.get('dataset', '')
        name = data.get('name', '')
        recursive = data.get('recursive', False)
        vmsnaps_count = data.get('vmsnaps_count', 0)
        properties = data.get('properties', None)

        if not dataset or not name:
            return False

        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(dataset)
                ds.snapshot(f'{dataset}@{name}',
                            recursive=recursive,
                            fsopts=properties)

                if vmsnaps_count > 0:
                    ds.properties['freenas:vmsynced'] = libzfs.ZFSUserProperty(
                        'Y')

            self.logger.info(f"Snapshot taken: {dataset}@{name}")
            return True
        except libzfs.ZFSException as err:
            self.logger.error(f"{err}")
            return False

    @accepts(
        Dict('snapshot_remove', Str('dataset', required=True),
             Str('name', required=True), Bool('defer_delete')))
    async def remove(self, data):
        """
        Remove a snapshot from a given dataset.

        Returns:
            bool: True on success, otherwise False.
        """
        snapshot_name = data['dataset'] + '@' + data['name']

        try:
            with libzfs.ZFS() as zfs:
                snap = zfs.get_snapshot(snapshot_name)
                snap.delete(True if data.get('defer_delete') else False)
        except libzfs.ZFSException as err:
            self.logger.error("{0}".format(err))
            return False
        else:
            self.logger.info(f"Destroyed snapshot: {snapshot_name}")

        return True

    @accepts(Dict(
        'snapshot_clone',
        Str('snapshot'),
        Str('dataset_dst'),
    ))
    async def clone(self, data):
        """
        Clone a given snapshot to a new dataset.

        Returns:
            bool: True on success, otherwise False.
        """

        snapshot = data.get('snapshot', '')
        dataset_dst = data.get('dataset_dst', '')

        if not snapshot or not dataset_dst:
            return False

        try:
            with libzfs.ZFS() as zfs:
                snp = zfs.get_snapshot(snapshot)
                snp.clone(dataset_dst)
            self.logger.info("Cloned snapshot {0} to dataset {1}".format(
                snapshot, dataset_dst))
            return True
        except libzfs.ZFSException as err:
            self.logger.error("{0}".format(err))
            return False
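
# A hedged sketch combining the libzfs calls shown in ZFSSnapshot above (snapshot a
# dataset, then clone the snapshot) into one helper. Only calls that already appear in
# the service are used; the dataset names in the commented usage are made up, and the
# snippet requires a system with py-libzfs and existing pools to actually run.
import libzfs


def snapshot_and_clone_example(dataset, snap_name, clone_target):
    full_name = f'{dataset}@{snap_name}'
    with libzfs.ZFS() as zfs:
        zfs.get_dataset(dataset).snapshot(full_name)
        zfs.get_snapshot(full_name).clone(clone_target)
    return full_name


# Usage (assumes the pool and source dataset exist):
# snapshot_and_clone_example('tank/data', 'before-upgrade', 'tank/data-clone')
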
Ejemplo n.º 20
0
class StatsService(Service):
    @accepts()
    def get_sources(self):
        """
        Returns an object with all available sources and their metric datasets.
        """
        sources = {}
        if not os.path.exists(RRD_PATH):
            return {}
        for i in glob.glob('{}/*/*.rrd'.format(RRD_PATH)):
            source, metric = i.replace(RRD_PATH, '').split('/', 1)
            if metric.endswith('.rrd'):
                metric = metric[:-4]
            if source not in sources:
                sources[source] = []
            sources[source].append(metric)
        return sources

    @accepts(Str('source'), Str('type'))
    async def get_dataset_info(self, source, _type):
        """
        Returns info about a given dataset from some source.
        """
        rrdfile = '{}/{}/{}.rrd'.format(RRD_PATH, source, _type)
        proc = await Popen(
            ['/usr/local/bin/rrdtool', 'info', rrdfile],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        data, err = await proc.communicate()
        if proc.returncode != 0:
            raise ValueError('rrdtool failed: {}'.format(err.decode()))
        data = data.decode()

        info = {'source': source, 'type': _type, 'datasets': {}}
        for dataset, _type in RE_DSTYPE.findall(data):
            info['datasets'][dataset] = {'type': _type}

        reg = RE_STEP.search(data)
        if reg:
            info['step'] = int(reg.group(1))
        reg = RE_LAST_UPDATE.search(data)
        if reg:
            info['last_update'] = int(reg.group(1))
        return info

    @accepts(
        List('stats_list',
             items=[
                 Dict(
                     'stats-data',
                     Str('source'),
                     Str('type'),
                     Str('dataset'),
                     Str('cf', default='AVERAGE'),
                     additional_attrs=False,
                 )
             ]),
        Dict(
            'stats-filter',
            Int('step', default=10),
            Str('start', default='now-1h'),
            Str('end', default='now'),
        ),
    )
    async def get_data(self, data_list, stats):
        """
        Get data points from rrd files.
        """
        if not data_list:
            raise ValidationError('stats_list',
                                  'This parameter cannot be empty')

        defs = []
        names_pair = []
        for i, data in enumerate(data_list):
            names_pair.append([data['source'], data['type']])
            rrdfile = '{}/{}/{}.rrd'.format(RRD_PATH, data['source'],
                                            data['type'])
            defs.extend([
                'DEF:xxx{}={}:{}:{}'.format(i, rrdfile, data['dataset'],
                                            data['cf']),
                'XPORT:xxx{}:{}/{}'.format(i, data['source'], data['type']),
            ])
        proc = await Popen(
            [
                '/usr/local/bin/rrdtool',
                'xport',
                '--json',
                '--start',
                stats['start'],
                '--end',
                stats['end'],
            ] + (['--step', str(stats['step'])] if stats.get('step') else []) +
            defs,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        data, err = await proc.communicate()
        if proc.returncode != 0:
            raise CallError('rrdtool failed: {}'.format(err.decode()))
        data = json.loads(data.decode())

        # Custom about property
        data['about'] = 'Data for ' + ','.join(
            ['/'.join(i) for i in names_pair])
        return data
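
# A standalone sketch of how StatsService.get_data() above assembles the rrdtool
# `xport` arguments: one DEF/XPORT pair per requested metric. The RRD path and metric
# names are illustrative assumptions; the command line is printed, not executed.
RRD_PATH_EXAMPLE = '/var/db/collectd/rrd/localhost'  # assumed location


def build_xport_args_example(data_list, start='now-1h', end='now', step=10):
    defs = []
    for i, data in enumerate(data_list):
        rrdfile = '{}/{}/{}.rrd'.format(RRD_PATH_EXAMPLE, data['source'], data['type'])
        defs += [
            'DEF:xxx{}={}:{}:{}'.format(i, rrdfile, data['dataset'], data['cf']),
            'XPORT:xxx{}:{}/{}'.format(i, data['source'], data['type']),
        ]
    return ['rrdtool', 'xport', '--json', '--start', start, '--end', end,
            '--step', str(step)] + defs


print(build_xport_args_example([{'source': 'cpu-0', 'type': 'cpu-idle',
                                 'dataset': 'value', 'cf': 'AVERAGE'}]))
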
Ejemplo n.º 21
0
class SharingNFSService(CRUDService):
    class Config:
        namespace = "sharing.nfs"
        datastore = "sharing.nfs_share"
        datastore_prefix = "nfs_"
        datastore_extend = "sharing.nfs.extend"

    @accepts(
        Dict(
            "sharingnfs_create",
            List("paths", items=[Dir("path")]),
            Str("comment"),
            List("networks", items=[IPAddr("network", cidr=True)]),
            List("hosts", items=[Str("host")]),
            Bool("alldirs"),
            Bool("ro"),
            Bool("quiet"),
            Str("maproot_user", required=False, default=None),
            Str("maproot_group", required=False, default=None),
            Str("mapall_user", required=False, default=None),
            Str("mapall_group", required=False, default=None),
            List("security",
                 items=[
                     Str("provider", enum=["SYS", "KRB5", "KRB5I", "KRB5P"])
                 ]),
            register=True,
        ))
    async def do_create(self, data):
        verrors = ValidationErrors()

        await self.validate(data, "sharingnfs_create", verrors)

        if verrors:
            raise verrors

        await self.compress(data)
        paths = data.pop("paths")
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        for path in paths:
            await self.middleware.call(
                "datastore.insert",
                "sharing.nfs_share_path",
                {
                    "share_id": data["id"],
                    "path": path,
                },
            )
        await self.extend(data)

        await self.middleware.call("service.reload", "nfs")

        return data

    @accepts(Int("id"),
             Patch("sharingnfs_create", "sharingnfs_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        verrors = ValidationErrors()
        old = await self.middleware.call(
            "datastore.query",
            self._config.datastore,
            [("id", "=", id)],
            {
                "extend": self._config.datastore_extend,
                "prefix": self._config.datastore_prefix,
                "get": True
            },
        )

        new = old.copy()
        new.update(data)

        await self.validate(new, "sharingnfs_update", verrors, old=old)

        if verrors:
            raise verrors

        await self.compress(new)
        paths = new.pop("paths")
        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, new,
                                   {"prefix": self._config.datastore_prefix})
        await self.middleware.call("datastore.delete",
                                   "sharing.nfs_share_path",
                                   [["share_id", "=", id]])
        for path in paths:
            await self.middleware.call(
                "datastore.insert",
                "sharing.nfs_share_path",
                {
                    "share_id": id,
                    "path": path,
                },
            )

        await self.extend(new)
        new["paths"] = paths

        await self.middleware.call("service.reload", "nfs")

        return new

    @accepts(Int("id"))
    async def do_delete(self, id):
        await self.middleware.call("datastore.delete",
                                   "sharing.nfs_share_path",
                                   [["share_id", "=", id]])
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        if not data["paths"]:
            verrors.add(f"{schema_name}.paths",
                        "At least one path is required")

        await self.middleware.run_in_io_thread(self.validate_paths, data,
                                               schema_name, verrors)

        filters = []
        if old:
            filters.append(["id", "!=", old["id"]])
        other_shares = await self.middleware.call("sharing.nfs.query", filters)
        dns_cache = await self.resolve_hostnames(
            sum([share["hosts"]
                 for share in other_shares], []) + data["hosts"])
        await self.middleware.run_in_io_thread(
            self.validate_hosts_and_networks, other_shares, data, schema_name,
            verrors, dns_cache)

        for k in ["maproot", "mapall"]:
            if not data[f"{k}_user"] and not data[f"{k}_group"]:
                pass
            elif not data[f"{k}_user"] and data[f"{k}_group"]:
                verrors.add(
                    f"{schema_name}.{k}_user",
                    "This field is required when map group is specified")
            else:
                user = await self.middleware.call(
                    "user.query", [("username", "=", data[f"{k}_user"])])
                if not user:
                    verrors.add(f"{schema_name}.{k}_user", "User not found")

                if data[f"{k}_group"]:
                    group = await self.middleware.call(
                        "group.query", [("group", "=", data[f"{k}_group"])])
                    if not group:
                        verrors.add(f"{schema_name}.{k}_group",
                                    "Group not found")

        if data["maproot_user"] and data["mapall_user"]:
            verrors.add(f"{schema_name}.mapall_user",
                        "maproot_user disqualifies mapall_user")

        if data["security"]:
            nfs_config = await self.middleware.call("nfs.config")
            if not nfs_config["v4"]:
                verrors.add(f"{schema_name}.security",
                            "This is not allowed when NFS v4 is disabled")

    @private
    def validate_paths(self, data, schema_name, verrors):
        dev = None
        is_mountpoint = False
        for i, path in enumerate(data["paths"]):
            stat = os.stat(path)
            if dev is None:
                dev = stat.st_dev
            else:
                if dev != stat.st_dev:
                    verrors.add(
                        f"{schema_name}.paths.{i}",
                        "Paths for a NFS share must reside within the same filesystem"
                    )

            parent = os.path.abspath(os.path.join(path, ".."))
            if os.stat(parent).st_dev != dev:
                is_mountpoint = True
                if any(
                        os.path.abspath(p).startswith(parent + "/")
                        for p in data["paths"] if p != path):
                    verrors.add(
                        f"{schema_name}.paths.{i}",
                        "You cannot share a mount point and subdirectories all at once"
                    )

        if not is_mountpoint and data["alldirs"]:
            verrors.add(f"{schema_name}.alldirs",
                        "This option can only be used for datasets")

    @private
    async def resolve_hostnames(self, hostnames):
        hostnames = list(set(hostnames))

        async def resolve(hostname):
            try:
                return (await asyncio.wait_for(
                    self.middleware.run_in_io_thread(socket.getaddrinfo,
                                                     hostname, None),
                    5))[0][4][0]
            except Exception as e:
                self.logger.warning("Unable to resolve host %r: %r", hostname,
                                    e)
                return None

        resolved_hostnames = await asyncio_map(resolve, hostnames, 8)

        return dict(zip(hostnames, resolved_hostnames))

    @private
    def validate_hosts_and_networks(self, other_shares, data, schema_name,
                                    verrors, dns_cache):
        explanation = (
            ". This is so because /etc/exports does not act like ACL and it is undefined which rule among "
            "all overlapping networks will be applied.")

        dev = os.stat(data["paths"][0]).st_dev

        used_networks = set()
        for share in other_shares:
            try:
                share_dev = os.stat(share["paths"][0]).st_dev
            except Exception:
                self.logger.warning("Failed to stat first path for %r",
                                    share,
                                    exc_info=True)
                continue

            if share_dev == dev:
                for host in share["hosts"]:
                    host = dns_cache[host]
                    if host is None:
                        continue

                    try:
                        network = ipaddress.ip_network(host)
                    except Exception:
                        self.logger.warning("Got invalid host %r", host)
                        continue
                    else:
                        used_networks.add(network)

                for network in share["networks"]:
                    try:
                        network = ipaddress.ip_network(network, strict=False)
                    except Exception:
                        self.logger.warning("Got invalid network %r", network)
                        continue
                    else:
                        used_networks.add(network)

                if not share["hosts"] and not share["networks"]:
                    used_networks.add(ipaddress.ip_network("0.0.0.0/0"))
                    used_networks.add(ipaddress.ip_network("::/0"))

                if share["alldirs"] and data["alldirs"]:
                    verrors.add(
                        f"{schema_name}.alldirs",
                        "This option is only available once per mountpoint")

        had_explanation = False
        for i, host in enumerate(data["hosts"]):
            host = dns_cache[host]
            if host is None:
                verrors.add(f"{schema_name}.hosts.{i}",
                            "Unable to resolve host")
                continue

            network = ipaddress.ip_network(host)
            for another_network in used_networks:
                if network.overlaps(another_network):
                    verrors.add(f"{schema_name}.hosts.{i}", (
                        f"You can't share same filesystem with overlapping networks {network} and {another_network}"
                        + ("" if had_explanation else explanation)))
                    had_explanation = True

            used_networks.add(network)

        had_explanation = False
        for i, network in enumerate(data["networks"]):
            network = ipaddress.ip_network(network, strict=False)

            for another_network in used_networks:
                if network.overlaps(another_network):
                    verrors.add(f"{schema_name}.networks.{i}", (
                        f"You can't share same filesystem with overlapping networks {network} and {another_network}"
                        + ("" if had_explanation else explanation)))
                    had_explanation = True

            used_networks.add(network)

        if not data["hosts"] and not data["networks"]:
            if used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    (f"You can't share same filesystem with all hosts twice" +
                     ("" if had_explanation else explanation)))

    @private
    async def extend(self, data):
        data["paths"] = [
            path["path"] for path in await self.middleware.call(
                "datastore.query", "sharing.nfs_share_path",
                [["share_id", "=", data["id"]]])
        ]
        data["networks"] = data.pop("network").split()
        data["hosts"] = data["hosts"].split()
        data["security"] = [s.upper() for s in data["security"]]
        return data

    @private
    async def compress(self, data):
        data["network"] = " ".join(data.pop("networks"))
        data["hosts"] = " ".join(data["hosts"])
        data["security"] = [s.lower() for s in data["security"]]
        return data
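
# A minimal sketch of the overlap rule validate_hosts_and_networks() enforces above:
# two exports of the same filesystem must not target overlapping networks. The example
# networks are made up.
import ipaddress


def overlapping_networks_example(candidate, existing):
    network = ipaddress.ip_network(candidate, strict=False)
    return [other for other in (ipaddress.ip_network(e, strict=False) for e in existing)
            if network.overlaps(other)]


print(overlapping_networks_example('192.168.0.0/24', ['192.168.0.128/25', '10.0.0.0/8']))
# -> [IPv4Network('192.168.0.128/25')]
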
Ejemplo n.º 22
0
class VMService(Service, VMSupervisorMixin):

    @item_method
    @accepts(Int('id'), Dict('options', Bool('overcommit', default=False)))
    @returns()
    async def start(self, id, options):
        """
        Start a VM.

        options.overcommit defaults to false, meaning a VM is not allowed to
        start if there is not enough available memory to hold all configured VMs.
        If true, the VM starts even if there is not enough memory for all configured VMs.

        Error codes:

            ENOMEM(12): not enough free memory to run the VM without overcommit
        """
        await self.middleware.run_in_thread(self._check_setup_connection)

        vm = await self.middleware.call('vm.get_instance', id)
        if vm['status']['state'] == 'RUNNING':
            raise CallError(f'{vm["name"]} is already running')

        if osc.IS_FREEBSD and not await self.middleware.call('vm.validate_slots', vm):
            raise CallError(
                'Please adjust the devices attached to this VM. '
                f'A maximum of {await self.middleware.call("vm.available_slots")} PCI slots are allowed.'
            )

        if not await self.middleware.call('vm.supports_virtualization'):
            raise CallError('This system does not support virtualization.')

        if osc.IS_LINUX and vm['bootloader'] not in await self.middleware.call('vm.bootloader_options'):
            raise CallError(f'"{vm["bootloader"]}" is not supported on this platform.')

        # Perhaps we should have a default config option for VMs?
        await self.middleware.call('vm.init_guest_vmemory', vm, options['overcommit'])

        try:
            await self.middleware.run_in_thread(self._start, vm['name'])
        except Exception:
            if (await self.middleware.call('vm.get_instance', id))['status']['state'] != 'RUNNING':
                await self.middleware.call('vm.teardown_guest_vmemory', id)
            raise

    @item_method
    @accepts(
        Int('id'),
        Dict(
            'options',
            Bool('force', default=False),
            Bool('force_after_timeout', default=False),
        ),
    )
    @returns()
    @job(lock=lambda args: f'stop_vm_{args[0]}_{args[1].get("force") if len(args) == 2 else False}')
    def stop(self, job, id, options):
        """
        Stops a VM.

        Guests that remain unresponsive after the user-defined `shutdown_timeout` has elapsed must be powered down
        using `vm.poweroff`. `vm.stop` only sends a shutdown signal to the guest and waits the configured
        `shutdown_timeout` before tearing down guest vmemory.

        When `force_after_timeout` is supplied, a poweroff is initiated to force the VM to exit if it has not
        already stopped within the specified `shutdown_timeout`.
        """
        self._check_setup_connection()
        vm_data = self.middleware.call_sync('vm.get_instance', id)

        if options['force']:
            self._poweroff(vm_data['name'])
        else:
            self._stop(vm_data['name'], vm_data['shutdown_timeout'])

        if options['force_after_timeout'] and self.middleware.call_sync('vm.status', id)['state'] == 'RUNNING':
            self._poweroff(vm_data['name'])

        self.middleware.call_sync('vm.teardown_guest_vmemory', id)

    @item_method
    @accepts(Int('id'))
    @returns()
    def poweroff(self, id):
        """
        Poweroff a VM.
        """
        self._check_setup_connection()

        vm_data = self.middleware.call_sync('vm.get_instance', id)
        self._poweroff(vm_data['name'])
        self.middleware.call_sync('vm.teardown_guest_vmemory', id)

    @item_method
    @accepts(Int('id'))
    @returns()
    @job(lock=lambda args: f'restart_vm_{args[0]}')
    def restart(self, job, id):
        """
        Restart a VM.
        """
        self._check_setup_connection()
        vm = self.middleware.call_sync('vm.get_instance', id)
        self._restart(vm['name'])
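
# A standalone sketch of the shutdown policy VMService.stop() applies above: a forced
# stop powers the guest off immediately; otherwise a shutdown signal is sent and, when
# `force_after_timeout` is set, a poweroff follows if the guest is still running. The
# callables below are stand-ins for the real _stop/_poweroff helpers, which are not
# shown in this excerpt.
def stop_vm_example(options, still_running, stop, poweroff, shutdown_timeout=30):
    if options.get('force'):
        poweroff()
    else:
        stop(shutdown_timeout)
    if options.get('force_after_timeout') and still_running():
        poweroff()


stop_vm_example({'force': False, 'force_after_timeout': True},
                still_running=lambda: True,
                stop=lambda t: print(f'sent shutdown signal, waiting up to {t}s'),
                poweroff=lambda: print('poweroff'))
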
Ejemplo n.º 23
0
class DatastoreService(Service):
    def _filters_to_queryset(self, filters, field_prefix=None):
        opmap = {
            '=': 'exact',
            '!=': 'exact',
            '>': 'gt',
            '>=': 'gte',
            '<': 'lt',
            '<=': 'lte',
            '~': 'regex',
        }

        rv = []
        for f in filters:
            if not isinstance(f, (list, tuple)):
                raise ValueError('Filter must be a list: {0}'.format(f))
            if len(f) == 3:
                name, op, value = f
                if field_prefix:
                    name = field_prefix + name
                if op not in opmap:
                    raise Exception("Invalid operation: {0}".format(op))
                q = Q(**{'{0}__{1}'.format(name, opmap[op]): value})
                if op == '!=':
                    q.negate()
                rv.append(q)
            elif len(f) == 2:
                op, value = f
                if op == 'OR':
                    or_value = None
                    for value in self._filters_to_queryset(
                            value, field_prefix=field_prefix):
                        if or_value is None:
                            or_value = value
                        else:
                            or_value |= value
                    rv.append(or_value)
                else:
                    raise ValueError('Invalid operation: {0}'.format(op))
            else:
                raise Exception("Invalid filter {0}".format(f))
        return rv

    def __get_model(self, name):
        """Helper method to get Model for given name
        e.g. network.interfaces -> Interfaces
        """
        app, model = name.split('.', 1)
        return apps.get_model(app, model)

    def __queryset_serialize(self, qs, extend=None, field_prefix=None):
        for i in self.middleware.threaded(lambda: list(qs)):
            yield django_modelobj_serialize(self.middleware,
                                            i,
                                            extend=extend,
                                            field_prefix=field_prefix)

    @accepts(
        Str('name'),
        List('query-filters', register=True),
        Dict(
            'query-options',
            Str('extend'),
            Dict('extra', additional_attrs=True),
            List('order_by'),
            Bool('count'),
            Bool('get'),
            Str('prefix'),
            register=True,
        ),
    )
    def query(self, name, filters=None, options=None):
        """Query for items in a given collection `name`.

        `filters` is a list in which each entry can be in one of the following formats:

            entry: simple_filter | conjunction
            simple_filter: '[' attribute_name, OPERATOR, value ']'
            conjunction: '[' CONJUNCTION, '[' simple_filter (',' simple_filter)* ']]'

            OPERATOR: ('=' | '!=' | '>' | '>=' | '<' | '<=' | '~' )
            CONJUNCTION: 'OR'

        e.g.

        `['OR', [ ['username', '=', 'root' ], ['uid', '=', 0] ] ]`

        `[ ['username', '=', 'root' ] ]`

        .. examples(websocket)::

          Querying for username "root" and returning a single item:

            :::javascript
            {
              "id": "d51da71b-bb48-4b8b-a8f7-6046fcc892b4",
              "msg": "method",
              "method": "datastore.query",
              "params": ["account.bsdusers", [ ["username", "=", "root" ] ], {"get": true}]
            }
        """
        model = self.__get_model(name)
        if options is None:
            options = {}
        else:
            # We do not want to make changes to original options
            # which might happen with "prefix"
            options = options.copy()

        qs = model.objects.all()

        extra = options.get('extra')
        if extra:
            qs = qs.extra(**extra)

        prefix = options.get('prefix')

        if filters:
            qs = qs.filter(*self._filters_to_queryset(filters, prefix))

        order_by = options.get('order_by')
        if order_by:
            if prefix:
                # Do not change original order_by
                order_by = order_by[:]
                for i, order in enumerate(order_by):
                    if order.startswith('-'):
                        order_by[i] = '-' + prefix + order[1:]
                    else:
                        order_by[i] = prefix + order
            qs = qs.order_by(*order_by)

        if options.get('count') is True:
            return qs.count()

        result = list(
            self.__queryset_serialize(qs,
                                      extend=options.get('extend'),
                                      field_prefix=options.get('prefix')))

        if options.get('get') is True:
            return result[0]
        return result

    @accepts(Str('name'), Ref('query-options'))
    def config(self, name, options=None):
        """
        Get configuration settings object for a given `name`.

        This is a shortcut for `query(name, {"get": true})`.
        """
        if options is None:
            options = {}
        options['get'] = True
        return self.query(name, None, options)

    @accepts(Str('name'), Dict('data', additional_attrs=True))
    def insert(self, name, data):
        """
        Insert a new entry to `name`.
        """
        model = self.__get_model(name)
        for field in model._meta.fields:
            if field.name not in data:
                continue
            if isinstance(field, ForeignKey):
                data[field.name] = field.rel.to.objects.get(
                    pk=data[field.name])
        obj = model(**data)
        obj.save()
        return obj.pk

    @accepts(Str('name'), Int('id'), Dict('data', additional_attrs=True))
    def update(self, name, id, data):
        """
        Update an entry `id` in `name`.
        """
        model = self.__get_model(name)
        obj = model.objects.get(pk=id)
        for field in model._meta.fields:
            if field.name not in data:
                continue
            if isinstance(field, ForeignKey):
                data[field.name] = field.rel.to.objects.get(
                    pk=data[field.name])
        for k, v in list(data.items()):
            setattr(obj, k, v)
        obj.save()
        return obj.pk

    @accepts(Str('name'), Int('id'))
    def delete(self, name, id):
        """
        Delete an entry `id` in `name`.
        """
        model = self.__get_model(name)
        model.objects.get(pk=id).delete()
        return True

    @private
    def sql(self, query, params=None):
        cursor = connection.cursor()
        rv = None
        try:
            if params is None:
                cursor.executelocal(query)
            else:
                cursor.executelocal(query, params)
            rv = cursor.fetchall()
        finally:
            cursor.close()
        return rv
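
As a hedged usage sketch of the filter grammar documented above, assuming the standard middlewared websocket client; 'account.bsdusers' follows the docstring example, the remaining values are illustrative:

# Hypothetical calls against datastore.query.
from middlewared.client import Client

with Client() as c:
    # simple filter, returning a single entry
    c.call('datastore.query', 'account.bsdusers',
           [['username', '=', 'root']], {'get': True})
    # 'OR' conjunction of two simple filters, ordered by uid
    c.call('datastore.query', 'account.bsdusers',
           [['OR', [['username', '=', 'root'], ['uid', '=', 0]]]],
           {'order_by': ['uid']})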
Example No. 24
import pytest

from middlewared.schema import Dict, Int, Str
from middlewared.validators import validate_schema, Range


@pytest.mark.parametrize("schema,data,result", [
    ([Str("text")], {"text": "XXX"}, {"text": "XXX"}),
    ([Str("text", default="XXX")], {}, {"text": "XXX"}),
    ([Str("text", required=True)], {}, {"text"}),
    ([Int("number")], {"number": "1"}, {"number": 1}),
    ([Int("number")], {"number": "XXX"}, {"number"}),
    ([Int("number", validators=[Range(min=2)])], {"number": 1}, {"number"}),
    ([Dict("image", Str("repository", required=True))], {}, {"image.repository"}),
])
def test__validate_schema(schema, data, result):
    verrors = validate_schema(schema, data)

    # `result` is either the set of attributes expected to fail validation
    # or the expected (normalized) data after defaults/coercion are applied.
    if isinstance(result, set):
        assert {e.attribute for e in verrors.errors} == result, verrors
    else:
        assert not verrors, verrors
        assert data == result
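
A hedged note on direct usage: `validate_schema` returns a `ValidationErrors` collection that is falsy when the data satisfies the schema; the schema and values below are illustrative.

# Hypothetical direct call mirroring one of the parametrized cases above.
verrors = validate_schema([Int('number', validators=[Range(min=2)])], {'number': 1})
if verrors:
    # Each ValidationError carries the offending attribute and a message.
    print([(e.attribute, e.errmsg) for e in verrors.errors])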
Example No. 25
class PoolService(Service):
    @accepts(Int('oid'),
             Dict(
                 'pool_attach',
                 Str('target_vdev', required=True),
                 Str('new_disk', required=True),
                 Str('passphrase'),
             ))
    @job(lock=lambda args: f'pool_attach_{args[0]}')
    async def attach(self, job, oid, options):
        """
        For TrueNAS Core/Enterprise platform, if the `oid` pool is passphrase GELI encrypted, `passphrase`
        must be specified for this operation to succeed.

        `target_vdev` is the GUID of the vdev where the disk needs to be attached. In case of a STRIPED vdev, this
        is the GUID of the STRIPED disk, which will be converted to a mirror. If `target_vdev` is a mirror, it will
        be converted into an n-way mirror.
        """
        pool = await self.middleware.call('pool.get_instance', oid)
        verrors = ValidationErrors()
        if not pool['is_decrypted']:
            verrors.add('oid', 'Pool must be unlocked for this action.')
            verrors.check()
        topology = pool['topology']
        topology_type = vdev = None
        for i in topology:
            for v in topology[i]:
                if v['guid'] == options['target_vdev']:
                    topology_type = i
                    vdev = v
                    break
            if topology_type:
                break
        else:
            verrors.add('pool_attach.target_vdev', 'Unable to locate VDEV')
            verrors.check()
        if topology_type in ('cache', 'spares'):
            verrors.add('pool_attach.target_vdev',
                        f'Attaching disks to {topology_type} not allowed.')
        elif topology_type == 'data':
            # We would like to make sure here that we don't have inconsistent vdev types across data
            if vdev['type'] not in ('DISK', 'MIRROR'):
                verrors.add(
                    'pool_attach.target_vdev',
                    f'Attaching disk to {vdev["type"]} vdev is not allowed.')

        if osc.IS_FREEBSD and pool['encrypt'] == 2:
            if not options.get('passphrase'):
                verrors.add('pool_attach.passphrase',
                            'Passphrase is required for encrypted pool.')
            elif not await self.middleware.call('disk.geli_testkey', pool,
                                                options['passphrase']):
                verrors.add('pool_attach.passphrase',
                            'Passphrase is not valid.')

        if osc.IS_LINUX and options.get('passphrase'):
            verrors.add('pool_attach.passphrase',
                        'This field is not valid on this platform.')

        # Let's validate new disk now
        await self.middleware.call('disk.check_disks_availability', verrors,
                                   [options['new_disk']], 'pool_attach')
        verrors.check()

        guid = vdev['guid'] if vdev['type'] == 'DISK' else vdev['children'][0]['guid']
        disks = {
            options['new_disk']: {
                'create_swap': topology_type == 'data',
                'vdev': []
            }
        }
        enc_disks = await self.middleware.call(
            'pool.format_disks', job, disks, {
                'enc_keypath': pool['encryptkey_path'],
                'passphrase': options.get('passphrase')
            })

        devname = disks[options['new_disk']]['vdev'][0]
        extend_job = await self.middleware.call('zfs.pool.extend',
                                                pool['name'], None,
                                                [{
                                                    'target': guid,
                                                    'type': 'DISK',
                                                    'path': devname
                                                }])
        try:
            await job.wrap(extend_job)
        except CallError:
            if osc.IS_FREEBSD and pool['encrypt'] > 0:
                try:
                    # If the extend job failed, detach geli so we don't keep the disk busy
                    await self.middleware.call('disk.geli_detach_single',
                                               devname)
                except Exception:
                    self.logger.warn(f'Failed to geli detach {devname}',
                                     exc_info=True)
            raise

        if osc.IS_FREEBSD:
            disk = await self.middleware.call(
                'disk.query', [['devname', '=', options['new_disk']]],
                {'get': True})
            await self.middleware.call('pool.save_encrypteddisks', oid,
                                       enc_disks, {disk['devname']: disk})
        await self.middleware.call('disk.swaps_configure')
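
A hedged call sketch for the job above; the pool id, vdev GUID and disk name are assumptions, and on FreeBSD a passphrase-encrypted GELI pool would also need `passphrase`, as described in the docstring:

# Hypothetical: attach a new disk to a single-disk (STRIPED) data vdev,
# converting it into a mirror. Identifiers are placeholders.
from middlewared.client import Client

with Client() as c:
    c.call('pool.attach', 1, {
        'target_vdev': '1234567890123456789',
        'new_disk': 'da3',
    }, job=True)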
Example No. 26
class SharingAFPService(CRUDService):
    class Config:
        namespace = 'sharing.afp'
        datastore = 'sharing.afp_share'
        datastore_prefix = 'afp_'
        datastore_extend = 'sharing.afp.extend'

    @accepts(
        Dict('sharingafp_create',
             Str('path'),
             Bool('home', default=False),
             Str('name'),
             Str('comment'),
             List('allow'),
             List('deny'),
             List('ro'),
             List('rw'),
             Bool('timemachine', default=False),
             Int('timemachine_quota', default=0),
             Bool('nodev', default=False),
             Bool('nostat', default=False),
             Bool('upriv', default=True),
             UnixPerm('fperm', default='644'),
             UnixPerm('dperm', default='755'),
             UnixPerm('umask', default='000'),
             List('hostsallow', items=[IPAddr('ip', cidr=True)]),
             List('hostsdeny', items=[IPAddr('ip', cidr=True)]),
             Str('auxparams'),
             register=True))
    async def do_create(self, data):
        verrors = ValidationErrors()
        path = data['path']

        await self.clean(data, 'sharingafp_create', verrors)
        await self.validate(data, 'sharingafp_create', verrors)

        await check_path_resides_within_volume(verrors, self.middleware,
                                               "sharingafp_create.path", path)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(data)
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.extend(data)

        await self.middleware.call('service.reload', 'afp')

        return data

    @accepts(Int('id'),
             Patch('sharingafp_create', 'sharingafp_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        verrors = ValidationErrors()
        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })
        path = data['path']

        new = old.copy()
        new.update(data)

        await self.clean(new, 'sharingafp_update', verrors, id=id)
        await self.validate(new, 'sharingafp_update', verrors, old=old)

        await check_path_resides_within_volume(verrors, self.middleware,
                                               "sharingafp_create.path", path)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.extend(new)

        await self.middleware.call('service.reload', 'afp')

        return new

    @accepts(Int('id'))
    async def do_delete(self, id):
        return await self.middleware.call('datastore.delete',
                                          self._config.datastore, id)

    @private
    async def clean(self, data, schema_name, verrors, id=None):
        data['name'] = await self.name_exists(data, schema_name, verrors, id)

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        await self.home_exists(data['home'], schema_name, verrors, old)

    @private
    async def home_exists(self, home, schema_name, verrors, old=None):
        home_filters = [('home', '=', True)]
        home_result = None

        if home:
            if old and old['id'] is not None:
                id = old['id']

                if not old['home']:
                    home_filters.append(('id', '!=', id))
                    # The user already had this set as the home share
                    home_result = await self.middleware.call(
                        'datastore.query', self._config.datastore,
                        home_filters,
                        {'prefix': self._config.datastore_prefix})

        if home_result:
            verrors.add(f'{schema_name}.home',
                        'Only one share is allowed to be a home share.')

    async def name_exists(self, data, schema_name, verrors, id=None):
        name = data['name']
        path = data['path']
        home = data['home']
        name_filters = [('name', '=', name)]
        path_filters = [('path', '=', path)]

        if not name:
            if home:
                name = 'Homes'
            else:
                name = path.rsplit('/', 1)[-1]

        if id is not None:
            name_filters.append(('id', '!=', id))
            path_filters.append(('id', '!=', id))

        name_result = await self.middleware.call(
            'datastore.query', self._config.datastore, name_filters,
            {'prefix': self._config.datastore_prefix})
        path_result = await self.middleware.call(
            'datastore.query', self._config.datastore, path_filters,
            {'prefix': self._config.datastore_prefix})

        if name_result:
            verrors.add(f'{schema_name}.name',
                        'A share with this name already exists.')

        if path_result:
            verrors.add(f'{schema_name}.path',
                        'A share with this path already exists.')

        return name

    @private
    async def extend(self, data):
        data['allow'] = data['allow'].split()
        data['deny'] = data['deny'].split()
        data['ro'] = data['ro'].split()
        data['rw'] = data['rw'].split()
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()

        return data

    @private
    async def compress(self, data):
        data['allow'] = ' '.join(data['allow'])
        data['deny'] = ' '.join(data['deny'])
        data['ro'] = ' '.join(data['ro'])
        data['rw'] = ' '.join(data['rw'])
        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])

        return data
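
A hedged creation sketch for the CRUD service above; the path, share name and network are assumptions:

# Hypothetical: create an AFP share with Time Machine enabled, restricted to
# one subnet. List fields are stored space-joined by compress() above.
from middlewared.client import Client

with Client() as c:
    c.call('sharing.afp.create', {
        'path': '/mnt/tank/backups',
        'name': 'backups',
        'timemachine': True,
        'hostsallow': ['192.168.0.0/24'],
    })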
Example No. 27
class FilesystemService(Service):

    @accepts(Str('path', required=True), Ref('query-filters'), Ref('query-options'))
    def listdir(self, path, filters=None, options=None):
        """
        Get the contents of a directory.

        Each entry of the list consists of:
          name(str): name of the file
          path(str): absolute path of the entry
          realpath(str): absolute real path of the entry (if SYMLINK)
          type(str): DIRECTORY | FILE | SYMLINK | OTHER
          size(int): size of the entry
          mode(int): file mode/permission
          uid(int): user id of entry owner
          gid(int): group id of entry owner
        """
        if not os.path.exists(path):
            raise CallError(f'Directory {path} does not exist', errno.ENOENT)

        if not os.path.isdir(path):
            raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)

        rv = []
        for entry in os.scandir(path):
            if entry.is_dir():
                etype = 'DIRECTORY'
            elif entry.is_file():
                etype = 'FILE'
            elif entry.is_symlink():
                etype = 'SYMLINK'
            else:
                etype = 'OTHER'

            data = {
                'name': entry.name,
                'path': entry.path,
                'realpath': os.path.realpath(entry.path) if etype == 'SYMLINK' else entry.path,
                'type': etype,
            }
            try:
                stat = entry.stat()
                data.update({
                    'size': stat.st_size,
                    'mode': stat.st_mode,
                    'uid': stat.st_uid,
                    'gid': stat.st_gid,
                })
            except FileNotFoundError:
                data.update({'size': None, 'mode': None, 'uid': None, 'gid': None})
            rv.append(data)
        return filter_list(rv, filters=filters or [], options=options or {})

    @accepts(Str('path'))
    def stat(self, path):
        """
        Return the filesystem stat(2) for a given `path`.
        """
        try:
            stat = os.stat(path, follow_symlinks=False)
        except FileNotFoundError:
            raise CallError(f'Path {path} not found', errno.ENOENT)

        stat = {
            'size': stat.st_size,
            'mode': stat.st_mode,
            'uid': stat.st_uid,
            'gid': stat.st_gid,
            'atime': stat.st_atime,
            'mtime': stat.st_mtime,
            'ctime': stat.st_ctime,
            'dev': stat.st_dev,
            'inode': stat.st_ino,
            'nlink': stat.st_nlink,
        }

        try:
            stat['user'] = pwd.getpwuid(stat['uid']).pw_name
        except KeyError:
            stat['user'] = None

        try:
            stat['group'] = grp.getgrgid(stat['gid']).gr_name
        except KeyError:
            stat['group'] = None

        if os.path.exists(os.path.join(path, ".windows")):
            stat["acl"] = "windows"
        elif os.path.exists(os.path.join(path, ".mac")):
            stat["acl"] = "mac"
        else:
            stat["acl"] = "unix"

        return stat

    @private
    @accepts(
        Str('path'),
        Str('content'),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    def file_receive(self, path, content, options=None):
        """
        Simplified file receiving method for small files.

        `content` must be a base 64 encoded file content.

        DISCLAIMER: DO NOT USE THIS FOR BIG FILES (> 500KB).
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'
        with open(path, openmode) as f:
            f.write(binascii.a2b_base64(content))
        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @private
    @accepts(
        Str('path'),
        Dict(
            'options',
            Int('offset'),
            Int('maxlen'),
        ),
    )
    def file_get_contents(self, path, options=None):
        """
        Get contents of a file `path` in base64 encoding.

        DISCLAIMER: DO NOT USE THIS FOR BIG FILES (> 500KB).
        """
        options = options or {}
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as f:
            if options.get('offset'):
                f.seek(options['offset'])
            data = binascii.b2a_base64(f.read(options.get('maxlen'))).decode().strip()
        return data

    @accepts(Str('path'))
    @job(pipes=["output"])
    async def get(self, job, path):
        """
        Job to get contents of `path`.
        """

        if not os.path.isfile(path):
            raise CallError(f'{path} is not a file')

        with open(path, 'rb') as f:
            await self.middleware.run_in_thread(shutil.copyfileobj, f, job.pipes.output.w)

    @accepts(
        Str('path'),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    @job(pipes=["input"])
    async def put(self, job, path, options=None):
        """
        Job to put contents to `path`.
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'

        with open(path, openmode) as f:
            await self.middleware.run_in_thread(shutil.copyfileobj, job.pipes.input.r, f)

        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @accepts(Str('path'))
    def statfs(self, path):
        """
        Return stats from the filesystem of a given path.

        Raises:
            CallError(ENOENT) - Path not found
        """
        try:
            statfs = bsd.statfs(path)
        except FileNotFoundError:
            raise CallError('Path not found.', errno.ENOENT)
        return {
            **statfs.__getstate__(),
            'total_bytes': statfs.total_blocks * statfs.blocksize,
            'free_bytes': statfs.free_blocks * statfs.blocksize,
            'avail_bytes': statfs.avail_blocks * statfs.blocksize,
        }

    def __convert_to_basic_permset(self, permset):
        """
        Convert "advanced" ACL permset format to basic format using
        bitwise operation and constants defined in py-bsd/bsd/acl.pyx,
        py-bsd/defs.pxd and acl.h.

        If the advanced ACL can't be converted without losing
        information, we return 'OTHER'.

        Reverse process converts the constant's value to a dictionary
        using a bitwise operation.
        """
        perm = 0
        for k, v, in permset.items():
            if v:
                perm |= acl.NFS4Perm[k]

        try:
            SimplePerm = (acl.NFS4BasicPermset(perm)).name
        except Exception:
            SimplePerm = 'OTHER'

        return SimplePerm

    def __convert_to_basic_flagset(self, flagset):
        flags = 0
        for k, v, in flagset.items():
            if k == "INHERITED":
                continue
            if v:
                flags |= acl.NFS4Flag[k]

        try:
            SimpleFlag = (acl.NFS4BasicFlagset(flags)).name
        except Exception:
            SimpleFlag = 'OTHER'

        return SimpleFlag

    def __convert_to_adv_permset(self, basic_perm):
        permset = {}
        perm_mask = acl.NFS4BasicPermset[basic_perm].value
        for name, member in acl.NFS4Perm.__members__.items():
            if perm_mask & member.value:
                permset.update({name: True})
            else:
                permset.update({name: False})

        return permset

    def __convert_to_adv_flagset(self, basic_flag):
        flagset = {}
        flag_mask = acl.NFS4BasicFlagset[basic_flag].value
        for name, member in acl.NFS4Flag.__members__.items():
            if flag_mask & member.value:
                flagset.update({name: True})
            else:
                flagset.update({name: False})

        return flagset

    @accepts(Str('path'))
    def acl_is_trivial(self, path):
        """
        ACL is trivial if it can be fully expressed as a file mode without losing
        any access rules. This is intended to be used as a check before allowing
        users to chmod() through the webui
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)
        a = acl.ACL(file=path)
        return a.is_trivial

    @accepts(
        Str('path'),
        Bool('simplified', default=True),
    )
    def getacl(self, path, simplified=True):
        """
        Return ACL of a given path.
        Simplified returns a shortened form of the ACL permset and flags
        - TRAVERSE = sufficient rights to traverse a directory, but not read contents.
        - READ = sufficient rights to traverse a directory, and read file contents.
        - MODIFY = sufficient rights to traverse, read, write, and modify a file. Equivalent to modify_set.
        - FULL_CONTROL = all permissions.
        - OTHER = does not fit into any of the above categories without losing information.

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled. We also remove it here to avoid confusion.
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)

        a = acl.ACL(file=path)
        fs_acl = a.__getstate__()

        if not simplified:
            advanced_acl = []
            for entry in fs_acl:
                ace = {
                    'tag': (acl.ACLWho[entry['tag']]).value,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': entry['perms'],
                    'flags': entry['flags'],
                }
                if ace['tag'] == 'everyone@' and self.__convert_to_basic_permset(ace['perms']) == 'NOPERMS':
                    self.logger.debug('detected hidden ace')
                    continue
                advanced_acl.append(ace)
            return advanced_acl

        if simplified:
            simple_acl = []
            for entry in fs_acl:
                ace = {
                    'tag': (acl.ACLWho[entry['tag']]).value,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': {'BASIC': self.__convert_to_basic_permset(entry['perms'])},
                    'flags': {'BASIC': self.__convert_to_basic_flagset(entry['flags'])},
                }
                if ace['tag'] == 'everyone@' and ace['perms']['BASIC'] == 'NOPERMS':
                    continue
                simple_acl.append(ace)

            return simple_acl

    @accepts(
        Str('path'),
        List(
            'dacl',
            items=[
                Dict(
                    'aclentry',
                    Str('tag', enum=['owner@', 'group@', 'everyone@', 'USER', 'GROUP']),
                    Int('id', null=True),
                    Str('type', enum=['ALLOW', 'DENY']),
                    Dict(
                        'perms',
                        Bool('READ_DATA'),
                        Bool('WRITE_DATA'),
                        Bool('APPEND_DATA'),
                        Bool('READ_NAMED_ATTRS'),
                        Bool('WRITE_NAMED_ATTRS'),
                        Bool('EXECUTE'),
                        Bool('DELETE_CHILD'),
                        Bool('READ_ATTRIBUTES'),
                        Bool('WRITE_ATTRIBUTES'),
                        Bool('DELETE'),
                        Bool('READ_ACL'),
                        Bool('WRITE_ACL'),
                        Bool('WRITE_OWNER'),
                        Bool('SYNCHRONIZE'),
                        Str('BASIC', enum=['FULL_CONTROL', 'MODIFY', 'READ', 'OTHER']),
                    ),
                    Dict(
                        'flags',
                        Bool('FILE_INHERIT'),
                        Bool('DIRECTORY_INHERIT'),
                        Bool('NO_PROPAGATE_INHERIT'),
                        Bool('INHERIT_ONLY'),
                        Bool('INHERITED'),
                        Str('BASIC', enum=['INHERIT', 'NOINHERIT', 'OTHER']),
                    ),
                )
            ],
            default=[]
        ),
        Dict(
            'options',
            Bool('stripacl', default=False),
            Bool('recursive', default=False),
            Bool('traverse', default=False),
        )
    )
    @job(lock=lambda args: f'setacl:{args[0]}')
    def setacl(self, job, path, dacl, options):
        """
        Set ACL of a given path. Takes the following parameters:
        :path: realpath or relative path. We make a subsequent realpath call to resolve it.
        :dacl: Accept a "simplified" ACL here or a full ACL. If the simplified ACL
        contains ACE perms or flags that are "SPECIAL", then raise a validation error.
        :recursive: apply the ACL recursively
        :traverse: traverse filesystem boundaries (ZFS datasets)
        :strip: convert ACL to trivial. ACL is trivial if it can be expressed as a file mode without
        losing any access rules.

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled.
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)

        if dacl and options['stripacl']:
            raise CallError('Setting ACL and stripping ACL are not permitted simultaneously.', errno.EINVAL)

        if options['stripacl']:
            a = acl.ACL(file=path)
            a.strip()
            a.apply(path)
        else:
            cleaned_acl = []
            lockace_is_present = False
            for entry in dacl:
                if entry['perms'].get('BASIC') == 'OTHER' or entry['flags'].get('BASIC') == 'OTHER':
                    raise CallError('Unable to apply simplified ACL due to OTHER entry. Use full ACL.', errno.EINVAL)
                ace = {
                    'tag': (acl.ACLWho(entry['tag'])).name,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': self.__convert_to_adv_permset(entry['perms']['BASIC']) if 'BASIC' in entry['perms'] else entry['perms'],
                    'flags': self.__convert_to_adv_flagset(entry['flags']['BASIC']) if 'BASIC' in entry['flags'] else entry['flags'],
                }
                if ace['tag'] == 'EVERYONE' and self.__convert_to_basic_permset(ace['perms']) == 'NOPERMS':
                    lockace_is_present = True
                cleaned_acl.append(ace)
            if not lockace_is_present:
                locking_ace = {
                    'tag': 'EVERYONE',
                    'id': None,
                    'type': 'ALLOW',
                    'perms': self.__convert_to_adv_permset('NOPERMS'),
                    'flags': self.__convert_to_adv_flagset('INHERIT')
                }
                cleaned_acl.append(locking_ace)

            a = acl.ACL()
            a.__setstate__(cleaned_acl)
            a.apply(path)

        if not options['recursive']:
            self.logger.debug('exiting early on non-recursive task')
            return True

        winacl = subprocess.run([
            '/usr/local/bin/winacl',
            '-a', 'clone',
            f"{'-rx' if options['traverse'] else '-r'}",
            '-p', path], check=False, capture_output=True
        )
        if winacl.returncode != 0:
            raise CallError(f"Failed to recursively apply ACL: {winacl.stderr.decode()}")

        return True
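
A hedged usage sketch for the ACL methods above; the path is an assumption:

# Hypothetical: inspect the simplified ACL of a path, then strip it back to a
# trivial ACL recursively (setacl is a job).
from middlewared.client import Client

with Client() as c:
    current = c.call('filesystem.getacl', '/mnt/tank/share', True)
    c.call('filesystem.setacl', '/mnt/tank/share', [],
           {'stripacl': True, 'recursive': True, 'traverse': False}, job=True)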
Example No. 28
class FilesystemService(Service):
    @accepts(Str('path', required=True), Ref('query-filters'),
             Ref('query-options'))
    def listdir(self, path, filters=None, options=None):
        """
        Get the contents of a directory.

        Each entry of the list consists of:
          name(str): name of the file
          path(str): absolute path of the entry
          realpath(str): absolute real path of the entry (if SYMLINK)
          type(str): DIRECTORY | FILE | SYMLINK | OTHER
          size(int): size of the entry
          mode(int): file mode/permission
          uid(int): user id of entry owner
          gid(int): group id of entry owner
          acl(bool): extended ACL is present on file
        """
        if not os.path.exists(path):
            raise CallError(f'Directory {path} does not exist', errno.ENOENT)

        if not os.path.isdir(path):
            raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)

        rv = []
        for entry in os.scandir(path):
            if entry.is_symlink():
                etype = 'SYMLINK'
            elif entry.is_dir():
                etype = 'DIRECTORY'
            elif entry.is_file():
                etype = 'FILE'
            else:
                etype = 'OTHER'

            data = {
                'name': entry.name,
                'path': entry.path,
                'realpath': os.path.realpath(entry.path) if etype == 'SYMLINK' else entry.path,
                'type': etype,
            }
            try:
                stat = entry.stat()
                data.update({
                    'size': stat.st_size,
                    'mode': stat.st_mode,
                    'acl': not self.acl_is_trivial(data["realpath"]),
                    'uid': stat.st_uid,
                    'gid': stat.st_gid,
                })
            except FileNotFoundError:
                data.update({
                    'size': None,
                    'mode': None,
                    'acl': None,
                    'uid': None,
                    'gid': None
                })
            rv.append(data)
        return filter_list(rv, filters=filters or [], options=options or {})

    @accepts(Str('path'))
    def stat(self, path):
        """
        Return the filesystem stat(2) for a given `path`.
        """
        try:
            stat = os.stat(path, follow_symlinks=False)
        except FileNotFoundError:
            raise CallError(f'Path {path} not found', errno.ENOENT)

        stat = {
            'size': stat.st_size,
            'mode': stat.st_mode,
            'uid': stat.st_uid,
            'gid': stat.st_gid,
            'atime': stat.st_atime,
            'mtime': stat.st_mtime,
            'ctime': stat.st_ctime,
            'dev': stat.st_dev,
            'inode': stat.st_ino,
            'nlink': stat.st_nlink,
        }

        try:
            stat['user'] = pwd.getpwuid(stat['uid']).pw_name
        except KeyError:
            stat['user'] = None

        try:
            stat['group'] = grp.getgrgid(stat['gid']).gr_name
        except KeyError:
            stat['group'] = None

        stat['acl'] = not self.middleware.call_sync('filesystem.acl_is_trivial', path)

        return stat

    @private
    @accepts(
        Str('path'),
        Str('content', max_length=2048000),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    def file_receive(self, path, content, options=None):
        """
        Simplified file receiving method for small files.

        `content` must be a base 64 encoded file content.
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'
        with open(path, openmode) as f:
            f.write(binascii.a2b_base64(content))
        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @private
    @accepts(
        Str('path'),
        Dict(
            'options',
            Int('offset'),
            Int('maxlen'),
        ),
    )
    def file_get_contents(self, path, options=None):
        """
        Get contents of a file `path` in base64 encoding.

        DISCLAIMER: DO NOT USE THIS FOR BIG FILES (> 500KB).
        """
        options = options or {}
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as f:
            if options.get('offset'):
                f.seek(options['offset'])
            data = binascii.b2a_base64(f.read(
                options.get('maxlen'))).decode().strip()
        return data

    @accepts(Str('path'))
    @job(pipes=["output"])
    async def get(self, job, path):
        """
        Job to get contents of `path`.
        """

        if not os.path.isfile(path):
            raise CallError(f'{path} is not a file')

        with open(path, 'rb') as f:
            await self.middleware.run_in_thread(shutil.copyfileobj, f,
                                                job.pipes.output.w)

    @accepts(
        Str('path'),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    @job(pipes=["input"])
    async def put(self, job, path, options=None):
        """
        Job to put contents to `path`.
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'

        with open(path, openmode) as f:
            await self.middleware.run_in_thread(shutil.copyfileobj,
                                                job.pipes.input.r, f)

        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @accepts(Str('path'))
    def statfs(self, path):
        """
        Return stats from the filesystem of a given path.

        Raises:
            CallError(ENOENT) - Path not found
        """
        try:
            statfs = bsd.statfs(path)
        except FileNotFoundError:
            raise CallError('Path not found.', errno.ENOENT)
        return {
            **statfs.__getstate__(),
            'total_bytes': statfs.total_blocks * statfs.blocksize,
            'free_bytes': statfs.free_blocks * statfs.blocksize,
            'avail_bytes': statfs.avail_blocks * statfs.blocksize,
        }

    def __convert_to_basic_permset(self, permset):
        """
        Convert "advanced" ACL permset format to basic format using
        bitwise operation and constants defined in py-bsd/bsd/acl.pyx,
        py-bsd/defs.pxd and acl.h.

        If the advanced ACL can't be converted without losing
        information, we return 'OTHER'.

        Reverse process converts the constant's value to a dictionary
        using a bitwise operation.
        """
        perm = 0
        for k, v, in permset.items():
            if v:
                perm |= acl.NFS4Perm[k]

        try:
            SimplePerm = (acl.NFS4BasicPermset(perm)).name
        except Exception:
            SimplePerm = 'OTHER'

        return SimplePerm

    def __convert_to_basic_flagset(self, flagset):
        flags = 0
        for k, v, in flagset.items():
            if k == "INHERITED":
                continue
            if v:
                flags |= acl.NFS4Flag[k]

        try:
            SimpleFlag = (acl.NFS4BasicFlagset(flags)).name
        except Exception:
            SimpleFlag = 'OTHER'

        return SimpleFlag

    def __convert_to_adv_permset(self, basic_perm):
        permset = {}
        perm_mask = acl.NFS4BasicPermset[basic_perm].value
        for name, member in acl.NFS4Perm.__members__.items():
            if perm_mask & member.value:
                permset.update({name: True})
            else:
                permset.update({name: False})

        return permset

    def __convert_to_adv_flagset(self, basic_flag):
        flagset = {}
        flag_mask = acl.NFS4BasicFlagset[basic_flag].value
        for name, member in acl.NFS4Flag.__members__.items():
            if flag_mask & member.value:
                flagset.update({name: True})
            else:
                flagset.update({name: False})

        return flagset

    def _winacl(self, path, action, uid, gid, options):
        chroot_dir = os.path.dirname(path)
        target = os.path.basename(path)
        winacl = subprocess.run([
            '/usr/local/bin/winacl', '-a', action, '-O',
            str(uid), '-G',
            str(gid), '-rx' if options['traverse'] else '-r', '-c', chroot_dir,
            '-p', target
        ],
                                check=False,
                                capture_output=True)
        if winacl.returncode != 0:
            raise CallError(
                f"Winacl {action} on path {path} failed with error: [{winacl.stderr.decode().strip()}]"
            )

    def _common_perm_path_validate(self, path):
        if not os.path.exists(path):
            raise CallError(f"Path not found: {path}", errno.ENOENT)

        if not os.path.realpath(path).startswith('/mnt/'):
            raise CallError(
                f"Changing permissions on paths outside of /mnt is not permitted: {path}",
                errno.EPERM)

        if os.path.realpath(path) in [
                x['path'] for x in self.middleware.call_sync('pool.query')
        ]:
            raise CallError(
                f"Changing permissions of root level dataset is not permitted: {path}",
                errno.EPERM)

    @accepts(Str('path'))
    def acl_is_trivial(self, path):
        """
        Returns True if the ACL can be fully expressed as a file mode without losing
        any access rules, or if the path does not support NFSv4 ACLs (for example
        a path on a tmpfs filesystem).
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)

        # pathconf(3) name 64 corresponds to FreeBSD's _PC_ACL_NFS4; a non-zero
        # result indicates the path's filesystem supports NFSv4 ACLs.
        has_nfs4_acl_support = os.pathconf(path, 64)
        if not has_nfs4_acl_support:
            return True

        return acl.ACL(file=path).is_trivial

    @accepts(
        Dict(
            'filesystem_ownership', Str('path', required=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict('options', Bool('recursive', default=False),
                 Bool('traverse', default=False))))
    @job(lock="perm_change")
    def chown(self, job, data):
        """
        Change owner or group of file at `path`.

        `uid` and `gid` specify new owner of the file. If either
        key is absent or None, then existing value on the file is not
        changed.

        `recursive` performs action recursively, but does
        not traverse filesystem mount points.

        If `traverse` and `recursive` are specified, then the chown
        operation will traverse filesystem mount points.
        """
        job.set_progress(0, 'Preparing to change owner.')

        self._common_perm_path_validate(data['path'])

        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']
        options = data['options']

        if not options['recursive']:
            job.set_progress(100, 'Finished changing owner.')
            os.chown(data['path'], uid, gid)
        else:
            job.set_progress(10,
                             f'Recursively changing owner of {data["path"]}.')
            self._winacl(data['path'], 'chown', uid, gid, options)
            job.set_progress(100, 'Finished changing owner.')

    @accepts(
        Dict(
            'filesystem_permission', Str('path', required=True),
            UnixPerm('mode', null=True), Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
            )))
    @job(lock="perm_change")
    def setperm(self, job, data):
        """
        Remove extended ACL from specified path.

        If `mode` is specified then the mode will be applied to the
        path and files and subdirectories depending on which `options` are
        selected. Mode should be formatted as string representation of octal
        permissions bits.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `stripacl` setperm will fail if an extended ACL is present on `path`,
        unless `stripacl` is set to True.

        `recursive` remove ACLs recursively, but do not traverse dataset
        boundaries.

        `traverse` remove ACLs from child datasets.

        If no `mode` is set, and `stripacl` is True, then non-trivial ACLs
        will be converted to trivial ACLs. An ACL is trivial if it can be
        expressed as a file mode without losing any access rules.

        """
        job.set_progress(0, 'Preparing to set permissions.')
        options = data['options']
        mode = data.get('mode', None)

        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']

        self._common_perm_path_validate(data['path'])

        acl_is_trivial = self.middleware.call_sync('filesystem.acl_is_trivial',
                                                   data['path'])
        if not acl_is_trivial and not options['stripacl']:
            raise CallError(
                f'Non-trivial ACL present on [{data["path"]}]. Option "stripacl" required to change permission.',
                errno.EINVAL)

        if mode is not None:
            mode = int(mode, 8)

        a = acl.ACL(file=data['path'])
        a.strip()
        a.apply(data['path'])

        if mode:
            os.chmod(data['path'], mode)

        if uid or gid:
            os.chown(data['path'], uid, gid)

        if not options['recursive']:
            job.set_progress(100, 'Finished setting permissions.')
            return

        action = 'clone' if mode else 'strip'
        job.set_progress(
            10, f'Recursively setting permissions on {data["path"]}.')
        self._winacl(data['path'], action, uid, gid, options)
        job.set_progress(100, 'Finished setting permissions.')

    @accepts()
    async def default_acl_choices(self):
        """
        Get list of default ACL types.
        """
        acl_choices = []
        for x in ACLDefault:
            if x.value['visible']:
                acl_choices.append(x.name)

        return acl_choices

    @accepts(Str('acl_type', default='OPEN',
                 enum=[x.name for x in ACLDefault]))
    async def get_default_acl(self, acl_type):
        """
        Returns a default ACL depending on the usage specified by `acl_type`.
        If an admin group is defined, then an entry granting it full control will
        be placed at the top of the ACL.
        """
        acl = []
        admin_group = (await self.middleware.call('smb.config'))['admin_group']
        if acl_type == 'HOME' and (await self.middleware.call(
                'activedirectory.get_state')) == 'HEALTHY':
            acl_type = 'DOMAIN_HOME'
        if admin_group:
            acl.append({
                'tag': 'GROUP',
                'id': (await self.middleware.call('dscache.get_uncached_group',
                                                  admin_group))['gr_gid'],
                'perms': {'BASIC': 'FULL_CONTROL'},
                'flags': {'BASIC': 'INHERIT'},
                'type': 'ALLOW'
            })
        acl.extend((ACLDefault[acl_type].value)['acl'])

        return acl

    def _is_inheritable(self, flags):
        """
        Takes ACE flags and return True if any inheritance bits are set.
        """
        inheritance_flags = [
            'FILE_INHERIT', 'DIRECTORY_INHERIT', 'NO_PROPAGATE_INHERIT',
            'INHERIT_ONLY'
        ]
        for i in inheritance_flags:
            if flags.get(i):
                return True

        return False

    @private
    def canonicalize_acl_order(self, acl):
        """
        Convert flags to advanced, then separate the ACL into two lists: one for ACEs that have been inherited,
        one for ACEs that have not been inherited. Non-inherited ACEs take precedence
        and so they are placed first in the finalized combined list. Within each list, the
        ACEs are ordered according to the following:

        1) Deny ACEs that apply to the object itself (NOINHERIT)

        2) Deny ACEs that apply to a subobject of the object (INHERIT)

        3) Allow ACEs that apply to the object itself (NOINHERIT)

        4) Allow ACEs that apply to a subobject of the object (INHERIT)

        See http://docs.microsoft.com/en-us/windows/desktop/secauthz/order-of-aces-in-a-dacl

        The "INHERITED" bit is stripped in filesystem.getacl when generating a BASIC flag type.
        It is best practice to use a non-simplified ACL for canonicalization.
        """
        inherited_aces = []
        final_acl = []
        non_inherited_aces = []
        for entry in acl:
            entry['flags'] = self.__convert_to_adv_flagset(
                entry['flags']
                ['BASIC']) if 'BASIC' in entry['flags'] else entry['flags']
            if entry['flags'].get('INHERITED'):
                inherited_aces.append(entry)
            else:
                non_inherited_aces.append(entry)

        if inherited_aces:
            inherited_aces = sorted(
                inherited_aces,
                key=lambda x:
                (x['type'] == 'ALLOW', self._is_inheritable(x['flags'])),
            )
        if non_inherited_aces:
            non_inherited_aces = sorted(
                non_inherited_aces,
                key=lambda x:
                (x['type'] == 'ALLOW', self._is_inheritable(x['flags'])),
            )
        final_acl = non_inherited_aces + inherited_aces
        return final_acl
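
    # Illustrative note (not part of the upstream method): given a mixed ACL such as
    #   [ALLOW owner@ (INHERITED), DENY group@, ALLOW everyone@ (FILE_INHERIT)]
    # the method returns
    #   [DENY group@, ALLOW everyone@ (FILE_INHERIT), ALLOW owner@ (INHERITED)]
    # i.e. non-inherited entries first, DENY before ALLOW within each group, and
    # entries that only apply to children (inheritable flags) after entries that
    # apply to the object itself.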

    @accepts(
        Str('path'),
        Bool('simplified', default=True),
    )
    def getacl(self, path, simplified=True):
        """
        Return ACL of a given path.

        Simplified returns a shortened form of the ACL permset and flags

        `TRAVERSE` sufficient rights to traverse a directory, but not read contents.

        `READ` sufficient rights to traverse a directory, and read file contents.

        `MODIFY` sufficient rights to traverse, read, write, and modify a file. Equivalent to modify_set.

        `FULL_CONTROL` all permissions.

        If the permissions do not fit within one of the pre-defined simplified permission types, then
        the full ACL entry will be returned.

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled. We also remove it here to avoid confusion.
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)

        stat = os.stat(path)

        a = acl.ACL(file=path)
        fs_acl = a.__getstate__()

        if not simplified:
            advanced_acl = []
            for entry in fs_acl:
                ace = {
                    'tag': (acl.ACLWho[entry['tag']]).value,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': entry['perms'],
                    'flags': entry['flags'],
                }
                if ace['tag'] == 'everyone@' and self.__convert_to_basic_permset(
                        ace['perms']) == 'NOPERMS':
                    continue

                advanced_acl.append(ace)

            return {
                'uid': stat.st_uid,
                'gid': stat.st_gid,
                'acl': advanced_acl
            }

        if simplified:
            simple_acl = []
            for entry in fs_acl:
                ace = {
                    'tag': (acl.ACLWho[entry['tag']]).value,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': {
                        'BASIC':
                        self.__convert_to_basic_permset(entry['perms'])
                    },
                    'flags': {
                        'BASIC':
                        self.__convert_to_basic_flagset(entry['flags'])
                    },
                }
                if ace['tag'] == 'everyone@' and ace['perms'][
                        'BASIC'] == 'NOPERMS':
                    continue

                for key in ['perms', 'flags']:
                    if ace[key]['BASIC'] == 'OTHER':
                        ace[key] = entry[key]

                simple_acl.append(ace)

            return {'uid': stat.st_uid, 'gid': stat.st_gid, 'acl': simple_acl}

    @accepts(
        Dict(
            'filesystem_acl', Str('path', required=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            List('dacl',
                 items=[
                     Dict(
                         'aclentry',
                         Str('tag',
                             enum=[
                                 'owner@', 'group@', 'everyone@', 'USER',
                                 'GROUP'
                             ]),
                         Int('id', null=True),
                         Str('type', enum=['ALLOW', 'DENY']),
                         Dict(
                             'perms',
                             Bool('READ_DATA'),
                             Bool('WRITE_DATA'),
                             Bool('APPEND_DATA'),
                             Bool('READ_NAMED_ATTRS'),
                             Bool('WRITE_NAMED_ATTRS'),
                             Bool('EXECUTE'),
                             Bool('DELETE_CHILD'),
                             Bool('READ_ATTRIBUTES'),
                             Bool('WRITE_ATTRIBUTES'),
                             Bool('DELETE'),
                             Bool('READ_ACL'),
                             Bool('WRITE_ACL'),
                             Bool('WRITE_OWNER'),
                             Bool('SYNCHRONIZE'),
                             Str('BASIC',
                                 enum=[
                                     'FULL_CONTROL', 'MODIFY', 'READ',
                                     'TRAVERSE'
                                 ]),
                         ),
                         Dict(
                             'flags',
                             Bool('FILE_INHERIT'),
                             Bool('DIRECTORY_INHERIT'),
                             Bool('NO_PROPAGATE_INHERIT'),
                             Bool('INHERIT_ONLY'),
                             Bool('INHERITED'),
                             Str('BASIC', enum=['INHERIT', 'NOINHERIT']),
                         ),
                     )
                 ],
                 default=[]),
            Dict('options', Bool('stripacl', default=False),
                 Bool('recursive', default=False),
                 Bool('traverse', default=False),
                 Bool('canonicalize', default=True))))
    @job(lock="perm_change")
    def setacl(self, job, data):
        """
        Set ACL of a given path. Takes the following parameters:
        `path` full path to directory or file.

        `dacl` "simplified" ACL here or a full ACL.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `recursive` apply the ACL recursively

        `traverse` traverse filesystem boundaries (ZFS datasets)

        `stripacl` convert the ACL to a trivial one. An ACL is trivial if it can be expressed as a file mode without
        losing any access rules.

        `canonicalize` reorder ACL entries so that they are in canonical form as described
        in the Microsoft documentation MS-DTYP 2.4.5 (ACL).

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled.
        """
        job.set_progress(0, 'Preparing to set acl.')
        options = data['options']
        dacl = data.get('dacl', [])

        self._common_perm_path_validate(data['path'])

        if dacl and options['stripacl']:
            raise CallError(
                'Setting ACL and stripping ACL are not permitted simultaneously.',
                errno.EINVAL)

        uid = -1 if data.get('uid', None) is None else data['uid']
        gid = -1 if data.get('gid', None) is None else data['gid']
        if options['stripacl']:
            a = acl.ACL(file=data['path'])
            a.strip()
            a.apply(data['path'])
        else:
            cleaned_acl = []
            lockace_is_present = False
            for entry in dacl:
                ace = {
                    'tag': (acl.ACLWho(entry['tag'])).name,
                    'id':
                    entry['id'],
                    'type':
                    entry['type'],
                    'perms':
                    self.__convert_to_adv_permset(entry['perms']['BASIC'])
                    if 'BASIC' in entry['perms'] else entry['perms'],
                    'flags':
                    self.__convert_to_adv_flagset(entry['flags']['BASIC'])
                    if 'BASIC' in entry['flags'] else entry['flags'],
                }
                if ace['flags']['INHERIT_ONLY'] and not ace['flags'].get(
                        'DIRECTORY_INHERIT', False) and not ace['flags'].get(
                            'FILE_INHERIT', False):
                    raise CallError(
                        'Invalid flag combination. DIRECTORY_INHERIT or FILE_INHERIT must be set if INHERIT_ONLY is set.',
                        errno.EINVAL)
                if ace['tag'] == 'EVERYONE' and self.__convert_to_basic_permset(
                        ace['perms']) == 'NOPERMS':
                    lockace_is_present = True
                cleaned_acl.append(ace)
            if options['canonicalize']:
                cleaned_acl = self.canonicalize_acl_order(cleaned_acl)

            if not lockace_is_present:
                locking_ace = {
                    'tag': 'EVERYONE',
                    'id': None,
                    'type': 'ALLOW',
                    'perms': self.__convert_to_adv_permset('NOPERMS'),
                    'flags': self.__convert_to_adv_flagset('INHERIT')
                }
                cleaned_acl.append(locking_ace)

            a = acl.ACL()
            a.__setstate__(cleaned_acl)
            a.apply(data['path'])

        if not options['recursive']:
            os.chown(data['path'], uid, gid)
            job.set_progress(100, 'Finished setting ACL.')
            return

        job.set_progress(10, f'Recursively setting ACL on {data["path"]}.')
        self._winacl(data['path'], 'clone', uid, gid, options)
        job.set_progress(100, 'Finished setting ACL.')
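
A minimal client-side sketch of driving the two methods above together. The path and the
assumption that passing `job=True` blocks until the `filesystem.setacl` job finishes are
illustrative, not something defined by the service code itself:

from middlewared.client import Client

with Client() as c:
    # Read the simplified ACL and ownership of an existing path.
    current = c.call('filesystem.getacl', '/mnt/tank/share', True)

    # Re-apply the same entries recursively without crossing dataset boundaries.
    c.call('filesystem.setacl', {
        'path': '/mnt/tank/share',
        'uid': None,
        'gid': None,
        'dacl': current['acl'],
        'options': {'recursive': True, 'traverse': False},
    }, job=True)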
Ejemplo n.º 29
0
class KerberosRealmService(CRUDService):
    class Config:
        datastore = 'directoryservice.kerberosrealm'
        datastore_prefix = 'krb_'
        datastore_extend = 'kerberos.realm.kerberos_extend'
        namespace = 'kerberos.realm'
        cli_namespace = 'directory_service.kerberos.realm'

    @private
    async def kerberos_extend(self, data):
        for param in ['kdc', 'admin_server', 'kpasswd_server']:
            data[param] = data[param].split(' ') if data[param] else []

        return data

    @private
    async def kerberos_compress(self, data):
        for param in ['kdc', 'admin_server', 'kpasswd_server']:
            data[param] = ' '.join(data[param])

        return data
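
    # Example round-trip (hypothetical values): kerberos_compress() turns
    # {'kdc': ['kdc1', 'kdc2'], ...} into the stored string 'kdc1 kdc2', and
    # kerberos_extend() splits it back into ['kdc1', 'kdc2'] when rows are read.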

    @accepts(
        Dict('kerberos_realm_create',
             Str('realm', required=True),
             List('kdc'),
             List('admin_server'),
             List('kpasswd_server'),
             register=True))
    async def do_create(self, data):
        """
        Create a new kerberos realm. This will be automatically populated during the
        domain join process in an Active Directory environment. Kerberos realm names
        are case-sensitive, but the convention is to use upper-case only.

        Entries for kdc, admin_server, and kpasswd_server are not required.
        If they are unpopulated, then kerberos will use DNS SRV records to
        discover the correct servers. The option to hard-code them is provided
        because of AD site discovery: Kerberos has no concept of Active Directory
        sites, so the middleware performs the site discovery and sets the kerberos
        configuration based on the AD site.
        """
        verrors = ValidationErrors()

        verrors.add_child('kerberos_realm_create', await self._validate(data))

        if verrors:
            raise verrors

        data = await self.kerberos_compress(data)
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        await self.middleware.call('etc.generate', 'kerberos')
        await self.middleware.call('service.restart', 'cron')
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("kerberos_realm_create", "kerberos_realm_update",
                   ("attr", {
                       "update": True
                   })))
    async def do_update(self, id, data):
        """
        Update a kerberos realm by id. This will be automatically populated during the
        domain join process in an Active Directory environment. Kerberos realm names
        are case-sensitive, but the convention is to use upper-case only.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)

        data = await self.kerberos_compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self.middleware.call('etc.generate', 'kerberos')
        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete a kerberos realm by ID.
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
        await self.middleware.call('etc.generate', 'kerberos')

    @private
    async def _validate(self, data):
        verrors = ValidationErrors()
        realms = await self.query()
        for realm in realms:
            if realm['realm'].upper() == data['realm'].upper():
                verrors.add(
                    'kerberos_realm',
                    f'kerberos realm with name {realm["realm"]} already exists.'
                )
        return verrors
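
A rough usage sketch (realm name and KDC hostnames are placeholders); kerberos_compress()
will store each server list as a single space-separated string in the datastore:

from middlewared.client import Client

with Client() as c:
    c.call('kerberos.realm.create', {
        'realm': 'EXAMPLE.COM',
        'kdc': ['kdc1.example.com', 'kdc2.example.com'],
        'admin_server': [],
        'kpasswd_server': [],
    })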
Ejemplo n.º 30
0
class RsyncTaskService(TaskPathService):

    share_task_type = 'Rsync'

    class Config:
        datastore = 'tasks.rsync'
        datastore_prefix = 'rsync_'
        datastore_extend = 'rsynctask.rsync_task_extend'
        datastore_extend_context = 'rsynctask.rsync_task_extend_context'
        cli_namespace = 'task.rsync'

    ENTRY = Patch(
        'rsync_task_create',
        'rsync_task_entry',
        ('rm', {
            'name': 'validate_rpath'
        }),
        ('add', Int('id')),
        ('add', Bool('locked')),
        ('add', Dict('job', null=True, additional_attrs=True)),
    )

    @private
    async def rsync_task_extend(self, data, context):
        try:
            data['extra'] = shlex.split(data['extra'].replace('"',
                                                              r'"\"').replace(
                                                                  "'", r'"\"'))
        except ValueError:
            # Handle cases where the extra value was misconfigured by legacy installs.
            # Going forward, validation ensures the value splits cleanly with shlex.
            data['extra'] = data['extra'].split()

        for field in ('mode', 'direction'):
            data[field] = data[field].upper()
        Cron.convert_db_format_to_schedule(data)
        data['job'] = context['jobs'].get(data['id'])
        return data

    @private
    async def rsync_task_extend_context(self, rows, extra):
        jobs = {}
        for j in await self.middleware.call("core.get_jobs",
                                            [("method", "=", "rsynctask.run")],
                                            {"order_by": ["id"]}):
            try:
                task_id = int(j["arguments"][0])
            except (IndexError, ValueError):
                continue

            if task_id in jobs and jobs[task_id]["state"] == "RUNNING":
                continue

            jobs[task_id] = j

        return {
            "jobs": jobs,
        }

    @private
    async def validate_rsync_task(self, data, schema):
        verrors = ValidationErrors()

        # Windows users can have spaces in their usernames
        # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808

        username = data.get('user')
        if ' ' in username:
            verrors.add(f'{schema}.user', 'User names cannot have spaces')
            raise verrors

        user = None
        with contextlib.suppress(KeyError):
            user = await self.middleware.call('dscache.get_uncached_user',
                                              username)

        if not user:
            verrors.add(f'{schema}.user',
                        f'Provided user "{username}" does not exist')
            raise verrors

        await self.validate_path_field(data, schema, verrors)

        remote_host = data.get('remotehost')
        if not remote_host:
            verrors.add(f'{schema}.remotehost', 'Please specify a remote host')

        data['extra'] = ' '.join(data['extra'])
        try:
            shlex.split(data['extra'].replace('"',
                                              r'"\"').replace("'", r'"\"'))
        except ValueError as e:
            verrors.add(f'{schema}.extra', f'Please specify valid value: {e}')

        mode = data.get('mode')
        if not mode:
            verrors.add(f'{schema}.mode', 'This field is required')

        remote_module = data.get('remotemodule')
        if mode == 'MODULE' and not remote_module:
            verrors.add(f'{schema}.remotemodule', 'This field is required')

        if mode == 'SSH':
            remote_port = data.get('remoteport')
            if not remote_port:
                verrors.add(f'{schema}.remoteport', 'This field is required')

            remote_path = data.get('remotepath')
            if not remote_path:
                verrors.add(f'{schema}.remotepath', 'This field is required')

            search = os.path.join(user['pw_dir'], '.ssh', 'id_[edr]*')
            exclude_from_search = os.path.join(user['pw_dir'], '.ssh',
                                               'id_[edr]*pub')
            key_files = set(glob.glob(search)) - set(
                glob.glob(exclude_from_search))
            if not key_files:
                verrors.add(
                    f'{schema}.user',
                    'In order to use rsync over SSH you need a user'
                    ' with a private key (DSA/ECDSA/RSA) set up in their home directory.')
            else:
                for file in glob.glob(search):
                    if '.pub' not in file:
                        # file holds a private key and its permissions should be 600
                        if os.stat(file).st_mode & 0o077 != 0:
                            verrors.add(
                                f'{schema}.user',
                                f'Permissions {oct(os.stat(file).st_mode & 0o777)} for {file} are too open. Please '
                                f'correct them by running chmod 600 {file}')

            if (data['enabled'] and data['validate_rpath'] and remote_path
                    and remote_host and remote_port):
                if '@' in remote_host:
                    remote_username, remote_host = remote_host.rsplit('@', 1)
                else:
                    remote_username = username

                try:
                    async with await asyncio.wait_for(
                            asyncssh.connect(remote_host,
                                             port=remote_port,
                                             username=remote_username,
                                             client_keys=key_files,
                                             known_hosts=None),
                            timeout=5,
                    ) as conn:
                        await conn.run(f'test -d {shlex.quote(remote_path)}',
                                       check=True)
                except asyncio.TimeoutError:

                    verrors.add(
                        f'{schema}.remotehost',
                        'SSH timeout occurred. Remote path cannot be validated.'
                    )

                except OSError as e:

                    if e.errno == 113:
                        verrors.add(
                            f'{schema}.remotehost',
                            f'Connection to the remote host {remote_host} on port {remote_port} failed.'
                        )
                    else:
                        verrors.add(f'{schema}.remotehost', e.__str__())

                except asyncssh.DisconnectError as e:

                    verrors.add(
                        f'{schema}.remotehost',
                        f'Disconnect Error[ error code {e.code} ] was generated when trying to '
                        f'communicate with remote host {remote_host} and remote user {remote_username}.'
                    )

                except asyncssh.ProcessError as e:

                    if e.code == '1':
                        verrors.add(
                            f'{schema}.remotepath',
                            'The Remote Path you specified does not exist or is not a directory. '
                            'Either create one yourself on the remote machine or uncheck the '
                            'validate_rpath field')
                    else:
                        verrors.add(
                            f'{schema}.remotepath',
                            f'Connection to Remote Host was successful but failed to verify '
                            f'Remote Path. {e.__str__()}')

                except asyncssh.Error as e:

                    if e.__class__.__name__ in e.__str__():
                        exception_reason = e.__str__()
                    else:
                        exception_reason = e.__class__.__name__ + ' ' + e.__str__()
                    verrors.add(
                        f'{schema}.remotepath',
                        f'Remote Path could not be validated. An exception was raised. {exception_reason}'
                    )
            elif data['enabled'] and data['validate_rpath']:
                verrors.add(
                    f'{schema}.remotepath',
                    'Remote path could not be validated because of missing fields'
                )

        data.pop('validate_rpath', None)

        # Keeping compatibility with legacy UI
        for field in ('mode', 'direction'):
            data[field] = data[field].lower()

        return verrors, data

    @accepts(
        Dict(
            'rsync_task_create',
            Str('path', required=True, max_length=RSYNC_PATH_LIMIT),
            Str('user', required=True),
            Str('remotehost'),
            Int('remoteport'),
            Str('mode', enum=['MODULE', 'SSH'], default='MODULE'),
            Str('remotemodule'),
            Str('remotepath'),
            Bool('validate_rpath', default=True),
            Str('direction', enum=['PULL', 'PUSH'], default='PUSH'),
            Str('desc'),
            Cron(
                'schedule',
                defaults={'minute': '00'},
            ),
            Bool('recursive'),
            Bool('times'),
            Bool('compress'),
            Bool('archive'),
            Bool('delete'),
            Bool('quiet'),
            Bool('preserveperm'),
            Bool('preserveattr'),
            Bool('delayupdates'),
            List('extra', items=[Str('extra')]),
            Bool('enabled', default=True),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a Rsync Task.

        See the comment in Rsyncmod about `path` length limits.

        `remotehost` is the IP address or hostname of the remote system. If the username differs on the remote host,
        the "username@remote_host" format should be used.

        `mode` represents different operating mechanisms for Rsync, i.e. Rsync Module mode / Rsync SSH mode.

        `remotemodule` is the name of the remote module; this attribute should be specified when `mode` is set to MODULE.

        `remotepath` specifies the path on the remote system.

        `validate_rpath` is a boolean which, when set, validates the existence of the remote path.

        `direction` specifies if data should be PULLED or PUSHED from the remote system.

        `compress` when set reduces the size of the data which is to be transmitted.

        `archive` when set makes rsync run recursively, preserving symlinks, permissions, modification times, group,
        and special files.

        `delete` when set deletes files in the destination directory which do not exist in the source directory.

        `preserveperm` when set preserves original file permissions.

        .. examples(websocket)::

          Create a Rsync Task which pulls data from a remote system every 5 minutes.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "rsynctask.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "desc": "Test rsync task",
                    "user": "******",
                    "mode": "MODULE",
                    "remotehost": "[email protected]",
                    "compress": true,
                    "archive": true,
                    "direction": "PULL",
                    "path": "/mnt/vol1/rsync_dataset",
                    "remotemodule": "remote_module1"
                }]
            }
        """
        verrors, data = await self.validate_rsync_task(data,
                                                       'rsync_task_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return await self.get_instance(data['id'])

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('rsync_task_create', 'rsync_task_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update Rsync Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)],
                               options={'get': True})
        old.pop('job')

        new = old.copy()
        new.update(data)

        verrors, data = await self.validate_rsync_task(new,
                                                       'rsync_task_update')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)
        new.pop(self.locked_field)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return await self.get_instance(id)

    async def do_delete(self, id):
        """
        Delete Rsync Task of `id`.
        """
        res = await self.middleware.call('datastore.delete',
                                         self._config.datastore, id)
        await self.middleware.call('service.restart', 'cron')
        return res

    @private
    async def commandline(self, id):
        """
        Helper method to generate the rsync command avoiding code duplication.
        """
        rsync = await self.get_instance(id)
        path = shlex.quote(rsync['path'])

        line = ['rsync']
        for name, flag in (
            ('archive', '-a'),
            ('compress', '-zz'),
            ('delayupdates', '--delay-updates'),
            ('delete', '--delete-delay'),
            ('preserveattr', '-X'),
            ('preserveperm', '-p'),
            ('recursive', '-r'),
            ('times', '-t'),
        ):
            if rsync[name]:
                line.append(flag)
        if rsync['extra']:
            line.append(' '.join(rsync['extra']))

        # Do not use username if one is specified in host field
        # See #5096 for more details
        if '@' in rsync['remotehost']:
            remote = rsync['remotehost']
        else:
            remote = f'"{rsync["user"]}"@{rsync["remotehost"]}'

        if rsync['mode'] == 'MODULE':
            module_args = [path, f'{remote}::"{rsync["remotemodule"]}"']
            if rsync['direction'] != 'PUSH':
                module_args.reverse()
            line += module_args
        else:
            line += [
                '-e',
                f'"ssh -p {rsync["remoteport"]} -o BatchMode=yes -o StrictHostKeyChecking=yes"'
            ]
            path_args = [
                path, f'{remote}:"{shlex.quote(rsync["remotepath"])}"'
            ]
            if rsync['direction'] != 'PUSH':
                path_args.reverse()
            line += path_args

        if rsync['quiet']:
            line += ['>', '/dev/null', '2>&1']

        return ' '.join(line)
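
    # Illustrative result for a hypothetical PUSH/MODULE task with only 'archive'
    # and 'compress' enabled and no username embedded in 'remotehost':
    #
    #     rsync -a -zz /mnt/tank/src "backup"@backuphost::"remote_module1"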

    @item_method
    @accepts(Int('id'))
    @returns()
    @job(lock=lambda args: args[-1], lock_queue_size=1, logs=True)
    def run(self, job, id):
        """
        Job to run rsync task of `id`.

        Output is saved to job log excerpt (not syslog).
        """
        self.middleware.call_sync('network.general.will_perform_activity',
                                  'rsync')

        rsync = self.middleware.call_sync('rsynctask.get_instance', id)
        if rsync['locked']:
            self.middleware.call_sync('rsynctask.generate_locked_alert', id)
            return

        commandline = self.middleware.call_sync('rsynctask.commandline', id)

        cp = run_command_with_user_context(commandline, rsync['user'],
                                           lambda v: job.logs_fd.write(v))

        for klass in ('RsyncSuccess',
                      'RsyncFailed') if not rsync['quiet'] else ():
            self.middleware.call_sync('alert.oneshot_delete', klass,
                                      rsync['id'])

        if cp.returncode not in RsyncReturnCode.nonfatals():
            err = None
            if cp.returncode == RsyncReturnCode.STREAMIO and rsync['compress']:
                err = (
                    "rsync command with compression enabled failed with STREAMIO error. "
                    "This may indicate that remote server lacks support for the new-style "
                    "compression used by TrueNAS.")

            if not rsync['quiet']:
                self.middleware.call_sync(
                    'alert.oneshot_create', 'RsyncFailed', {
                        'id': rsync['id'],
                        'direction': rsync['direction'],
                        'path': rsync['path'],
                    })

            if err:
                msg = f'{err} Check logs for further information'
            else:
                try:
                    rc_name = RsyncReturnCode(cp.returncode).name
                except ValueError:
                    rc_name = 'UNKNOWN'

                msg = (f'rsync command returned {cp.returncode} - {rc_name}. '
                       'Check logs for further information.')
            raise CallError(msg)

        elif not rsync['quiet']:
            self.middleware.call_sync(
                'alert.oneshot_create', 'RsyncSuccess', {
                    'id': rsync['id'],
                    'direction': rsync['direction'],
                    'path': rsync['path'],
                })
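
A rough end-to-end sketch with placeholder values; the assumption that `job=True` blocks
until the rsynctask.run job completes comes from the middleware client, not from the
service code above:

from middlewared.client import Client

with Client() as c:
    # Create a module-mode push task using the rsync_task_create schema.
    task = c.call('rsynctask.create', {
        'path': '/mnt/tank/rsync_dataset',
        'user': 'backup',
        'mode': 'MODULE',
        'remotehost': 'backuphost',
        'remotemodule': 'remote_module1',
        'direction': 'PUSH',
        'schedule': {'minute': '00'},
    })

    # Run the task immediately and wait for the job to finish.
    c.call('rsynctask.run', task['id'], job=True)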