Example #1
0
class SNMPService(SystemServiceService):
    """Configure the SNMP system service."""

    class Config:
        service = 'snmp'
        datastore_prefix = 'snmp_'

    @accepts(
        Dict('snmp_update',
             Str('location'),
             Str('contact',
                 validators=[Or(Email(), Match(r'^[-_a-zA-Z0-9\s]*$'))]),
             Bool('traps'),
             Bool('v3'),
             Str('community',
                 validators=[Match(r'^[-_.a-zA-Z0-9\s]*$')],
                 default='public'),
             Str('v3_username'),
             Str('v3_authtype', enum=['', 'MD5', 'SHA']),
             Str('v3_password'),
             Str('v3_privproto', enum=[None, 'AES', 'DES'], null=True),
             Str('v3_privpassphrase'),
             Int('loglevel', validators=[Range(min=0, max=7)]),
             Str('options'),
             update=True))
    async def do_update(self, data):
        """Update the SNMP service configuration.

        Merges `data` over the stored configuration, validates the merged
        result and persists it via the base service helper.

        Raises:
            ValidationErrors: if the merged configuration is invalid.
        """
        old = await self.config()

        # Validate the merged configuration so partial updates are checked
        # against the effective (stored + incoming) values.
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        # A community string is mandatory unless SNMPv3 is enabled.
        if not new['v3'] and not new['community']:
            verrors.add('snmp_update.community',
                        'This field is required when SNMPv3 is disabled')

        if new['v3_authtype'] and not new['v3_password']:
            verrors.add(
                'snmp_update.v3_password',
                'This field is required when SNMPv3 auth type is specified',
            )

        if new['v3_password'] and len(new['v3_password']) < 8:
            verrors.add('snmp_update.v3_password',
                        'Password must contain at least 8 characters')

        if new['v3_privproto'] and not new['v3_privpassphrase']:
            verrors.add(
                'snmp_update.v3_privpassphrase',
                'This field is required when SNMPv3 private protocol is specified',
            )

        if verrors:
            raise verrors

        await self._update_service(old, new)

        return new
Example #2
0
class SNMPService(SystemServiceService):
    """Configure the SNMP system service."""

    class Config:
        service = "snmp"
        datastore_prefix = "snmp_"

    @accepts(
        Dict(
            'snmp_update',
            Str('location'),
            Str('contact',
                validators=[Or(Email(), Match(r'^[-_a-zA-Z0-9\s]+$'))]),
            Bool('traps'),
            Bool('v3'),
            Str('community', validators=[Match(r'^[-_.a-zA-Z0-9\s]*$')]),
            Str('v3_username'),
            Str('v3_authtype', enum=['', 'MD5', 'SHA']),
            Str('v3_password'),
            Str('v3_privproto', enum=[None, 'AES', 'DES']),
            Str('v3_privpassphrase'),
            Int('loglevel', validators=[Range(min=0, max=7)]),
            Str('options'),
        ))
    async def update(self, data):
        """Update the SNMP service configuration.

        Validation is performed on the merged (stored + incoming)
        configuration so fields omitted from `data` are still checked
        against their effective values.

        Raises:
            ValidationErrors: if the merged configuration is invalid.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        # Validate `new`, not `data`: `data` may omit fields (raising
        # KeyError) and would skip checks against the stored values.
        if not new["v3"] and not new["community"]:
            verrors.add("snmp_update.community",
                        "This field is required when SNMPv3 is disabled")

        if new["v3_authtype"] and not new["v3_password"]:
            verrors.add(
                "snmp_update.v3_password",
                "This field is required when SNMPv3 auth type is specified")

        if new["v3_password"] and len(new["v3_password"]) < 8:
            verrors.add("snmp_update.v3_password",
                        "Password must contain at least 8 characters")

        if new["v3_privproto"] and not new["v3_privpassphrase"]:
            verrors.add(
                "snmp_update.v3_privpassphrase",
                "This field is required when SNMPv3 private protocol is specified"
            )

        if verrors:
            raise verrors

        await self._update_service(old, new)

        return new
Example #3
0
class TFTPService(SystemServiceService):
    """TFTP system service configuration."""

    class Config:
        service = "tftp"
        datastore_prefix = "tftp_"

    @accepts(
        Dict(
            'tftp_update',
            Dir('directory'),
            Bool('newfiles'),
            Int('port', validators=[Range(min=1, max=65535)]),
            Str('username'),
            Str('umask', validators=[Match(r"^[0-7]{3}$")]),
            Str('options'),
        ))
    async def update(self, data):
        """Validate and persist the TFTP configuration, returning it."""
        current = await self.config()
        updated = {**current, **data}

        errors = ValidationErrors()
        directory = updated["directory"]
        if directory:
            # The served directory must live on a storage volume.
            await check_path_resides_within_volume(
                errors, self.middleware, "tftp_update.directory", directory)

        if errors:
            raise errors

        await self._update_service(current, updated)

        return updated
Example #4
0
class RsyncModService(CRUDService):
    """CRUD service for rsyncd module definitions."""

    class Config:
        datastore = 'services.rsyncmod'
        datastore_prefix = 'rsyncmod_'

    @accepts(
        Dict(
            'rsyncmod',
            Str('name', validators=[Match(r'[^/\]]')]),
            Str('comment'),
            Str('path'),
            Str('mode'),
            Int('maxconn'),
            Str('user'),
            Str('group'),
            List('hostsallow', items=[Str('hostsallow')]),
            List('hostsdeny', items=[Str('hostdeny')]),
            Str('auxiliary'),
            register=True,
        ))
    async def do_create(self, data):
        """Create an rsync module, persist it and reload rsyncd.

        `hostsallow`/`hostsdeny` are accepted as lists and stored as
        space-separated strings.
        """
        self._join_host_lists(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.reload', 'rsync')
        return data

    @accepts(Int('id'), Ref('rsyncmod'))
    async def do_update(self, id, data):
        """Update rsync module `id` and reload rsyncd."""
        module = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'prefix': self._config.datastore_prefix,
                'get': True
            })
        module.update(data)
        self._join_host_lists(module)

        # Persist the processed record: previously the raw `data` was
        # written, so the joined hostsallow/hostsdeny strings were never
        # saved. `id` is excluded from the update payload.
        payload = {k: v for k, v in module.items() if k != 'id'}
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, payload,
                                   {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.reload', 'rsync')

        return module

    @accepts(Int('id'))
    async def do_delete(self, id):
        """Delete rsync module `id`."""
        return await self.middleware.call('datastore.delete',
                                          self._config.datastore, id)

    @staticmethod
    def _join_host_lists(data):
        # Normalize hostsallow/hostsdeny to space-separated strings.
        # Values already stored as strings (e.g. read back from the
        # datastore) are left untouched instead of being re-joined
        # character by character.
        for field in ('hostsallow', 'hostsdeny'):
            value = data.get(field)
            if isinstance(value, list):
                data[field] = ' '.join(value)
            elif value is None:
                data[field] = ''
Example #5
0
File: tftp.py  Project: bmhughes/freenas
class TFTPService(SystemServiceService):
    class Config:
        service = "tftp"
        datastore_prefix = "tftp_"
        cli_namespace = "service.tftp"

    ENTRY = Dict(
        'tftp_entry',
        Bool('newfiles', required=True),
        Str('directory', required=True),
        Str('host', validators=[IpAddress()], required=True),
        Int('port', validators=[Port()], required=True),
        Str('options', required=True),
        Str('umask', required=True, validators=[Match(r'^[0-7]{3}$')]),
        Str('username', required=True),
        Int('id', required=True),
    )

    @accepts(
        Patch(
            'tftp_entry',
            'tftp_update',
            ('rm', {
                'name': 'id'
            }),
            ('replace', Dir('directory')),
            ('attr', {
                'update': True
            }),
        ))
    async def do_update(self, data):
        """
        Update TFTP Service Configuration.

        `newfiles` when set enables network devices to send files to the system.

        `username` sets the user account which will be used to access `directory`. It should be ensured `username`
        has access to `directory`.
        """
        current = await self.config()
        updated = {**current, **data}

        errors = ValidationErrors()
        if updated["directory"]:
            # The served directory must reside on a storage volume.
            await check_path_resides_within_volume(
                errors, self.middleware, "tftp_update.directory",
                updated["directory"])

        if errors:
            raise errors

        await self._update_service(current, updated)

        return await self.config()
Example #6
0
def get_schema(variable_details, update, existing=NOT_PROVIDED):
    """Translate a catalog question definition into middleware schema objects.

    Returns a list whose first element is the schema object built for
    `variable_details`; any `subquestions` are appended after it as
    sibling schema objects.

    `existing` is the currently-stored value mapping (when updating an
    already-configured release); it is used to pin immutable variables
    to their current value.
    """
    schema_details = variable_details['schema']
    schema_class = mapping[schema_details['type']]
    # Current value of this variable, if an existing config dict was given.
    cur_val = existing.get(variable_details['variable'], NOT_PROVIDED) if isinstance(existing, dict) else NOT_PROVIDED

    # Validation is ensured at chart level to ensure that we don't have enum for say boolean
    obj_kwargs = {k: schema_details[k] for k in filter(
        lambda k: k in schema_details,
        ('required', 'default', 'private', 'ipv4', 'ipv6', 'cidr', 'null', 'additional_attrs', 'editable')
    )}
    # Immutable variables with an existing value are frozen: default to the
    # current value and mark the attribute read-only.
    if schema_details.get('immutable') and cur_val is not NOT_PROVIDED:
        obj_kwargs['default'] = cur_val
        obj_kwargs['editable'] = False

    if schema_class not in (Cron, Dict):
        obj = schema_class(variable_details['variable'], **obj_kwargs)
    else:
        # Cron/Dict are containers: recurse into their `attrs` and flatten
        # the resulting schema objects into the constructor call.
        obj = schema_class(
            variable_details['variable'],
            *list(chain.from_iterable(
                get_schema(var, update, cur_val or NOT_PROVIDED) for var in schema_details.get('attrs', [])
            )),
            update=update, **obj_kwargs
        )
        if schema_class == Dict:
            obj = update_conditional_defaults(obj, variable_details)

    result = []

    obj.ref = schema_details.get('$ref', [])

    if schema_class in (Str, Int):
        # Int bounds come from min/max, Str bounds from min_length/max_length;
        # both map onto a Range validator's min/max kwargs.
        range_vars = ['min', 'max'] if schema_class == Int else ['min_length', 'max_length']
        range_args = {k: schema_details[v] for k, v in zip(['min', 'max'], range_vars) if schema_details.get(v)}
        if range_args:
            obj.validators.append(Range(**range_args))

        if 'enum' in schema_details:
            obj.enum = [v['value'] for v in schema_details['enum']]

        if schema_class == Str:
            if 'valid_chars' in schema_details:
                obj.validators.append(Match(schema_details['valid_chars']))

    if schema_class == List:
        obj.items = list(chain.from_iterable(get_schema(i, update) for i in schema_details['items']))
    elif 'subquestions' in schema_details:
        # Subquestions become sibling schema objects emitted after `obj`.
        result.extend(list(chain.from_iterable(
            get_schema(i, update, existing) for i in schema_details['subquestions']
        )))

    result.insert(0, obj)
    return result
Example #7
0
class RAW(StorageDevice):
    """Storage device for a VM backed by a raw file (plain disk image)."""

    # Device type identifier for this storage backend.
    TYPE = 'file'

    # Schema for the device's `attributes` payload; per the Match
    # explanation, `path` may not contain "{" or "}" characters.
    schema = Dict(
        'attributes',
        Str('path', required=True, validators=[Match(
            r'^[^{}]*$', explanation='Path should not contain "{", "}" characters'
        )]),
        Str('type', enum=['AHCI', 'VIRTIO'], default='AHCI'),
        Bool('exists'),
        Bool('boot', default=False),
        Int('size', default=None, null=True),
        Int('logical_sectorsize', enum=[None, 512, 4096], default=None, null=True),
        Int('physical_sectorsize', enum=[None, 512, 4096], default=None, null=True),
    )
Example #8
0
File: cdrom.py  Project: yaplej/freenas
class CDROM(Device):
    """CD-ROM VM device backed by an image file on the host."""

    schema = Dict(
        'attributes',
        Str('path',
            required=True,
            validators=[
                Match(
                    r'^[^{}]*$',
                    explanation='Path should not contain "{", "}" characters')
            ]),
    )

    def identity(self):
        """Return the backing image path, which identifies this device."""
        return self.data['attributes']['path']

    def is_available(self):
        """The device is available when its backing image file exists."""
        return os.path.exists(self.identity())

    def xml_linux(self, *args, **kwargs):
        """Build the libvirt XML element describing this CD-ROM device."""
        image_path = self.data['attributes']['path']
        target_dev = f'sd{disk_from_number(kwargs.pop("disk_number"))}'
        children = [
            create_element('driver', name='qemu', type='raw'),
            create_element('source', file=image_path),
            create_element('target', dev=target_dev, bus='sata'),
            create_element('boot', order=str(kwargs.pop('boot_number'))),
        ]
        return create_element('disk', type='file', device='cdrom',
                              attribute_dict={'children': children})
Example #9
0
class TunableService(CRUDService):
    """Manage system tunables (sysctl/loader/rc variables)."""

    class Config:
        datastore = 'system.tunable'
        datastore_prefix = 'tun_'
        datastore_extend = 'tunable.upper'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Pre-override sysctl values keyed by OID, so they can be
        # restored when a tunable is changed or deleted.
        self.__default_sysctl = {}

    @private
    async def default_sysctl_config(self):
        """Return the mapping of overridden sysctl OIDs to their defaults."""
        return self.__default_sysctl

    @private
    async def get_default_value(self, oid):
        """Return the recorded default value for sysctl `oid`."""
        return self.__default_sysctl[oid]

    @private
    async def set_default_value(self, oid, value):
        # Record only the first (pre-override) value seen for an OID.
        if oid not in self.__default_sysctl:
            self.__default_sysctl[oid] = value

    @accepts()
    async def tunable_type_choices(self):
        """
        Retrieve tunable type choices supported in the system
        """
        return {k: k for k in TUNABLE_TYPES}

    @accepts(Dict(
        'tunable_create',
        Str('var', validators=[Match(r'^[\w\.\-]+$')], required=True),
        Str('value', required=True),
        Str('type', enum=TUNABLE_TYPES, required=True),
        Str('comment'),
        Bool('enabled', default=True),
        register=True
    ))
    async def do_create(self, data):
        """
        Create a Tunable.

        `var` represents name of the sysctl/loader/rc variable.

        `type` for SCALE should be one of the following:
        1) SYSCTL     -     Configure `var` for sysctl(8)

        `type` for CORE/ENTERPRISE should be one of the following:
        1) LOADER     -     Configure `var` for loader(8)
        2) RC         -     Configure `var` for rc(8)
        3) SYSCTL     -     Configure `var` for sysctl(8)
        """
        await self.clean(data, 'tunable_create')
        await self.validate(data, 'tunable_create')
        await self.lower(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call('service.reload', data['type'])

        # get_instance: consistent with do_update/do_delete (the
        # underscore-prefixed alias is deprecated).
        return await self.get_instance(data['id'])

    @accepts(
        Int('id'),
        Patch(
            'tunable_create',
            'tunable_update',
            ('attr', {'update': True})
        )
    )
    async def do_update(self, id, data):
        """
        Update Tunable of `id`.
        """
        old = await self.get_instance(id)

        new = old.copy()
        new.update(data)

        await self.clean(new, 'tunable_update', old=old)
        await self.validate(new, 'tunable_update')

        await self.lower(new)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )

        # If a sysctl tunable was renamed/retyped, restore the original
        # value of the old variable before the new one takes effect.
        if old['type'] == 'SYSCTL' and old['var'] in self.__default_sysctl and (
            old['var'] != new['var'] or old['type'] != new['type']
        ):
            default_value = self.__default_sysctl.pop(old['var'])
            cp = await run(['sysctl', f'{old["var"]}={default_value}'], check=False, encoding='utf8')
            if cp.returncode:
                self.middleware.logger.error(
                    'Failed to set sysctl %r -> %r : %s', old['var'], default_value, cp.stderr
                )

        await self.middleware.call('service.reload', new['type'])

        return await self.get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete Tunable of `id`.
        """
        tunable = await self.get_instance(id)
        await self.lower(tunable)
        if tunable['type'] == 'sysctl':
            # Restore the default value, if it is possible.
            value_default = self.__default_sysctl.pop(tunable['var'], None)
            if value_default:
                cp = await run(['sysctl', f'{tunable["var"]}={value_default}'], check=False, encoding='utf8')
                if cp.returncode:
                    self.middleware.logger.error(
                        'Failed to set sysctl %r -> %r : %s', tunable['var'], value_default, cp.stderr
                    )

        response = await self.middleware.call(
            'datastore.delete',
            self._config.datastore,
            id
        )

        await self.middleware.call('service.reload', tunable['type'].lower())

        return response

    @private
    async def lower(self, data):
        """Lowercase the tunable `type` (datastore/service form)."""
        data['type'] = data['type'].lower()

        return data

    @private
    async def upper(self, data):
        """Uppercase the tunable `type` (API form)."""
        data['type'] = data['type'].upper()

        return data

    @private
    async def clean(self, tunable, schema_name, old=None):
        """Normalize `tunable` and reject quoted values / duplicate vars.

        Raises:
            ValidationErrors: on quotes in the value or a duplicate `var`.
        """
        verrors = ValidationErrors()
        skip_dupe = False
        tun_comment = tunable.get('comment')
        tun_value = tunable['value']
        tun_var = tunable['var']

        if tun_comment is not None:
            tunable['comment'] = tun_comment.strip()

        if '"' in tun_value or "'" in tun_value:
            verrors.add(f"{schema_name}.value",
                        'Quotes in value are not allowed')

        if schema_name == 'tunable_update' and old:
            old_tun_var = old['var']

            if old_tun_var == tun_var:
                # They aren't trying to change to a new name, just updating
                skip_dupe = True

        if not skip_dupe:
            tun_vars = await self.middleware.call(
                'datastore.query', self._config.datastore, [('tun_var', '=',
                                                             tun_var)])

            if tun_vars:
                # Attach the error to `var`, the duplicated attribute
                # (previously mis-reported against `.value`).
                verrors.add(f"{schema_name}.var",
                            'This variable already exists')

        if verrors:
            raise verrors

        return tunable

    @private
    async def validate(self, tunable, schema_name):
        """Validate the `var` name format for the tunable's type.

        Raises:
            ValidationErrors: when `var` does not match the expected format.
        """
        # Raw strings: the previous non-raw patterns contained the invalid
        # escape sequence "\." (deprecated, a future SyntaxError).
        sysctl_re = re.compile(r'[a-z][a-z0-9_]+\.([a-z0-9_]+\.)*[a-z0-9_]+', re.I)
        loader_re = re.compile(r'[a-z][a-z0-9_]+\.*([a-z0-9_]+\.)*[a-z0-9_]+', re.I)

        verrors = ValidationErrors()
        tun_var = tunable['var'].lower()
        tun_type = tunable['type'].lower()

        if tun_type in ('loader', 'rc'):
            err_msg = "Value can start with a letter and end with an alphanumeric. Alphanumeric and underscore" \
                      " characters are allowed"
        else:
            err_msg = 'Value can start with a letter and end with an alphanumeric. A period (.) once is a must.' \
                      ' Alphanumeric and underscore characters are allowed'

        if (
            tun_type in ('loader', 'rc') and
            not loader_re.match(tun_var)
        ) or (
            tun_type == 'sysctl' and
            not sysctl_re.match(tun_var)
        ):
            verrors.add(f"{schema_name}.var", err_msg)

        if verrors:
            raise verrors
0
class ChartReleaseService(CRUDService):
    """CRUD service for Kubernetes chart releases (installed applications)."""

    class Config:
        datastore_primary_key_type = 'string'
        namespace = 'chart.release'
        cli_namespace = 'app.chart_release'

    # Shape of a chart release entry as returned by this service's queries.
    ENTRY = Dict(
        'chart_release_entry',
        Str('name', required=True),
        Dict('info', additional_attrs=True),
        Dict('config', additional_attrs=True),
        List('hooks'),
        Int('version', required=True, description='Version of chart release'),
        Str('namespace', required=True),
        Dict(
            'chart_metadata',
            Str('name', required=True, description='Name of application'),
            Str('version', required=True,
                description='Version of application'),
            Str('latest_chart_version',
                required=True,
                description='Latest available version of application'),
            additional_attrs=True,
        ),
        Str('id', required=True),
        Str('catalog', required=True),
        Str('catalog_train', required=True),
        Str('path', required=True),
        Str('dataset', required=True),
        Str('status', required=True),
        List('used_ports',
             items=[
                 Dict(
                     'port',
                     Int('port', required=True),
                     Str('protocol', required=True),
                 )
             ],
             required=True),
        Dict(
            'pod_status',
            Int('available', required=True),
            Int('desired', required=True),
            required=True,
        ),
        Bool('update_available', required=True),
        Str('human_version',
            required=True,
            description='Human friendly version identifier for chart release'),
        Str('human_latest_version',
            required=True,
            description=
            'Human friendly latest available version identifier for chart release'
            ),
        Bool(
            'container_images_update_available',
            required=True,
            description=
            'Will be set when any image(s) being used in the chart release has a newer version available'
        ),
        Dict('portals', additional_attrs=True),
        Dict('chart_schema', null=True, additional_attrs=True),
        Dict('history', additional_attrs=True),
        # Kubernetes resources associated with the release; one List per
        # member of the Resources enum is appended dynamically.
        Dict(
            'resources',
            Dict('storage_class', additional_attrs=True),
            List('persistent_volumes'),
            List('host_path_volumes'),
            List('locked_host_paths'),
            Dict('container_images', additional_attrs=True),
            List('truenas_certificates', items=[Int('certificate_id')]),
            List('truenas_certificate_authorities',
                 items=[Int('certificate_authority_id')]),
            *[List(r.value) for r in Resources],
        ),
    )

    @filterable
    async def query(self, filters, options):
        """
        Query available chart releases.

        `query-options.extra.retrieve_resources` is a boolean when set will retrieve existing kubernetes resources
        in the chart namespace.

        `query-options.extra.history` is a boolean when set will retrieve all chart version upgrades
        for a chart release.

        `query-options.extra.include_chart_schema` is a boolean when set will retrieve the schema being used by
        the chart release in question.

        `query-options.extra.resource_events` is a boolean when set will retrieve individual events of each resource.
        This only has effect if `query-options.extra.retrieve_resources` is set.
        """
        if not await self.middleware.call('kubernetes.validate_k8s_setup',
                                          False):
            # We use filter_list here to ensure that `options` are respected, options like get: true
            return filter_list([], filters, options)

        k8s_config = await self.middleware.call('kubernetes.config')
        update_catalog_config = {}
        catalogs = await self.middleware.call(
            'catalog.query', [], {'extra': {
                'item_details': True
            }})
        container_images = {}
        for image in await self.middleware.call('container.image.query'):
            for tag in image['repo_tags']:
                if not container_images.get(tag):
                    container_images[tag] = image

        for catalog in catalogs:
            update_catalog_config[catalog['label']] = {}
            for train in catalog['trains']:
                train_data = {}
                for catalog_item in catalog['trains'][train]:
                    max_version = catalog['trains'][train][catalog_item][
                        'latest_version'] or '0.0.0'
                    app_version = catalog['trains'][train][catalog_item][
                        'latest_app_version'] or '0.0.0'
                    train_data[catalog_item] = {
                        'chart_version': parse_version(max_version),
                        'app_version': app_version,
                    }

                update_catalog_config[catalog['label']][train] = train_data

        k8s_node_ip = await self.middleware.call('kubernetes.node_ip')
        options = options or {}
        extra = copy.deepcopy(options.get('extra', {}))
        retrieve_schema = extra.get('include_chart_schema')
        get_resources = extra.get('retrieve_resources')
        get_locked_paths = extra.get('retrieve_locked_paths')
        locked_datasets = await self.middleware.call(
            'zfs.dataset.locked_datasets') if get_locked_paths else []
        get_history = extra.get('history')
        if retrieve_schema:
            questions_context = await self.middleware.call(
                'catalog.get_normalised_questions_context')
        else:
            questions_context = None

        if filters and len(filters) == 1 and filters[0][:2] == ['id', '=']:
            extra['namespace_filter'] = [
                'metadata.namespace', '=',
                f'{CHART_NAMESPACE_PREFIX}{filters[0][-1]}'
            ]
            resources_filters = [extra['namespace_filter']]
        else:
            resources_filters = [[
                'metadata.namespace', '^', CHART_NAMESPACE_PREFIX
            ]]

        ports_used = collections.defaultdict(list)
        for node_port_svc in await self.middleware.call(
                'k8s.service.query',
            [['spec.type', '=', 'NodePort']] + resources_filters):
            release_name = node_port_svc['metadata']['namespace'][
                len(CHART_NAMESPACE_PREFIX):]
            ports_used[release_name].extend([{
                'port': p['node_port'],
                'protocol': p['protocol']
            } for p in node_port_svc['spec']['ports']])

        if get_resources:
            storage_mapping = await self.middleware.call(
                'chart.release.get_workload_storage_details')

        resources_mapping = await self.middleware.call(
            'chart.release.get_resources_with_workload_mapping', {
                'resource_events':
                extra.get('resource_events', False),
                'resource_filters':
                resources_filters,
                'resources': [
                    r.name for r in (Resources if get_resources else [
                        Resources.POD, Resources.DEPLOYMENT, Resources.
                        STATEFULSET
                    ])
                ],
            })
        resources = resources_mapping['resources']

        release_secrets = await self.middleware.call(
            'chart.release.releases_secrets', extra)
        releases = []
        for name, release in release_secrets.items():
            config = {}
            release_data = release['releases'].pop(0)
            cur_version = release_data['chart_metadata']['version']

            for rel_data in filter(
                    lambda r: r['chart_metadata']['version'] == cur_version,
                    itertools.chain(reversed(release['releases']),
                                    [release_data])):
                config.update(rel_data['config'])

            pods_status = resources_mapping['workload_status'][name]
            pod_diff = pods_status['available'] - pods_status['desired']
            status = 'ACTIVE'
            if pod_diff == 0 and pods_status['desired'] == 0:
                status = 'STOPPED'
            elif pod_diff < 0:
                status = 'DEPLOYING'

            # We will retrieve all host ports being used
            for pod in filter(lambda p: p['status']['phase'] == 'Running',
                              resources[Resources.POD.value][name]):
                for container in pod['spec']['containers']:
                    ports_used[name].extend([{
                        'port': p['host_port'],
                        'protocol': p['protocol']
                    } for p in (container['ports'] or []) if p['host_port']])

            release_data.update({
                'path':
                os.path.join('/mnt', k8s_config['dataset'], 'releases', name),
                'dataset':
                os.path.join(k8s_config['dataset'], 'releases', name),
                'config':
                config,
                'status':
                status,
                'used_ports':
                ports_used[name],
                'pod_status':
                pods_status,
            })

            container_images_normalized = {
                i_name: {
                    'id':
                    image_details.get('id'),
                    'update_available':
                    image_details.get('update_available', False)
                }
                for i_name, image_details in map(
                    lambda i: (i, container_images.get(i, {})),
                    list(
                        set(c['image'] for workload_type in ('deployments',
                                                             'statefulsets')
                            for workload in resources[workload_type][name]
                            for c in workload['spec']['template']['spec']
                            ['containers'])))
            }
            if get_resources:
                release_resources = {
                    'storage_class':
                    storage_mapping['storage_classes'][get_storage_class_name(
                        name)],
                    'persistent_volumes':
                    storage_mapping['persistent_volumes'][name],
                    'host_path_volumes':
                    await self.host_path_volumes(
                        itertools.chain(*[
                            resources[getattr(Resources, k).value][name]
                            for k in ('DEPLOYMENT', 'STATEFULSET')
                        ])),
                    **{r.value: resources[r.value][name]
                       for r in Resources},
                    'container_images':
                    container_images_normalized,
                    'truenas_certificates': [
                        v['id'] for v in release_data['config'].get(
                            'ixCertificates', {}).values()
                    ],
                    'truenas_certificate_authorities': [
                        v['id'] for v in release_data['config'].get(
                            'ixCertificateAuthorities', {}).values()
                    ],
                }
                if get_locked_paths:
                    release_resources['locked_host_paths'] = [
                        v['host_path']['path']
                        for v in release_resources['host_path_volumes']
                        if await self.middleware.call(
                            'pool.dataset.path_in_locked_datasets',
                            v['host_path']['path'], locked_datasets)
                    ]

                release_data['resources'] = release_resources

            if get_history:
                release_data['history'] = release['history']
                for k, v in release_data['history'].items():
                    r_app_version = self.normalize_app_version_of_chart_release(
                        v)
                    release_data['history'][k].update({
                        'human_version':
                        f'{r_app_version}_{parse_version(v["chart_metadata"]["version"])}',
                    })

            current_version = parse_version(
                release_data['chart_metadata']['version'])
            catalog_version_dict = update_catalog_config.get(
                release_data['catalog'],
                {}).get(release_data['catalog_train'],
                        {}).get(release_data['chart_metadata']['name'], {})
            latest_version = catalog_version_dict.get('chart_version',
                                                      current_version)
            latest_app_version = catalog_version_dict.get('app_version')
            release_data['update_available'] = latest_version > current_version

            app_version = self.normalize_app_version_of_chart_release(
                release_data)
            if release_data['chart_metadata']['name'] == 'ix-chart':
                # Latest app version for ix-chart remains same
                latest_app_version = app_version

            for key, app_v, c_v in (
                ('human_version', app_version, current_version),
                ('human_latest_version', latest_app_version, latest_version),
            ):
                if app_v:
                    release_data[key] = f'{app_v}_{c_v}'
                else:
                    release_data[key] = str(c_v)

            if retrieve_schema:
                chart_path = os.path.join(
                    release_data['path'], 'charts',
                    release_data['chart_metadata']['version'])
                if os.path.exists(chart_path):
                    release_data['chart_schema'] = await self.middleware.call(
                        'catalog.item_version_details', chart_path,
                        questions_context)
                else:
                    release_data['chart_schema'] = None

            release_data['container_images_update_available'] = any(
                details['update_available']
                for details in container_images_normalized.values())
            release_data['chart_metadata']['latest_chart_version'] = str(
                latest_version)
            release_data['portals'] = await self.middleware.call(
                'chart.release.retrieve_portals_for_chart_release',
                release_data, k8s_node_ip)

            if 'icon' not in release_data['chart_metadata']:
                release_data['chart_metadata']['icon'] = None

            releases.append(release_data)

        return filter_list(releases, filters, options)

    @private
    def normalize_app_version_of_chart_release(self, release_data):
        """Derive a human readable application version for a chart release.

        For `ix-chart` releases the version is taken from the configured
        container image: `repository:tag` when the tag is `latest`, otherwise
        just the tag. For every other chart it is the chart metadata's
        `appVersion`. Returns `None` when no version can be determined.
        """
        metadata = release_data['chart_metadata']
        if metadata['name'] != 'ix-chart':
            return metadata.get('appVersion')

        image_config = release_data['config'].get('image') or {}
        if any(key not in image_config for key in ('tag', 'repository')):
            return None

        # TODO: Let's see if we can find sane versioning for `latest` from upstream
        if image_config['tag'] == 'latest':
            return f'{image_config["repository"]}:{image_config["tag"]}'
        return image_config['tag']

    @private
    async def host_path_volumes(self, resources):
        """Collect deep copies of every host-path volume used by `resources`.

        `resources` is an iterable of k8s workload resources (e.g.
        deployments / statefulsets); a volume qualifies when it carries a
        non-empty `host_path.path` entry in its pod template spec.
        """
        collected = []
        for resource in resources:
            spec_volumes = resource['spec']['template']['spec']['volumes'] or []
            collected.extend(
                copy.deepcopy(volume) for volume in spec_volumes
                if (volume.get('host_path') or {}).get('path')
            )
        return collected

    @private
    async def normalise_and_validate_values(self,
                                            item_details,
                                            values,
                                            update,
                                            release_ds,
                                            release_data=None):
        """Validate `values` against the catalog item schema, then normalise.

        Returns whatever `chart.release.get_normalised_values` produces —
        callers unpack it as `(new_values, context)` where `context` carries
        the actions queued for `perform_actions`.
        """
        # Validation happens first; it raises on schema violations.
        dict_obj = await self.middleware.call(
            'chart.release.validate_values',
            item_details,
            values,
            update,
            release_data,
        )
        release_context = {
            'release': {
                'name': release_ds.split('/')[-1],
                'dataset': release_ds,
                'path': os.path.join('/mnt', release_ds),
            },
            'actions': [],
        }
        return await self.middleware.call(
            'chart.release.get_normalised_values', dict_obj, values, update,
            release_context)

    @private
    async def perform_actions(self, context):
        """Execute each queued chart-release action from `context['actions']`.

        Actions are dispatched in order as `chart.release.<method>(*args)` —
        presumably ix-volume creation/removal queued during normalisation
        (TODO confirm against `get_normalised_values`).
        """
        for action in context['actions']:
            method, args = action['method'], action['args']
            await self.middleware.call(f'chart.release.{method}', *args)

    @accepts(
        Dict(
            'chart_release_create',
            Dict('values', additional_attrs=True),
            Str('catalog', required=True),
            Str('item', required=True),
            Str('release_name',
                required=True,
                validators=[Match(r'^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')]),
            Str('train', default='charts'),
            Str('version', default='latest'),
        ))
    @job(lock=lambda args: f'chart_release_create_{args[0]["release_name"]}')
    async def do_create(self, job, data):
        """
        Create a chart release for a catalog item.

        `release_name` is the name which will be used to identify the created chart release.

        `catalog` is a valid catalog id where system will look for catalog `item` details.

        `train` is which train to look for under `catalog` i.e stable / testing etc.

        `version` specifies the catalog `item` version.

        `values` is configuration specified for the catalog item version in question which will be used to
        create the chart release.
        """
        # k8s must be up before anything else; duplicate names are rejected.
        await self.middleware.call('kubernetes.validate_k8s_setup')
        if await self.query([['id', '=', data['release_name']]]):
            raise CallError(
                f'Chart release with {data["release_name"]} already exists.',
                errno=errno.EEXIST)

        catalog = await self.middleware.call('catalog.get_instance',
                                             data['catalog'])
        item_details = await self.middleware.call(
            'catalog.get_item_details', data['item'], {
                'catalog': data['catalog'],
                'train': data['train'],
            })
        # Resolve 'latest' to a concrete catalog item version before lookup.
        version = data['version']
        if version == 'latest':
            version = await self.middleware.call(
                'chart.release.get_latest_version_from_item_versions',
                item_details['versions'])

        if version not in item_details['versions']:
            raise CallError(
                f'Unable to locate "{data["version"]}" catalog item version.',
                errno=errno.ENOENT)

        item_details = item_details['versions'][version]
        await self.middleware.call('catalog.version_supported_error_check',
                                   item_details)

        k8s_config = await self.middleware.call('kubernetes.config')
        release_ds = os.path.join(k8s_config['dataset'], 'releases',
                                  data['release_name'])
        # The idea is to validate the values provided first and if it passes our validation test, we
        # can move forward with setting up the datasets and installing the catalog item
        new_values = data['values']
        new_values, context = await self.normalise_and_validate_values(
            item_details, new_values, False, release_ds)

        job.set_progress(25, 'Initial Validation completed')

        # Now that we have completed validation for the item in question wrt values provided,
        # we will now perform the following steps
        # 1) Create release datasets
        # 2) Copy chart version into release/charts dataset
        # 3) Install the helm chart
        # 4) Create storage class
        try:
            job.set_progress(30, 'Creating chart release datasets')

            # Only create datasets which do not already exist (create is not
            # idempotent), mounting each newly created one.
            for dataset in await self.release_datasets(release_ds):
                if not await self.middleware.call('zfs.dataset.query',
                                                  [['id', '=', dataset]]):
                    await self.middleware.call('zfs.dataset.create', {
                        'name': dataset,
                        'type': 'FILESYSTEM'
                    })
                    await self.middleware.call('zfs.dataset.mount', dataset)

            job.set_progress(45, 'Created chart release datasets')

            # Copy the chart version contents into the release's charts dataset.
            chart_path = os.path.join('/mnt', release_ds, 'charts', version)
            await self.middleware.run_in_thread(
                lambda: shutil.copytree(item_details['location'], chart_path))

            job.set_progress(55, 'Completed setting up chart release')
            # Before finally installing the release, we will perform any actions which might be required
            # for the release to function like creating/deleting ix-volumes
            await self.perform_actions(context)

            namespace_name = get_namespace(data['release_name'])

            job.set_progress(65,
                             f'Creating {namespace_name} for chart release')
            # Namespace is labelled with catalog/train/branch so the release
            # can later be mapped back to its catalog source.
            namespace_body = {
                'metadata': {
                    'labels': {
                        'catalog': data['catalog'],
                        'catalog_train': data['train'],
                        'catalog_branch': catalog['branch'],
                    },
                    'name': namespace_name,
                }
            }
            if not await self.middleware.call(
                    'k8s.namespace.query',
                [['metadata.name', '=', namespace_name]]):
                await self.middleware.call('k8s.namespace.create',
                                           {'body': namespace_body})
            else:
                await self.middleware.call('k8s.namespace.update',
                                           namespace_name,
                                           {'body': namespace_body})

            job.set_progress(75, 'Installing Catalog Item')

            # Inject the install action context under CONTEXT_KEY_NAME so the
            # chart can distinguish install from update/rollback.
            new_values = await add_context_to_configuration(
                new_values, {
                    CONTEXT_KEY_NAME: {
                        **get_action_context(data['release_name']),
                        'operation': 'INSTALL',
                        'isInstall': True,
                    }
                }, self.middleware)

            await self.middleware.call(
                'chart.release.create_update_storage_class_for_chart_release',
                data['release_name'], os.path.join(release_ds, 'volumes'))

            # We will install the chart now and force the installation in an ix based namespace
            # https://github.com/helm/helm/issues/5465#issuecomment-473942223
            await self.middleware.call('chart.release.helm_action',
                                       data['release_name'], chart_path,
                                       new_values, 'install')
        except Exception:
            # Do a rollback here
            # Let's uninstall the release as well if it did get installed ( it is possible this might have happened )
            if await self.middleware.call('chart.release.query',
                                          [['id', '=', data['release_name']]]):
                delete_job = await self.middleware.call(
                    'chart.release.delete', data['release_name'])
                await delete_job.wait()
                if delete_job.error:
                    self.logger.error(
                        'Failed to uninstall helm chart release: %s',
                        delete_job.error)
            else:
                # Helm never installed anything; only tear down datasets /
                # storage class / namespace created above.
                await self.post_remove_tasks(data['release_name'])

            raise
        else:
            await self.middleware.call('chart.release.refresh_events_state',
                                       data['release_name'])
            job.set_progress(100, 'Chart release created')
            return await self.get_instance(data['release_name'])

    @accepts(Str('chart_release'),
             Dict(
                 'chart_release_update',
                 Dict('values', additional_attrs=True),
             ))
    @job(lock=lambda args: f'chart_release_update_{args[0]}')
    async def do_update(self, job, chart_release, data):
        """
        Update an existing chart release.

        `values` is configuration specified for the catalog item version in question which will be used to
        create the chart release.
        """
        release = await self.get_instance(chart_release)
        # Keep a pristine snapshot of the release for validation against the
        # previously applied configuration.
        original_release = copy.deepcopy(release)

        chart_path = os.path.join(release['path'], 'charts',
                                  release['chart_metadata']['version'])
        if not os.path.exists(chart_path):
            raise CallError(
                f'Unable to locate {chart_path!r} chart version for updating {chart_release!r} chart release',
                errno=errno.ENOENT)

        version_details = await self.middleware.call(
            'catalog.item_version_details', chart_path)

        # Overlay the requested values on top of the current configuration.
        merged_config = release['config']
        merged_config.update(data['values'])
        # We use update=False because we want defaults to be populated again if they are not present in the payload
        # Why this is not dangerous is because the defaults will be added only if they are not present/configured for
        # the chart release.
        merged_config, context = await self.normalise_and_validate_values(
            version_details,
            merged_config,
            False,
            release['dataset'],
            original_release,
        )

        job.set_progress(25, 'Initial Validation complete')

        await self.perform_actions(context)

        # Tag the configuration so the chart knows this is an update operation.
        merged_config = await add_context_to_configuration(
            merged_config, {
                CONTEXT_KEY_NAME: {
                    **get_action_context(chart_release),
                    'operation': 'UPDATE',
                    'isUpdate': True,
                }
            }, self.middleware)

        await self.middleware.call('chart.release.helm_action', chart_release,
                                   chart_path, merged_config, 'update')

        job.set_progress(90, 'Syncing secrets for chart release')
        await self.middleware.call('chart.release.sync_secrets_for_release',
                                   chart_release)
        await self.middleware.call('chart.release.refresh_events_state',
                                   chart_release)

        job.set_progress(100, 'Update completed for chart release')
        return await self.get_instance(chart_release)

    @accepts(Str('release_name'),
             Dict(
                 'options',
                 Bool('delete_unused_images', default=False),
             ))
    @job(lock=lambda args: f'chart_release_delete_{args[0]}')
    async def do_delete(self, job, release_name, options):
        """
        Delete existing chart release.

        This will delete the chart release from the kubernetes cluster and also remove any associated volumes / data.
        To clarify, host path volumes will not be deleted which live outside the chart release dataset.
        """
        # For delete we will uninstall the release first and then remove the associated datasets
        await self.middleware.call('kubernetes.validate_k8s_setup')
        chart_release = await self.get_instance(
            release_name, {'extra': {
                'retrieve_resources': True
            }})

        cp = await run([
            'helm', 'uninstall', release_name, '-n',
            get_namespace(release_name)
        ],
                       check=False)
        if cp.returncode:
            raise CallError(
                f'Unable to uninstall "{release_name}" chart release: {cp.stderr}'
            )

        job.set_progress(50, f'Uninstalled {release_name}')
        job.set_progress(75, f'Waiting for {release_name!r} pods to terminate')
        await self.middleware.call('chart.release.wait_for_pods_to_terminate',
                                   get_namespace(release_name))

        # Remove storage class, datasets and the k8s namespace.
        await self.post_remove_tasks(release_name, job)

        await self.middleware.call(
            'chart.release.remove_chart_release_from_events_state',
            release_name)
        await self.middleware.call(
            'chart.release.clear_chart_release_portal_cache', release_name)
        await self.middleware.call('alert.oneshot_delete',
                                   'ChartReleaseUpdate', release_name)
        if options['delete_unused_images']:
            job.set_progress(97, 'Deleting unused container images')
            failed = await self.middleware.call(
                'chart.release.delete_unused_app_images', chart_release)
            if failed:
                # BUG FIX: the previous code iterated `failed.items()` directly,
                # making `i` a dict key (not an index), so `i + 1` raised
                # TypeError while composing the message. Number the entries
                # with enumerate(); each `entry` is a (key, value) pair from
                # the failure mapping.
                msg = '\n'
                for i, entry in enumerate(failed.items()):
                    msg += f'{i + 1}) {entry[0]} ({entry[1]})\n'
                raise CallError(
                    f'{release_name!r} was deleted but unable to delete following images:{msg}'
                )

        job.set_progress(100, f'{release_name!r} chart release deleted')
        return True

    @private
    async def post_remove_tasks(self, release_name, job=None):
        """Tear down release resources after the helm release itself is gone.

        Removes the storage class and ZFS datasets first, then deletes the
        release's kubernetes namespace.
        """
        await self.remove_storage_class_and_dataset(release_name, job)
        namespace = get_namespace(release_name)
        await self.middleware.call('k8s.namespace.delete', namespace)

    @private
    async def remove_storage_class_and_dataset(self, release_name, job=None):
        """Best-effort removal of a release's storage class, PVs, openebs ZV
        entries and its ZFS dataset tree.

        `job`, when supplied, receives progress updates. Storage class and ZV
        deletion failures are logged but do not abort the cleanup.
        """
        storage_class_name = get_storage_class_name(release_name)
        if await self.middleware.call(
                'k8s.storage_class.query',
            [['metadata.name', '=', storage_class_name]]):
            if job:
                job.set_progress(85,
                                 f'Removing {release_name!r} storage class')
            try:
                await self.middleware.call('k8s.storage_class.delete',
                                           storage_class_name)
            except Exception as e:
                # Non-fatal: continue cleaning up remaining resources.
                self.logger.error('Failed to remove %r storage class: %s',
                                  storage_class_name, e)

        k8s_config = await self.middleware.call('kubernetes.config')
        release_ds = os.path.join(k8s_config['dataset'], 'releases',
                                  release_name)

        # If the chart release was consuming any PV's, they would have to be manually removed from k8s database
        # because of chart release reclaim policy being retain
        pvc_volume_ds = os.path.join(release_ds, 'volumes')
        for pv in await self.middleware.call('k8s.pv.query', [[
                'spec.csi.volume_attributes.openebs\\.io/poolname', '=',
                pvc_volume_ds
        ]]):
            await self.middleware.call('k8s.pv.delete', pv['metadata']['name'])

        failed_zfs_volumes = []
        # We would like to delete openebs zfs volumes ( not actual zfs volumes ) in openebs namespace
        for zfs_volume in await self.middleware.call(
                'k8s.zv.query', [['spec.poolName', '=', pvc_volume_ds]]):
            try:
                await self.middleware.call('k8s.zv.delete',
                                           zfs_volume['metadata']['name'])
            except Exception:
                # It's perfectly fine if this fails as functionality wise this change is just cosmetic
                # and is essentially cleaning up leftover zfs volume entries from k8s db
                failed_zfs_volumes.append(zfs_volume['metadata']['name'])

        if failed_zfs_volumes:
            self.logger.error(
                'Failed to delete %r zfs volumes when deleting %r chart release',
                ', '.join(failed_zfs_volumes), release_name)

        # Finally, recursively destroy the release's dataset tree if present.
        if await self.middleware.call('zfs.dataset.query',
                                      [['id', '=', release_ds]]):
            if job:
                job.set_progress(95, f'Removing {release_ds!r} dataset')
            await self.middleware.call('zfs.dataset.delete', release_ds, {
                'recursive': True,
                'force': True
            })

    @private
    async def release_datasets(self, release_dataset):
        """Return the release dataset followed by its expected child datasets
        (charts, volumes and volumes/ix_volumes)."""
        children = ('charts', 'volumes', 'volumes/ix_volumes')
        datasets = [release_dataset]
        datasets.extend(os.path.join(release_dataset, child) for child in children)
        return datasets

    @private
    async def get_chart_namespace_prefix(self):
        """Return the constant prefix used for chart release k8s namespaces."""
        return CHART_NAMESPACE_PREFIX
示例#11
0
class NetworkConfigurationService(ConfigService):
    """Manage the system-wide (global) network configuration."""

    class Config:
        namespace = 'network.configuration'
        datastore = 'network.globalconfiguration'
        datastore_prefix = 'gc_'
        datastore_extend = 'network.configuration.network_config_extend'

    def network_config_extend(self, data):
        # Stored as space-separated strings in the datastore; expose as lists.
        data['domains'] = data['domains'].split()
        data['netwait_ip'] = data['netwait_ip'].split()
        return data

    async def validate_general_settings(self, data, schema):
        """Validate nameservers, ipv4 gateway, netwait IPs and domains.

        Returns a `ValidationErrors` instance (possibly empty) instead of
        raising, so the caller decides when to raise.
        """
        verrors = ValidationErrors()

        for key in [key for key in data.keys() if 'nameserver' in key]:
            nameserver_value = data.get(key)
            if nameserver_value:
                try:
                    nameserver_ip = ipaddress.ip_address(nameserver_value)
                except ValueError as e:
                    verrors.add(f'{schema}.{key}', str(e))
                else:
                    if nameserver_ip.is_loopback:
                        verrors.add(f'{schema}.{key}',
                                    'Loopback is not a valid nameserver')
                    elif nameserver_ip.is_unspecified:
                        verrors.add(
                            f'{schema}.{key}',
                            'Unspecified addresses are not valid as nameservers'
                        )
                    elif nameserver_ip.version == 4:
                        # Reject the broadcast address and link-local range.
                        if nameserver_value == '255.255.255.255':
                            verrors.add(
                                f'{schema}.{key}',
                                'This is not a valid nameserver address')
                        elif nameserver_value.startswith('169.254'):
                            verrors.add(
                                f'{schema}.{key}',
                                '169.254/16 subnet is not valid for nameserver'
                            )

                    # nameserverN may only be filled when nameserver1..N-1 are.
                    nameserver_number = int(key[-1])
                    for i in range(nameserver_number - 1, 0, -1):
                        if f'nameserver{i}' in data and not data[f'nameserver{i}']:
                            # BUG FIX: message previously said "namserver".
                            verrors.add(
                                f'{schema}.{key}',
                                f'Must fill out nameserver{i} before filling out {key}'
                            )

        ipv4_gateway_value = data.get('ipv4gateway')
        if ipv4_gateway_value:
            if not await self.middleware.call(
                    'routes.ipv4gw_reachable',
                    ipaddress.ip_address(ipv4_gateway_value).exploded):
                verrors.add(f'{schema}.ipv4gateway',
                            f'Gateway {ipv4_gateway_value} is unreachable')

        netwait_ip = data.get('netwait_ip')
        if netwait_ip:
            for ip in netwait_ip:
                try:
                    ipaddress.ip_address(ip)
                except ValueError as e:
                    verrors.add(f'{schema}.netwait_ip', str(e))

        if data.get('domains'):
            if len(data.get('domains')) > 5:
                verrors.add(f'{schema}.domains',
                            'No more than 5 additional domains are allowed')

        return verrors

    # BUG FIX: the hostname/domain validators previously used the character
    # class [a-zA-Z\.\-\0-9]. `\0` is an octal escape for NUL, making `\0-9`
    # a range from 0x00 to '9' that accepted control characters, spaces and
    # punctuation. The intended class is letters, digits, dot and hyphen.
    @accepts(
        Dict(
            'global_configuration',
            Str('hostname', validators=[Match(r'^[a-zA-Z.\-0-9]+$')]),
            Str('hostname_b', validators=[Match(r'^[a-zA-Z.\-0-9]+$')]),
            Str('hostname_virtual',
                validators=[Match(r'^[a-zA-Z.\-0-9]+$')]),
            Str('domain', validators=[Match(r'^[a-zA-Z.\-0-9]+$')]),
            List('domains', items=[Str('domains')]),
            IPAddr('ipv4gateway'),
            IPAddr('ipv6gateway', allow_zone_index=True),
            IPAddr('nameserver1'),
            IPAddr('nameserver2'),
            IPAddr('nameserver3'),
            Str('httpproxy'),
            Bool('netwait_enabled'),
            List('netwait_ip', items=[Str('netwait_ip')]),
            Str('hosts'),
        ))
    async def do_update(self, data):
        """Update the global network configuration and reload affected
        services (hostname, resolvconf, routing, NFS/mountd) as needed."""
        config = await self.config()
        new_config = config.copy()

        # Virtual/failover hostnames only apply to licensed HA systems.
        if not (not await self.middleware.call('system.is_freenas')
                and await self.middleware.call('notifier.failover_licensed')):
            for key in ['hostname_virtual', 'hostname_b']:
                data.pop(key, None)

        new_config.update(data)
        verrors = await self.validate_general_settings(
            data, 'global_configuration_update')
        if verrors:
            raise verrors

        # Persist list fields as space separated strings (datastore format).
        new_config['domains'] = ' '.join(new_config.get('domains', []))
        new_config['netwait_ip'] = ' '.join(new_config.get('netwait_ip', []))

        await self.middleware.call('datastore.update',
                                   'network.globalconfiguration', config['id'],
                                   new_config, {'prefix': 'gc_'})

        # Convert back to lists for change detection below.
        new_config['domains'] = new_config['domains'].split()
        new_config['netwait_ip'] = new_config['netwait_ip'].split()

        # Compare list fields as sets (order is irrelevant); pop them so the
        # remaining flat fields can be compared via items().
        netwait_ip_set = set(new_config.pop('netwait_ip', []))
        old_netwait_ip_set = set(config.pop('netwait_ip', []))
        data_changed = netwait_ip_set != old_netwait_ip_set

        if not data_changed:
            domains_set = set(new_config.pop('domains', []))
            old_domains_set = set(config.pop('domains', []))
            data_changed = domains_set != old_domains_set

        if (data_changed
                or len(set(new_config.items()) ^ set(config.items())) > 0):
            services_to_reload = ['hostname']
            if (new_config['domain'] != config['domain']
                    or new_config['nameserver1'] != config['nameserver1']
                    or new_config['nameserver2'] != config['nameserver2']
                    or new_config['nameserver3'] != config['nameserver3']):
                services_to_reload.append('resolvconf')

            if (new_config['ipv4gateway'] != config['ipv4gateway']
                    or new_config['ipv6gateway'] != config['ipv6gateway']):
                services_to_reload.append('networkgeneral')
                await self.middleware.call('routes.sync')

            if ('hostname_virtual' in new_config.keys()
                    and new_config['hostname_virtual'] !=
                    config['hostname_virtual']):
                srv_service_obj = await self.middleware.call(
                    'datastore.query', 'service.service',
                    [('srv_service', '=', 'nfs')])
                nfs_object = await self.middleware.call(
                    'datastore.query',
                    'services.nfs',
                )
                if len(srv_service_obj) > 0 and len(nfs_object) > 0:
                    srv_service_obj = srv_service_obj[0]
                    nfs_object = nfs_object[0]

                    # NOTE(review): attribute access on datastore.query
                    # results (srv_enable / nfs_srv_v4 / nfs_srv_v4_krb)
                    # assumes ORM-style rows — confirm against the
                    # datastore plugin's return type.
                    if ((srv_service_obj and srv_service_obj.srv_enable) and
                        (nfs_object and (nfs_object.nfs_srv_v4
                                         and nfs_object.nfs_srv_v4_krb))):
                        await self.middleware.call('service.restart',
                                                   'ix-nfsd',
                                                   {'onetime': False})
                        services_to_reload.append('mountd')

            for service_to_reload in services_to_reload:
                await self.middleware.call('service.reload', service_to_reload,
                                           {'onetime': False})

            if new_config['httpproxy'] != config['httpproxy']:
                await self.middleware.call(
                    'core.event_send', 'network.config', 'CHANGED',
                    {'data': {
                        'httpproxy': new_config['httpproxy']
                    }})

        return await self.config()
示例#12
0
File: ftp.py  Project: jiangge/freenas
class FTPService(SystemServiceService):
    """Configuration service for the FTP (proftpd) system service."""

    class Config:
        service = "ftp"
        datastore_prefix = "ftp_"
        datastore_extend = "ftp.ftp_extend"

    @private
    async def ftp_extend(self, data):
        # Flatten the joined certificate row to just its id for the API.
        if data['ssltls_certificate']:
            data['ssltls_certificate'] = data['ssltls_certificate']['id']
        return data

    @accepts(
        Dict('ftp_update',
             Int('port', validators=[Range(min=1, max=65535)]),
             Int('clients', validators=[Range(min=1, max=10000)]),
             Int('ipconnections', validators=[Range(min=0, max=1000)]),
             Int('loginattempt', validators=[Range(min=0, max=1000)]),
             Int('timeout', validators=[Range(min=0, max=10000)]),
             Bool('rootlogin'),
             Bool('onlyanonymous'),
             Dir('anonpath', null=True),
             Bool('onlylocal'),
             Str('banner'),
             Str('filemask', validators=[Match(r"^[0-7]{3}$")]),
             Str('dirmask', validators=[Match(r"^[0-7]{3}$")]),
             Bool('fxp'),
             Bool('resume'),
             Bool('defaultroot'),
             Bool('ident'),
             Bool('reversedns'),
             Str('masqaddress'),
             Int('passiveportsmin',
                 validators=[Or(Exact(0), Range(min=1024, max=65535))]),
             Int('passiveportsmax',
                 validators=[Or(Exact(0), Range(min=1024, max=65535))]),
             Int('localuserbw', validators=[Range(min=0)]),
             Int('localuserdlbw', validators=[Range(min=0)]),
             Int('anonuserbw', validators=[Range(min=0)]),
             Int('anonuserdlbw', validators=[Range(min=0)]),
             Bool('tls'),
             Str('tls_policy',
                 enum=[
                     "on", "off", "data", "!data", "auth", "ctrl", "ctrl+data",
                     "ctrl+!data", "auth+data", "auth+!data"
                 ]),
             Bool('tls_opt_allow_client_renegotiations'),
             Bool('tls_opt_allow_dot_login'),
             Bool('tls_opt_allow_per_user'),
             Bool('tls_opt_common_name_required'),
             Bool('tls_opt_enable_diags'),
             Bool('tls_opt_export_cert_data'),
             Bool('tls_opt_no_cert_request'),
             Bool('tls_opt_no_empty_fragments'),
             Bool('tls_opt_no_session_reuse_required'),
             Bool('tls_opt_stdenvvars'),
             Bool('tls_opt_dns_name_required'),
             Bool('tls_opt_ip_address_required'),
             Int('ssltls_certificate', null=True),
             Str('options'),
             update=True))
    async def do_update(self, data):
        """Update the FTP service configuration.

        Validates passive port range, anonymous login path, TLS certificate
        and masquerade address before persisting; starts the `ssl` service
        when TLS is newly enabled. Returns the updated configuration.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        # passiveportsmin/max must be both zero (disabled) or both non-zero.
        if not ((new["passiveportsmin"] == 0) == (new["passiveportsmax"]
                                                  == 0)):
            # BUG FIX: schema path previously lacked the "ftp_update." prefix
            # used by every other validation error in this method, so the
            # error did not attach to the field.
            verrors.add(
                "ftp_update.passiveportsmin",
                "passiveportsmin and passiveportsmax should be both zero or non-zero"
            )
        if not ((new["passiveportsmin"] == 0 and new["passiveportsmax"] == 0)
                or (new["passiveportsmax"] > new["passiveportsmin"])):
            verrors.add(
                "ftp_update.passiveportsmax",
                "When specified, should be greater than passiveportsmin")

        if new["onlyanonymous"] and not new["anonpath"]:
            verrors.add("ftp_update.anonpath",
                        "This field is required for anonymous login")

        if new["anonpath"]:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   "ftp_update.anonpath",
                                                   new["anonpath"])

        if new["tls"]:
            if not new["ssltls_certificate"]:
                verrors.add(
                    "ftp_update.ssltls_certificate",
                    "Please provide a valid certificate id when TLS is enabled"
                )
            else:
                verrors.extend((await self.middleware.call(
                    "certificate.cert_services_validation",
                    new["ssltls_certificate"], "ftp_update.ssltls_certificate",
                    False)))

        if new["masqaddress"]:
            await resolve_hostname(self.middleware, verrors,
                                   "ftp_update.masqaddress",
                                   new["masqaddress"])

        if verrors:
            raise verrors

        await self._update_service(old, new)

        # TLS was just turned on; make sure the ssl service is running.
        if not old['tls'] and new['tls']:
            await self.middleware.call('service.start', 'ssl')

        return new
示例#13
0
class BootEnvService(CRUDService):
    """CRUD service for ZFS boot environments, backed by zectl/beadm."""

    # zectl manages BEs on Linux (SCALE); beadm on FreeBSD (CORE).
    BE_TOOL = 'zectl' if osc.IS_LINUX else 'beadm'

    # Human-readable size suffixes; each step is a factor of 1024.
    SIZE_SUFFIXES = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z')

    @staticmethod
    def _format_size(rawspace):
        """
        Render `rawspace` (bytes) as a rounded human-readable string, e.g. '1.5G'.

        This replaces an inline if/elif ladder whose exabyte branch compared
        `2**60 <= rawspace < 2**60` — an empty range that could never match —
        so any value >= 1 EiB was divided by 2**60 (the 'E' divisor) but
        labelled 'Z'.
        """
        for exponent, suffix in enumerate(BootEnvService.SIZE_SUFFIXES[:-1]):
            if rawspace < 1024 ** (exponent + 1):
                value = rawspace if exponent == 0 else rawspace / 1024 ** exponent
                break
        else:
            # >= 1 ZiB (practically unreachable, but now labelled correctly).
            suffix = BootEnvService.SIZE_SUFFIXES[-1]
            value = rawspace / 1024 ** 7
        return f'{round(float(value), 2)}{suffix}'

    @filterable
    def query(self, filters=None, options=None):
        """
        Query all Boot Environments with `query-filters` and `query-options`.
        """
        results = []

        cp = subprocess.run([self.BE_TOOL, 'list', '-H'],
                            capture_output=True,
                            text=True)
        datasets_origins = [
            d['properties']['origin']['parsed']
            for d in self.middleware.call_sync('zfs.dataset.query')
        ]
        boot_pool = self.middleware.call_sync('boot.pool_name')
        for line in cp.stdout.strip().split('\n'):
            fields = line.split('\t')
            name = fields[0]
            if len(fields) > 5 and fields[5] != '-':
                # A sixth column (when present and not '-') carries a display
                # name that is preferred over the raw BE name.
                name = fields[5]
            be = {
                'id': name,
                'realname': fields[0],
                'name': name,
                'active': fields[1],
                'activated': 'n' in fields[1].lower(),
                'can_activate': False,
                # Space is computed from ZFS below on Linux; beadm reports it.
                'space': None if osc.IS_LINUX else fields[3],
                'mountpoint': fields[2],
                # Column layout differs between zectl and beadm output.
                'created': datetime.strptime(fields[3 if osc.IS_LINUX else 4],
                                             '%Y-%m-%d %H:%M'),
                'keep': None,
                'rawspace': None,
            }

            ds = self.middleware.call_sync('zfs.dataset.query', [
                ('id', '=', rf'{boot_pool}/ROOT/{fields[0]}'),
            ], {'extra': {
                'snapshots': True
            }})
            if ds:
                ds = ds[0]
                snapshot = None
                origin = ds['properties']['origin']['parsed']
                if '@' in origin:
                    # Cloned BE: look up the snapshot it originates from.
                    snapshot = self.middleware.call_sync(
                        'zfs.snapshot.query', [('id', '=', origin)])
                    if snapshot:
                        snapshot = snapshot[0]
                keep_prop = f'{self.BE_TOOL}:keep'
                if keep_prop in ds['properties']:
                    if ds['properties'][keep_prop]['value'] == 'True':
                        be['keep'] = True
                    elif ds['properties'][keep_prop]['value'] == 'False':
                        be['keep'] = False

                # When a BE is deleted, following actions happen
                # 1) It's descendants ( if any ) are promoted once
                # 2) BE is deleted
                # 3) Filesystems dependent on BE's origin are promoted
                # 4) Origin is deleted
                #
                # Now we would like to find out the space which will be freed when a BE is removed.
                # We classify a BE as of being 2 types,
                # 1) BE without descendants
                # 2) BE with descendants
                #
                # For (1), space freed is "usedbydataset" property and space freed by it's "origin".
                # For (2), space freed is "usedbydataset" property and space freed by it's "origin" but this cannot
                # actively determined because all the descendants are promoted once for this BE and at the end origin
                # of current BE would be determined by last descendant promoted. So we ignore this for now and rely
                # only on the space it is currently consuming as a best effort to predict.
                # There is also "usedbysnaps" property, for that we will retrieve all snapshots of the dataset,
                # find if any of them do not have a dataset cloned, that space will also be freed when we delete
                # this dataset. And we will also factor in the space consumed by children.

                be['rawspace'] = ds['properties']['usedbydataset'][
                    'parsed'] + ds['properties']['usedbychildren']['parsed']

                children = False
                for snap in ds['snapshots']:
                    if snap['name'] not in datasets_origins:
                        # No dependent clone; snapshot space is freed too.
                        be['rawspace'] += snap['properties']['used']['parsed']
                    else:
                        children = True

                if snapshot and not children:
                    # This indicates the current BE is a leaf and it is safe to add the BE's origin
                    # space to the space freed when it is deleted.
                    be['rawspace'] += snapshot['properties']['used']['parsed']

                be['space'] = self._format_size(be['rawspace'])

                # SCALE BEs carry this property; `activate` refuses them on CORE.
                be['can_activate'] = 'truenas:kernel_version' not in ds[
                    'properties']

            results.append(be)
        return filter_list(results, filters, options)

    @item_method
    @accepts(Str('id'))
    def activate(self, oid):
        """
        Activates boot environment `id`.

        Raises `CallError` if the BE was created by TrueNAS SCALE or if the
        underlying tool fails.
        """
        be = self.middleware.call_sync('bootenv.get_instance', oid)
        if not be['can_activate']:
            raise CallError(
                'TrueNAS SCALE BEs cannot be activated from TrueNAS 12, you need to run the update process again'
            )

        try:
            subprocess.run([self.BE_TOOL, 'activate', oid],
                           capture_output=True,
                           text=True,
                           check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to activate BE: {cpe.stdout.strip()}')
        else:
            return True

    @item_method
    @accepts(Str('id'), Dict(
        'attributes',
        Bool('keep', default=False),
    ))
    async def set_attribute(self, oid, attrs):
        """
        Sets attributes boot environment `id`.

        Currently only `keep` attribute is allowed.
        """
        boot_pool = await self.middleware.call('boot.pool_name')
        boot_env = await self.get_instance(oid)
        dsname = f'{boot_pool}/ROOT/{boot_env["realname"]}'
        ds = await self.middleware.call('zfs.dataset.query',
                                        [('id', '=', dsname)])
        if not ds:
            raise CallError(f'BE {oid!r} does not exist.', errno.ENOENT)
        # Persist the flag as a ZFS user property on the BE dataset.
        await self.middleware.call(
            'zfs.dataset.update', dsname, {
                'properties': {
                    f'{self.BE_TOOL}:keep': {
                        'value': str(attrs['keep'])
                    }
                },
            })
        return True

    @accepts(
        Dict(
            'bootenv_create',
            Str('name', required=True, validators=[Match(RE_BE_NAME)]),
            Str('source'),
        ))
    async def do_create(self, data):
        """
        Create a new boot environment using `name`.

        If a new boot environment is desired which is a clone of another boot environment, `source` can be passed.
        Then, a new boot environment of `name` is created using boot environment `source` by cloning it.

        Ensure that `name` and `source` are valid boot environment names.
        """
        verrors = ValidationErrors()
        await self._clean_be_name(verrors, 'bootenv_create', data['name'])
        verrors.check()

        args = [self.BE_TOOL, 'create']
        source = data.get('source')
        if source:
            # zectl expects the full dataset path of the source BE.
            args += [
                '-e',
                os.path.join(await self.middleware.call('boot.pool_name'),
                             'ROOT', source) if osc.IS_LINUX else source
            ]
        args.append(data['name'])
        try:
            await run(args, encoding='utf8', check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to create boot environment: {cpe.stdout}')
        return data['name']

    @accepts(Str('id'),
             Dict(
                 'bootenv_update',
                 Str('name', required=True, validators=[Match(RE_BE_NAME)]),
             ))
    async def do_update(self, oid, data):
        """
        Update `id` boot environment name with a new provided valid `name`.
        """
        # Validates that the BE exists (raises otherwise).
        await self._get_instance(oid)

        verrors = ValidationErrors()
        await self._clean_be_name(verrors, 'bootenv_update', data['name'])
        verrors.check()

        try:
            await run(self.BE_TOOL,
                      'rename',
                      oid,
                      data['name'],
                      encoding='utf8',
                      check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to update boot environment: {cpe.stdout}')
        return data['name']

    async def _clean_be_name(self, verrors, schema, name):
        """Add a validation error to `verrors` if BE `name` already exists."""
        # The BE name column differs between zectl (1st) and beadm (7th).
        beadm_names = (await (await Popen(
            f"{self.BE_TOOL} list -H | awk '{{print ${1 if osc.IS_LINUX else 7}}}'",
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )).communicate())[0].decode().split('\n')
        if name in filter(None, beadm_names):
            verrors.add(f'{schema}.name', f'The name "{name}" already exists',
                        errno.EEXIST)

    @accepts(Str('id'))
    @job(lock=lambda args: f'bootenv_delete_{args[0]}')
    async def do_delete(self, job, oid):
        """
        Delete `id` boot environment. This removes the clone from the system.
        """
        be = await self._get_instance(oid)
        try:
            await run(self.BE_TOOL,
                      'destroy',
                      '-F',
                      be['id'],
                      encoding='utf8',
                      check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to delete boot environment: {cpe.stdout}')
        return True
示例#14
0
文件: zfs.py 项目: bmhughes/freenas
class ZFSSnapshot(CRUDService):
    """CRUD service over ZFS snapshots: query, create, update, delete, clone, rollback."""

    class Config:
        datastore_primary_key_type = 'string'
        namespace = 'zfs.snapshot'
        process_pool = True
        cli_namespace = 'storage.snapshot'

    @filterable
    def query(self, filters, options):
        """
        Query all ZFS Snapshots with `query-filters` and `query-options`.
        """
        # Special case for faster listing of snapshot names (#53149)
        if (options and options.get('select') == ['name']
                and (not filters
                     or filter_getattrs(filters).issubset({'name', 'pool'}))):
            with libzfs.ZFS() as zfs:
                snaps = zfs.snapshots_serialized(['name'])

            if filters or len(options) > 1:
                return filter_list(snaps, filters, options)
            return snaps

        extra = copy.deepcopy(options['extra'])
        properties = extra.get('properties')
        with libzfs.ZFS() as zfs:
            # Handle `id` filter to avoid getting all snapshots first
            kwargs = dict(holds=False, mounted=False, props=properties)
            if filters and len(filters) == 1 and len(
                    filters[0]) == 3 and filters[0][0] in (
                        'id', 'name') and filters[0][1] == '=':
                kwargs['datasets'] = [filters[0][2]]

            snapshots = zfs.snapshots_serialized(**kwargs)

        # FIXME: awful performance with hundreds/thousands of snapshots
        # `select` is applied manually below, so keep it out of filter_list.
        select = options.pop('select', None)
        result = filter_list(snapshots, filters, options)

        if not select or 'retention' in select:
            # Retention information comes from zettarepl, not from ZFS itself.
            if isinstance(result, list):
                result = self.middleware.call_sync(
                    'zettarepl.annotate_snapshots', result)
            elif isinstance(result, dict):
                result = self.middleware.call_sync(
                    'zettarepl.annotate_snapshots', [result])[0]

        if select:
            if isinstance(result, list):
                result = [{k: v
                           for k, v in item.items() if k in select}
                          for item in result]
            elif isinstance(result, dict):
                result = {k: v for k, v in result.items() if k in select}

        return result

    @accepts(
        Dict(
            'snapshot_create',
            Str('dataset', required=True, empty=False),
            Str('name', empty=False),
            Str('naming_schema',
                empty=False,
                validators=[ReplicationSnapshotNamingSchema()]),
            Bool('recursive', default=False),
            Bool('vmware_sync', default=False),
            Dict('properties', additional_attrs=True),
        ))
    def do_create(self, data):
        """
        Take a snapshot from a given dataset.

        Exactly one of `name` or `naming_schema` must be supplied.
        """

        dataset = data['dataset']
        recursive = data['recursive']
        properties = data['properties']

        verrors = ValidationErrors()

        if 'name' in data and 'naming_schema' in data:
            verrors.add(
                'snapshot_create.naming_schema',
                'You can\'t specify name and naming schema at the same time')
        elif 'name' in data:
            name = data['name']
        elif 'naming_schema' in data:
            # We can't do `strftime` here because we are in the process pool and `TZ` environment variable update
            # is not propagated here.
            name = self.middleware.call_sync('replication.new_snapshot_name',
                                             data['naming_schema'])
        else:
            verrors.add('snapshot_create.naming_schema',
                        'You must specify either name or naming schema')

        if verrors:
            raise verrors

        vmware_context = None
        if data['vmware_sync']:
            # Quiesce VMware VMs before snapshotting so the snapshot is consistent.
            vmware_context = self.middleware.call_sync('vmware.snapshot_begin',
                                                       dataset, recursive)

        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(dataset)
                ds.snapshot(f'{dataset}@{name}',
                            recursive=recursive,
                            fsopts=properties)

                if vmware_context and vmware_context['vmsynced']:
                    ds.properties['freenas:vmsynced'] = libzfs.ZFSUserProperty(
                        'Y')

            # Lazy %-style args avoid formatting when the level is disabled.
            self.logger.info('Snapshot taken: %s@%s', dataset, name)
        except libzfs.ZFSException as err:
            self.logger.error('Failed to snapshot %s@%s: %s', dataset, name,
                              err)
            raise CallError(f'Failed to snapshot {dataset}@{name}: {err}')
        else:
            return self.middleware.call_sync('zfs.snapshot.get_instance',
                                             f'{dataset}@{name}')
        finally:
            if vmware_context:
                self.middleware.call_sync('vmware.snapshot_end',
                                          vmware_context)

    @accepts(Str('id'),
             Dict(
                 'snapshot_update',
                 List(
                     'user_properties_update',
                     items=[
                         Dict(
                             'user_property',
                             Str('key',
                                 required=True,
                                 validators=[Match(r'.*:.*')]),
                             Str('value'),
                             Bool('remove'),
                         )
                     ],
                 ),
             ))
    def do_update(self, snap_id, data):
        """
        Update user properties of snapshot `snap_id`.

        For each entry of `user_properties_update`, `value` and `remove` are
        mutually exclusive.
        """
        verrors = ValidationErrors()
        props = data['user_properties_update']
        for index, prop in enumerate(props):
            if prop.get('remove') and 'value' in prop:
                verrors.add(
                    f'snapshot_update.user_properties_update.{index}.remove',
                    'Must not be set when value is specified')
        verrors.check()

        try:
            with libzfs.ZFS() as zfs:
                snap = zfs.get_snapshot(snap_id)
                user_props = self.middleware.call_sync(
                    'pool.dataset.get_create_update_user_props', props, True)
                self.middleware.call_sync(
                    'zfs.dataset.update_zfs_object_props', user_props, snap)
        except libzfs.ZFSException as e:
            raise CallError(str(e))
        else:
            return self.middleware.call_sync('zfs.snapshot.get_instance',
                                             snap_id)

    @accepts(
        Dict('snapshot_remove', Str('dataset', required=True),
             Str('name', required=True), Bool('defer_delete')))
    def remove(self, data):
        """
        Remove a snapshot from a given dataset.

        Returns:
            bool: True if succeed otherwise False.
        """
        self.logger.debug(
            'zfs.snapshot.remove is deprecated, use zfs.snapshot.delete')
        snapshot_name = data['dataset'] + '@' + data['name']
        try:
            self.do_delete(snapshot_name,
                           {'defer': data.get('defer_delete') or False})
        except Exception:
            # Deprecated API reports failure via return value, not exception.
            return False
        return True

    @accepts(
        Str('id'),
        Dict(
            'options',
            Bool('defer', default=False),
            Bool('recursive', default=False),
        ),
    )
    def do_delete(self, id, options):
        """
        Delete snapshot of name `id`.

        `options.defer` will defer the deletion of snapshot.
        """
        try:
            with libzfs.ZFS() as zfs:
                snap = zfs.get_snapshot(id)
                snap.delete(defer=options['defer'],
                            recursive=options['recursive'])
        except libzfs.ZFSException as e:
            raise CallError(str(e))
        else:
            return True

    @accepts(
        Dict('snapshot_clone', Str('snapshot', required=True, empty=False),
             Str('dataset_dst', required=True, empty=False),
             Dict(
                 'dataset_properties',
                 additional_attrs=True,
             )))
    def clone(self, data):
        """
        Clone a given snapshot to a new dataset.

        Returns:
            bool: True if succeed otherwise False.
        """

        snapshot = data.get('snapshot', '')
        dataset_dst = data.get('dataset_dst', '')
        props = data['dataset_properties']

        try:
            with libzfs.ZFS() as zfs:
                snp = zfs.get_snapshot(snapshot)
                snp.clone(dataset_dst, props)
                dataset = zfs.get_dataset(dataset_dst)
                if dataset.type.name == 'FILESYSTEM':
                    dataset.mount_recursive()
            self.logger.info('Cloned snapshot %s to dataset %s', snapshot,
                             dataset_dst)
            return True
        except libzfs.ZFSException as err:
            self.logger.error('%s', err)
            raise CallError(f'Failed to clone snapshot: {err}')

    @accepts(
        Str('id'),
        Dict(
            'options',
            Bool('recursive', default=False),
            Bool('recursive_clones', default=False),
            Bool('force', default=False),
            Bool('recursive_rollback', default=False),
        ),
    )
    def rollback(self, id, options):
        """
        Rollback to a given snapshot `id`.

        `options.recursive` will destroy any snapshots and bookmarks more recent than the one
        specified.

        `options.recursive_clones` is just like `recursive` but will also destroy any clones.

        `options.force` will force unmount of any clones.

        `options.recursive_rollback` will do a complete recursive rollback of each child snapshots for `id`. If
        any child does not have specified snapshot, this operation will fail.
        """
        args = []
        if options['force']:
            args += ['-f']
        if options['recursive']:
            args += ['-r']
        if options['recursive_clones']:
            args += ['-R']

        if options['recursive_rollback']:
            dataset, snap_name = id.rsplit('@', 1)
            # Pair `dataset` and every descendant with the requested snapshot name.
            datasets = {
                f'{ds["id"]}@{snap_name}'
                for ds in self.middleware.call_sync('zfs.dataset.query', [[
                    'OR', [['id', '^', f'{dataset}/'], ['id', '=', dataset]]
                ]])
            }

            # Only roll back snapshots that actually exist.
            for snap in filter(
                    lambda sn: self.middleware.call_sync(
                        'zfs.snapshot.query', [['id', '=', sn]]), datasets):
                self.rollback_impl(args, snap)

        else:
            self.rollback_impl(args, id)

    @private
    def rollback_impl(self, args, id):
        """Invoke `zfs rollback` with pre-built flag list `args` on snapshot `id`."""
        try:
            subprocess.run(
                ['zfs', 'rollback'] + args + [id],
                text=True,
                capture_output=True,
                check=True,
            )
        except subprocess.CalledProcessError as e:
            raise CallError(f'Failed to rollback snapshot: {e.stderr.strip()}')
示例#15
0
class S3Service(SystemServiceService):
    """Configuration service for the minio-backed S3 service."""

    class Config:
        service = "s3"
        datastore_prefix = "s3_"
        datastore_extend = "s3.config_extend"
        cli_namespace = "service.s3"

    ENTRY = Dict(
        's3_entry',
        Str('bindip', required=True),
        Int('bindport', validators=[Range(min=1, max=65535)], required=True),
        Str('access_key', max_length=20, required=True),
        Str('secret_key', max_length=40, required=True),
        Bool('browser', required=True),
        Str('storage_path', required=True),
        Int('certificate', null=True, required=True),
        Int('id', required=True),
    )

    @accepts()
    @returns(Dict('s3_bindip_choices', additional_attrs=True))
    async def bindip_choices(self):
        """
        Return ip choices for S3 service to use.
        """
        return {
            d['address']: d['address']
            for d in await self.middleware.call('interface.ip_in_use', {
                'static': True,
                'any': True
            })
        }

    @private
    async def config_extend(self, s3):
        # The datastore column is `disks`; the API exposes it as `storage_path`.
        s3['storage_path'] = s3.pop('disks', None)
        s3.pop('mode', None)
        # Flatten the certificate relation to its id.
        if s3.get('certificate'):
            s3['certificate'] = s3['certificate']['id']
        return s3

    @accepts(
        Patch(
            's3_entry',
            's3_update',
            ('edit', {
                'name':
                'access_key',
                'method':
                lambda x: setattr(x, 'validators', [
                    Match(r'^\w+$',
                          explanation=
                          'Should only contain alphanumeric characters')
                ])
            }),
            ('edit', {
                'name':
                'secret_key',
                'method':
                lambda x: setattr(x, 'validators', [
                    Match(r'^\w+$',
                          explanation=
                          'Should only contain alphanumeric characters')
                ])
            }),
            ('rm', {
                'name': 'id'
            }),
            ('attr', {
                'update': True
            }),
        ))
    async def do_update(self, data):
        """
        Update S3 Service Configuration.

        `access_key` must only contain alphanumeric characters and should be between 5 and 20 characters.

        `secret_key` must only contain alphanumeric characters and should be between 8 and 40 characters.

        `browser` when set, enables the web user interface for the S3 Service.

        `certificate` is a valid certificate id which exists in the system. This is used to enable secure
        S3 connections.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        for attr, minlen, maxlen in (
            ('access_key', 5, 20),
            ('secret_key', 8, 40),
        ):
            curlen = len(new.get(attr, ''))
            if curlen < minlen or curlen > maxlen:
                verrors.add(
                    f's3_update.{attr}',
                    f'Attribute should be {minlen} to {maxlen} in length')

        if not new['storage_path'] and await self.middleware.call(
                'service.started', 's3'):
            verrors.add('s3_update.storage_path',
                        'S3 must be stopped before unsetting storage path.')
        elif new['storage_path']:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   's3_update.storage_path',
                                                   new['storage_path'])

            if not verrors:
                if new['storage_path'].rstrip('/').count('/') < 3:
                    verrors.add(
                        's3_update.storage_path',
                        'Top level datasets are not allowed. i.e /mnt/tank/dataset is allowed'
                    )
                else:
                    # If the storage_path does not exist, let's create it.
                    # exist_ok=True avoids the check-then-create race of the
                    # previous os.path.exists() guard.
                    os.makedirs(new['storage_path'], exist_ok=True)

        if new['certificate']:
            verrors.extend(
                (await
                 self.middleware.call('certificate.cert_services_validation',
                                      new['certificate'],
                                      's3_update.certificate', False)))

        if new['bindip'] not in await self.bindip_choices():
            verrors.add('s3_update.bindip',
                        'Please provide a valid ip address')

        if verrors:
            raise verrors

        # Map the API field back to the datastore column name.
        new['disks'] = new.pop('storage_path')

        await self._update_service(old, new)

        # minio must own its storage directory; fix ownership if needed.
        if new['disks'] and (await self.middleware.call(
                'filesystem.stat', new['disks']))['user'] != 'minio':
            await self.middleware.call(
                'filesystem.setperm', {
                    'path':
                    new['disks'],
                    'mode':
                    '775',
                    'uid': (await self.middleware.call(
                        'dscache.get_uncached_user', 'minio'))['pw_uid'],
                    'gid': (await self.middleware.call(
                        'dscache.get_uncached_group', 'minio'))['gr_gid'],
                    'options': {
                        'recursive': True,
                        'traverse': False
                    }
                })

        return await self.config()
示例#16
0
class BootEnvService(CRUDService):
    """CRUD service for boot environments backed by the legacy `Update` module."""

    @filterable
    def query(self, filters=None, options=None):
        """
        Query all Boot Environments with `query-filters` and `query-options`.
        """
        clones = []
        for entry in Update.ListClones():
            # Expose the clone name as the record's `id`.
            entry['id'] = entry['name']
            clones.append(entry)
        return filter_list(clones, filters, options)

    @item_method
    @accepts(Str('id'))
    def activate(self, oid):
        """
        Activates boot environment `id`.
        """
        return Update.ActivateClone(oid)

    @item_method
    @accepts(Str('id'), Dict(
        'attributes',
        Bool('keep', default=False),
    ))
    def set_attribute(self, oid, attrs):
        """
        Sets attributes boot environment `id`.

        Currently only `keep` attribute is allowed.
        """
        found = Update.FindClone(oid)
        return Update.CloneSetAttr(found, **attrs)

    @accepts(
        Dict(
            'bootenv_create',
            Str('name', required=True, validators=[Match(RE_BE_NAME)]),
            Str('source'),
        ))
    def do_create(self, data):
        """
        Create a new boot environment using `name`.

        If a new boot environment is desired which is a clone of another boot environment, `source` can be passed.
        Then, a new boot environment of `name` is created using boot environment `source` by cloning it.

        Ensure that `name` and `source` are valid boot environment names.
        """
        verrors = ValidationErrors()
        self._clean_be_name(verrors, 'bootenv_create', data['name'])
        if verrors:
            raise verrors

        create_kwargs = {}
        if data.get('source'):
            create_kwargs['bename'] = data['source']
        if Update.CreateClone(data['name'], **create_kwargs) is False:
            raise CallError('Failed to create boot environment')
        return data['name']

    @accepts(Str('id'),
             Dict(
                 'bootenv_update',
                 Str('name', required=True, validators=[Match(RE_BE_NAME)]),
             ))
    def do_update(self, oid, data):
        """
        Update `id` boot environment name with a new provided valid `name`.
        """

        verrors = ValidationErrors()
        self._clean_be_name(verrors, 'bootenv_update', data['name'])
        if verrors:
            raise verrors

        renamed = Update.RenameClone(oid, data['name'])
        if not renamed:
            raise CallError('Failed to update boot environment')
        return data['name']

    def _clean_be_name(self, verrors, schema, name):
        """Add a validation error to `verrors` if BE `name` is already taken."""
        # beadm prints the BE name in the seventh column.
        proc = subprocess.Popen(
            "beadm list | awk '{print $7}'",
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            encoding='utf8',
        )
        stdout, _ = proc.communicate()
        existing = [entry for entry in stdout.split('\n') if entry]
        if name in existing:
            verrors.add(f'{schema}.name', f'The name "{name}" already exists',
                        errno.EEXIST)

    @accepts(Str('id'))
    @job(lock=lambda args: f'bootenv_delete_{args[0]}')
    def do_delete(self, job, oid):
        """
        Delete `id` boot environment. This removes the clone from the system.
        """
        return Update.DeleteClone(oid)
示例#17
0
class CatalogService(CRUDService):
    """CRUD service managing application catalogs (collections of chart items)."""

    class Config:
        datastore = 'services.catalog'
        datastore_extend = 'catalog.catalog_extend'
        datastore_extend_context = 'catalog.catalog_extend_context'
        datastore_primary_key = 'label'
        cli_namespace = 'app.catalog'

    @private
    async def catalog_extend_context(self, extra):
        # Shared context for catalog_extend: where catalogs live on disk
        # (under the kubernetes dataset when one is configured, otherwise a
        # temporary location) plus the normalised `extra` query options.
        k8s_dataset = (await
                       self.middleware.call('kubernetes.config'))['dataset']
        catalogs_dir = os.path.join(
            '/mnt', k8s_dataset,
            'catalogs') if k8s_dataset else f'{TMP_IX_APPS_DIR}/catalogs'
        return {
            'catalogs_dir': catalogs_dir,
            'extra': extra or {},
        }

    @private
    async def catalog_extend(self, catalog, context):
        # Normalise a datastore row: `id` mirrors `label` and `location`
        # points at the on-disk clone of the catalog repository/branch.
        catalog.update({
            'location':
            os.path.join(
                context['catalogs_dir'],
                convert_repository_to_path(catalog['repository'],
                                           catalog['branch'])),
            'id':
            catalog['label'],
        })
        extra = context['extra']
        if extra.get('item_details'):
            try:
                catalog['trains'] = await self.middleware.call(
                    'catalog.items',
                    catalog['label'],
                    {'cache': extra.get('cache', True)},
                )
            except Exception:
                # We do not want this to fail as it will block `catalog.query` otherwise. The error would
                # already be logged as this is being called periodically as well.
                catalog.update({
                    'trains': {},
                    'healthy': False,
                })
            else:
                # A catalog is healthy only when every app in every train is.
                catalog['healthy'] = all(
                    app['healthy'] for train in catalog['trains']
                    for app in catalog['trains'][train].values())
        return catalog

    @private
    async def common_validation(self, catalog, schema, data):
        # Preferred trains must be a non-empty subset of the trains actually
        # present in the catalog; raise ValidationErrors otherwise.
        found_trains = set(catalog['trains'])
        diff = set(data['preferred_trains']) - found_trains
        verrors = ValidationErrors()
        if diff:
            verrors.add(
                f'{schema}.preferred_trains',
                f'{", ".join(diff)} trains were not found in catalog.')
        if not data['preferred_trains']:
            verrors.add(
                f'{schema}.preferred_trains',
                'At least 1 preferred train must be specified for a catalog.')

        verrors.check()

    @accepts(
        Dict(
            'catalog_create',
            Bool('force', default=False),
            List('preferred_trains'),
            Str('label',
                required=True,
                empty=False,
                validators=[Match(r'^\w+[\w.-]*$')],
                max_length=60),
            Str('repository', required=True, empty=False),
            Str('branch', default='master'),
            register=True,
        ))
    async def do_create(self, data):
        """
        `catalog_create.preferred_trains` specifies trains which will be displayed in the UI directly for a user.
        """
        verrors = ValidationErrors()
        # We normalize the label
        data['label'] = data['label'].upper()

        if await self.query([['id', '=', data['label']]]):
            verrors.add('catalog_create.label',
                        'A catalog with specified label already exists',
                        errno=errno.EEXIST)

        if await self.query([['repository', '=', data['repository']],
                             ['branch', '=', data['branch']]]):
            # Flag both fields since it is the combination that must be unique.
            for k in ('repository', 'branch'):
                verrors.add(
                    f'catalog_create.{k}',
                    'A catalog with same repository/branch already exists',
                    errno=errno.EEXIST)

        verrors.check()

        if not data['preferred_trains']:
            data['preferred_trains'] = ['charts']

        if not data.pop('force'):
            # We will validate the catalog now to ensure it's valid wrt contents / format
            path = os.path.join(
                TMP_IX_APPS_DIR, 'validate_catalogs',
                convert_repository_to_path(data['repository'], data['branch']))
            try:
                await self.middleware.call('catalog.update_git_repository', {
                    **data, 'location': path
                }, True)
                await self.middleware.call(
                    'catalog.validate_catalog_from_path', path)
                await self.common_validation(
                    {
                        'trains':
                        await self.middleware.call('catalog.get_trains', path)
                    }, 'catalog_create', data)

            finally:
                # Always clean up the temporary validation clone.
                await self.middleware.run_in_thread(shutil.rmtree,
                                                    path,
                                                    ignore_errors=True)

        await self.middleware.call('datastore.insert', self._config.datastore,
                                   data)

        # Kick off a sync of the newly added catalog in the background.
        asyncio.ensure_future(
            self.middleware.call('catalog.sync', data['label']))

        return await self.get_instance(data['label'])

    @accepts(Str('id'),
             Dict('catalog_update', List('preferred_trains'), update=True))
    async def do_update(self, id, data):
        """
        Update `id` catalog's preferred trains.
        """
        # item_details are required so common_validation can check the
        # requested preferred trains against the trains actually present.
        catalog = await self.query([['id', '=', id]], {
            'extra': {
                'item_details': True
            },
            'get': True
        })
        await self.common_validation(catalog, 'catalog_update', data)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, data)

        return await self.get_instance(id)

    @accepts(
        Str('id'), )
    def do_delete(self, id):
        """
        Delete `id` catalog. Builtin catalogs cannot be deleted.
        """
        catalog = self.middleware.call_sync('catalog.get_instance', id)
        if catalog['builtin']:
            raise CallError('Builtin catalogs cannot be deleted')

        ret = self.middleware.call_sync('datastore.delete',
                                        self._config.datastore, id)

        # Remove the on-disk clone of the catalog repository as well.
        if os.path.exists(catalog['location']):
            shutil.rmtree(catalog['location'], ignore_errors=True)

        # Let's delete any unhealthy alert if we had one
        self.middleware.call_sync('alert.oneshot_delete', 'CatalogNotHealthy',
                                  id)

        return ret

    @private
    async def official_catalog_label(self):
        # Label of the built-in official catalog.
        return OFFICIAL_LABEL
示例#18
0
class TFTPService(SystemServiceService):
    """Configuration service for the TFTP daemon."""

    class Config:
        service = "tftp"
        datastore_prefix = "tftp_"
        cli_namespace = "service.tftp"

    # API schema for the TFTP configuration entry.
    ENTRY = Dict(
        'tftp_entry',
        Bool('newfiles', required=True),
        Str('directory', required=True),
        Str('host', validators=[IpAddress()], required=True),
        Int('port', validators=[Port()], required=True),
        Str('options', required=True),
        Str('umask', required=True, validators=[Match(r'^[0-7]{3}$')]),
        Str('username', required=True),
        Int('id', required=True),
    )

    @accepts()
    @returns(Dict('tftp_host_choices', additional_attrs=True))
    async def host_choices(self):
        """
        Return host choices for TFTP service to use.
        """
        in_use = await self.middleware.call('interface.ip_in_use', {
            'static': True,
            'any': True
        })
        choices = {}
        for details in in_use:
            choices[details['address']] = details['address']
        return choices

    @accepts(
        Patch(
            'tftp_entry',
            'tftp_update',
            ('rm', {
                'name': 'id'
            }),
            ('replace', Dir('directory')),
            ('attr', {
                'update': True
            }),
        ))
    async def do_update(self, data):
        """
        Update TFTP Service Configuration.

        `newfiles` when set enables network devices to send files to the system.

        `username` sets the user account which will be used to access `directory`. It should be ensured `username`
        has access to `directory`.
        """
        old = await self.config()
        new = {**old, **data}

        verrors = ValidationErrors()

        directory = new["directory"]
        if directory:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   "tftp_update.directory",
                                                   directory)

        valid_hosts = await self.host_choices()
        if new['host'] not in valid_hosts:
            verrors.add('tftp_update.host',
                        'Please provide a valid ip address')

        verrors.check()

        await self._update_service(old, new)

        return await self.config()
示例#19
0
class ChartReleaseService(CRUDService):
    """CRUD service for chart releases (applications deployed from catalog items)."""

    class Config:
        datastore_primary_key_type = 'string'
        namespace = 'chart.release'
        cli_namespace = 'app.chart_release'

    # API schema describing a chart release entry as returned by this service.
    ENTRY = Dict(
        'chart_release_entry',
        Str('name', required=True),
        Dict('info', additional_attrs=True),
        Dict('config', additional_attrs=True),
        List('hooks'),
        Int('version', required=True, description='Version of chart release'),
        Str('namespace', required=True),
        Dict(
            'chart_metadata',
            Str('name', required=True, description='Name of application'),
            Str('version', required=True,
                description='Version of application'),
            Str('latest_chart_version',
                required=True,
                description='Latest available version of application'),
            additional_attrs=True,
        ),
        Str('id', required=True),
        Str('catalog', required=True),
        Str('catalog_train', required=True),
        Str('path', required=True),
        Str('dataset', required=True),
        Str('status', required=True),
        List('used_ports',
             items=[
                 Dict(
                     'port',
                     Int('port', required=True),
                     Str('protocol', required=True),
                 )
             ],
             required=True),
        Dict(
            'pod_status',
            Int('available', required=True),
            Int('desired', required=True),
            required=True,
        ),
        Bool('update_available', required=True),
        Str('human_version',
            required=True,
            description='Human friendly version identifier for chart release'),
        Str('human_latest_version',
            required=True,
            description=
            'Human friendly latest available version identifier for chart release'
            ),
        Bool(
            'container_images_update_available',
            required=True,
            description=
            'Will be set when any image(s) being used in the chart release has a newer version available'
        ),
        Dict('portals', additional_attrs=True),
        Dict('chart_schema', null=True, additional_attrs=True),
        Dict('history', additional_attrs=True),
        Dict(
            'resources',
            Dict('storage_class', additional_attrs=True),
            List('persistent_volumes'),
            List('host_path_volumes'),
            Dict('container_images', additional_attrs=True),
            List('truenas_certificates', items=[Int('certificate_id')]),
            List('truenas_certificate_authorities',
                 items=[Int('certificate_authority_id')]),
            # One list per tracked kubernetes resource type (see Resources enum).
            *[List(r.value) for r in Resources],
        ),
    )

    @filterable
    async def query(self, filters, options):
        """
        Query available chart releases.

        `query-options.extra.retrieve_resources` is a boolean when set will retrieve existing kubernetes resources
        in the chart namespace.

        `query-options.extra.history` is a boolean when set will retrieve all chart version upgrades
        for a chart release.

        `query-options.extra.include_chart_schema` is a boolean when set will retrieve the schema being used by
        the chart release in question.

        `query-options.extra.resource_events` is a boolean when set will retrieve individual events of each resource.
        This only has effect if `query-options.extra.retrieve_resources` is set.
        """
        k8s_config = await self.middleware.call('kubernetes.config')
        if not await self.middleware.call(
                'service.started', 'kubernetes') or not k8s_config['dataset']:
            # We use filter_list here to ensure that `options` are respected, options like get: true
            return filter_list([], filters, options)

        # Build {catalog_label: {train: {item: {'chart_version', 'app_version'}}}}
        # used further down to decide whether each release has an update available.
        update_catalog_config = {}
        catalogs = await self.middleware.call(
            'catalog.query', [], {'extra': {
                'item_details': True
            }})
        # Index local container images by repo tag; the first image seen for a
        # tag wins (used for per-release image update checks).
        container_images = {}
        for image in await self.middleware.call('container.image.query'):
            for tag in image['repo_tags']:
                if not container_images.get(tag):
                    container_images[tag] = image

        for catalog in catalogs:
            update_catalog_config[catalog['label']] = {}
            for train in catalog['trains']:
                train_data = {}
                for catalog_item in catalog['trains'][train]:
                    max_version = catalog['trains'][train][catalog_item][
                        'latest_version'] or '0.0.0'
                    app_version = catalog['trains'][train][catalog_item][
                        'latest_app_version'] or '0.0.0'
                    train_data[catalog_item] = {
                        'chart_version': parse_version(max_version),
                        'app_version': app_version,
                    }

                update_catalog_config[catalog['label']][train] = train_data

        k8s_node_ip = await self.middleware.call('kubernetes.node_ip')
        options = options or {}
        extra = copy.deepcopy(options.get('extra', {}))
        retrieve_schema = extra.get('include_chart_schema')
        get_resources = extra.get('retrieve_resources')
        get_history = extra.get('history')
        if retrieve_schema:
            questions_context = await self.middleware.call(
                'catalog.get_normalised_questions_context')
        else:
            questions_context = None

        # When querying a single release by id, narrow all kubernetes resource
        # queries down to that release's namespace.
        if filters and len(filters) == 1 and filters[0][:2] == ['id', '=']:
            extra['namespace_filter'] = [
                'metadata.namespace', '=',
                f'{CHART_NAMESPACE_PREFIX}{filters[0][-1]}'
            ]
            resources_filters = [extra['namespace_filter']]
        else:
            resources_filters = [[
                'metadata.namespace', '^', CHART_NAMESPACE_PREFIX
            ]]

        # Ports exposed through NodePort services, grouped by release name.
        ports_used = collections.defaultdict(list)
        for node_port_svc in await self.middleware.call(
                'k8s.service.query',
            [['spec.type', '=', 'NodePort']] + resources_filters):
            release_name = node_port_svc['metadata']['namespace'][
                len(CHART_NAMESPACE_PREFIX):]
            ports_used[release_name].extend([{
                'port': p['node_port'],
                'protocol': p['protocol']
            } for p in node_port_svc['spec']['ports']])

        storage_classes = collections.defaultdict(lambda: None)
        for storage_class in await self.middleware.call(
                'k8s.storage_class.query'):
            storage_classes[storage_class['metadata']['name']] = storage_class

        persistent_volumes = collections.defaultdict(list)

        # If the chart release was consuming any PV's, they would have to be manually removed from k8s database
        # because of chart release reclaim policy being retain
        for pv in await self.middleware.call('k8s.pv.query', [[
                'spec.csi.volume_attributes.openebs\\.io/poolname', '^',
                f'{os.path.join(k8s_config["dataset"], "releases")}/'
        ]]):
            dataset = pv['spec']['csi']['volume_attributes'][
                'openebs.io/poolname']
            # Pool name layout puts the release name in the 4th path component.
            rl = dataset.split('/', 4)
            if len(rl) > 4:
                persistent_volumes[rl[3]].append(pv)

        resources = {r.value: collections.defaultdict(list) for r in Resources}
        workload_status = collections.defaultdict(lambda: {
            'desired': 0,
            'available': 0
        })

        # Gather every tracked kubernetes resource per release and tally
        # desired/ready replica counts for deployments and statefulsets.
        for resource in Resources:
            for r_data in await self.middleware.call(
                    f'k8s.{resource.name.lower()}.query', resources_filters,
                {'extra': {
                    'events': extra.get('resource_events', False)
                }}):
                release_name = r_data['metadata']['namespace'][
                    len(CHART_NAMESPACE_PREFIX):]
                resources[resource.value][release_name].append(r_data)
                if resource in (Resources.DEPLOYMENT, Resources.STATEFULSET):
                    workload_status[release_name]['desired'] += (
                        r_data['status']['replicas'] or 0)
                    workload_status[release_name]['available'] += (
                        r_data['status']['ready_replicas'] or 0)

        release_secrets = await self.middleware.call(
            'chart.release.releases_secrets', extra)
        releases = []
        for name, release in release_secrets.items():
            config = {}
            release_data = release['releases'].pop(0)
            cur_version = release_data['chart_metadata']['version']

            # Merge configs across revisions of the current chart version;
            # later entries in the chain override earlier ones.
            for rel_data in filter(
                    lambda r: r['chart_metadata']['version'] == cur_version,
                    itertools.chain(reversed(release['releases']),
                                    [release_data])):
                config.update(rel_data['config'])

            # Derive release status from desired vs available workload replicas.
            pods_status = workload_status[name]
            pod_diff = pods_status['available'] - pods_status['desired']
            status = 'ACTIVE'
            if pod_diff == 0 and pods_status['desired'] == 0:
                status = 'STOPPED'
            elif pod_diff < 0:
                status = 'DEPLOYING'

            # We will retrieve all host ports being used
            for pod in filter(lambda p: p['status']['phase'] == 'Running',
                              resources[Resources.POD.value][name]):
                for container in pod['spec']['containers']:
                    ports_used[name].extend([{
                        'port': p['host_port'],
                        'protocol': p['protocol']
                    } for p in (container['ports'] or []) if p['host_port']])

            release_data.update({
                'path':
                os.path.join('/mnt', k8s_config['dataset'], 'releases', name),
                'dataset':
                os.path.join(k8s_config['dataset'], 'releases', name),
                'config':
                config,
                'status':
                status,
                'used_ports':
                ports_used[name],
                'pod_status':
                pods_status,
            })

            release_resources = {
                'storage_class':
                storage_classes[get_storage_class_name(name)],
                'persistent_volumes':
                persistent_volumes[name],
                'host_path_volumes':
                await
                self.host_path_volumes(resources[Resources.POD.value][name]),
                **{r.value: resources[r.value][name]
                   for r in Resources},
            }
            # Attach container image details (for every unique image referenced
            # by the release's deployments/statefulsets) plus certificate ids.
            release_resources = {
                **release_resources,
                'container_images': {
                    i_name: {
                        'id':
                        image_details.get('id'),
                        'update_available':
                        image_details.get('update_available', False)
                    }
                    for i_name, image_details in map(
                        lambda i: (i, container_images.get(i, {})),
                        list(
                            set(c['image']
                                for workload_type in ('deployments',
                                                      'statefulsets') for
                                workload in release_resources[workload_type]
                                for c in workload['spec']['template']['spec']
                                ['containers'])))
                },
                'truenas_certificates': [
                    v['id'] for v in release_data['config'].get(
                        'ixCertificates', {}).values()
                ],
                'truenas_certificate_authorities': [
                    v['id'] for v in release_data['config'].get(
                        'ixCertificateAuthorities', {}).values()
                ],
            }
            if get_resources:
                release_data['resources'] = release_resources

            if get_history:
                release_data['history'] = release['history']

            # Compare installed chart version with the catalog's latest to
            # flag update availability.
            current_version = parse_version(
                release_data['chart_metadata']['version'])
            catalog_version_dict = update_catalog_config.get(
                release_data['catalog'],
                {}).get(release_data['catalog_train'],
                        {}).get(release_data['chart_metadata']['name'], {})
            latest_version = catalog_version_dict.get('chart_version',
                                                      current_version)
            latest_app_version = catalog_version_dict.get('app_version')
            release_data['update_available'] = latest_version > current_version

            app_version = None
            if release_data['chart_metadata']['name'] == 'ix-chart':
                image_config = release_data['config'].get('image') or {}
                if all(k in image_config for k in ('tag', 'repository')):
                    # TODO: Let's see if we can find sane versioning for `latest` from upstream
                    if image_config['tag'] == 'latest':
                        app_version = f'{image_config["repository"]}:{image_config["tag"]}'
                    else:
                        app_version = image_config['tag']
                # Latest app version for ix-chart remains same
                latest_app_version = app_version
            else:
                app_version = release_data['chart_metadata'].get('appVersion')

            # Human readable versions are "<app version>_<chart version>",
            # falling back to the chart version alone.
            for key, app_v, c_v in (
                ('human_version', app_version, current_version),
                ('human_latest_version', latest_app_version, latest_version),
            ):
                if app_v:
                    release_data[key] = f'{app_v}_{c_v}'
                else:
                    release_data[key] = str(c_v)

            if retrieve_schema:
                chart_path = os.path.join(
                    release_data['path'], 'charts',
                    release_data['chart_metadata']['version'])
                if os.path.exists(chart_path):
                    release_data['chart_schema'] = await self.middleware.call(
                        'catalog.item_version_details', chart_path,
                        questions_context)
                else:
                    release_data['chart_schema'] = None

            release_data['container_images_update_available'] = any(
                details['update_available']
                for details in release_resources['container_images'].values())
            release_data['chart_metadata']['latest_chart_version'] = str(
                latest_version)
            release_data['portals'] = await self.middleware.call(
                'chart.release.retrieve_portals_for_chart_release',
                release_data, k8s_node_ip)

            if 'icon' not in release_data['chart_metadata']:
                release_data['chart_metadata']['icon'] = None

            releases.append(release_data)

        return filter_list(releases, filters, options)

    @private
    def retrieve_portals_for_chart_release(self, release_data, node_ip=None):
        """
        Resolve the `portals` section of a chart release's questions.yaml
        into concrete URLs, substituting each portal tag with its value.

        Returns {} when the release has no questions.yaml or no portals.
        """
        questions_yaml_path = os.path.join(
            release_data['path'], 'charts',
            release_data['chart_metadata']['version'], 'questions.yaml')
        if not os.path.exists(questions_yaml_path):
            return {}

        with open(questions_yaml_path, 'r') as f:
            portals = yaml.safe_load(f.read()).get('portals') or {}

        if not portals:
            return portals

        if not node_ip:
            node_ip = self.middleware.call_sync('kubernetes.node_ip')

        def tag_func(key):
            return self.parse_tag(release_data, key, node_ip)

        cleaned_portals = {}
        for portal_type, schema in portals.items():
            base_path = schema.get('path') or '/'
            # Resolve all tags first, dropping any that fail to resolve.
            protocols = [v for v in map(tag_func, schema['protocols']) if v]
            hosts = [v for v in map(tag_func, schema['host']) if v]
            ports = [v for v in map(tag_func, schema['ports']) if v]
            cleaned_portals[portal_type] = [
                f'{protocol}://{host}:{port}{base_path}'
                for protocol in protocols for host in hosts for port in ports
            ]

        return cleaned_portals

    @private
    def parse_tag(self, release_data, tag, node_ip):
        """
        Resolve a portal tag to its concrete value.

        Kubernetes resource tags are expanded first; `$node_ip` resolves to
        `node_ip` and `$variable-<path>` tags are looked up in the release's
        config. Any other tag is returned as-is; unresolvable tags yield None.
        """
        resolved = self.parse_k8s_resource_tag(release_data, tag)
        if not resolved:
            return None
        if resolved == '$node_ip':
            return node_ip
        if resolved.startswith('$variable-'):
            variable_path = resolved[len('$variable-'):]
            return get(release_data['config'], variable_path)
        return resolved

    @private
    def parse_k8s_resource_tag(self, release_data, tag):
        # Format expected here is "$kubernetes-resource_RESOURCE-TYPE_RESOURCE-NAME_KEY-NAME"
        if not tag.startswith('$kubernetes-resource'):
            return tag

        if tag.count('_') < 3:
            return

        _, resource_type, resource_name, key = tag.split('_', 3)
        if resource_type not in ('configmap', 'secret'):
            return

        resource = self.middleware.call_sync(
            f'k8s.{resource_type}.query',
            [['metadata.namespace', '=', release_data['namespace']],
             ['metadata.name', '=', resource_name]])
        if not resource or 'data' not in resource[0] or not isinstance(
                resource[0]['data'].get(key), (int, str)):
            # Chart creator did not create the resource or we have a malformed
            # secret/configmap, nothing we can do on this end
            return
        else:
            value = resource[0]['data'][key]

        if resource_type == 'secret':
            value = base64.b64decode(value)

        return str(value)

    @private
    async def host_path_volumes(self, pods):
        host_path_volumes = []
        for pod in pods:
            for volume in filter(lambda v: v.get('host_path'),
                                 pod['spec']['volumes'] or []):
                host_path_volumes.append(copy.deepcopy(volume))
        return host_path_volumes

    @private
    async def normalise_and_validate_values(self,
                                            item_details,
                                            values,
                                            update,
                                            release_ds,
                                            release_data=None):
        """
        Validate `values` against the catalog item's schema, then normalise
        them for the release rooted at dataset `release_ds`.
        """
        dict_obj = await self.middleware.call(
            'chart.release.validate_values',
            item_details,
            values,
            update,
            release_data,
        )
        release_name = release_ds.split('/')[-1]
        context = {
            'release': {
                'name': release_name,
                'dataset': release_ds,
                'path': os.path.join('/mnt', release_ds),
            },
            'actions': [],
        }
        return await self.middleware.call(
            'chart.release.get_normalised_values', dict_obj, values, update,
            context)

    @private
    async def perform_actions(self, context):
        """
        Execute, in order, the deferred actions queued in `context['actions']`
        during value normalisation (e.g. creating/deleting ix-volumes).
        """
        for action in context['actions']:
            await self.middleware.call(f'chart.release.{action["method"]}',
                                       *action['args'])

    @accepts(
        Dict(
            'chart_release_create',
            Dict('values', additional_attrs=True),
            Str('catalog', required=True),
            Str('item', required=True),
            Str('release_name',
                required=True,
                validators=[Match(r'^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')]),
            Str('train', default='charts'),
            Str('version', default='latest'),
        ))
    @job(lock=lambda args: f'chart_release_create_{args[0]["release_name"]}')
    async def do_create(self, job, data):
        """
        Create a chart release for a catalog item.

        `release_name` is the name which will be used to identify the created chart release.

        `catalog` is a valid catalog id where system will look for catalog `item` details.

        `train` is which train to look for under `catalog` i.e stable / testing etc.

        `version` specifies the catalog `item` version.

        `values` is configuration specified for the catalog item version in question which will be used to
        create the chart release.
        """
        await self.middleware.call('kubernetes.validate_k8s_setup')
        if await self.query([['id', '=', data['release_name']]]):
            raise CallError(
                f'Chart release with {data["release_name"]} already exists.',
                errno=errno.EEXIST)

        catalog = await self.middleware.call('catalog.get_instance',
                                             data['catalog'])
        item_details = await self.middleware.call(
            'catalog.get_item_details', data['item'], {
                'catalog': data['catalog'],
                'train': data['train'],
            })
        version = data['version']
        if version == 'latest':
            version = await self.middleware.call(
                'chart.release.get_latest_version_from_item_versions',
                item_details['versions'])

        if version not in item_details['versions']:
            raise CallError(
                f'Unable to locate "{data["version"]}" catalog item version.',
                errno=errno.ENOENT)

        item_details = item_details['versions'][version]
        await self.middleware.call('catalog.version_supported_error_check',
                                   item_details)

        k8s_config = await self.middleware.call('kubernetes.config')
        release_ds = os.path.join(k8s_config['dataset'], 'releases',
                                  data['release_name'])
        # The idea is to validate the values provided first and if it passes our validation test, we
        # can move forward with setting up the datasets and installing the catalog item
        new_values = data['values']
        new_values, context = await self.normalise_and_validate_values(
            item_details, new_values, False, release_ds)

        job.set_progress(25, 'Initial Validation completed')

        # Now that we have completed validation for the item in question wrt values provided,
        # we will now perform the following steps
        # 1) Create release datasets
        # 2) Copy chart version into release/charts dataset
        # 3) Install the helm chart
        # 4) Create storage class
        try:
            job.set_progress(30, 'Creating chart release datasets')

            for dataset in await self.release_datasets(release_ds):
                if not await self.middleware.call('zfs.dataset.query',
                                                  [['id', '=', dataset]]):
                    await self.middleware.call('zfs.dataset.create', {
                        'name': dataset,
                        'type': 'FILESYSTEM'
                    })
                    await self.middleware.call('zfs.dataset.mount', dataset)

            job.set_progress(45, 'Created chart release datasets')

            chart_path = os.path.join('/mnt', release_ds, 'charts', version)
            await self.middleware.run_in_thread(
                lambda: shutil.copytree(item_details['location'], chart_path))

            job.set_progress(55, 'Completed setting up chart release')
            # Before finally installing the release, we will perform any actions which might be required
            # for the release to function like creating/deleting ix-volumes
            await self.perform_actions(context)

            namespace_name = get_namespace(data['release_name'])

            job.set_progress(65,
                             f'Creating {namespace_name} for chart release')
            namespace_body = {
                'metadata': {
                    'labels': {
                        'catalog': data['catalog'],
                        'catalog_train': data['train'],
                        'catalog_branch': catalog['branch'],
                    },
                    'name': namespace_name,
                }
            }
            if not await self.middleware.call(
                    'k8s.namespace.query',
                [['metadata.name', '=', namespace_name]]):
                await self.middleware.call('k8s.namespace.create',
                                           {'body': namespace_body})
            else:
                await self.middleware.call('k8s.namespace.update',
                                           namespace_name,
                                           {'body': namespace_body})

            job.set_progress(75, 'Installing Catalog Item')

            new_values[CONTEXT_KEY_NAME].update({
                **get_action_context(data['release_name']),
                'operation':
                'INSTALL',
                'isInstall':
                True,
            })

            await self.middleware.call(
                'chart.release.create_update_storage_class_for_chart_release',
                data['release_name'], os.path.join(release_ds, 'volumes'))

            # We will install the chart now and force the installation in an ix based namespace
            # https://github.com/helm/helm/issues/5465#issuecomment-473942223
            await self.middleware.call('chart.release.helm_action',
                                       data['release_name'], chart_path,
                                       new_values, 'install')
        except Exception:
            # Do a rollback here
            # Let's uninstall the release as well if it did get installed ( it is possible this might have happened )
            if await self.middleware.call('chart.release.query',
                                          [['id', '=', data['release_name']]]):
                delete_job = await self.middleware.call(
                    'chart.release.delete', data['release_name'])
                await delete_job.wait()
                if delete_job.error:
                    self.logger.error(
                        'Failed to uninstall helm chart release: %s',
                        delete_job.error)
            else:
                await self.post_remove_tasks(data['release_name'])

            raise
        else:
            await self.middleware.call('chart.release.refresh_events_state',
                                       data['release_name'])
            job.set_progress(100, 'Chart release created')
            return await self.get_instance(data['release_name'])

    @accepts(Str('chart_release'),
             Dict(
                 'chart_release_update',
                 Dict('values', additional_attrs=True),
             ))
    @job(lock=lambda args: f'chart_release_update_{args[0]}')
    async def do_update(self, job, chart_release, data):
        """
        Update an existing chart release.

        `values` is configuration specified for the catalog item version in question which will be
        merged over the chart release's current configuration before the release is upgraded.
        """
        release = await self.get_instance(chart_release)
        # Keep a pristine copy of the release; it is handed to validation so the proposed
        # configuration can be checked against the currently deployed one.
        release_orig = copy.deepcopy(release)
        # Chart sources for the deployed version live under <release path>/charts/<version>.
        chart_path = os.path.join(release['path'], 'charts',
                                  release['chart_metadata']['version'])
        if not os.path.exists(chart_path):
            raise CallError(
                f'Unable to locate {chart_path!r} chart version for updating {chart_release!r} chart release',
                errno=errno.ENOENT)

        version_details = await self.middleware.call(
            'catalog.item_version_details', chart_path)
        # Merge user supplied values over the release's existing configuration.
        config = release['config']
        config.update(data['values'])
        # We use update=False because we want defaults to be populated again if they are not present in the payload
        # Why this is not dangerous is because the defaults will be added only if they are not present/configured for
        # the chart release.
        config, context = await self.normalise_and_validate_values(
            version_details,
            config,
            False,
            release['dataset'],
            release_orig,
        )

        job.set_progress(25, 'Initial Validation complete')

        # Apply side effects mandated by the new configuration (e.g. creating/deleting
        # ix-volumes) before handing the values over to helm.
        await self.perform_actions(context)

        config[CONTEXT_KEY_NAME].update({
            **get_action_context(chart_release),
            'operation':
            'UPDATE',
            'isUpdate':
            True,
        })

        await self.middleware.call('chart.release.helm_action', chart_release,
                                   chart_path, config, 'update')

        job.set_progress(90, 'Syncing secrets for chart release')
        await self.middleware.call('chart.release.sync_secrets_for_release',
                                   chart_release)
        await self.middleware.call('chart.release.refresh_events_state',
                                   chart_release)

        job.set_progress(100, 'Update completed for chart release')
        return await self.get_instance(chart_release)

    @accepts(Str('release_name'))
    @job(lock=lambda args: f'chart_release_delete_{args[0]}')
    async def do_delete(self, job, release_name):
        """
        Delete existing chart release.

        This will delete the chart release from the kubernetes cluster and also remove any associated volumes / data.
        To clarify, host path volumes will not be deleted which live outside the chart release dataset.
        """
        # For delete we will uninstall the release first and then remove the associated datasets
        await self.middleware.call('kubernetes.validate_k8s_setup')
        # Ensure the chart release actually exists before attempting removal.
        await self.get_instance(release_name)

        namespace = get_namespace(release_name)
        proc = await run(
            ['helm', 'uninstall', release_name, '-n', namespace],
            check=False,
        )
        if proc.returncode:
            raise CallError(
                f'Unable to uninstall "{release_name}" chart release: {proc.stderr}'
            )

        job.set_progress(50, f'Uninstalled {release_name}')
        job.set_progress(75, f'Waiting for {release_name!r} pods to terminate')
        await self.middleware.call('chart.release.wait_for_pods_to_terminate',
                                   namespace)

        await self.post_remove_tasks(release_name, job)

        await self.middleware.call(
            'chart.release.remove_chart_release_from_events_state',
            release_name)
        await self.middleware.call('alert.oneshot_delete',
                                   'ChartReleaseUpdate', release_name)

        job.set_progress(100, f'{release_name!r} chart release deleted')
        return True

    @private
    def helm_action(self, chart_release, chart_path, config, tn_action):
        """
        Invoke `helm install` / `helm upgrade` for `chart_release` with `config` as the
        values payload.

        `tn_action` is either 'install' or 'update'; anything other than 'install' maps to
        helm's 'upgrade' command. Raises `CallError` if helm exits non-zero.
        """
        args = ['-f']
        if os.path.exists(os.path.join(chart_path, 'ix_values.yaml')):
            # Chart-shipped base values come first so the user supplied values file
            # (appended below) overrides them.
            args.extend([os.path.join(chart_path, 'ix_values.yaml'), '-f'])

        action = tn_action if tn_action == 'install' else 'upgrade'

        with tempfile.NamedTemporaryFile(mode='w+') as f:
            f.write(yaml.dump(config))
            f.flush()

            # subprocess.run handles waiting for completion and capturing stderr for us,
            # replacing the manual Popen/communicate dance.
            cp = subprocess.run(
                [
                    'helm', action, chart_release, chart_path, '-n',
                    get_namespace(chart_release)
                ] + args + [f.name],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.PIPE,
                env=dict(os.environ, KUBECONFIG='/etc/rancher/k3s/k3s.yaml'),
            )
            if cp.returncode:
                raise CallError(
                    f'Failed to {tn_action} chart release: {cp.stderr.decode()}')

    @accepts(Str('release_name'))
    @returns(ENTRY)
    @job(lock=lambda args: f'chart_release_redeploy_{args[0]}')
    async def redeploy(self, job, release_name):
        """
        Redeploy will initiate a rollout of new pods according to upgrade strategy defined by the chart release
        workloads. A good example for redeploying is updating kubernetes pods with an updated container image.
        """
        # An update with an empty values payload re-renders the chart, which triggers the
        # rollout without changing any configuration.
        chart_update_job = await self.middleware.call(
            'chart.release.update', release_name, {'values': {}}
        )
        return await job.wrap(chart_update_job)

    @private
    async def post_remove_tasks(self, release_name, job=None):
        # Cleanup performed after (or in place of) a helm uninstall: remove the release's
        # storage class / datasets and then delete its kubernetes namespace.
        await self.remove_storage_class_and_dataset(release_name, job)
        await self.middleware.call('k8s.namespace.delete',
                                   get_namespace(release_name))

    @private
    async def remove_storage_class_and_dataset(self, release_name, job=None):
        # Shared cleanup helper: removes the release's storage class, any persistent volumes
        # backed by its datasets, and finally the release dataset itself.
        storage_class_name = get_storage_class_name(release_name)
        if await self.middleware.call(
                'k8s.storage_class.query',
            [['metadata.name', '=', storage_class_name]]):
            if job:
                job.set_progress(85,
                                 f'Removing {release_name!r} storage class')
            try:
                await self.middleware.call('k8s.storage_class.delete',
                                           storage_class_name)
            except Exception as e:
                # Best effort - a leftover storage class should not abort the rest of the cleanup.
                self.logger.error('Failed to remove %r storage class: %s',
                                  storage_class_name, e)

        k8s_config = await self.middleware.call('kubernetes.config')
        release_ds = os.path.join(k8s_config['dataset'], 'releases',
                                  release_name)

        # If the chart release was consuming any PV's, they would have to be manually removed from k8s database
        # because of chart release reclaim policy being retain
        for pv in await self.middleware.call('k8s.pv.query', [[
                'spec.csi.volume_attributes.openebs\\.io/poolname', '=',
                os.path.join(release_ds, 'volumes')
        ]]):
            await self.middleware.call('k8s.pv.delete', pv['metadata']['name'])

        # Recursively destroy the release dataset (charts, volumes, ix_volumes) if it exists.
        if await self.middleware.call('zfs.dataset.query',
                                      [['id', '=', release_ds]]):
            if job:
                job.set_progress(95, f'Removing {release_ds!r} dataset')
            await self.middleware.call('zfs.dataset.delete', release_ds, {
                'recursive': True,
                'force': True
            })

    @private
    async def release_datasets(self, release_dataset):
        """Return the release dataset followed by every child dataset a chart release needs."""
        datasets = [release_dataset]
        for child in ('charts', 'volumes', 'volumes/ix_volumes'):
            datasets.append(os.path.join(release_dataset, child))
        return datasets

    @private
    async def get_chart_namespace_prefix(self):
        # Expose the namespace prefix constant so other plugins can retrieve it via a middleware call.
        return CHART_NAMESPACE_PREFIX
Example #20
0
File: s3.py  Project: sufideen/freenas
class S3Service(SystemServiceService):
    """Configuration service for the S3 (minio) system service."""

    class Config:
        service = "s3"
        datastore_prefix = "s3_"
        datastore_extend = "s3.config_extend"

    @private
    async def config_extend(self, s3):
        # The datastore persists the storage path under `disks`; expose it under a clearer
        # name and drop the internal `mode` field.
        s3['storage_path'] = s3.pop('disks', None)
        s3.pop('mode', None)
        return s3

    @accepts(
        Dict(
            's3_update',
            Str('bindip'),
            Int('bindport', validators=[Range(min=1, max=65535)]),
            # Raw strings avoid Python's invalid escape sequence warning for `\w`.
            Str('access_key', validators=[Match(r"^\w+$")]),
            Str('secret_key', validators=[Match(r"^\w+$")]),
            Bool('browser'),
            Str('storage_path'),
            Int('certificate', null=True),
            update=True,
        ))
    async def do_update(self, data):
        """
        Update S3 Service Configuration.

        `access_key` / `secret_key` must be 5-20 / 8-40 characters long respectively, and
        `storage_path` must reside within a pool and must not be a top level dataset.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        for attr, minlen, maxlen in (
            ('access_key', 5, 20),
            ('secret_key', 8, 40),
        ):
            curlen = len(new.get(attr, ''))
            if curlen < minlen or curlen > maxlen:
                verrors.add(
                    f's3_update.{attr}',
                    f'Attribute should be {minlen} to {maxlen} in length')

        if not new['storage_path']:
            verrors.add('s3_update.storage_path', 'Storage path is required')
        else:
            # Only inspect / create the directory when a path was actually supplied and no
            # storage path errors were recorded. Previously the `if not verrors and ...`/`else`
            # structure ran os.makedirs() even when validation had already failed (and with an
            # empty path when storage_path was missing).
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   's3_update.storage_path',
                                                   new['storage_path'])

            if not verrors:
                if new['storage_path'].rstrip('/').count('/') < 3:
                    verrors.add(
                        's3_update.storage_path',
                        'Top level datasets are not allowed. i.e /mnt/tank/dataset is allowed'
                    )
                else:
                    # If the storage_path does not exist, let's create it
                    if not os.path.exists(new['storage_path']):
                        os.makedirs(new['storage_path'])

        if verrors:
            raise verrors

        new['disks'] = new.pop('storage_path')

        await self._update_service(old, new)

        # minio must own its storage directory; reset the ACL if someone else owns it.
        if await self.middleware.call('notifier.mp_get_owner',
                                      new['disks']) != 'minio':
            await self.middleware.call('notifier.winacl_reset', new['disks'],
                                       'minio', 'minio')

        return await self.config()
Example #21
0
class S3Service(SystemServiceService):
    """Configuration service for the S3 (minio) system service."""

    class Config:
        service = "s3"
        datastore_prefix = "s3_"
        datastore_extend = "s3.config_extend"

    @private
    async def config_extend(self, s3):
        # The datastore persists the storage path under `disks`; expose it under a clearer
        # name, drop the internal `mode` field and flatten the certificate to its id.
        s3['storage_path'] = s3.pop('disks', None)
        s3.pop('mode', None)
        if s3.get('certificate'):
            s3['certificate'] = s3['certificate']['id']
        return s3

    @accepts(
        Dict(
            's3_update',
            Str('bindip'),
            Int('bindport', validators=[Range(min=1, max=65535)]),
            # Raw strings avoid Python's invalid escape sequence warning for `\w`.
            Str('access_key',
                validators=[
                    Match(r"^\w+$",
                          explanation=
                          "Should only contain alphanumeric characters")
                ],
                max_length=20),
            Str('secret_key',
                validators=[
                    Match(r"^\w+$",
                          explanation=
                          "Should only contain alphanumeric characters")
                ],
                max_length=40),
            Bool('browser'),
            Str('storage_path'),
            Int('certificate', null=True),
            update=True,
        ))
    async def do_update(self, data):
        """
        Update S3 Service Configuration.

        `access_key` must only contain alphanumeric characters and should be between 5 and 20 characters.

        `secret_key` must only contain alphanumeric characters and should be between 8 and 40 characters.

        `browser` when set, enables the web user interface for the S3 Service.

        `certificate` is a valid certificate id which exists in the system. This is used to enable secure
        S3 connections.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        # Enforce minimum key lengths, which the schema's max_length alone cannot express.
        for attr, minlen, maxlen in (
            ('access_key', 5, 20),
            ('secret_key', 8, 40),
        ):
            curlen = len(new.get(attr, ''))
            if curlen < minlen or curlen > maxlen:
                verrors.add(
                    f's3_update.{attr}',
                    f'Attribute should be {minlen} to {maxlen} in length')

        if not new['storage_path']:
            verrors.add('s3_update.storage_path', 'Storage path is required')
        else:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   's3_update.storage_path',
                                                   new['storage_path'])

            if not verrors:
                if new['storage_path'].rstrip('/').count('/') < 3:
                    verrors.add(
                        's3_update.storage_path',
                        'Top level datasets are not allowed. i.e /mnt/tank/dataset is allowed'
                    )
                else:
                    # If the storage_path does not exist, let's create it
                    if not os.path.exists(new['storage_path']):
                        os.makedirs(new['storage_path'])

        if new['certificate']:
            verrors.extend(
                (await
                 self.middleware.call('certificate.cert_services_validation',
                                      new['certificate'],
                                      's3_update.certificate', False)))

        if verrors:
            raise verrors

        new['disks'] = new.pop('storage_path')

        await self._update_service(old, new)

        # minio must own its storage directory; fix up ownership/permissions if it does not.
        if (await self.middleware.call('filesystem.stat',
                                       new['disks']))['user'] != 'minio':
            await self.middleware.call(
                'filesystem.setperm', {
                    'path': new['disks'],
                    'mode': '775',
                    'uid': (await self.middleware.call(
                        'dscache.get_uncached_user', 'minio'))['pw_uid'],
                    'gid': (await self.middleware.call(
                        'dscache.get_uncached_group', 'minio'))['gr_gid'],
                    'options': {'recursive': True, 'traverse': False},
                })

        return await self.config()
Example #22
0
File: rsync.py  Project: tejp/freenas
class RsyncModService(CRUDService):
    """CRUD service managing rsync daemon modules (rsyncd.conf(5) sections)."""

    class Config:
        datastore = 'services.rsyncmod'
        datastore_prefix = 'rsyncmod_'
        datastore_extend = 'rsyncmod.rsync_mod_extend'

    @private
    async def rsync_mod_extend(self, data):
        # The datastore stores the hosts lists as space separated strings and the mode in
        # lower case; present them as lists / upper case over the API.
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()
        data['mode'] = data['mode'].upper()
        return data

    @private
    async def common_validation(self, data, schema_name):
        """Validate `data` for create/update and convert API values back to database form."""
        verrors = ValidationErrors()

        await check_path_resides_within_volume(verrors, self.middleware, f'{schema_name}.path', data.get('path'))

        for entity in ('user', 'group'):
            value = data.get(entity)
            # user.query rows expose the name under 'username', group.query rows under 'group'.
            if value not in map(
                    lambda e: e[entity if entity == 'group' else 'username'],
                    await self.middleware.call(f'{entity}.query')
            ):
                verrors.add(
                    f'{schema_name}.{entity}',
                    f'Please specify a valid {entity}'
                )

        verrors.check()

        # Reverse of rsync_mod_extend: back to space separated strings and lower case mode.
        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])
        data['mode'] = data['mode'].lower()

        return data

    @accepts(Dict(
        'rsyncmod_create',
        # NOTE(review): this unanchored single-character class only requires that SOME character
        # in the name is not '/' or ']' - it does not reject names containing those characters.
        # Presumably `^[^/\]]+$` was intended; confirm before tightening validation behavior.
        Str('name', validators=[Match(r'[^/\]]')]),
        Str('comment'),
        Str('path', required=True),
        Str('mode', enum=['RO', 'RW', 'WO']),
        Int('maxconn'),
        Str('user', default='nobody'),
        Str('group', default='nobody'),
        List('hostsallow', items=[Str('hostsallow')], default=[]),
        List('hostsdeny', items=[Str('hostdeny')], default=[]),
        Str('auxiliary'),
        register=True,
    ))
    async def do_create(self, data):
        """
        Create a Rsyncmod module.

        `path` represents the path to pool/dataset.

        `maxconn` is an integer value representing the maximum number of simultaneous connections. Zero represents
        unlimited.

        `hostsallow` is a list of patterns to match hostname/ip address of a connecting client. If list is empty,
        all hosts are allowed.

        `hostsdeny` is a list of patterns to match hostname/ip address of a connecting client. If the pattern is
        matched, access is denied to the client. If no client should be denied, this should be left empty.

        `auxiliary` attribute can be used to pass on any additional parameters from rsyncd.conf(5).
        """

        data = await self.common_validation(data, 'rsyncmod_create')

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        # rsyncd reads its module list from the generated config, so a reload is sufficient.
        await self._service_change('rsync', 'reload')

        return await self._get_instance(data['id'])

    @accepts(Int('id'), Patch('rsyncmod_create', 'rsyncmod_update', ('attr', {'update': True})))
    async def do_update(self, id, data):
        """
        Update Rsyncmod module of `id`.
        """
        # Merge the partial payload over the existing row before re-validating the whole module.
        module = await self._get_instance(id)
        module.update(data)

        module = await self.common_validation(module, 'rsyncmod_update')

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            module,
            {'prefix': self._config.datastore_prefix}
        )

        await self._service_change('rsync', 'reload')

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete Rsyncmod module of `id`.
        """
        return await self.middleware.call('datastore.delete', self._config.datastore, id)
Example #23
0
class NetworkConfigurationService(ConfigService):
    """Singleton service exposing the system's global network configuration."""

    class Config:
        namespace = 'network.configuration'
        datastore = 'network.globalconfiguration'
        datastore_prefix = 'gc_'
        datastore_extend = 'network.configuration.network_config_extend'
        cli_namespace = 'network.configuration'

    # API schema for the configuration entry. `hostname_local` and `state` are synthesized
    # by network_config_extend rather than stored in the database.
    ENTRY = Dict(
        'network_configuration_entry',
        Int('id', required=True),
        Str('hostname', required=True, validators=[Hostname()]),
        Str(
            'domain',
            validators=[Match(r'^[a-zA-Z\.\-\0-9]*$')],
        ),
        IPAddr('ipv4gateway', required=True),
        IPAddr('ipv6gateway', required=True, allow_zone_index=True),
        IPAddr('nameserver1', required=True),
        IPAddr('nameserver2', required=True),
        IPAddr('nameserver3', required=True),
        Str('httpproxy', required=True),
        Bool('netwait_enabled', required=True),
        List('netwait_ip', required=True, items=[Str('netwait_ip')]),
        Str('hosts', required=True),
        List('domains', required=True, items=[Str('domain')]),
        Dict(
            'service_announcement',
            Bool('netbios'),
            Bool('mdns'),
            Bool('wsd'),
            register=True,
        ),
        Dict('activity',
             Str('type', enum=['ALLOW', 'DENY'], required=True),
             List('activities', items=[Str('activity')]),
             strict=True),
        Str('hostname_local', required=True, validators=[Hostname()]),
        # The standby/virtual hostnames only exist on enterprise (HA) systems.
        Str('hostname_b', validators=[Hostname()], null=True),
        Str('hostname_virtual', validators=[Hostname()], null=True),
        # Runtime state observed from the network, as opposed to the configured values above.
        Dict(
            'state',
            IPAddr('ipv4gateway', required=True),
            IPAddr('ipv6gateway', required=True, allow_zone_index=True),
            IPAddr('nameserver1', required=True),
            IPAddr('nameserver2', required=True),
            IPAddr('nameserver3', required=True),
        ),
    )

    @private
    def network_config_extend(self, data):
        """Normalize the raw datastore row for consumption by API clients."""
        # hostname_local always refers to the hostname of the machine answering the call so
        # it works with either TrueNAS SCALE or SCALE_ENTERPRISE.
        data['hostname_local'] = data['hostname']

        if self.middleware.call_sync('system.is_enterprise'):
            if self.middleware.call_sync('failover.node') == 'B':
                data['hostname_local'] = data['hostname_b']
        else:
            # Non-HA systems have no standby/virtual hostnames to report.
            data.pop('hostname_b')
            data.pop('hostname_virtual')

        # These are persisted as space separated strings.
        for key in ('domains', 'netwait_ip'):
            data[key] = data[key].split()

        state = {
            'ipv4gateway': '',
            'ipv6gateway': '',
            'nameserver1': '',
            'nameserver2': '',
            'nameserver3': '',
        }
        summary = self.middleware.call_sync('network.general.summary')
        for default_route in summary['default_routes']:
            try:
                ipaddress.IPv4Address(default_route)
            except ValueError:
                family_key = 'ipv6gateway'
            else:
                family_key = 'ipv4gateway'
            # Only the first route of each address family is reported.
            if not state[family_key]:
                state[family_key] = default_route
        for idx, nameserver in enumerate(summary['nameservers'][:3], start=1):
            state[f'nameserver{idx}'] = nameserver
        data['state'] = state

        return data

    @private
    async def validate_general_settings(self, data, schema):
        """
        Validate nameservers, the IPv4 gateway, netwait IPs and additional domains in `data`.

        Returns a `ValidationErrors` instance (possibly empty) instead of raising, so the
        caller can aggregate further errors before checking.
        """
        verrors = ValidationErrors()

        for key in [k for k in data if 'nameserver' in k]:
            nameserver_value = data.get(key)
            if not nameserver_value:
                continue

            try:
                nameserver_ip = ipaddress.ip_address(nameserver_value)
            except ValueError as e:
                verrors.add(f'{schema}.{key}', str(e))
                continue

            if nameserver_ip.is_loopback:
                verrors.add(f'{schema}.{key}',
                            'Loopback is not a valid nameserver')
            elif nameserver_ip.is_unspecified:
                verrors.add(
                    f'{schema}.{key}',
                    'Unspecified addresses are not valid as nameservers'
                )
            elif nameserver_ip.version == 4:
                if nameserver_value == '255.255.255.255':
                    verrors.add(
                        f'{schema}.{key}',
                        'This is not a valid nameserver address')
                elif nameserver_value.startswith('169.254'):
                    verrors.add(
                        f'{schema}.{key}',
                        '169.254/16 subnet is not valid for nameserver'
                    )

            # Nameservers must be filled out in order (nameserver1 before nameserver2, etc).
            nameserver_number = int(key[-1])
            for i in range(nameserver_number - 1, 0, -1):
                if f'nameserver{i}' in data and not data[f'nameserver{i}']:
                    verrors.add(
                        f'{schema}.{key}',
                        # typo fixed: previously read 'namserver'
                        f'Must fill out nameserver{i} before filling out {key}'
                    )

        ipv4_gateway_value = data.get('ipv4gateway')
        if ipv4_gateway_value:
            if not await self.middleware.call(
                    'route.ipv4gw_reachable',
                    ipaddress.ip_address(ipv4_gateway_value).exploded):
                verrors.add(f'{schema}.ipv4gateway',
                            f'Gateway {ipv4_gateway_value} is unreachable')

        for ip in data.get('netwait_ip') or []:
            try:
                ipaddress.ip_address(ip)
            except ValueError as e:
                verrors.add(f'{schema}.netwait_ip', f'{str(e)}')

        if data.get('domains') and len(data.get('domains')) > 5:
            verrors.add(f'{schema}.domains',
                        'No more than 5 additional domains are allowed')

        return verrors

    @accepts(Ref('service_announcement'))
    @private
    async def toggle_announcement(self, data):
        """Start/stop/restart each service announcement daemon to match `data`."""
        announce_srv = {'mdns': 'mdns', 'netbios': 'nmbd', 'wsd': 'wsdd'}
        for srv, enabled in data.items():
            service_name = announce_srv[srv]
            started = await self.middleware.call('service.started',
                                                 service_name)

            if enabled:
                # Restart a running daemon so it picks up config changes, otherwise start it.
                verb = 'restart' if started else 'start'
            else:
                # Nothing to do when the daemon is already stopped.
                verb = 'stop' if started else None

            if verb is not None:
                await self.middleware.call(f'service.{verb}', service_name)

    @accepts(
        Patch(
            'network_configuration_entry',
            'global_configuration_update',
            ('rm', {
                'name': 'id'
            }),
            ('rm', {
                'name': 'hostname_local'
            }),
            ('rm', {
                'name': 'state'
            }),
            ('attr', {
                'update': True
            }),
        ), )
    async def do_update(self, data):
        """
        Update Network Configuration Service configuration.

        `ipv4gateway` if set is used instead of the default gateway provided by DHCP.

        `nameserver1` is primary DNS server.

        `nameserver2` is secondary DNS server.

        `nameserver3` is tertiary DNS server.

        `httpproxy` attribute must be provided if a proxy is to be used for network operations.

        `netwait_enabled` is a boolean attribute which when set indicates that network services will not start at
        boot unless they are able to ping the addresses listed in `netwait_ip` list.

        `service_announcement` determines the broadcast protocols that will be used to advertise the server.
        `netbios` enables the NetBIOS name server (NBNS), which starts concurrently with the SMB service. SMB clients
        will only perform NBNS lookups if SMB1 is enabled. NBNS may be required for legacy SMB clients.
        `mdns` enables multicast DNS service announcements for enabled services. `wsd` enables Web Service
        Discovery support.
        """
        config = await self.config()
        config.pop('state')

        new_config = config.copy()
        new_config.update(data)
        new_config['service_announcement'] = config[
            'service_announcement'] | data.get('service_announcement', {})
        if new_config == config:
            # nothing changed so return early
            return await self.config()

        verrors = await self.validate_general_settings(
            data, 'global_configuration_update')

        filters = [('timemachine', '=', True), ('enabled', '=', True)]
        if not new_config[
                'service_announcement']['mdns'] and await self.middleware.call(
                    'sharing.smb.query', filters):
            verrors.add(
                'global_configuration_update.service_announcement.mdns',
                'NAS is configured as a time machine target. mDNS is required.'
            )

        lhost_changed = rhost_changed = False
        this_node = await self.middleware.call('failover.node')
        if this_node in ('MANUAL', 'A'):
            lhost_changed = config['hostname'] != new_config['hostname']
            rhost_changed = config.get('hostname_b') and config[
                'hostname_b'] != new_config['hostname_b']
        elif this_node == 'B':
            lhost_changed = config['hostname_b'] != new_config['hostname_b']
            rhost_changed = config['hostname'] != new_config['hostname']

        vhost_changed = config.get('hostname_virtual') and config[
            'hostname_virtual'] != new_config['hostname_virtual']
        if vhost_changed and await self.middleware.call(
                'activedirectory.get_state') != "DISABLED":
            verrors.add(
                'global_configuration_update.hostname_virtual',
                'This parameter may not be changed after joining Active Directory (AD). '
                'If it must be changed, the proper procedure is to leave the AD domain '
                'and then alter the parameter before re-joining the domain.')

        verrors.check()

        # pop the `hostname_local` key since that's created in the _extend method
        # and doesn't exist in the database
        new_config.pop('hostname_local', None)

        # normalize the `domains` and `netwait_ip` keys
        new_config['domains'] = ' '.join(new_config.get('domains', []))
        new_config['netwait_ip'] = ' '.join(new_config.get('netwait_ip', []))

        # update the db
        await self.middleware.call('datastore.update',
                                   'network.globalconfiguration', config['id'],
                                   new_config, {'prefix': 'gc_'})

        service_actions = set()
        if lhost_changed:
            await self.middleware.call('etc.generate', 'hostname')
            service_actions.add(('collectd', 'restart'))
            service_actions.add(('nscd', 'reload'))

        if rhost_changed:
            try:
                await self.middleware.call('failover.call_remote',
                                           'etc.generate', ['hostname'])
            except Exception:
                self.logger.warning(
                    'Failed to set hostname on standby storage controller',
                    exc_info=True)

        # dns domain name changed
        licensed = await self.middleware.call('failover.licensed')
        domainname_changed = new_config['domain'] != config['domain']
        if domainname_changed:
            await self.middleware.call('etc.generate', 'hosts')
            service_actions.add(('collectd', 'restart'))
            service_actions.add(('nscd', 'reload'))
            if licensed:
                try:
                    await self.middleware.call('failover.call_remote',
                                               'etc.generate', ['hosts'])
                except Exception:
                    self.logger.warning(
                        'Failed to set domain name on standby storage controller',
                        exc_info=True)

        # anything related to resolv.conf changed
        dnssearch_changed = new_config['domains'] != config['domains']
        dns1_changed = new_config['nameserver1'] != config['nameserver1']
        dns2_changed = new_config['nameserver2'] != config['nameserver2']
        dns3_changed = new_config['nameserver3'] != config['nameserver3']
        dnsservers_changed = any((dns1_changed, dns2_changed, dns3_changed))
        if dnssearch_changed or dnsservers_changed:
            await self.middleware.call('dns.sync')
            service_actions.add(('nscd', 'reload'))
            if licensed:
                try:
                    await self.middleware.call('failover.call_remote',
                                               'dns.sync')
                except Exception:
                    self.logger.warning(
                        'Failed to generate resolv.conf on standby storage controller',
                        exc_info=True)

            def reload_cli():
                for process in psutil.process_iter(['pid', 'cmdline']):
                    cmdline = process.cmdline()
                    if len(cmdline) >= 2 and cmdline[1] == '/usr/bin/cli':
                        with contextlib.suppress(Exception):
                            process.send_signal(signal.SIGUSR1)

            await self.middleware.run_in_thread(reload_cli)

        # default gateways changed
        ipv4gw_changed = new_config['ipv4gateway'] != config['ipv4gateway']
        ipv6gw_changed = new_config['ipv6gateway'] != config['ipv6gateway']
        if ipv4gw_changed or ipv6gw_changed:
            await self.middleware.call('route.sync')
            if licensed:
                try:
                    await self.middleware.call('failover.call_remote',
                                               'route.sync')
                except Exception:
                    self.logger.warning(
                        'Failed to generate routes on standby storage controller',
                        exc_info=True)

        # kerberized NFS needs to be restarted if these change
        if lhost_changed or vhost_changed or domainname_changed:
            if await self.middleware.call('kerberos.keytab.has_nfs_principal'):
                service_actions.add(('nfs', 'restart'))

        # proxy server has changed
        if new_config['httpproxy'] != config['httpproxy']:
            await self.middleware.call(
                'core.event_send', 'network.config', 'CHANGED',
                {'data': {
                    'httpproxy': new_config['httpproxy']
                }})

        # allowing outbound network activity has been changed
        if new_config['activity'] != config['activity']:
            await self.middleware.call('zettarepl.update_tasks')

        # handle the various service announcement daemons
        announce_changed = new_config['service_announcement'] != config[
            'service_announcement']
        announce_srv = {'mdns': 'mdns', 'netbios': 'nmbd', 'wsd': 'wsdd'}
        if any((lhost_changed, vhost_changed)) or announce_changed:
            # lhost_changed is the local hostname and vhost_changed is the virtual hostname
            # and if either of these change then we need to toggle the service announcement
            # daemons irregardless whether or not these were toggled on their own
            for srv, enabled in new_config['service_announcement'].items():
                service_name = announce_srv[srv]
                started = await self.middleware.call('service.started',
                                                     service_name)
                verb = None

                if enabled:
                    verb = 'restart' if started else 'start'
                else:
                    verb = 'stop' if started else None

                if not verb:
                    continue

                service_actions.add((service_name, verb))

        for service, verb in service_actions:
            await self.middleware.call(f'service.{verb}', service)

        await self.middleware.call('network.configuration.toggle_announcement',
                                   new_config['service_announcement'])

        return await self.config()
示例#24
0
class FTPService(SystemServiceService):

    class Config:
        service = "ftp"
        datastore_prefix = "ftp_"
        datastore_extend = "ftp.ftp_extend"
        cli_namespace = "service.ftp"

    @private
    async def ftp_extend(self, data):
        # The datastore returns the related certificate row as a dict; expose
        # only its id through the API.
        if data['ssltls_certificate']:
            data['ssltls_certificate'] = data['ssltls_certificate']['id']
        return data

    @accepts(Dict(
        'ftp_update',
        Int('port', validators=[Range(min=1, max=65535)]),
        Int('clients', validators=[Range(min=1, max=10000)]),
        Int('ipconnections', validators=[Range(min=0, max=1000)]),
        Int('loginattempt', validators=[Range(min=0, max=1000)]),
        Int('timeout', validators=[Range(min=0, max=10000)]),
        Bool('rootlogin'),
        Bool('onlyanonymous'),
        Dir('anonpath', null=True),
        Bool('onlylocal'),
        Str('banner', max_length=None),
        Str('filemask', validators=[Match(r"^[0-7]{3}$")]),
        Str('dirmask', validators=[Match(r"^[0-7]{3}$")]),
        Bool('fxp'),
        Bool('resume'),
        Bool('defaultroot'),
        Bool('ident'),
        Bool('reversedns'),
        Str('masqaddress'),
        Int('passiveportsmin', validators=[Or(Exact(0), Range(min=1024, max=65535))]),
        Int('passiveportsmax', validators=[Or(Exact(0), Range(min=1024, max=65535))]),
        Int('localuserbw', validators=[Range(min=0)]),
        Int('localuserdlbw', validators=[Range(min=0)]),
        Int('anonuserbw', validators=[Range(min=0)]),
        Int('anonuserdlbw', validators=[Range(min=0)]),
        Bool('tls'),
        Str('tls_policy', enum=["on", "off", "data", "!data", "auth", "ctrl",
                                "ctrl+data", "ctrl+!data", "auth+data", "auth+!data"]),
        Bool('tls_opt_allow_client_renegotiations'),
        Bool('tls_opt_allow_dot_login'),
        Bool('tls_opt_allow_per_user'),
        Bool('tls_opt_common_name_required'),
        Bool('tls_opt_enable_diags'),
        Bool('tls_opt_export_cert_data'),
        Bool('tls_opt_no_cert_request'),
        Bool('tls_opt_no_empty_fragments'),
        Bool('tls_opt_no_session_reuse_required'),
        Bool('tls_opt_stdenvvars'),
        Bool('tls_opt_dns_name_required'),
        Bool('tls_opt_ip_address_required'),
        Int('ssltls_certificate', null=True),
        Str('options', max_length=None),
        update=True
    ))
    async def do_update(self, data):
        """
        Update ftp service configuration.

        `clients` is an integer value which sets the maximum number of simultaneous clients allowed. It defaults to 32.

        `ipconnections` is an integer value which shows the maximum number of connections per IP address. It defaults
        to 0 which equals to unlimited.

        `timeout` is the maximum client idle time in seconds before client is disconnected.

        `rootlogin` is a boolean value which when configured to true enables login as root. This is generally
        discouraged because of the security risks.

        `onlyanonymous` allows anonymous FTP logins with access to the directory specified by `anonpath`.

        `banner` is a message displayed to local login users after they successfully authenticate. It is not displayed
        to anonymous login users.

        `filemask` sets the default permissions for newly created files which by default are 077.

        `dirmask` sets the default permissions for newly created directories which by default are 077.

        `resume` if set allows FTP clients to resume interrupted transfers.

        `fxp` if set to true indicates that File eXchange Protocol is enabled. Generally it is discouraged as it
        makes the server vulnerable to FTP bounce attacks.

        `defaultroot` when set ensures that for local users, home directory access is only granted if the user
        is a member of group wheel.

        `ident` is a boolean value which when set to true indicates that IDENT authentication is required. If identd
        is not running on the client, this can result in timeouts.

        `masqaddress` is the public IP address or hostname which is set if FTP clients cannot connect through a
        NAT device.

        `localuserbw` is a positive integer value which indicates maximum upload bandwidth in KB/s for local user.
        Default of zero indicates unlimited upload bandwidth ( from the FTP server configuration ).

        `localuserdlbw` is a positive integer value which indicates maximum download bandwidth in KB/s for local user.
        Default of zero indicates unlimited download bandwidth ( from the FTP server configuration ).

        `anonuserbw` is a positive integer value which indicates maximum upload bandwidth in KB/s for anonymous user.
        Default of zero indicates unlimited upload bandwidth ( from the FTP server configuration ).

        `anonuserdlbw` is a positive integer value which indicates maximum download bandwidth in KB/s for anonymous
        user. Default of zero indicates unlimited download bandwidth ( from the FTP server configuration ).

        `tls` is a boolean value which when set indicates that encrypted connections are enabled. This requires a
        certificate to be configured first with the certificate service and the id of certificate is passed on in
        `ssltls_certificate`.

        `tls_policy` defines whether the control channel, data channel, both channels, or neither channel of an FTP
        session must occur over SSL/TLS.

        `tls_opt_enable_diags` is a boolean value when set, logs verbosely. This is helpful when troubleshooting a
        connection.

        `options` is a string used to add proftpd(8) parameters not covered by ftp service.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        # The passive port bounds must either both be 0 (feature disabled) or
        # both be set, in which case max must be strictly greater than min.
        # NOTE: the field name carries the `ftp_update.` schema prefix so the
        # error is attached to the correct form field (the original omitted it).
        if not ((new["passiveportsmin"] == 0) == (new["passiveportsmax"] == 0)):
            verrors.add("ftp_update.passiveportsmin", "passiveportsmin and passiveportsmax should be both zero or non-zero")
        if not ((new["passiveportsmin"] == 0 and new["passiveportsmax"] == 0) or
                (new["passiveportsmax"] > new["passiveportsmin"])):
            verrors.add("ftp_update.passiveportsmax", "When specified, should be greater than passiveportsmin")

        # Anonymous-only login requires a valid anonymous root path inside a pool.
        if new["onlyanonymous"]:
            if not new["anonpath"]:
                verrors.add("ftp_update.anonpath", "This field is required for anonymous login")
            else:
                await check_path_resides_within_volume(verrors, self.middleware, "ftp_update.anonpath", new["anonpath"])

        if new["tls"]:
            if not new["ssltls_certificate"]:
                verrors.add(
                    "ftp_update.ssltls_certificate",
                    "Please provide a valid certificate id when TLS is enabled"
                )
            else:
                # Delegate certificate sanity checks to the certificate plugin;
                # it returns a list of validation errors to merge in.
                verrors.extend((await self.middleware.call(
                    "certificate.cert_services_validation", new["ssltls_certificate"],
                    "ftp_update.ssltls_certificate", False
                )))

        if new["masqaddress"]:
            await resolve_hostname(self.middleware, verrors, "ftp_update.masqaddress", new["masqaddress"])

        if verrors:
            raise verrors

        await self._update_service(old, new)

        # Enabling TLS for the first time requires the ssl service so the
        # certificate material is generated on disk for proftpd.
        if not old['tls'] and new['tls']:
            await self.middleware.call('service.start', 'ssl')

        return new
示例#25
0
class RsyncModService(CRUDService):

    class Config:
        datastore = 'services.rsyncmod'
        datastore_prefix = 'rsyncmod_'
        datastore_extend = 'rsyncmod.rsync_mod_extend'

    @private
    async def rsync_mod_extend(self, data):
        # The datastore stores host lists as space-separated strings and the
        # mode in lowercase; present lists and an uppercase mode via the API.
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()
        data['mode'] = data['mode'].upper()
        return data

    @private
    async def common_validation(self, data, schema_name):
        """Validate shared create/update constraints and compress `data` back
        into its datastore representation (space-separated host lists,
        lowercase mode)."""
        verrors = ValidationErrors()

        await check_path_resides_within_volume(verrors, self.middleware, f'{schema_name}.path', data.get('path'))

        # `user` must be an existing username; `group` an existing group name.
        for entity in ('user', 'group'):
            value = data.get(entity)
            if value not in map(
                    lambda e: e[entity if entity == 'group' else 'username'],
                    await self.middleware.call(f'{entity}.query')
            ):
                verrors.add(
                    f'{schema_name}.{entity}',
                    f'Please specify a valid {entity}'
                )

        verrors.check()

        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])
        data['mode'] = data['mode'].lower()

        return data

    @accepts(Dict(
        'rsyncmod_create',
        Str('name', validators=[Match(r'[^/\]]')]),
        Str('comment'),
        Str('path', required=True),
        Str('mode', enum=['RO', 'RW', 'WO']),
        Int('maxconn'),
        Str('user', default='nobody'),
        Str('group', default='nobody'),
        List('hostsallow', items=[Str('hostsallow')], default=[]),
        List('hostsdeny', items=[Str('hostdeny')], default=[]),
        Str('auxiliary'),
        register=True,
    ))
    async def do_create(self, data):
        """
        Create a rsyncmod module and reload the rsync service so the new
        module is written to its configuration.
        """
        data = await self.common_validation(data, 'rsyncmod_create')

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        await self._service_change('rsync', 'reload')

        return await self._get_instance(data['id'])

    @accepts(Int('id'), Patch('rsyncmod_create', 'rsyncmod_update', ('attr', {'update': True})))
    async def do_update(self, id, data):
        """
        Update rsyncmod module of `id` with the supplied fields.
        """
        module = await self._get_instance(id)
        module.update(data)

        module = await self.common_validation(module, 'rsyncmod_update')

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            module,
            {'prefix': self._config.datastore_prefix}
        )

        await self._service_change('rsync', 'reload')

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete rsyncmod module of `id`.
        """
        response = await self.middleware.call('datastore.delete', self._config.datastore, id)

        # Reload rsync so the deleted module is removed from the generated
        # configuration (create/update already do this; delete previously
        # left a stale module behind until the next unrelated reload).
        await self._service_change('rsync', 'reload')

        return response
示例#26
0
class TunableService(CRUDService):
    class Config:
        datastore = 'system.tunable'
        datastore_prefix = 'tun_'
        datastore_extend = 'tunable.upper'

    @accepts(
        Dict('tunable_create',
             Str('var', validators=[Match(r'^[\w\.]+$')], required=True),
             Str('value', required=True),
             Str('type', enum=['LOADER', 'RC', 'SYSCTL'], required=True),
             Str('comment'),
             Bool('enabled', default=True),
             register=True))
    async def do_create(self, data):
        """
        Create a tunable setting `var` to `value` for the given `type`.
        """
        await self.clean(data, 'tunable_create')
        await self.validate(data, 'tunable_create')
        # The datastore stores the type in lowercase; the extend hook
        # (`tunable.upper`) converts it back on the way out.
        await self.lower(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        # Regenerate the configuration of the subsystem (loader/rc/sysctl)
        # this tunable belongs to so it takes effect.
        await self.middleware.call('service.reload', data['type'])

        return await self._get_instance(data['id'])

    @accepts(Int('id'),
             Patch('tunable_create', 'tunable_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update tunable of `id`.
        """
        old = await self._get_instance(id)

        new = old.copy()
        new.update(data)

        await self.clean(new, 'tunable_update', old=old)
        await self.validate(new, 'tunable_update')

        await self.lower(new)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self.middleware.call('service.reload', new['type'])

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete tunable of `id` and reload the affected subsystem.
        """
        tunable = await self._get_instance(id)

        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        # `_get_instance` returned the extended (uppercase) type; the service
        # name is lowercase.
        await self.middleware.call('service.reload', tunable['type'].lower())

        return response

    @private
    async def lower(self, data):
        # Normalize `type` to the lowercase datastore representation.
        data['type'] = data['type'].lower()

        return data

    @private
    async def upper(self, data):
        # datastore_extend hook: present `type` uppercase through the API.
        data['type'] = data['type'].upper()

        return data

    @private
    async def clean(self, tunable, schema_name, old=None):
        """Sanitize `tunable` and reject quoted values and duplicate vars."""
        verrors = ValidationErrors()
        skip_dupe = False
        tun_comment = tunable.get('comment')
        tun_value = tunable['value']
        tun_var = tunable['var']

        if tun_comment is not None:
            tunable['comment'] = tun_comment.strip()

        if '"' in tun_value or "'" in tun_value:
            verrors.add(f"{schema_name}.value",
                        'Quotes in value are not allowed')

        if schema_name == 'tunable_update' and old:
            old_tun_var = old['var']

            if old_tun_var == tun_var:
                # They aren't trying to change to a new name, just updating
                skip_dupe = True

        if not skip_dupe:
            tun_vars = await self.middleware.call('datastore.query',
                                                  self._config.datastore,
                                                  [('tun_var', '=', tun_var)])

            if tun_vars:
                # Attribute the duplicate error to the `var` field (the name
                # is what collides, not the value).
                verrors.add(f"{schema_name}.var",
                            'This variable already exists')

        if verrors:
            raise verrors

        return tunable

    @private
    async def validate(self, tunable, schema_name):
        """Validate the syntax of `var` according to the tunable `type`."""
        # Raw strings so `\.` reaches the regex engine verbatim instead of
        # being an invalid string escape.
        sysctl_re = \
            re.compile(r'[a-z][a-z0-9_]+\.([a-z0-9_]+\.)*[a-z0-9_]+', re.I)

        loader_re = \
            re.compile(r'[a-z][a-z0-9_]+\.*([a-z0-9_]+\.)*[a-z0-9_]+', re.I)

        verrors = ValidationErrors()
        tun_var = tunable['var'].lower()
        tun_type = tunable['type'].lower()

        if tun_type == 'loader' or tun_type == 'rc':
            err_msg = "Value can start with a letter and end with an alphanumeric. Alphanumeric and underscore" \
                      " characters are allowed"
        else:
            err_msg = 'Value can start with a letter and end with an alphanumeric. A period (.) once is a must.' \
                      ' Alphanumeric and underscore characters are allowed'

        if (tun_type in ('loader', 'rc') and not loader_re.match(tun_var)) or (
                tun_type == 'sysctl' and not sysctl_re.match(tun_var)):
            verrors.add(f"{schema_name}.var", err_msg)

        if verrors:
            raise verrors
示例#27
0
class SharingNFSService(SharingService):

    path_field = 'paths'
    share_task_type = 'NFS'

    class Config:
        namespace = "sharing.nfs"
        datastore = "sharing.nfs_share"
        datastore_prefix = "nfs_"
        datastore_extend = "sharing.nfs.extend"
        cli_namespace = "sharing.nfs"

    async def human_identifier(self, share_task):
        # A share is identified to the user by its list of exported paths.
        return ', '.join(share_task[self.path_field])

    @private
    async def sharing_task_determine_locked(self, data, locked_datasets):
        """Return True if any of the share's paths is on a locked dataset."""
        for path in data[self.path_field]:
            if await self.middleware.call(
                    'pool.dataset.path_in_locked_datasets', path,
                    locked_datasets):
                return True
        return False

    @accepts(
        Dict(
            "sharingnfs_create",
            List("paths", items=[Dir("path")], empty=False),
            List("aliases", items=[Str("path", validators=[Match(r"^/.*")])]),
            Str("comment", default=""),
            List("networks", items=[IPAddr("network", network=True)]),
            List("hosts", items=[Str("host")]),
            Bool("alldirs", default=False),
            Bool("ro", default=False),
            Bool("quiet", default=False),
            Str("maproot_user", required=False, default=None, null=True),
            Str("maproot_group", required=False, default=None, null=True),
            Str("mapall_user", required=False, default=None, null=True),
            Str("mapall_group", required=False, default=None, null=True),
            List(
                "security",
                items=[
                    Str("provider", enum=["SYS", "KRB5", "KRB5I", "KRB5P"])
                ],
            ),
            Bool("enabled", default=True),
            register=True,
            strict=True,
        ))
    async def do_create(self, data):
        """
        Create a NFS Share.

        `paths` is a list of valid paths which are configured to be shared on this share.

        `aliases` is a list of aliases for each path (or an empty list if aliases are not used).

        `networks` is a list of authorized networks that are allowed to access the share having format
        "network/mask" CIDR notation. If empty, all networks are allowed.

        `hosts` is a list of IP's/hostnames which are allowed to access the share. If empty, all IP's/hostnames are
        allowed.

        `alldirs` is a boolean value which when set indicates that the client can mount any subdirectories of the
        selected pool or dataset.
        """
        verrors = ValidationErrors()

        await self.validate(data, "sharingnfs_create", verrors)

        if verrors:
            raise verrors

        # Compress to the datastore representation (space-separated strings,
        # lowercase security providers) before insert, then extend back.
        await self.compress(data)
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        await self.extend(data)

        await self._service_change("nfs", "reload")

        return await self.get_instance(data["id"])

    @accepts(Int("id"),
             Patch("sharingnfs_create", "sharingnfs_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Update NFS Share of `id`.
        """
        verrors = ValidationErrors()
        old = await self.get_instance(id)

        new = old.copy()
        new.update(data)

        await self.validate(new, "sharingnfs_update", verrors, old=old)

        if verrors:
            raise verrors

        await self.compress(new)
        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, new,
                                   {"prefix": self._config.datastore_prefix})

        await self._service_change("nfs", "reload")

        return await self.get_instance(id)

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete NFS Share of `id`.
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
        await self._service_change("nfs", "reload")

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        """Collect all validation errors for a create/update into `verrors`."""
        if len(data["aliases"]):
            if not osc.IS_LINUX:
                verrors.add(
                    f"{schema_name}.aliases",
                    "This field is only supported on SCALE",
                )

            # Aliases map 1:1 onto paths, so the list lengths must match.
            if len(data["aliases"]) != len(data["paths"]):
                verrors.add(
                    f"{schema_name}.aliases",
                    "This field should be either empty or have the same number of elements as paths",
                )

        if data["alldirs"] and len(data["paths"]) > 1:
            verrors.add(
                f"{schema_name}.alldirs",
                "This option can only be used for shares that contain single path"
            )

        # if any of the `paths` that were passed to us by user are within the gluster volume
        # mountpoint then we need to pass the `gluster_bypass` kwarg so that we don't raise a
        # validation error complaining about using a gluster path within the zpool mountpoint
        bypass = any('.glusterfs' in i
                     for i in data["paths"] + data["aliases"])

        # need to make sure that the nfs share is within the zpool mountpoint
        for idx, i in enumerate(data["paths"]):
            await check_path_resides_within_volume(
                verrors,
                self.middleware,
                f'{schema_name}.paths.{idx}',
                i,
                gluster_bypass=bypass)

        await self.middleware.run_in_thread(self.validate_paths, data,
                                            schema_name, verrors)

        # Resolve every hostname referenced by this share and all others up
        # front so the network-overlap checks below can run synchronously.
        filters = []
        if old:
            filters.append(["id", "!=", old["id"]])
        other_shares = await self.middleware.call("sharing.nfs.query", filters)
        dns_cache = await self.resolve_hostnames(
            sum([share["hosts"]
                 for share in other_shares], []) + data["hosts"])
        await self.middleware.run_in_thread(self.validate_hosts_and_networks,
                                            other_shares, data, schema_name,
                                            verrors, dns_cache)

        # maproot/mapall: a group may only be given together with a user, and
        # both user and group must resolve via the directory-services cache.
        for k in ["maproot", "mapall"]:
            if not data[f"{k}_user"] and not data[f"{k}_group"]:
                pass
            elif not data[f"{k}_user"] and data[f"{k}_group"]:
                verrors.add(
                    f"{schema_name}.{k}_user",
                    "This field is required when map group is specified")
            else:
                user = group = None
                with contextlib.suppress(KeyError):
                    user = await self.middleware.call(
                        'dscache.get_uncached_user', data[f'{k}_user'])

                if not user:
                    verrors.add(f"{schema_name}.{k}_user", "User not found")

                if data[f'{k}_group']:
                    with contextlib.suppress(KeyError):
                        group = await self.middleware.call(
                            'dscache.get_uncached_group', data[f'{k}_group'])

                    if not group:
                        verrors.add(f"{schema_name}.{k}_group",
                                    "Group not found")

        if data["maproot_user"] and data["mapall_user"]:
            verrors.add(f"{schema_name}.mapall_user",
                        "maproot_user disqualifies mapall_user")

        if data["security"]:
            nfs_config = await self.middleware.call("nfs.config")
            if not nfs_config["v4"]:
                verrors.add(f"{schema_name}.security",
                            "This is not allowed when NFS v4 is disabled")

    @private
    def validate_paths(self, data, schema_name, verrors):
        if osc.IS_LINUX:
            # Ganesha does not have such a restriction, each path is a different share
            return

        # On FreeBSD all paths of one share must live on the same filesystem,
        # identified by the device number of each path.
        dev = None
        for i, path in enumerate(data["paths"]):
            stat = os.stat(path)
            if dev is None:
                dev = stat.st_dev
            else:
                if dev != stat.st_dev:
                    verrors.add(
                        f'{schema_name}.paths.{i}',
                        'Paths for a NFS share must reside within the same filesystem'
                    )

    @private
    async def resolve_hostnames(self, hostnames):
        """Resolve `hostnames` concurrently; returns hostname -> address
        (or None when resolution failed)."""
        hostnames = list(set(hostnames))

        async def resolve(hostname):
            try:
                # Cap each lookup at 5 seconds; getaddrinfo runs in a thread
                # because it is blocking.
                return (await asyncio.wait_for(
                    self.middleware.run_in_thread(socket.getaddrinfo, hostname,
                                                  None), 5))[0][4][0]
            except Exception as e:
                self.logger.warning("Unable to resolve host %r: %r", hostname,
                                    e)
                return None

        # At most 8 concurrent resolutions.
        resolved_hostnames = await asyncio_map(resolve, hostnames, 8)

        return dict(zip(hostnames, resolved_hostnames))

    @private
    def validate_hosts_and_networks(self, other_shares, data, schema_name,
                                    verrors, dns_cache):
        """Reject hosts/networks that another share on the same filesystem
        already exports — NFS cannot export one dataset twice to one client."""
        dev = os.stat(data["paths"][0]).st_dev

        # Gather every network already exported by other shares that live on
        # the same filesystem as the one being validated.
        used_networks = set()
        for share in other_shares:
            try:
                share_dev = os.stat(share["paths"][0]).st_dev
            except Exception:
                self.logger.warning("Failed to stat first path for %r",
                                    share,
                                    exc_info=True)
                continue

            if share_dev == dev:
                for host in share["hosts"]:
                    host = dns_cache[host]
                    if host is None:
                        continue

                    try:
                        network = ipaddress.ip_network(host)
                    except Exception:
                        self.logger.warning("Got invalid host %r", host)
                        continue
                    else:
                        used_networks.add(network)

                for network in share["networks"]:
                    try:
                        network = ipaddress.ip_network(network, strict=False)
                    except Exception:
                        self.logger.warning("Got invalid network %r", network)
                        continue
                    else:
                        used_networks.add(network)

                # A share with no host/network restriction exports to everyone.
                if not share["hosts"] and not share["networks"]:
                    used_networks.add(ipaddress.ip_network("0.0.0.0/0"))
                    used_networks.add(ipaddress.ip_network("::/0"))

        for host in set(data["hosts"]):
            host = dns_cache[host]
            if host is None:
                continue

            network = ipaddress.ip_network(host)
            if network in used_networks:
                verrors.add(
                    f"{schema_name}.hosts",
                    f"Another NFS share already exports this dataset for {host}"
                )

            used_networks.add(network)

        for network in set(data["networks"]):
            network = ipaddress.ip_network(network, strict=False)

            if network in used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    f"Another NFS share already exports this dataset for {network}"
                )

            used_networks.add(network)

        if not data["hosts"] and not data["networks"]:
            if used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    "Another NFS share already exports this dataset for some network"
                )

    @private
    async def extend(self, data):
        # Datastore -> API: split space-separated strings into lists and
        # present security providers uppercase.
        data["networks"] = data.pop("network").split()
        data["hosts"] = data["hosts"].split()
        data["security"] = [s.upper() for s in data["security"]]
        return data

    @private
    async def compress(self, data):
        # API -> datastore: join lists into space-separated strings, lowercase
        # security providers, and drop the computed `locked` field.
        data["network"] = " ".join(data.pop("networks"))
        data["hosts"] = " ".join(data["hosts"])
        data["security"] = [s.lower() for s in data["security"]]
        data.pop(self.locked_field, None)
        return data
示例#28
0
class BootEnvService(CRUDService):

    @filterable
    def query(self, filters=None, options=None):
        """
        Query boot environments with `query-filters` and `query-options`.
        """
        results = []
        for clone in Update.ListClones():
            # The boot environment name doubles as its unique identifier.
            clone['id'] = clone['name']
            results.append(clone)
        return filter_list(results, filters, options)

    @item_method
    @accepts(Str('id'))
    def activate(self, oid):
        """
        Activates boot environment `id`.
        """
        return Update.ActivateClone(oid)

    @item_method
    @accepts(Str('id'), Dict(
        'attributes',
        Bool('keep', default=False),
    ))
    def set_attribute(self, oid, attrs):
        """
        Sets attributes boot environment `id`.

        Currently only `keep` attribute is allowed.
        """
        clone = Update.FindClone(oid)
        return Update.CloneSetAttr(clone, **attrs)

    @accepts(
        Dict(
            'bootenv_create',
            Str('name', required=True, validators=[Match(RE_BE_NAME)]),
            Str('source'),
        ))
    def do_create(self, data):
        """
        Create a boot environment `name`, optionally cloned from `source`.

        Raises a ValidationErrors if `name` is already in use and a CallError
        if the underlying clone operation fails.
        """
        verrors = ValidationErrors()
        self._clean_be_name(verrors, 'bootenv_create', data['name'])
        if verrors:
            raise verrors

        kwargs = {}
        source = data.get('source')
        if source:
            kwargs['bename'] = source
        clone = Update.CreateClone(data['name'], **kwargs)
        # Update.CreateClone signals failure by returning False, not raising.
        if clone is False:
            raise CallError('Failed to create boot environment')
        return data['name']

    @accepts(Str('id'),
             Dict(
                 'bootenv_update',
                 Str('name', required=True, validators=[Match(RE_BE_NAME)]),
             ))
    def do_update(self, oid, data):
        """
        Rename boot environment `id` to the new `name`.
        """
        verrors = ValidationErrors()
        self._clean_be_name(verrors, 'bootenv_update', data['name'])
        if verrors:
            raise verrors

        if not Update.RenameClone(oid, data['name']):
            raise CallError('Failed to update boot environment')
        return data['name']

    def _clean_be_name(self, verrors, schema, name):
        """Add an EEXIST validation error to `verrors` if `name` is already
        used by an existing boot environment."""
        # beadm(1) has no machine-readable output; the 7th column of
        # `beadm list` holds the boot environment name.
        proc = subprocess.run(
            "beadm list | awk '{print $7}'",
            shell=True,
            capture_output=True,
            encoding='utf8',
        )
        beadm_names = proc.stdout.split('\n')
        if name in filter(None, beadm_names):
            verrors.add(f'{schema}.name', f'The name "{name}" already exists',
                        errno.EEXIST)

    @accepts(Str('id'))
    def do_delete(self, oid):
        """
        Delete boot environment `id`.
        """
        return Update.DeleteClone(oid)
示例#29
0
class RsyncModService(CRUDService):

    class Config:
        datastore = 'services.rsyncmod'
        datastore_prefix = 'rsyncmod_'
        datastore_extend = 'rsyncmod.rsync_mod_extend'

    @private
    async def rsync_mod_extend(self, data):
        """Expand a datastore row into its API form: split the host lists
        and upper-case the access mode."""
        for key in ('hostsallow', 'hostsdeny'):
            data[key] = data[key].split()
        data['mode'] = data['mode'].upper()
        return data

    @private
    async def common_validation(self, data, schema_name):
        """Validate `data` under `schema_name`, then compress it back into
        datastore form (joined host lists, lower-cased mode)."""
        verrors = ValidationErrors()

        await check_path_resides_within_volume(
            verrors, self.middleware, f'{schema_name}.path', data.get('path'))

        # Both the user and the group must resolve on this system.
        for kind in ('user', 'group'):
            name = data.get(kind)
            try:
                await self.middleware.call(f'{kind}.get_{kind}_obj',
                                           {f'{kind}name': name})
            except Exception:
                verrors.add(f'{schema_name}.{kind}',
                            f'Please specify a valid {kind}')

        verrors.check()

        for key in ('hostsallow', 'hostsdeny'):
            data[key] = ' '.join(data[key])
        data['mode'] = data['mode'].lower()

        return data

    @accepts(
        Dict(
            'rsyncmod_create',
            Str('name', validators=[Match(r'[^/\]]')]),
            Str('comment'),
            Str('path', required=True, max_length=RSYNC_PATH_LIMIT),
            Str('mode', enum=['RO', 'RW', 'WO']),
            Int('maxconn'),
            Str('user', default='nobody'),
            Str('group', default='nobody'),
            List('hostsallow', items=[Str('hostsallow')], default=[]),
            List('hostsdeny', items=[Str('hostdeny')], default=[]),
            Str('auxiliary', max_length=None),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a Rsyncmod module.

        `path` represents the path to a dataset. Path length is limited to 1023 characters maximum as per the limit
        enforced by FreeBSD. It is possible that we reach this max length recursively while transferring data. In that
        case, the user must ensure the maximum path will not be too long or modify the recursed path to shorter
        than the limit.

        `maxconn` is an integer value representing the maximum number of simultaneous connections. Zero represents
        unlimited.

        `hostsallow` is a list of patterns to match hostname/ip address of a connecting client. If list is empty,
        all hosts are allowed.

        `hostsdeny` is a list of patterns to match hostname/ip address of a connecting client. If the pattern is
        matched, access is denied to the client. If no client should be denied, this should be left empty.

        `auxiliary` attribute can be used to pass on any additional parameters from rsyncd.conf(5).
        """
        data = await self.common_validation(data, 'rsyncmod_create')

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self._service_change('rsync', 'reload')

        return await self._get_instance(data['id'])

    @accepts(Int('id'),
             Patch('rsyncmod_create', 'rsyncmod_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update Rsyncmod module of `id`.
        """
        module = await self._get_instance(id)
        module.update(data)
        module = await self.common_validation(module, 'rsyncmod_update')

        await self.middleware.call(
            'datastore.update', self._config.datastore, id, module,
            {'prefix': self._config.datastore_prefix})

        await self._service_change('rsync', 'reload')

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete Rsyncmod module of `id`.
        """
        return await self.middleware.call(
            'datastore.delete', self._config.datastore, id)
示例#30
0
class CatalogService(CRUDService):
    """CRUD service for application catalogs: git repositories whose
    trains/items are consumed by the apps subsystem."""

    class Config:
        datastore = 'services.catalog'
        datastore_extend = 'catalog.catalog_extend'
        datastore_extend_context = 'catalog.catalog_extend_context'
        datastore_primary_key = 'label'
        datastore_primary_key_type = 'string'
        cli_namespace = 'app.catalog'

    # API schema of a catalog entry; the create schema below is derived from
    # it via Patch.
    ENTRY = Dict(
        'catalog_entry',
        Str('label',
            required=True,
            validators=[Match(r'^\w+[\w.-]*$')],
            max_length=60),
        Str('repository', required=True, empty=False),
        Str('branch', required=True, empty=False),
        Str('location', required=True),
        Str('id', required=True),
        List('preferred_trains'),
        Dict('trains', additional_attrs=True),
        Bool('healthy'),
        Bool('error'),
        Bool('builtin'),
        Bool('cached'),
        Dict(
            'caching_progress',
            Str('description', null=True),
            Any('extra', null=True),
            Float('percent', null=True),
            null=True,
        ),
        Dict('caching_job', null=True, additional_attrs=True),
    )

    @private
    async def catalog_extend_context(self, rows, extra):
        """Build the shared context consumed by `catalog_extend` for each row.

        Determines where catalogs live on disk and, when
        `extra['item_details']` is requested, starts (or reuses) an item
        retrieval job per catalog whose result is merged into the entry later.
        """
        # Catalogs live on the kubernetes pool when one is configured;
        # otherwise a temporary directory is used.
        k8s_dataset = (await
                       self.middleware.call('kubernetes.config'))['dataset']
        catalogs_dir = os.path.join(
            '/mnt', k8s_dataset,
            'catalogs') if k8s_dataset else f'{TMP_IX_APPS_DIR}/catalogs'
        context = {
            'catalogs_dir': catalogs_dir,
            'extra': extra or {},
            'catalogs_context': {},
        }
        if extra.get('item_details'):
            item_sync_params = await self.middleware.call(
                'catalog.sync_items_params')
            # Already-running item jobs, so duplicates are not spawned below.
            item_jobs = await self.middleware.call(
                'core.get_jobs',
                [['method', '=', 'catalog.items'], ['state', '=', 'RUNNING']])
            for row in rows:
                label = row['label']
                catalog_info = {
                    # For non-official catalogs only cached data is read here;
                    # the official catalog may be fetched fresh.
                    'item_job':
                    await self.middleware.call(
                        'catalog.items', label, {
                            'cache':
                            True,
                            'cache_only':
                            await self.official_catalog_label() !=
                            row['label'],
                            'retrieve_all_trains':
                            extra.get('retrieve_all_trains', True),
                            'trains':
                            extra.get('trains', []),
                        }),
                    # Considered cached if either the full or the
                    # trains-filtered cache entry exists.
                    'cached':
                    label == OFFICIAL_LABEL or await self.middleware.call(
                        'catalog.cached', label, False) or await
                    self.middleware.call('catalog.cached', label, True),
                    'normalized_progress':
                    None,
                }
                if not catalog_info['cached']:
                    # Reuse a running sync job for this catalog if one exists,
                    # otherwise kick one off now.
                    caching_job = filter_list(
                        item_jobs,
                        [['arguments', '=', [row['label'], item_sync_params]]])
                    if not caching_job:
                        caching_job_obj = await self.middleware.call(
                            'catalog.items', label, item_sync_params)
                        caching_job = caching_job_obj.__encode__()
                    else:
                        caching_job = caching_job[0]

                    catalog_info['normalized_progress'] = {
                        'caching_job': caching_job,
                        'caching_progress': caching_job['progress'],
                    }
                context['catalogs_context'][label] = catalog_info

        return context

    @private
    async def normalize_data_from_item_job(self, label, catalog_context):
        """Wait for the catalog's item job and fold its outcome into the
        trains/healthy/cached/error fields of the entry.

        On job failure the pessimistic defaults below (error=True,
        healthy=False, empty trains) are returned unchanged.
        """
        normalized = {
            'trains': {},
            'cached': catalog_context['cached'],
            'healthy': False,
            'error': True,
            'caching_progress': None,
            'caching_job': None,
        }
        item_job = catalog_context['item_job']
        await item_job.wait()
        if not item_job.error:
            normalized.update({
                'trains':
                item_job.result,
                # Healthy only when every app in every train is healthy.
                'healthy':
                all(app['healthy'] for train in item_job.result
                    for app in item_job.result[train].values()),
                'cached':
                label == OFFICIAL_LABEL
                or await self.middleware.call('catalog.cached', label, False)
                or await self.middleware.call('catalog.cached', label, True),
                'error':
                False,
                'caching_progress':
                None,
                'caching_job':
                None,
            })
        return normalized

    @private
    async def catalog_extend(self, catalog, context):
        """Datastore-extend hook: add computed `location`/`id` fields and,
        when item details were requested, the normalized item-job data."""
        catalog.update({
            'location':
            os.path.join(
                context['catalogs_dir'],
                convert_repository_to_path(catalog['repository'],
                                           catalog['branch'])),
            'id':
            catalog['label'],
        })
        extra = context['extra']
        if extra.get('item_details'):
            catalog_context = context['catalogs_context'][catalog['label']]
            catalog.update(await self.normalize_data_from_item_job(
                catalog['id'], catalog_context))
            if catalog['cached']:
                return catalog
            else:
                # Not cached yet: expose the in-flight caching job/progress.
                catalog.update(catalog_context['normalized_progress'])
        return catalog

    @private
    async def common_validation(self, catalog, schema, data):
        """Validate `data['preferred_trains']` against the trains that
        actually exist in `catalog`; raises ValidationErrors on failure."""
        found_trains = set(catalog['trains'])
        diff = set(data['preferred_trains']) - found_trains
        verrors = ValidationErrors()
        if diff:
            verrors.add(
                f'{schema}.preferred_trains',
                f'{", ".join(diff)} trains were not found in catalog.')
        if not data['preferred_trains']:
            verrors.add(
                f'{schema}.preferred_trains',
                'At least 1 preferred train must be specified for a catalog.')

        verrors.check()

    @accepts(
        Patch(
            'catalog_entry',
            'catalog_create',
            ('add', Bool('force', default=False)),
            ('rm', {
                'name': 'id'
            }),
            ('rm', {
                'name': 'trains'
            }),
            ('rm', {
                'name': 'healthy'
            }),
            ('rm', {
                'name': 'error'
            }),
            ('rm', {
                'name': 'builtin'
            }),
            ('rm', {
                'name': 'location'
            }),
            ('rm', {
                'name': 'cached'
            }),
            ('rm', {
                'name': 'caching_progress'
            }),
            ('rm', {
                'name': 'caching_job'
            }),
        ), )
    @job(lock=lambda args: f'catalog_create_{args[0]["label"]}')
    async def do_create(self, job, data):
        """
        `catalog_create.preferred_trains` specifies trains which will be displayed in the UI directly for a user.
        """
        verrors = ValidationErrors()
        # We normalize the label
        data['label'] = data['label'].upper()

        if await self.query([['id', '=', data['label']]]):
            verrors.add('catalog_create.label',
                        'A catalog with specified label already exists',
                        errno=errno.EEXIST)

        # A repository/branch pair must be unique across catalogs too.
        if await self.query([['repository', '=', data['repository']],
                             ['branch', '=', data['branch']]]):
            for k in ('repository', 'branch'):
                verrors.add(
                    f'catalog_create.{k}',
                    'A catalog with same repository/branch already exists',
                    errno=errno.EEXIST)

        verrors.check()

        if not data['preferred_trains']:
            data['preferred_trains'] = ['stable']

        if not data.pop('force'):
            job.set_progress(40, f'Validating {data["label"]!r} catalog')
            # We will validate the catalog now to ensure it's valid wrt contents / format
            path = os.path.join(
                TMP_IX_APPS_DIR, 'validate_catalogs',
                convert_repository_to_path(data['repository'], data['branch']))
            try:
                await self.middleware.call('catalog.update_git_repository', {
                    **data, 'location': path
                }, True)
                await self.middleware.call(
                    'catalog.validate_catalog_from_path', path)
                await self.common_validation(
                    {
                        'trains':
                        await self.middleware.call(
                            'catalog.retrieve_train_names', path)
                    }, 'catalog_create', data)
            except CallError as e:
                verrors.add('catalog_create.label',
                            f'Failed to validate catalog: {e}')
            finally:
                # The scratch clone is always removed, pass or fail.
                await self.middleware.run_in_thread(shutil.rmtree,
                                                    path,
                                                    ignore_errors=True)
        else:
            job.set_progress(50, 'Skipping validation of catalog')

        verrors.check()

        job.set_progress(60, 'Completed Validation')

        await self.middleware.call('datastore.insert', self._config.datastore,
                                   data)
        job.set_progress(70, f'Successfully added {data["label"]!r} catalog')

        job.set_progress(80, f'Syncing {data["label"]} catalog')
        sync_job = await self.middleware.call('catalog.sync', data['label'])
        await sync_job.wait()
        if sync_job.error:
            raise CallError(
                f'Catalog was added successfully but failed to sync: {sync_job.error}'
            )

        job.set_progress(100, f'Successfully synced {data["label"]!r} catalog')

        return await self.get_instance(data['label'])

    @accepts(Str('id'),
             Dict('catalog_update', List('preferred_trains'), update=True))
    async def do_update(self, id, data):
        """Update the `preferred_trains` of catalog `id`."""
        # item_details are needed so common_validation can see which trains
        # actually exist in the catalog.
        catalog = await self.query([['id', '=', id]], {
            'extra': {
                'item_details': True
            },
            'get': True
        })
        await self.common_validation(catalog, 'catalog_update', data)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, data)

        return await self.get_instance(id)

    # NOTE(review): unlike the other CRUD methods this one has no @accepts
    # schema and is synchronous — confirm whether that is intentional.
    def do_delete(self, id):
        """Delete catalog `id`, its on-disk clone, related alerts and caches.

        Builtin catalogs cannot be deleted.
        """
        catalog = self.middleware.call_sync('catalog.get_instance', id)
        if catalog['builtin']:
            raise CallError('Builtin catalogs cannot be deleted')

        ret = self.middleware.call_sync('datastore.delete',
                                        self._config.datastore, id)

        if os.path.exists(catalog['location']):
            shutil.rmtree(catalog['location'], ignore_errors=True)

        # Let's delete any unhealthy alert if we had one
        self.middleware.call_sync('alert.oneshot_delete', 'CatalogNotHealthy',
                                  id)
        self.middleware.call_sync('alert.oneshot_delete', 'CatalogSyncFailed',
                                  id)

        # Remove cached content of the catalog in question so that if a catalog is created again
        # with same label but different repo/branch, we don't reuse old cache
        self.middleware.call_sync('cache.pop', get_cache_key(id, True))
        self.middleware.call_sync('cache.pop', get_cache_key(id, False))

        return ret

    @private
    async def official_catalog_label(self):
        """Return the label of the official (builtin) catalog."""
        return OFFICIAL_LABEL