Example #1
    def check_errors(self, func, *args, **kwargs):
        verrors = ValidationErrors()
        try:
            func(*args, **kwargs)
        except CatalogValidationErrors as e:
            verrors.extend(e)
        verrors.check()
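
The helper above shows the core pattern used by every snippet on this page: collect field-level problems into one ValidationErrors object and raise them together instead of failing on the first issue. Below is a minimal, self-contained sketch of that accumulate-and-raise behaviour; the classes are simplified stand-ins written for illustration, not the middlewared implementation.

class ValidationError(Exception):
    def __init__(self, attribute, errmsg, errno=None):
        super().__init__(errmsg)
        self.attribute, self.errmsg, self.errno = attribute, errmsg, errno


class ValidationErrors(Exception):
    """Simplified stand-in: accumulate errors, raise once at the end."""

    def __init__(self):
        super().__init__()
        self.errors = []

    def add(self, attribute, errmsg, errno=None):
        self.errors.append(ValidationError(attribute, errmsg, errno))

    def extend(self, other):
        self.errors.extend(other.errors)

    def __bool__(self):
        return bool(self.errors)

    def check(self):
        # Raise only when at least one error has been collected.
        if self:
            raise self


# Usage: every problem is reported in a single exception.
verrors = ValidationErrors()
verrors.add('demo.name', 'This field is required')
verrors.add('demo.port', 'Should be between 1 and 65535')
try:
    verrors.check()
except ValidationErrors as exc:
    print([e.attribute for e in exc.errors])  # ['demo.name', 'demo.port']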
Example #2
def validate_attributes(schema,
                        data,
                        additional_attrs=False,
                        attr_key="attributes",
                        dict_kwargs=None):
    from middlewared.schema import Dict, Error
    from middlewared.service import ValidationErrors
    verrors = ValidationErrors()
    dict_kwargs = dict_kwargs or {}

    schema = Dict("attributes",
                  *schema,
                  additional_attrs=additional_attrs,
                  **dict_kwargs)

    try:
        data[attr_key] = schema.clean(data[attr_key])
    except Error as e:
        verrors.add(e.attribute, e.errmsg, e.errno)

    try:
        schema.validate(data[attr_key])
    except ValidationErrors as e:
        verrors.extend(e)

    return verrors
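
A hedged sketch of how a helper like this is typically consumed: the caller builds a field list, merges the returned errors into its own collection, and raises once. The call shape mirrors the netdata example further down this page; Bool and Dict are assumed to be the middlewared.schema field types imported there.

from middlewared.schema import Bool, Dict
from middlewared.service import ValidationErrors

update_alarms = {'cpu_usage': {'enabled': True}}

verrors = ValidationErrors()
# validate_attributes() is the helper defined above; it returns a
# ValidationErrors instance that may be empty.
verrors.extend(
    validate_attributes(
        [Dict(key, Bool('enabled', required=True)) for key in update_alarms],
        {'attributes': update_alarms},
    )
)
verrors.check()  # no-op when nothing was collected, raises otherwise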
Example #3
def validate_schema(schema, data, additional_attrs=False, dict_kwargs=None):
    from middlewared.schema import Dict, Error
    from middlewared.service import ValidationErrors
    verrors = ValidationErrors()
    dict_kwargs = dict_kwargs or {}

    schema = Dict("attributes",
                  *schema,
                  additional_attrs=additional_attrs,
                  **dict_kwargs)

    try:
        schema.clean(data)
    except Error as e:
        verrors.add(e.attribute, e.errmsg, e.errno)
    except ValidationErrors as e:
        verrors.extend(e)
    else:
        try:
            schema.validate(data)
        except ValidationErrors as e:
            verrors.extend(e)

    for verror in verrors.errors:
        if not verror.attribute.startswith("attributes."):
            raise ValueError(
                f"Got an invalid attribute name: {verror.attribute!r}")

        verror.attribute = verror.attribute[len("attributes."):]

    return verrors
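
The loop at the end rewrites error paths so that callers see field names relative to their own schema instead of the internal "attributes" wrapper. A self-contained illustration of that renaming, with SimpleNamespace standing in for the error objects:

from types import SimpleNamespace

errors = [SimpleNamespace(attribute='attributes.port', errmsg='Should be between 1 and 65535')]
prefix = 'attributes.'
for verror in errors:
    if not verror.attribute.startswith(prefix):
        raise ValueError(f'Got an invalid attribute name: {verror.attribute!r}')
    # Drop the wrapper prefix so the caller sees plain field names.
    verror.attribute = verror.attribute[len(prefix):]

assert errors[0].attribute == 'port'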
Example #4
    async def do_update(self, data):
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        for attr, minlen, maxlen in (
            ('access_key', 5, 20),
            ('secret_key', 8, 40),
        ):
            curlen = len(new.get(attr, ''))
            if curlen < minlen or curlen > maxlen:
                verrors.add(
                    f's3_update.{attr}',
                    f'Attribute should be {minlen} to {maxlen} in length')

        if not new['storage_path']:
            verrors.add('s3_update.storage_path', 'Storage path is required')
        else:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   's3_update.storage_path',
                                                   new['storage_path'])

            if not verrors:
                if new['storage_path'].rstrip('/').count('/') < 3:
                    verrors.add(
                        's3_update.storage_path',
                        'Top level datasets are not allowed. i.e /mnt/tank/dataset is allowed'
                    )
                else:
                    # If the storage_path does not exist, let's create it
                    if not os.path.exists(new['storage_path']):
                        os.makedirs(new['storage_path'])

        if new['certificate']:
            verrors.extend(
                (await
                 self.middleware.call('certificate.cert_services_validation',
                                      new['certificate'],
                                      's3_update.certificate', False)))

        if verrors:
            raise verrors

        new['disks'] = new.pop('storage_path')

        await self._update_service(old, new)

        if (await self.middleware.call('filesystem.stat',
                                       new['disks']))['user'] != 'minio':
            await self.middleware.call('notifier.winacl_reset', new['disks'],
                                       'minio', 'minio')

        return await self.config()
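
The key-length check at the top of do_update is a reusable bounds pattern: iterate over (field, min, max) tuples and record one error per field that falls outside its range. A standalone sketch of the same idea; only the limits are taken from the example, the helper name is illustrative.

def check_lengths(new, verrors, schema='s3_update'):
    # `verrors` is any object with an add(attribute, message) method,
    # e.g. a ValidationErrors instance.
    # (attribute, minimum length, maximum length), as in the S3 example above.
    for attr, minlen, maxlen in (('access_key', 5, 20), ('secret_key', 8, 40)):
        curlen = len(new.get(attr, ''))
        if not minlen <= curlen <= maxlen:
            verrors.add(f'{schema}.{attr}',
                        f'Attribute should be {minlen} to {maxlen} in length')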
Example #5
    async def do_update(self, data):
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if not ((new["passiveportsmin"] == 0) == (new["passiveportsmax"]
                                                  == 0)):
            verrors.add(
                "passiveportsmin",
                "passiveportsmin and passiveportsmax should be both zero or non-zero"
            )
        if not ((new["passiveportsmin"] == 0 and new["passiveportsmax"] == 0)
                or (new["passiveportsmax"] > new["passiveportsmin"])):
            verrors.add(
                "ftp_update.passiveportsmax",
                "When specified, should be greater than passiveportsmin")

        if new["onlyanonymous"] and not new["anonpath"]:
            verrors.add("ftp_update.anonpath",
                        "This field is required for anonymous login")

        if new["anonpath"]:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   "ftp_update.anonpath",
                                                   new["anonpath"])

        if new["tls"]:
            if not new["ssltls_certificate"]:
                verrors.add(
                    "ftp_update.ssltls_certificate",
                    "Please provide a valid certificate id when TLS is enabled"
                )
            else:
                verrors.extend((await self.middleware.call(
                    "certificate.cert_services_validation",
                    new["ssltls_certificate"], "ftp_update.ssltls_certificate",
                    False)))

        if new["masqaddress"]:
            await resolve_hostname(self.middleware, verrors,
                                   "ftp_update.masqaddress",
                                   new["masqaddress"])

        if verrors:
            raise verrors

        await self._update_service(old, new)

        if not old['tls'] and new['tls']:
            await self.middleware.call('service.start', 'ssl')

        return new
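
The first passive-port condition reads densely: `(a == 0) == (b == 0)` is true exactly when both ports are zero or both are non-zero, so negating it flags the mixed case. A quick truth-table check of that equivalence:

# Both zero or both non-zero -> valid; exactly one zero -> validation error.
for pmin, pmax, valid in ((0, 0, True), (0, 1024, False), (1024, 0, False), (1024, 2048, True)):
    assert ((pmin == 0) == (pmax == 0)) is valid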
Example #6
def validate_attributes(schema, data, additional_attrs=False):
    verrors = ValidationErrors()

    schema = Dict("attributes", *schema, additional_attrs=additional_attrs)

    try:
        data["attributes"] = schema.clean(data["attributes"])
    except Error as e:
        verrors.add(e.attribute, e.errmsg, e.errno)

    try:
        schema.validate(data["attributes"])
    except ValidationErrors as e:
        verrors.extend(e)

    return verrors
Example #7
    async def lock(self, job, oid, passphrase):
        """
        Lock encrypted pool `id`.
        """
        pool = await self.middleware.call('pool.get_instance', oid)

        verrors = ValidationErrors()

        if pool['encrypt'] == 0:
            verrors.add('id', 'Pool is not encrypted.')
        elif pool['status'] == 'OFFLINE':
            verrors.add('id', 'Pool already locked.')

        if not verrors:
            verrors.extend(await
                           self.middleware.call('pool.pool_lock_pre_check',
                                                pool, passphrase))

        if verrors:
            raise verrors

        await self.middleware.call_hook('pool.pre_lock', pool=pool)

        sysds = await self.middleware.call('systemdataset.config')
        if sysds['pool'] == pool['name']:
            sysds_update_job = await self.middleware.call(
                'systemdataset.update', {
                    'pool': None,
                    'pool_exclude': pool['name'],
                })
            await sysds_update_job.wait()
            if sysds_update_job.error:
                raise CallError(sysds_update_job.error)

        await self.middleware.call('zfs.pool.export', pool['name'])

        for ed in await self.middleware.call(
                'datastore.query', 'storage.encrypteddisk',
            [('encrypted_volume', '=', pool['id'])]):
            await self.middleware.call('disk.geli_detach_single',
                                       ed['encrypted_provider'])

        await self.middleware.call_hook('pool.post_lock', pool=pool)
        await self.middleware.call('service.restart', 'system_datasets')
        return True
Example #8
def validate_attributes(schema, data, additional_attrs=False, attr_key="attributes"):
    from middlewared.schema import Dict, Error
    from middlewared.service import ValidationErrors
    verrors = ValidationErrors()

    schema = Dict("attributes", *schema, additional_attrs=additional_attrs)

    try:
        data[attr_key] = schema.clean(data[attr_key])
    except Error as e:
        verrors.add(e.attribute, e.errmsg, e.errno)

    try:
        schema.validate(data[attr_key])
    except ValidationErrors as e:
        verrors.extend(e)

    return verrors
Example #9
    async def validate_device(self, device, old=None, vm_instance=None):
        # We allow vm_instance to be passed for cases where VM devices are being updated via VM and
        # the device checks should be performed with the modified vm_instance object not the one db holds
        # vm_instance should be provided at all times when handled by VMService, if VMDeviceService is interacting,
        # then it means the device is configured with a VM and we can retrieve the VM's data from db
        if not vm_instance:
            vm_instance = await self.middleware.call('vm.get_instance',
                                                     device['vm'])

        verrors = ValidationErrors()
        schema = self.DEVICE_ATTRS.get(device['dtype'])
        if schema:
            try:
                device['attributes'] = schema.clean(device['attributes'])
            except Error as e:
                verrors.add(f'attributes.{e.attribute}', e.errmsg, e.errno)

            try:
                schema.validate(device['attributes'])
            except ValidationErrors as e:
                verrors.extend(e)

            if verrors:
                raise verrors

        # vm_instance usages SHOULD NOT rely on device `id` field to uniquely identify objects as it's possible
        # VMService is creating a new VM with devices and the id's don't exist yet

        if device.get('dtype') == 'DISK':
            create_zvol = device['attributes'].get('create_zvol')
            path = device['attributes'].get('path')
            if create_zvol:
                for attr in ('zvol_name', 'zvol_volsize'):
                    if not device['attributes'].get(attr):
                        verrors.add(f'attributes.{attr}',
                                    'This field is required.')
                parentzvol = (device['attributes'].get('zvol_name')
                              or '').rsplit('/', 1)[0]
                if parentzvol and not await self.middleware.call(
                        'pool.dataset.query', [('id', '=', parentzvol)]):
                    verrors.add(
                        'attributes.zvol_name',
                        f'Parent dataset {parentzvol} does not exist.',
                        errno.ENOENT)
                zvol = await self.middleware.call(
                    'pool.dataset.query',
                    [['id', '=', device['attributes'].get('zvol_name')]])
                if not verrors and create_zvol and zvol:
                    verrors.add(
                        'attributes.zvol_name',
                        f'{device["attributes"]["zvol_name"]} already exists.')
                elif zvol and zvol[0]['locked']:
                    verrors.add('attributes.zvol_name',
                                f'{zvol[0]["id"]} is locked.')
            elif not path:
                verrors.add('attributes.path', 'Disk path is required.')
            elif path and not os.path.exists(path):
                verrors.add('attributes.path',
                            f'Disk path {path} does not exist.', errno.ENOENT)

            if path and len(path) > 63:
                # SPECNAMELEN is not long enough (63) in 12, 13 will be 255
                verrors.add(
                    'attributes.path',
                    f'Disk path {path} is too long, reduce to less than 63 characters',
                    errno.ENAMETOOLONG)
            if not await self.disk_uniqueness_integrity_check(
                    device, vm_instance):
                verrors.add(
                    'attributes.path',
                    f'{vm_instance["name"]} has "{path}" already configured')
        elif device.get('dtype') == 'RAW':
            path = device['attributes'].get('path')
            exists = device['attributes'].get('exists', True)
            if not path:
                verrors.add('attributes.path', 'Path is required.')
            else:
                if exists and not os.path.exists(path):
                    verrors.add('attributes.path', 'Path must exist.')
                if not exists:
                    if os.path.exists(path):
                        verrors.add('attributes.path', 'Path must not exist.')
                    elif not device['attributes'].get('size'):
                        verrors.add(
                            'attributes.size',
                            'Please provide a valid size for the raw file.')
                if (old and old['attributes'].get('size') !=
                        device['attributes'].get('size')
                        and not device['attributes'].get('size')):
                    verrors.add(
                        'attributes.size',
                        'Please provide a valid size for the raw file.')
                await check_path_resides_within_volume(
                    verrors,
                    self.middleware,
                    'attributes.path',
                    path,
                )
                if not await self.disk_uniqueness_integrity_check(
                        device, vm_instance):
                    verrors.add(
                        'attributes.path',
                        f'{vm_instance["name"]} has "{path}" already configured'
                    )
        elif device.get('dtype') == 'CDROM':
            path = device['attributes'].get('path')
            if not path:
                verrors.add('attributes.path', 'Path is required.')
            elif not os.path.exists(path):
                verrors.add('attributes.path',
                            f'Unable to locate CDROM device at {path}')
            elif not await self.disk_uniqueness_integrity_check(
                    device, vm_instance):
                verrors.add(
                    'attributes.path',
                    f'{vm_instance["name"]} has "{path}" already configured')
        elif device.get('dtype') == 'NIC':
            nic = device['attributes'].get('nic_attach')
            if nic:
                nic_choices = await self.middleware.call(
                    'vm.device.nic_attach_choices')
                if nic not in nic_choices:
                    verrors.add('attributes.nic_attach', 'Not a valid choice.')
            await self.failover_nic_check(device, verrors, 'attributes')
        elif device.get('dtype') == 'PCI':
            pptdev = device['attributes'].get('pptdev')
            if osc.IS_FREEBSD and not RE_PPTDEV_NAME.findall(pptdev):
                verrors.add('attribute.pptdev',
                            'Please specify correct PCI device for passthru.')
            if pptdev not in await self.middleware.call(
                    'vm.device.pptdev_choices'):
                verrors.add(
                    'attribute.pptdev',
                    'Not a valid choice. The PCI device is not available for passthru.'
                )
            if not await self.middleware.call('vm.device.iommu_enabled'):
                verrors.add('attribute.pptdev', 'IOMMU support is required.')
        elif device.get('dtype') == 'VNC':
            if vm_instance:
                if osc.IS_FREEBSD and vm_instance['bootloader'] != 'UEFI':
                    verrors.add('dtype',
                                'VNC only works with UEFI bootloader.')
                if all(not d.get('id') for d in vm_instance['devices']):
                    # VM is being created so devices don't have an id yet. We can just count the number of VNC devices
                    # and add a validation error if it's more than one
                    if len([
                            d for d in vm_instance['devices']
                            if d['dtype'] == 'VNC'
                    ]) > 1:
                        verrors.add('dtype',
                                    'Only one VNC device is allowed per VM')
                elif any(d['dtype'] == 'VNC' and d['id'] != device.get('id')
                         for d in vm_instance['devices']):
                    verrors.add('dtype',
                                'Only one VNC device is allowed per VM')
            all_ports = [
                d['attributes'].get('vnc_port')
                for d in (await self.middleware.call('vm.device.query',
                                                     [['dtype', '=', 'VNC']]))
                if d['id'] != device.get('id')
            ]
            if device['attributes'].get('vnc_port'):
                if device['attributes']['vnc_port'] in all_ports:
                    verrors.add('attributes.vnc_port',
                                'Specified vnc port is already in use')
            else:
                device['attributes']['vnc_port'] = (
                    await
                    self.middleware.call('vm.vnc_port_wizard'))['vnc_port']

        if device['dtype'] in ('RAW', 'DISK') and device['attributes'].get('physical_sectorsize')\
                and not device['attributes'].get('logical_sectorsize'):
            verrors.add(
                'attributes.logical_sectorsize',
                'This field must be provided when physical_sectorsize is specified.'
            )

        if verrors:
            raise verrors

        return device
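
validate_device branches on device['dtype'] for every device class. One way such a method is often kept manageable is a dispatch table mapping dtype to a small per-type checker; the sketch below is purely illustrative and only hints at the DISK and CDROM checks from the example above.

import os

def _validate_disk(device, verrors):
    path = device['attributes'].get('path')
    if not device['attributes'].get('create_zvol') and not path:
        verrors.add('attributes.path', 'Disk path is required.')

def _validate_cdrom(device, verrors):
    path = device['attributes'].get('path')
    if not path:
        verrors.add('attributes.path', 'Path is required.')
    elif not os.path.exists(path):
        verrors.add('attributes.path', f'Unable to locate CDROM device at {path}')

DTYPE_VALIDATORS = {'DISK': _validate_disk, 'CDROM': _validate_cdrom}

def validate_typed_device(device, verrors):
    # Look up the checker for this device type; unknown types are skipped.
    handler = DTYPE_VALIDATORS.get(device.get('dtype'))
    if handler:
        handler(device, verrors)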
Example #10
    async def validate_general_settings(self, data, schema):
        verrors = ValidationErrors()

        language = data.get('language')
        if language:
            system_languages = self.language_choices()
            if language not in system_languages.keys():
                verrors.add(
                    f'{schema}.language',
                    f'Specified "{language}" language not found, kindly correct it'
                )

        # kbd map needs work

        timezone = data.get('timezone')
        if timezone:
            timezones = await self.timezone_choices()
            if timezone not in timezones:
                verrors.add(f'{schema}.timezone',
                            'Please select a correct timezone')

        ip_addresses = await self.middleware.call('interface.ip_in_use')
        ip4_addresses_list = [
            alias_dict['address'] for alias_dict in ip_addresses
            if alias_dict['type'] == 'INET'
        ]
        ip6_addresses_list = [
            alias_dict['address'] for alias_dict in ip_addresses
            if alias_dict['type'] == 'INET6'
        ]

        ip4_addresses = data.get('ui_address')
        for ip4_address in ip4_addresses:
            if (ip4_address and ip4_address != '0.0.0.0'
                    and ip4_address not in ip4_addresses_list):
                verrors.add(
                    f'{schema}.ui_address',
                    f'{ip4_address} ipv4 address is not associated with this machine'
                )

        ip6_addresses = data.get('ui_v6address')
        for ip6_address in ip6_addresses:
            if (ip6_address and ip6_address != '::'
                    and ip6_address not in ip6_addresses_list):
                verrors.add(
                    f'{schema}.ui_v6address',
                    f'{ip6_address} ipv6 address is not associated with this machine'
                )

        for key, wildcard, ips in [('ui_address', '0.0.0.0', ip4_addresses),
                                   ('ui_v6address', '::', ip6_addresses)]:
            if wildcard in ips and len(ips) > 1:
                verrors.add(
                    f'{schema}.{key}',
                    f'When "{wildcard}" has been selected, selection of other addresses is not allowed'
                )

        syslog_server = data.get('syslogserver')
        if syslog_server:
            match = re.match(r"^[\w\.\-]+(\:\d+)?$", syslog_server)
            if not match:
                verrors.add(f'{schema}.syslogserver',
                            'Invalid syslog server format')
            elif ':' in syslog_server:
                port = int(syslog_server.split(':')[-1])
                if port < 0 or port > 65535:
                    verrors.add(f'{schema}.syslogserver',
                                'Port specified should be between 0 - 65535')

        certificate_id = data.get('ui_certificate')
        cert = await self.middleware.call('certificate.query',
                                          [["id", "=", certificate_id]])
        if not cert:
            verrors.add(
                f'{schema}.ui_certificate',
                'Please specify a valid certificate which exists in the system'
            )
        else:
            cert = cert[0]
            verrors.extend(await self.middleware.call(
                'certificate.cert_services_validation', certificate_id,
                f'{schema}.ui_certificate', False))

            if cert['fingerprint']:
                syslog.openlog(logoption=syslog.LOG_PID,
                               facility=syslog.LOG_USER)
                syslog.syslog(
                    syslog.LOG_ERR,
                    'Fingerprint of the certificate used in UI : ' +
                    cert['fingerprint'])
                syslog.closelog()

        return verrors
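
The ui_address/ui_v6address handling enforces two rules: every selected address must actually be configured on the machine, and a wildcard address cannot be mixed with specific ones. A minimal standalone version of those two checks (the addresses are made up):

def address_problems(selected, configured, wildcard):
    """Return human-readable problems for a list of selected bind addresses."""
    problems = [
        f'{addr} is not associated with this machine'
        for addr in selected
        if addr and addr != wildcard and addr not in configured
    ]
    if wildcard in selected and len(selected) > 1:
        problems.append(f'When "{wildcard}" has been selected, selection of other addresses is not allowed')
    return problems

assert address_problems(['0.0.0.0'], {'192.0.2.10'}, '0.0.0.0') == []
assert address_problems(['0.0.0.0', '192.0.2.10'], {'192.0.2.10'}, '0.0.0.0') != []
assert address_problems(['198.51.100.7'], {'192.0.2.10'}, '0.0.0.0') != []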
Example #11
    async def do_update(self, data):
        """
        Update S3 Service Configuration.

        `access_key` must only contain alphanumeric characters and should be between 5 and 20 characters.

        `secret_key` must only contain alphanumeric characters and should be between 8 and 40 characters.

        `browser` when set, enables the web user interface for the S3 Service.

        `certificate` is a valid certificate id which exists in the system. This is used to enable secure
        S3 connections.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        for attr, minlen, maxlen in (
            ('access_key', 5, 20),
            ('secret_key', 8, 40),
        ):
            curlen = len(new.get(attr, ''))
            if curlen < minlen or curlen > maxlen:
                verrors.add(
                    f's3_update.{attr}',
                    f'Attribute should be {minlen} to {maxlen} in length')

        if not new['storage_path']:
            verrors.add('s3_update.storage_path', 'Storage path is required')
        else:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   's3_update.storage_path',
                                                   new['storage_path'])

            if not verrors:
                if new['storage_path'].rstrip('/').count('/') < 3:
                    verrors.add(
                        's3_update.storage_path',
                        'Top level datasets are not allowed. i.e /mnt/tank/dataset is allowed'
                    )
                else:
                    # If the storage_path does not exist, let's create it
                    if not os.path.exists(new['storage_path']):
                        os.makedirs(new['storage_path'])

        if new['certificate']:
            verrors.extend(
                (await
                 self.middleware.call('certificate.cert_services_validation',
                                      new['certificate'],
                                      's3_update.certificate', False)))

        if verrors:
            raise verrors

        new['disks'] = new.pop('storage_path')

        await self._update_service(old, new)

        if (await self.middleware.call('filesystem.stat',
                                       new['disks']))['user'] != 'minio':
            await self.middleware.call(
                'filesystem.setperm', {
                    'path':
                    new['disks'],
                    'mode':
                    str(775),
                    'uid': (await self.middleware.call(
                        'dscache.get_uncached_user', 'minio'))['pw_uid'],
                    'gid': (await self.middleware.call(
                        'dscache.get_uncached_group', 'minio'))['gr_gid'],
                    'options': {
                        'recursive': True,
                        'traverse': False
                    }
                })

        return await self.config()
Example #12
    async def __validate_fields(self, schema, data):
        verrors = ValidationErrors()

        serial_choice = data.get('serialport')
        if data.get('serialconsole'):
            if not serial_choice:
                verrors.add(
                    f'{schema}.serialport',
                    'Please specify a serial port when serial console option is checked'
                )
            elif serial_choice not in await self.middleware.call(
                    'system.advanced.serial_port_choices'):
                verrors.add(
                    f'{schema}.serialport',
                    'Serial port specified has not been identified by the system'
                )

        ups_port = (await self.middleware.call('ups.config'))['port']
        if not verrors and os.path.join('/dev', serial_choice
                                        or '') == ups_port:
            verrors.add(
                f'{schema}.serialport',
                'Serial port must be different than the port specified for UPS Service'
            )

        syslog_server = data.get('syslogserver')
        if syslog_server:
            match = re.match(r"^[\w\.\-]+(\:\d+)?$", syslog_server)
            if not match:
                verrors.add(f'{schema}.syslogserver',
                            'Invalid syslog server format')
            elif ':' in syslog_server:
                port = int(syslog_server.split(':')[-1])
                if port < 0 or port > 65535:
                    verrors.add(f'{schema}.syslogserver',
                                'Port must be in the range of 0 to 65535.')

        if data['syslog_transport'] == 'TLS':
            if not data['syslog_tls_certificate_authority']:
                verrors.add(
                    f'{schema}.syslog_tls_certificate_authority',
                    'This is required when using TLS as syslog transport')
            ca_cert = await self.middleware.call(
                'certificateauthority.query',
                [['id', '=', data['syslog_tls_certificate_authority']]])
            if not ca_cert:
                verrors.add(f'{schema}.syslog_tls_certificate_authority',
                            'Unable to locate specified CA')
            elif ca_cert[0]['revoked']:
                verrors.add(f'{schema}.syslog_tls_certificate_authority',
                            'Specified CA has been revoked')

            if data['syslog_tls_certificate']:
                verrors.extend(await self.middleware.call(
                    'certificate.cert_services_validation',
                    data['syslog_tls_certificate'],
                    f'{schema}.syslog_tls_certificate', False))

        if data['isolated_gpu_pci_ids']:
            available = set()
            critical_gpus = set()
            for gpu in await self.middleware.call('device.get_gpus'):
                available.add(gpu['addr']['pci_slot'])
                if gpu['uses_system_critical_devices']:
                    critical_gpus.add(gpu['addr']['pci_slot'])

            provided = set(data['isolated_gpu_pci_ids'])
            not_available = provided - available
            cannot_isolate = provided & critical_gpus
            if not_available:
                verrors.add(
                    f'{schema}.isolated_gpu_pci_ids',
                    f'{", ".join(not_available)} GPU pci slot(s) are not available or a GPU is not configured.'
                )

            if cannot_isolate:
                verrors.add(
                    f'{schema}.isolated_gpu_pci_ids',
                    f'{", ".join(cannot_isolate)} GPU pci slot(s) consists of devices '
                    'which cannot be isolated from host.')

            if len(available - provided) < 1:
                verrors.add(
                    f'{schema}.isolated_gpu_pci_ids',
                    'A minimum of 1 GPU is required for the host to ensure it functions as desired.'
                )

        for ch in ('\n', '"'):
            if ch in data['kernel_extra_options']:
                verrors.add('kernel_extra_options', f'{ch!r} not allowed')

        return verrors, data
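
The isolated_gpu_pci_ids block is set arithmetic: requested slots that do not exist, requested slots that overlap devices critical to the host, and a requirement that at least one GPU stays un-isolated. A small self-contained check of the same logic (the PCI slot strings are invented):

available = {'0000:01:00.0', '0000:02:00.0', '0000:03:00.0'}
critical_gpus = {'0000:01:00.0'}
provided = {'0000:01:00.0', '0000:04:00.0'}

not_available = provided - available       # requested but not present on the system
cannot_isolate = provided & critical_gpus  # requested but needed by the host
left_for_host = available - provided       # GPUs the host keeps

assert not_available == {'0000:04:00.0'}
assert cannot_isolate == {'0000:01:00.0'}
assert len(left_for_host) >= 1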
Example #13
    async def validate_general_settings(self, data, schema):
        verrors = ValidationErrors()

        language = data.get('language')
        if language:
            system_languages = self.language_choices()
            if language not in system_languages.keys():
                verrors.add(
                    f'{schema}.language',
                    f'Specified "{language}" language not found, kindly correct it'
                )

        # kbd map needs work

        timezone = data.get('timezone')
        if timezone:
            timezones = await self.timezone_choices()
            if timezone not in timezones:
                verrors.add(
                    f'{schema}.timezone',
                    'Please select a correct timezone'
                )

        ip_addresses = await self.middleware.call(
            'interface.ip_in_use'
        )
        ip4_addresses_list = [alias_dict['address'] for alias_dict in ip_addresses if alias_dict['type'] == 'INET']
        ip6_addresses_list = [alias_dict['address'] for alias_dict in ip_addresses if alias_dict['type'] == 'INET6']

        ip4_addresses = data.get('ui_address')
        for ip4_address in ip4_addresses:
            if (
                ip4_address and
                ip4_address != '0.0.0.0' and
                ip4_address not in ip4_addresses_list
            ):
                verrors.add(
                    f'{schema}.ui_address',
                    f'{ip4_address} ipv4 address is not associated with this machine'
                )

        ip6_addresses = data.get('ui_v6address')
        for ip6_address in ip6_addresses:
            if (
                ip6_address and
                ip6_address != '::' and
                ip6_address not in ip6_addresses_list
            ):
                verrors.add(
                    f'{schema}.ui_v6address',
                    f'{ip6_address} ipv6 address is not associated with this machine'
                )

        for key, wildcard, ips in [('ui_address', '0.0.0.0', ip4_addresses), ('ui_v6address', '::', ip6_addresses)]:
            if wildcard in ips and len(ips) > 1:
                verrors.add(
                    f'{schema}.{key}',
                    f'When "{wildcard}" has been selected, selection of other addresses is not allowed'
                )

        syslog_server = data.get('syslogserver')
        if syslog_server:
            match = re.match(r"^[\w\.\-]+(\:\d+)?$", syslog_server)
            if not match:
                verrors.add(
                    f'{schema}.syslogserver',
                    'Invalid syslog server format'
                )
            elif ':' in syslog_server:
                port = int(syslog_server.split(':')[-1])
                if port < 0 or port > 65535:
                    verrors.add(
                        f'{schema}.syslogserver',
                        'Port specified should be between 0 - 65535'
                    )

        certificate_id = data.get('ui_certificate')
        cert = await self.middleware.call(
            'certificate.query',
            [["id", "=", certificate_id]]
        )
        if not cert:
            verrors.add(
                f'{schema}.ui_certificate',
                'Please specify a valid certificate which exists in the system'
            )
        else:
            cert = cert[0]
            verrors.extend(
                await self.middleware.call(
                    'certificate.cert_services_validation', certificate_id, f'{schema}.ui_certificate', False
                )
            )

            if cert['fingerprint']:
                syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_USER)
                syslog.syslog(syslog.LOG_ERR, 'Fingerprint of the certificate used in UI : ' + cert['fingerprint'])
                syslog.closelog()

        return verrors
Example #14
    async def validate_attrs(self, data):
        verrors = ValidationErrors()

        additional_params = data.get('additional_params')
        if additional_params:
            # Let's be very generic here and introduce very basic validation
            # Expected format is as following
            # [ipv6.icmpneighbor]
            #   history = 86400
            #   enabled = yes
            #
            # While we are here, we will also introduce basic formatting to the file to ensure
            # that we can make it as compliant as possible

            param_str = ''
            for i in additional_params.split('\n'):
                i = i.strip()
                if not i:
                    continue
                if i.startswith('#'):
                    # Let's not validate this
                    if i.replace('#', '').startswith('['):
                        param_str += f'\n\n{i}'
                    else:
                        param_str += f'\n\t{i}'

                    continue

                if i.startswith('[') and not i.endswith(']'):
                    verrors.add(
                        'netdata_update.additional_params',
                        f'Please correct format for {i}. i.e [system.intr]'
                    )
                elif not i.startswith('[') and '=' not in i:
                    verrors.add(
                        'netdata_update.additional_params',
                        f'Please correct format for {i}. i.e enabled = yes'
                    )

                if i.startswith('['):
                    param_str += f'\n\n{i}'
                else:
                    param_str += f'\n\t{i}'

            data['additional_params'] = param_str + '\n'

        bind_to_ips = data.get('bind')
        if bind_to_ips:
            valid_ips = [ip['address'] for ip in await self.middleware.call('interface.ip_in_use')]
            valid_ips.extend(['127.0.0.1', '::1', '0.0.0.0', '::'])

            for bind_ip in bind_to_ips:
                if bind_ip not in valid_ips:
                    verrors.add(
                        'netdata_update.bind',
                        f'Invalid {bind_ip} bind IP'
                    )
        else:
            verrors.add(
                'netdata_update.bind',
                'This field is required'
            )

        update_alarms = data.pop('update_alarms', {})
        valid_alarms = self._alarms
        if update_alarms:
            for alarm in update_alarms:
                if alarm not in valid_alarms:
                    verrors.add(
                        'netdata_update.alarms',
                        f'{alarm} not a valid alarm'
                    )

            verrors.extend(
                validate_attributes(
                    [Dict(key, Bool('enabled', required=True)) for key in update_alarms],
                    {'attributes': update_alarms}
                )
            )

        # Validating streaming metrics now
        stream_mode = data.get('stream_mode')
        if stream_mode == 'SLAVE':
            for key in ('api_key', 'destination'):
                if not data.get(key):
                    verrors.add(
                        f'netdata_update.{key}',
                        f'{key} is required with stream mode as SLAVE'
                    )

            destinations = data.get('destination')
            if destinations:
                ip_addr = IpAddress()
                port = Port()
                for dest in destinations:
                    ip = dest.split(':')[0]
                    try:
                        ip_addr(ip)
                    except ValueError as e:
                        verrors.add(
                            'netdata_update.destination',
                            str(e)
                        )
                    else:
                        if ':' in dest:
                            try:
                                port(int(dest.split(':')[1]))
                            except ValueError as e:
                                verrors.add(
                                    'netdata_update.destination',
                                    f'Not a valid port: {e}'
                                )
        elif stream_mode == 'MASTER':
            for key in ('allow_from', 'api_key'):
                if not data.get(key):
                    verrors.add(
                        f'netdata_update.{key}',
                        f'{key} is required with stream mode as MASTER'
                    )

        verrors.check()

        data['alarms'].update(update_alarms)

        return data
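
In SLAVE stream mode each destination is split on ':' and the host and optional port are validated separately. A standalone sketch of the same parsing that uses the standard-library ipaddress module instead of the IpAddress/Port validators; like the original split(':'), it assumes IPv4 host:port entries.

import ipaddress

def destination_problems(destinations):
    """Return a list of (destination, message) tuples for invalid entries."""
    problems = []
    for dest in destinations:
        host, _, port = dest.partition(':')
        try:
            ipaddress.ip_address(host)
        except ValueError as e:
            problems.append((dest, str(e)))
            continue
        if port:
            try:
                port_number = int(port)
            except ValueError:
                problems.append((dest, f'Not a valid port: {port}'))
                continue
            if not 0 < port_number < 65536:
                problems.append((dest, f'Not a valid port: {port}'))
    return problems

assert destination_problems(['192.0.2.1:19999']) == []
assert destination_problems(['not-an-ip', '192.0.2.1:99999']) != []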
Example #15
    async def do_update(self, data):
        """
        Update S3 Service Configuration.

        `access_key` must only contain alphanumeric characters and should be between 5 and 20 characters.

        `secret_key` must only contain alphanumeric characters and should be between 8 and 40 characters.

        `browser` when set, enables the web user interface for the S3 Service.

        `certificate` is a valid certificate id which exists in the system. This is used to enable secure
        S3 connections.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        for attr, minlen, maxlen in (
            ('access_key', 5, 20),
            ('secret_key', 8, 40),
        ):
            curlen = len(new.get(attr, ''))
            if curlen < minlen or curlen > maxlen:
                verrors.add(
                    f's3_update.{attr}', f'Attribute should be {minlen} to {maxlen} in length'
                )

        if not new['storage_path']:
            verrors.add('s3_update.storage_path', 'Storage path is required')
        else:
            await check_path_resides_within_volume(
                verrors, self.middleware, 's3_update.storage_path', new['storage_path']
            )

            if not verrors:
                if new['storage_path'].rstrip('/').count('/') < 3:
                    verrors.add(
                        's3_update.storage_path',
                        'Top level datasets are not allowed. i.e /mnt/tank/dataset is allowed'
                    )
                else:
                    # If the storage_path does not exist, let's create it
                    if not os.path.exists(new['storage_path']):
                        os.makedirs(new['storage_path'])

        if new['certificate']:
            verrors.extend((await self.middleware.call(
                'certificate.cert_services_validation', new['certificate'], 's3_update.certificate', False
            )))

        if verrors:
            raise verrors

        new['disks'] = new.pop('storage_path')

        await self._update_service(old, new)

        if (await self.middleware.call('filesystem.stat', new['disks']))['user'] != 'minio':
            await self.middleware.call('notifier.winacl_reset', new['disks'], 'minio', 'minio')

        return await self.config()
Example #16
    async def validate_attrs(self, data):
        verrors = ValidationErrors()

        additional_params = data.get('additional_params')
        if additional_params:
            # Let's be very generic here and introduce very basic validation
            # Expected format is as following
            # [ipv6.icmpneighbor]
            #   history = 86400
            #   enabled = yes
            #
            # While we are here, we will also introduce basic formatting to the file to ensure
            # that we can make it as compliant as possible

            param_str = ''
            for i in additional_params.split('\n'):
                i = i.strip()
                if not i:
                    continue
                if i.startswith('#'):
                    # Let's not validate this
                    if i.replace('#', '').startswith('['):
                        param_str += f'\n\n{i}'
                    else:
                        param_str += f'\n\t{i}'

                    continue

                if i.startswith('[') and not i.endswith(']'):
                    verrors.add(
                        'netdata_update.additional_params',
                        f'Please correct format for {i}. i.e [system.intr]')
                elif not i.startswith('[') and '=' not in i:
                    verrors.add(
                        'netdata_update.additional_params',
                        f'Please correct format for {i}. i.e enabled = yes')

                if i.startswith('['):
                    param_str += f'\n\n{i}'
                else:
                    param_str += f'\n\t{i}'

            data['additional_params'] = param_str + '\n'

        bind_to_ips = data.get('bind')
        if bind_to_ips:
            valid_ips = [
                ip['address']
                for ip in await self.middleware.call('interfaces.ip_in_use')
            ]
            valid_ips.extend(['127.0.0.1', '::1', '0.0.0.0', '::'])

            for bind_ip in bind_to_ips:
                if bind_ip not in valid_ips:
                    verrors.add('netdata_update.bind',
                                f'Invalid {bind_ip} bind IP')
        else:
            verrors.add('netdata_update.bind', 'This field is required')

        update_alarms = data.pop('update_alarms', {})
        valid_alarms = self._alarms
        if update_alarms:
            for alarm in update_alarms:
                if alarm not in valid_alarms:
                    verrors.add('netdata_update.alarms',
                                f'{alarm} not a valid alarm')

            verrors.extend(
                validate_attributes([
                    Dict(key, Bool('enabled', required=True))
                    for key in update_alarms
                ], {'attributes': update_alarms}))

        # Validating streaming metrics now
        stream_mode = data.get('stream_mode')
        if stream_mode == 'SLAVE':
            for key in ('api_key', 'destination'):
                if not data.get(key):
                    verrors.add(
                        f'netdata_update.{key}',
                        f'{key} is required with stream mode as SLAVE')

            destinations = data.get('destination')
            if destinations:
                ip_addr = IpAddress()
                port = Port()
                for dest in destinations:
                    ip = dest.split(':')[0]
                    try:
                        ip_addr(ip)
                    except ValueError as e:
                        verrors.add('netdata_update.destination', str(e))
                    else:
                        if ':' in dest:
                            try:
                                port(dest.split(':')[1])
                            except ValueError as e:
                                verrors.add('netdata_update.destination',
                                            f'Not a valid port: {e}')
        elif stream_mode == 'MASTER':
            for key in ('allow_from', 'api_key'):
                if not data.get(key):
                    verrors.add(
                        f'netdata_update.{key}',
                        f'{key} is required with stream mode as MASTER')

        verrors.check()

        data['alarms'].update(update_alarms)

        return data
Example #17
    async def validate_device(self,
                              device,
                              old=None,
                              vm_instance=None,
                              update=True):
        # We allow vm_instance to be passed for cases where VM devices are being updated via VM and
        # the device checks should be performed with the modified vm_instance object not the one db holds
        # vm_instance should be provided at all times when handled by VMService, if VMDeviceService is interacting,
        # then it means the device is configured with a VM and we can retrieve the VM's data from db
        if not vm_instance:
            vm_instance = await self.middleware.call('vm.get_instance',
                                                     device['vm'])

        verrors = ValidationErrors()
        schema = self.DEVICE_ATTRS.get(device['dtype'])
        if schema:
            try:
                device['attributes'] = schema.clean(device['attributes'])
            except Error as e:
                verrors.add(f'attributes.{e.attribute}', e.errmsg, e.errno)

            try:
                schema.validate(device['attributes'])
            except ValidationErrors as e:
                verrors.extend(e)

            if verrors:
                raise verrors

        # vm_instance usages SHOULD NOT rely on device `id` field to uniquely identify objects as it's possible
        # VMService is creating a new VM with devices and the id's don't exist yet

        if device.get('dtype') == 'DISK':
            create_zvol = device['attributes'].get('create_zvol')
            path = device['attributes'].get('path')
            if create_zvol:
                for attr in ('zvol_name', 'zvol_volsize'):
                    if not device['attributes'].get(attr):
                        verrors.add(f'attributes.{attr}',
                                    'This field is required.')
                parentzvol = (device['attributes'].get('zvol_name')
                              or '').rsplit('/', 1)[0]
                if parentzvol and not await self.middleware.call(
                        'pool.dataset.query', [('id', '=', parentzvol)]):
                    verrors.add(
                        'attributes.zvol_name',
                        f'Parent dataset {parentzvol} does not exist.',
                        errno.ENOENT)
                zvol = await self.middleware.call(
                    'pool.dataset.query',
                    [['id', '=', device['attributes'].get('zvol_name')]])
                if not verrors and create_zvol and zvol:
                    verrors.add(
                        'attributes.zvol_name',
                        f'{device["attributes"]["zvol_name"]} already exists.')
                elif zvol and zvol[0]['locked']:
                    verrors.add('attributes.zvol_name',
                                f'{zvol[0]["id"]} is locked.')
            elif not path:
                verrors.add('attributes.path', 'Disk path is required.')
            elif path and not os.path.exists(path):
                verrors.add('attributes.path',
                            f'Disk path {path} does not exist.', errno.ENOENT)

            if not await self.disk_uniqueness_integrity_check(
                    device, vm_instance):
                verrors.add(
                    'attributes.path',
                    f'{vm_instance["name"]} has "{path}" already configured')
        elif device.get('dtype') == 'RAW':
            path = device['attributes'].get('path')
            exists = device['attributes'].get('exists', True)
            if not path:
                verrors.add('attributes.path', 'Path is required.')
            else:
                if exists and not os.path.exists(path):
                    verrors.add('attributes.path', 'Path must exist.')
                if not exists:
                    if os.path.exists(path):
                        verrors.add('attributes.path', 'Path must not exist.')
                    elif not device['attributes'].get('size'):
                        verrors.add(
                            'attributes.size',
                            'Please provide a valid size for the raw file.')
                if (old and old['attributes'].get('size') !=
                        device['attributes'].get('size')
                        and not device['attributes'].get('size')):
                    verrors.add(
                        'attributes.size',
                        'Please provide a valid size for the raw file.')
                await check_path_resides_within_volume(
                    verrors,
                    self.middleware,
                    'attributes.path',
                    path,
                )
                if not await self.disk_uniqueness_integrity_check(
                        device, vm_instance):
                    verrors.add(
                        'attributes.path',
                        f'{vm_instance["name"]} has "{path}" already configured'
                    )
        elif device.get('dtype') == 'CDROM':
            path = device['attributes'].get('path')
            if not path:
                verrors.add('attributes.path', 'Path is required.')
            elif not os.path.exists(path):
                verrors.add('attributes.path',
                            f'Unable to locate CDROM device at {path}')
            elif not await self.disk_uniqueness_integrity_check(
                    device, vm_instance):
                verrors.add(
                    'attributes.path',
                    f'{vm_instance["name"]} has "{path}" already configured')
            if not verrors:
                # We would like to check now if libvirt will actually be able to read the iso file
                # How this works is that if libvirt user is not able to read the file, libvirt automatically changes
                # ownership of the iso file to the libvirt user so that it is able to read it; however, there are cases
                # where even this can fail with perms like 000 or maybe parent path(s) not allowing access.
                # To mitigate this, we can do the following:
                # 1) See if owner of the file is libvirt user
                # 2) If it's not libvirt user:
                # a) Check if libvirt user can access the file
                # b) Change ownership of the file to libvirt user as libvirt would eventually do
                # 3) Check if libvirt user can access the file
                libvirt_user = await self.middleware.call(
                    'user.query', [['username', '=', LIBVIRT_USER]],
                    {'get': True})
                libvirt_group = await self.middleware.call(
                    'group.query', [['group', '=', LIBVIRT_USER]],
                    {'get': True})
                current_owner = os.stat(path)
                is_valid = False
                if current_owner.st_uid != libvirt_user['uid']:
                    if await self.middleware.call(
                            'filesystem.can_access_as_user', LIBVIRT_USER,
                            path, {'read': True}):
                        is_valid = True
                    else:
                        os.chown(path, libvirt_user['uid'],
                                 libvirt_group['gid'])
                if not is_valid and not await self.middleware.call(
                        'filesystem.can_access_as_user', LIBVIRT_USER, path,
                    {'read': True}):
                    verrors.add(
                        'attributes.path',
                        f'{LIBVIRT_USER!r} user cannot read from {path!r} path. Please ensure correct '
                        'permissions are specified.')
                    # Now that we know libvirt user would not be able to read the file in any case,
                    # let's rollback the chown change we did
                    os.chown(path, current_owner.st_uid, current_owner.st_gid)

        elif device.get('dtype') == 'NIC':
            nic = device['attributes'].get('nic_attach')
            if nic:
                nic_choices = await self.middleware.call(
                    'vm.device.nic_attach_choices')
                if nic not in nic_choices:
                    verrors.add('attributes.nic_attach', 'Not a valid choice.')
            await self.failover_nic_check(device, verrors, 'attributes')
        elif device.get('dtype') == 'PCI':
            pptdev = device['attributes'].get('pptdev')
            device_details = await self.middleware.call(
                'vm.device.passthrough_device', pptdev)
            if device_details.get('error'):
                verrors.add(
                    'attribute.pptdev',
                    f'Not a valid choice. The PCI device is not available for passthru: {device_details["error"]}'
                )
            if not await self.middleware.call('vm.device.iommu_enabled'):
                verrors.add('attribute.pptdev', 'IOMMU support is required.')
        elif device.get('dtype') == 'DISPLAY':
            if vm_instance:
                if not update:
                    vm_instance['devices'].append(device)

                await self.validate_display_devices(verrors, vm_instance)

            all_ports = await self.middleware.call(
                'vm.all_used_display_device_ports',
                [['id', '!=', device.get('id')]])
            new_ports = list((await
                              self.middleware.call('vm.port_wizard')).values())
            for key in ('port', 'web_port'):
                if device['attributes'].get(key):
                    if device['attributes'][key] in all_ports:
                        verrors.add(
                            f'attributes.{key}',
                            'Specified display port is already in use')
                else:
                    device['attributes'][key] = new_ports.pop(0)

        if device['dtype'] in ('RAW', 'DISK') and device['attributes'].get('physical_sectorsize')\
                and not device['attributes'].get('logical_sectorsize'):
            verrors.add(
                'attributes.logical_sectorsize',
                'This field must be provided when physical_sectorsize is specified.'
            )

        if verrors:
            raise verrors

        return device
Example #18
    async def do_update(self, job, data):
        """
        Update KMIP Server Configuration.

        The system authenticates the connection to the remote KMIP server with a TLS handshake. `certificate` and
        `certificate_authority` determine the certificates used to initiate the TLS handshake with `server`.

        `validate` is enabled by default. When enabled, the system tests the connection to `server` to make sure
        it is reachable.

        `manage_zfs_keys`/`manage_sed_disks`, when enabled, sync keys from the local database to the remote KMIP
        server. When disabled, any keys still held by the KMIP server are synced back to the local database.

        `enabled` cannot be set to false while there are keys pending to be synced. Users can override this
        by enabling `force_clear`.

        `ssl_version` can be specified to match the SSL configuration used by the KMIP server.

        `change_server` is a boolean field which allows users to migrate data between two KMIP servers. The system
        first migrates keys from the old KMIP server to the local database and then migrates them from the local
        database to the new KMIP server. If it is unable to retrieve all keys from the old server, the operation
        fails. Users can bypass this by enabling `force_clear`.

        `force_clear` is a boolean option which, when enabled, removes all keys pending sync from the database.
        It should be used with extreme caution, as users may end up without ZFS dataset or SED disk keys, leaving
        them locked forever. It is disabled by default.
        """
        old = await self.config()
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()

        if not new['server'] and new['enabled']:
            verrors.add('kmip_update.server',
                        'Please specify a valid hostname or an IPv4 address')

        if new['enabled']:
            verrors.extend(await self.middleware.call(
                'certificate.cert_services_validation', new['certificate'],
                'kmip_update.certificate', False
            ))

        ca = await self.middleware.call(
            'certificateauthority.query',
            [['id', '=', new['certificate_authority']]])
        if ca and not verrors:
            ca = ca[0]
            cert = await self.middleware.call('certificate.get_instance', new['certificate'])
            if not await self.middleware.call(
                'cryptokey.validate_cert_with_chain', cert['certificate'], [ca['certificate']]
            ):
                verrors.add(
                    'kmip_update.certificate_authority',
                    'Certificate chain could not be verified with specified certificate authority.'
                )
        elif not ca and new['enabled']:
            verrors.add('kmip_update.certificate_authority',
                        'Please specify a valid id.')

        if new.pop('validate', True) and new['enabled'] and not verrors:
            if not await self.middleware.call('kmip.test_connection', new):
                verrors.add(
                    'kmip_update.server',
                    f'Unable to connect to {new["server"]}:{new["port"]} KMIP server.'
                )

        change_server = new.pop('change_server', False)
        if change_server and new['server'] == old['server']:
            verrors.add(
                'kmip_update.change_server',
                'Please update server field to reflect the new server.')
        if change_server and not new['enabled']:
            verrors.add('kmip_update.enabled',
                        'Must be enabled when change server is enabled.')

        force_clear = new.pop('force_clear', False)
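        # clear_keys drops keys pending sync only when the user explicitly opted in via
        # force_clear: either while migrating servers, or (below) when toggling `enabled`
        # with a sync still pending.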
        clear_keys = force_clear if change_server else False
        sync_error = 'KMIP sync is pending, please make sure database and KMIP server ' \
                     'are in sync before proceeding with this operation.'
        if old['enabled'] != new['enabled'] and await self.middleware.call(
                'kmip.kmip_sync_pending'):
            if force_clear:
                clear_keys = True
            else:
                verrors.add('kmip_update.enabled', sync_error)

        verrors.check()

        job.set_progress(30, 'Initial Validation complete')

        if clear_keys:
            await self.middleware.call('kmip.clear_sync_pending_keys')
            job.set_progress(50, 'Cleared keys pending sync')

        if change_server:
            # Migration happens in two steps:
            #   1) old server -> local database
            #   2) local database -> new server
            # Step 1 can be skipped when the old server is unreachable and the user chose to
            # clear the pending keys (force_clear).
            job.set_progress(
                55, 'Starting migration from existing server to new server')
            await self.middleware.call('datastore.update',
                                       self._config.datastore, old['id'], {
                                           'manage_zfs_keys': False,
                                           'manage_sed_disks': False
                                       })
            job.set_progress(
                60, 'Syncing keys from existing server to local database')
            sync_jobs = [(await self.middleware.call(f'kmip.{i}'))
                         for i in ('sync_zfs_keys', 'sync_sed_keys')]
            errors = []
            for sync_job in sync_jobs:
                await sync_job.wait()
                if sync_job.error:
                    errors.append(sync_job.error)
                elif sync_job.result:
                    errors.append(
                        f'Failed to sync {",".join(sync_job.result)}')

            if errors:
                await self.middleware.call('datastore.update',
                                           self._config.datastore, old['id'],
                                           old)
                # Some datasets/disks may have been synced to the database while others were not -
                # push all the data of interest back to the KMIP server from the database.
                await self.middleware.call('kmip.sync_keys')
                errors = '\n'.join(errors)
                raise CallError(
                    f'Failed to sync keys from {old["server"]} to host: {errors}'
                )

            if await self.middleware.call('kmip.kmip_sync_pending'):
                raise CallError(sync_error)

            job.set_progress(
                80,
                'Successfully synced keys from existing server to local database'
            )

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            old['id'],
            new,
        )

        await self.middleware.call('service.start', 'kmip')
        if new['enabled'] and old['enabled'] != new['enabled']:
            await self.middleware.call('kmip.initialize_keys')
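        # Re-sync keys whenever enablement or the key-management scope changed, or after a server migration.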
        if any(old[k] != new[k]
               for k in ('enabled', 'manage_zfs_keys',
                         'manage_sed_disks')) or change_server:
            job.set_progress(
                90,
                'Starting sync between local database and configured KMIP server'
            )
            await self.middleware.call('kmip.sync_keys')

        return await self.config()
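
A hedged usage sketch of the migration flow described above, driven through the middleware client. The method name `kmip.update` and the field values are assumptions inferred from the `kmip_update.*` validation keys in the example, not taken from upstream documentation.

from middlewared.client import Client

# Point the system at a new KMIP server: keys are pulled from the old server into the
# local database and then pushed to the new one (see the change_server comments above).
with Client() as c:
    c.call('kmip.update', {
        'server': 'kmip2.example.internal',  # hypothetical new server
        'port': 5696,
        'certificate': 1,                    # hypothetical certificate id
        'certificate_authority': 1,          # hypothetical CA id
        'enabled': True,
        'change_server': True,
        'force_clear': False,                # keep the strict pending-sync checks
    }, job=True)                             # the update runs as a job; wait for it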
Example #20
    async def do_update(self, data):
        """
        Update ftp service configuration.

        `clients` is an integer value which sets the maximum number of simultaneous clients allowed. It defaults to 32.

        `ipconnections` is an integer value which sets the maximum number of connections per IP address. It defaults
        to 0, which means unlimited.

        `timeout` is the maximum client idle time in seconds before the client is disconnected.

        `rootlogin` is a boolean value which, when set to true, allows login as root. This is generally
        discouraged because of the security risks.

        `onlyanonymous` allows anonymous FTP logins with access to the directory specified by `anonpath`.

        `banner` is a message displayed to local login users after they successfully authenticate. It is not displayed
        to anonymous login users.

        `filemask` sets the default permissions for newly created files. The default is 077.

        `dirmask` sets the default permissions for newly created directories. The default is 077.

        `resume`, if set, allows FTP clients to resume interrupted transfers.

        `fxp`, if set to true, enables the File eXchange Protocol. This is generally discouraged, as it
        makes the server vulnerable to FTP bounce attacks.

        `defaultroot`, when set, ensures that for local users, home directory access is only granted if the user
        is a member of the wheel group.

        `ident` is a boolean value which, when set to true, requires IDENT authentication. If identd
        is not running on the client, this can result in timeouts.

        `masqaddress` is the public IP address or hostname which is set if FTP clients cannot connect through a
        NAT device.

        `localuserbw` is a positive integer value which sets the maximum upload bandwidth in KB/s for local users.
        The default of zero means unlimited upload bandwidth (from the FTP server configuration).

        `localuserdlbw` is a positive integer value which sets the maximum download bandwidth in KB/s for local users.
        The default of zero means unlimited download bandwidth (from the FTP server configuration).

        `anonuserbw` is a positive integer value which sets the maximum upload bandwidth in KB/s for anonymous users.
        The default of zero means unlimited upload bandwidth (from the FTP server configuration).

        `anonuserdlbw` is a positive integer value which sets the maximum download bandwidth in KB/s for anonymous
        users. The default of zero means unlimited download bandwidth (from the FTP server configuration).

        `tls` is a boolean value which, when set, enables encrypted connections. This requires a certificate to be
        configured first with the certificate service; the id of that certificate is passed in `ssltls_certificate`.

        `tls_policy` defines whether the control channel, data channel, both channels, or neither channel of an FTP
        session must occur over SSL/TLS.

        `tls_opt_enable_diags` is a boolean value which, when set, enables verbose logging. This is helpful when
        troubleshooting a connection.

        `options` is a string used to add proftpd(8) parameters not covered by the ftp service.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if not ((new["passiveportsmin"] == 0) == (new["passiveportsmax"] == 0)):
            verrors.add("passiveportsmin", "passiveportsmin and passiveportsmax should be both zero or non-zero")
        if not ((new["passiveportsmin"] == 0 and new["passiveportsmax"] == 0) or
                (new["passiveportsmax"] > new["passiveportsmin"])):
            verrors.add("ftp_update.passiveportsmax", "When specified, should be greater than passiveportsmin")

        if new["onlyanonymous"]:
            if not new["anonpath"]:
                verrors.add("ftp_update.anonpath", "This field is required for anonymous login")
            else:
                await check_path_resides_within_volume(verrors, self.middleware, "ftp_update.anonpath", new["anonpath"])

        if new["tls"]:
            if not new["ssltls_certificate"]:
                verrors.add(
                    "ftp_update.ssltls_certificate",
                    "Please provide a valid certificate id when TLS is enabled"
                )
            else:
                verrors.extend((await self.middleware.call(
                    "certificate.cert_services_validation", new["ssltls_certificate"],
                    "ftp_update.ssltls_certificate", False
                )))

        if new["masqaddress"]:
            await resolve_hostname(self.middleware, verrors, "ftp_update.masqaddress", new["masqaddress"])

        if verrors:
            raise verrors

        await self._update_service(old, new)

        if not old['tls'] and new['tls']:
            await self.middleware.call('service.start', 'ssl')

        return new
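
For reference, a hedged sketch of a call that satisfies the validation above: the passive port range must either be fully unset (both zero) or form a proper range with `passiveportsmax` greater than `passiveportsmin`, and enabling `tls` requires a certificate id. The method name `ftp.update` is assumed from the `ftp_update.*` validation keys.

from middlewared.client import Client

with Client() as c:
    c.call('ftp.update', {
        'passiveportsmin': 50000,
        'passiveportsmax': 50100,   # must be greater than passiveportsmin
        'tls': True,
        'ssltls_certificate': 1,    # hypothetical certificate id
        'onlyanonymous': False,     # no anonpath required when anonymous login is off
    })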
Example #21
    async def validate_general_settings(self, data, schema):
        verrors = ValidationErrors()

        language = data.get('language')
        system_languages = await self.middleware.call('system.general.language_choices')
        if language not in system_languages:
            verrors.add(
                f'{schema}.language',
                f'Specified language "{language}" is not known. Please select a valid language.'
            )

        if data['kbdmap'] not in await self.middleware.call('system.general.kbdmap_choices'):
            verrors.add(
                f'{schema}.kbdmap',
                'Please enter a valid keyboard layout'
            )

        timezone = data.get('timezone')
        timezones = await self.middleware.call('system.general.timezone_choices')
        if timezone not in timezones:
            verrors.add(
                f'{schema}.timezone',
                'Timezone not known. Please select a valid timezone.'
            )

        ip4_addresses_list = await self.middleware.call('system.general.ui_address_choices')
        ip6_addresses_list = await self.middleware.call('system.general.ui_v6address_choices')

        ip4_addresses = data.get('ui_address')
        for ip4_address in ip4_addresses:
            if ip4_address not in ip4_addresses_list:
                verrors.add(
                    f'{schema}.ui_address',
                    f'IPv4 address {ip4_address} is not associated with this machine'
                )

        ip6_addresses = data.get('ui_v6address')
        for ip6_address in ip6_addresses:
            if ip6_address not in ip6_addresses_list:
                verrors.add(
                    f'{schema}.ui_v6address',
                    f'IPv6 address {ip6_address} is not associated with this machine'
                )

        for key, wildcard, ips in [('ui_address', '0.0.0.0', ip4_addresses), ('ui_v6address', '::', ip6_addresses)]:
            if wildcard in ips and len(ips) > 1:
                verrors.add(
                    f'{schema}.{key}',
                    f'When "{wildcard}" has been selected, selection of other addresses is not allowed'
                )

        certificate_id = data.get('ui_certificate')
        cert = await self.middleware.call(
            'certificate.query',
            [["id", "=", certificate_id]]
        )
        if not cert:
            verrors.add(
                f'{schema}.ui_certificate',
                'Please specify a valid certificate which exists in the system'
            )
        else:
            cert = cert[0]
            verrors.extend(
                await self.middleware.call(
                    'certificate.cert_services_validation', certificate_id, f'{schema}.ui_certificate', False
                )
            )

            if cert['fingerprint']:
                syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_USER)
                syslog.syslog(syslog.LOG_ERR, 'Fingerprint of the certificate used in UI: ' + cert['fingerprint'])
                syslog.closelog()

        return verrors
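
The wildcard rule above is easy to overlook: selecting "0.0.0.0" (or "::" for IPv6) together with any specific address is rejected. A minimal standalone sketch of the same check, independent of the middleware:

def wildcard_conflicts(selected_addresses, wildcard):
    """Return True when a wildcard address is combined with any other address."""
    return wildcard in selected_addresses and len(selected_addresses) > 1

assert wildcard_conflicts(['0.0.0.0', '192.0.2.10'], '0.0.0.0')
assert not wildcard_conflicts(['0.0.0.0'], '0.0.0.0')
assert not wildcard_conflicts(['192.0.2.10', '192.0.2.11'], '0.0.0.0')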