Example No. 1
class SystemGeneralService(ConfigService):
    class Config:
        namespace = 'system.general'
        datastore = 'system.settings'
        datastore_prefix = 'stg_'
        datastore_extend = 'system.general.general_system_extend'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._language_choices = self._initialize_languages()
        self._timezone_choices = None
        self._kbdmap_choices = None
        self._country_choices = {}

    @private
    async def general_system_extend(self, data):
        # Copy the keys so the dict can be mutated while renaming 'gui*' -> 'ui_*'
        for key in list(data):
            if key.startswith('gui'):
                data['ui_' + key[3:]] = data.pop(key)

        data['sysloglevel'] = data['sysloglevel'].upper()
        if data['ui_certificate']:
            data['ui_certificate'] = await self.middleware.call(
                'certificate.query',
                [['id', '=', data['ui_certificate']['id']]], {'get': True})
        return data

    @accepts()
    def language_choices(self):
        return self._language_choices

    @private
    def _initialize_languages(self):
        languages = [
            ('af', 'Afrikaans'),
            ('ar', 'Arabic'),
            ('ast', 'Asturian'),
            ('az', 'Azerbaijani'),
            ('bg', 'Bulgarian'),
            ('be', 'Belarusian'),
            ('bn', 'Bengali'),
            ('br', 'Breton'),
            ('bs', 'Bosnian'),
            ('ca', 'Catalan'),
            ('cs', 'Czech'),
            ('cy', 'Welsh'),
            ('da', 'Danish'),
            ('de', 'German'),
            ('dsb', 'Lower Sorbian'),
            ('el', 'Greek'),
            ('en', 'English'),
            ('en-au', 'Australian English'),
            ('en-gb', 'British English'),
            ('eo', 'Esperanto'),
            ('es', 'Spanish'),
            ('es-ar', 'Argentinian Spanish'),
            ('es-co', 'Colombian Spanish'),
            ('es-mx', 'Mexican Spanish'),
            ('es-ni', 'Nicaraguan Spanish'),
            ('es-ve', 'Venezuelan Spanish'),
            ('et', 'Estonian'),
            ('eu', 'Basque'),
            ('fa', 'Persian'),
            ('fi', 'Finnish'),
            ('fr', 'French'),
            ('fy', 'Frisian'),
            ('ga', 'Irish'),
            ('gd', 'Scottish Gaelic'),
            ('gl', 'Galician'),
            ('he', 'Hebrew'),
            ('hi', 'Hindi'),
            ('hr', 'Croatian'),
            ('hsb', 'Upper Sorbian'),
            ('hu', 'Hungarian'),
            ('ia', 'Interlingua'),
            ('id', 'Indonesian'),
            ('io', 'Ido'),
            ('is', 'Icelandic'),
            ('it', 'Italian'),
            ('ja', 'Japanese'),
            ('ka', 'Georgian'),
            ('kab', 'Kabyle'),
            ('kk', 'Kazakh'),
            ('km', 'Khmer'),
            ('kn', 'Kannada'),
            ('ko', 'Korean'),
            ('lb', 'Luxembourgish'),
            ('lt', 'Lithuanian'),
            ('lv', 'Latvian'),
            ('mk', 'Macedonian'),
            ('ml', 'Malayalam'),
            ('mn', 'Mongolian'),
            ('mr', 'Marathi'),
            ('my', 'Burmese'),
            ('nb', 'Norwegian Bokmål'),
            ('ne', 'Nepali'),
            ('nl', 'Dutch'),
            ('nn', 'Norwegian Nynorsk'),
            ('os', 'Ossetic'),
            ('pa', 'Punjabi'),
            ('pl', 'Polish'),
            ('pt', 'Portuguese'),
            ('pt-br', 'Brazilian Portuguese'),
            ('ro', 'Romanian'),
            ('ru', 'Russian'),
            ('sk', 'Slovak'),
            ('sl', 'Slovenian'),
            ('sq', 'Albanian'),
            ('sr', 'Serbian'),
            ('sr-latn', 'Serbian Latin'),
            ('sv', 'Swedish'),
            ('sw', 'Swahili'),
            ('ta', 'Tamil'),
            ('te', 'Telugu'),
            ('th', 'Thai'),
            ('tr', 'Turkish'),
            ('tt', 'Tatar'),
            ('udm', 'Udmurt'),
            ('uk', 'Ukrainian'),
            ('ur', 'Urdu'),
            ('vi', 'Vietnamese'),
            ('zh-hans', 'Simplified Chinese'),
            ('zh-hant', 'Traditional Chinese'),
        ]
        return dict(languages)

    @private
    async def _initialize_timezone_choices(self):
        pipe = await Popen(
            'find /usr/share/zoneinfo/ -type f -not -name zone.tab -not -regex \'.*/Etc/GMT.*\'',
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=True)
        self._timezone_choices = (
            await pipe.communicate())[0].decode().strip().split('\n')
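        # Strip the '/usr/share/zoneinfo/' prefix (20 characters) to get zone names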
        self._timezone_choices = {
            x[20:]: x[20:]
            for x in self._timezone_choices
        }

    @accepts()
    async def timezone_choices(self):
        if not self._timezone_choices:
            await self._initialize_timezone_choices()
        return self._timezone_choices

    @accepts()
    async def country_choices(self):
        if not self._country_choices:
            await self._initialize_country_choices()
        return self._country_choices

    @private
    async def _initialize_country_choices(self):
        def _get_index(country_columns, column):
            # Return the index of `column` (case-insensitive), or -1 if absent
            for i, c in enumerate(country_columns):
                if c.lower() == column.lower():
                    return i
            return -1

        country_file = '/etc/iso_3166_2_countries.csv'
        cni, two_li = None, None
        with open(country_file, 'r', encoding='utf-8') as csvfile:
            reader = csv.reader(csvfile)

            for index, row in enumerate(reader):
                if index != 0:
                    if row[cni] and row[two_li]:
                        if row[two_li] in self._country_choices:
                            # If two countries in the iso file have the same key, we concatenate their names
                            self._country_choices[
                                row[two_li]] += f' + {row[cni]}'
                        else:
                            self._country_choices[row[two_li]] = row[cni]
                else:
                    # Only the 'Common Name' and two-letter-code columns are used from the CSV
                    cni = _get_index(row, 'Common Name')
                    two_li = _get_index(row, 'ISO 3166-1 2 Letter Code')

    @private
    async def _initialize_kbdmap_choices(self):
        """Populate choices from /usr/share/vt/keymaps/INDEX.keymaps"""
        index = "/usr/share/vt/keymaps/INDEX.keymaps"

        if not os.path.exists(index):
            # No keymap index on this platform; expose an empty choice set
            self._kbdmap_choices = {}
            return
        with open(index, 'rb') as f:
            d = f.read().decode('utf8', 'ignore')
        _all = re.findall(r'^(?P<name>[^#\s]+?)\.kbd:en:(?P<desc>.+)$', d,
                          re.M)
        self._kbdmap_choices = {name: desc for name, desc in _all}

    @accepts()
    async def kbdmap_choices(self):
        if not self._kbdmap_choices:
            await self._initialize_kbdmap_choices()
        return self._kbdmap_choices

    @private
    async def validate_general_settings(self, data, schema):
        verrors = ValidationErrors()

        language = data.get('language')
        if language:
            system_languages = self.language_choices()
            if language not in system_languages.keys():
                verrors.add(
                    f'{schema}.language',
                    f'Specified language "{language}" was not found'
                )

        # kbd map needs work

        timezone = data.get('timezone')
        if timezone:
            timezones = await self.timezone_choices()
            if timezone not in timezones:
                verrors.add(f'{schema}.timezone',
                            'Please select a valid timezone')

        ip_addresses = await self.middleware.call('interface.ip_in_use')
        ip4_addresses_list = [
            alias_dict['address'] for alias_dict in ip_addresses
            if alias_dict['type'] == 'INET'
        ]
        ip6_addresses_list = [
            alias_dict['address'] for alias_dict in ip_addresses
            if alias_dict['type'] == 'INET6'
        ]

        ip4_addresses = data.get('ui_address')
        for ip4_address in ip4_addresses:
            if (ip4_address and ip4_address != '0.0.0.0'
                    and ip4_address not in ip4_addresses_list):
                verrors.add(
                    f'{schema}.ui_address',
                    f'IPv4 address {ip4_address} is not associated with this machine'
                )

        ip6_addresses = data.get('ui_v6address')
        for ip6_address in ip6_addresses:
            if (ip6_address and ip6_address != '::'
                    and ip6_address not in ip6_addresses_list):
                verrors.add(
                    f'{schema}.ui_v6address',
                    f'IPv6 address {ip6_address} is not associated with this machine'
                )

        for key, wildcard, ips in [('ui_address', '0.0.0.0', ip4_addresses),
                                   ('ui_v6address', '::', ip6_addresses)]:
            if wildcard in ips and len(ips) > 1:
                verrors.add(
                    f'{schema}.{key}',
                    f'When "{wildcard}" has been selected, selection of other addresses is not allowed'
                )

        syslog_server = data.get('syslogserver')
        if syslog_server:
            match = re.match(r"^[\w\.\-]+(\:\d+)?$", syslog_server)
            if not match:
                verrors.add(f'{schema}.syslogserver',
                            'Invalid syslog server format')
            elif ':' in syslog_server:
                port = int(syslog_server.split(':')[-1])
                if port < 0 or port > 65535:
                    verrors.add(f'{schema}.syslogserver',
                                'Port must be in the range 0-65535')

        certificate_id = data.get('ui_certificate')
        cert = await self.middleware.call('certificate.query',
                                          [["id", "=", certificate_id]])
        if not cert:
            verrors.add(
                f'{schema}.ui_certificate',
                'Please specify a valid certificate which exists in the system'
            )
        else:
            cert = cert[0]
            verrors.extend(await self.middleware.call(
                'certificate.cert_services_validation', certificate_id,
                f'{schema}.ui_certificate', False))

            if cert['fingerprint']:
                syslog.openlog(logoption=syslog.LOG_PID,
                               facility=syslog.LOG_USER)
                syslog.syslog(
                    syslog.LOG_ERR,
                    'Fingerprint of the certificate used in UI: ' +
                    cert['fingerprint'])
                syslog.closelog()

        return verrors

    @accepts()
    async def ui_certificate_choices(self):
        """
        Return choices of `ui_certificate` attribute for `system.general.update`.
        """
        return {
            i['id']: i['name']
            for i in await self.middleware.call(
                'certificate.query', [('cert_type_CSR', '=', False)])
        }

    @accepts(
        Dict(
            'general_settings',
            Int('ui_certificate', null=True),
            Int('ui_httpsport', validators=[Range(min=1, max=65535)]),
            Bool('ui_httpsredirect'),
            Int('ui_port', validators=[Range(min=1, max=65535)]),
            List('ui_address', items=[IPAddr('addr')], empty=False),
            List('ui_v6address', items=[IPAddr('addr')], empty=False),
            Str('kbdmap'),
            Str('language'),
            Str('sysloglevel',
                enum=[
                    'F_EMERG', 'F_ALERT', 'F_CRIT', 'F_ERR', 'F_WARNING',
                    'F_NOTICE', 'F_INFO', 'F_DEBUG', 'F_IS_DEBUG'
                ]),
            Str('syslogserver'),
            Str('timezone'),
            update=True,
        ))
    async def do_update(self, data):
        config = await self.config()
        config['ui_certificate'] = (
            config['ui_certificate']['id'] if config['ui_certificate'] else None
        )
        new_config = config.copy()
        new_config.update(data)

        verrors = await self.validate_general_settings(
            new_config, 'general_settings_update')
        if verrors:
            raise verrors

        # Convert new_config keys to match the database table fields
        new_config['sysloglevel'] = new_config['sysloglevel'].lower()
        for key in list(new_config):
            if key.startswith('ui_'):
                new_config['gui' + key[3:]] = new_config.pop(key)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   config['id'], new_config,
                                   {'prefix': 'stg_'})

        # case insensitive comparison should be performed for sysloglevel
        if (config['sysloglevel'].lower() != new_config['sysloglevel'].lower()
                or config['syslogserver'] != new_config['syslogserver']):
            await self.middleware.call('service.restart', 'syslogd')

        if config['timezone'] != new_config['timezone']:
            await self.middleware.call('zettarepl.update_timezone',
                                       new_config['timezone'])
            await self.middleware.call('service.reload', 'timeservices')
            await self.middleware.call('service.restart', 'cron')

        if config['language'] != new_config['language']:
            await self.middleware.call('system.general.set_language')

        await self.middleware.call('service.start', 'ssl')

        return await self.config()

    @accepts()
    async def local_url(self):
        config = await self.middleware.call('system.general.config')

        if config['ui_certificate']:
            protocol = 'https'
            port = config['ui_httpsport']
        else:
            protocol = 'http'
            port = config['ui_port']

        if '0.0.0.0' in config['ui_address'] or '127.0.0.1' in config['ui_address']:
            hosts = ['127.0.0.1']
        else:
            hosts = config['ui_address']

        errors = []
        for host in hosts:
            try:
                reader, writer = await asyncio.wait_for(
                    asyncio.open_connection(
                        host,
                        port=port,
                    ), timeout=5)
                writer.close()

                return f'{protocol}://{host}:{port}'

            except Exception as e:
                errors.append(f'{host}: {e}')

        raise CallError(
            'Unable to connect to any of the specified UI addresses:\n' +
            '\n'.join(errors))

    @private
    def set_language(self):
        language = self.middleware.call_sync(
            'system.general.config')['language']
        set_language(language)
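
A minimal usage sketch (not part of the service above, assuming a running middlewared daemon and the `middlewared.client` websocket client; the language and timezone values are illustrative):

from middlewared.client import Client

# Query a choice endpoint, then update two general settings; 'timezone' must
# be a key of system.general.timezone_choices().
with Client() as c:
    assert 'en' in c.call('system.general.language_choices')
    c.call('system.general.update', {
        'language': 'en',
        'timezone': 'America/New_York',
    })
    print(c.call('system.general.local_url'))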
Example No. 2
class S3Service(SystemServiceService):
    class Config:
        service = "s3"
        datastore_prefix = "s3_"
        datastore_extend = "s3.config_extend"
        cli_namespace = "service.s3"

    ENTRY = Dict(
        's3_entry',
        Str('bindip', required=True),
        Int('bindport', validators=[Range(min=1, max=65535)], required=True),
        Str('access_key', max_length=20, required=True),
        Str('secret_key', max_length=40, required=True),
        Bool('browser', required=True),
        Str('storage_path', required=True),
        Int('certificate', null=True, required=True),
        Int('id', required=True),
    )

    @accepts()
    @returns(Dict('s3_bindip_choices', additional_attrs=True))
    async def bindip_choices(self):
        """
        Return ip choices for S3 service to use.
        """
        return {
            d['address']: d['address']
            for d in await self.middleware.call('interface.ip_in_use', {
                'static': True,
                'any': True
            })
        }

    @private
    async def config_extend(self, s3):
        s3['storage_path'] = s3.pop('disks', None)
        s3.pop('mode', None)
        if s3.get('certificate'):
            s3['certificate'] = s3['certificate']['id']
        return s3

    @accepts(
        Patch(
            's3_entry',
            's3_update',
            ('edit', {
                'name': 'access_key',
                'method': lambda x: setattr(x, 'validators', [
                    Match(r'^\w+$', explanation='Should only contain alphanumeric characters')
                ]),
            }),
            ('edit', {
                'name': 'secret_key',
                'method': lambda x: setattr(x, 'validators', [
                    Match(r'^\w+$', explanation='Should only contain alphanumeric characters')
                ]),
            }),
            ('rm', {'name': 'id'}),
            ('attr', {'update': True}),
        ))
    async def do_update(self, data):
        """
        Update S3 Service Configuration.

        `access_key` must only contain alphanumeric characters and should be between 5 and 20 characters.

        `secret_key` must only contain alphanumeric characters and should be between 8 and 40 characters.

        `browser` when set, enables the web user interface for the S3 Service.

        `certificate` is a valid certificate id which exists in the system. This is used to enable secure
        S3 connections.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        for attr, minlen, maxlen in (
            ('access_key', 5, 20),
            ('secret_key', 8, 40),
        ):
            curlen = len(new.get(attr, ''))
            if curlen < minlen or curlen > maxlen:
                verrors.add(
                    f's3_update.{attr}',
                    f'Value must be between {minlen} and {maxlen} characters long')

        if not new['storage_path'] and await self.middleware.call(
                'service.started', 's3'):
            verrors.add('s3_update.storage_path',
                        'S3 must be stopped before unsetting storage path.')
        elif new['storage_path']:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   's3_update.storage_path',
                                                   new['storage_path'])

            if not verrors:
                if new['storage_path'].rstrip('/').count('/') < 3:
                    verrors.add(
                        's3_update.storage_path',
                        'Top level datasets are not allowed, e.g. /mnt/tank/dataset is allowed'
                    )
                else:
                    # If the storage_path does not exist, let's create it
                    if not os.path.exists(new['storage_path']):
                        os.makedirs(new['storage_path'])

        if new['certificate']:
            verrors.extend(
                (await
                 self.middleware.call('certificate.cert_services_validation',
                                      new['certificate'],
                                      's3_update.certificate', False)))

        if new['bindip'] not in await self.bindip_choices():
            verrors.add('s3_update.bindip',
                        'Please provide a valid IP address')

        if verrors:
            raise verrors

        new['disks'] = new.pop('storage_path')

        await self._update_service(old, new)

        if new['disks'] and (await self.middleware.call(
                'filesystem.stat', new['disks']))['user'] != 'minio':
            await self.middleware.call(
                'filesystem.setperm', {
                    'path': new['disks'],
                    'mode': '775',
                    'uid': (await self.middleware.call(
                        'dscache.get_uncached_user', 'minio'))['pw_uid'],
                    'gid': (await self.middleware.call(
                        'dscache.get_uncached_group', 'minio'))['gr_gid'],
                    'options': {'recursive': True, 'traverse': False}
                })

        return await self.config()
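
A hedged usage sketch for the service above (assumes the `middlewared.client` package; the keys and dataset path are placeholders chosen to satisfy the validators in `do_update`):

from middlewared.client import Client

# 'storage_path' must sit below a pool-level dataset (e.g. /mnt/pool/dataset),
# and the keys must be alphanumeric and within the enforced length ranges.
with Client() as c:
    c.call('s3.update', {
        'bindip': '0.0.0.0',
        'bindport': 9000,
        'access_key': 'minio',
        'secret_key': 'minio123',
        'browser': True,
        'storage_path': '/mnt/tank/s3data',
        'certificate': None,
    })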
Example No. 3
class RsyncTaskService(CRUDService):

    class Config:
        datastore = 'tasks.rsync'
        datastore_prefix = 'rsync_'
        datastore_extend = 'rsynctask.rsync_task_extend'

    @private
    async def rsync_task_extend(self, data):
        data['extra'] = list(filter(None, re.split(r"\s+", data["extra"])))
        for field in ('mode', 'direction'):
            data[field] = data[field].upper()
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_rsync_task(self, data, schema):
        verrors = ValidationErrors()

        # Windows users can have spaces in their usernames
        # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808

        username = data.get('user')
        if ' ' in username:
            verrors.add(f'{schema}.user', 'User names cannot have spaces')
            raise verrors

        user = await self.middleware.call(
            'notifier.get_user_object',
            username
        )
        if not user:
            verrors.add(f'{schema}.user', f'Provided user "{username}" does not exist')
            raise verrors

        remote_host = data.get('remotehost')
        if not remote_host:
            verrors.add(f'{schema}.remotehost', 'Please specify a remote host')

        if data.get('extra'):
            data['extra'] = ' '.join(data['extra'])
        else:
            data['extra'] = ''

        mode = data.get('mode')
        if not mode:
            verrors.add(f'{schema}.mode', 'This field is required')

        remote_module = data.get('remotemodule')
        if mode == 'MODULE' and not remote_module:
            verrors.add(f'{schema}.remotemodule', 'This field is required')

        if mode == 'SSH':
            remote_port = data.get('remoteport')
            if not remote_port:
                verrors.add(f'{schema}.remoteport', 'This field is required')

            remote_path = data.get('remotepath')
            if not remote_path:
                verrors.add(f'{schema}.remotepath', 'This field is required')

            search = os.path.join(user.pw_dir, '.ssh', 'id_[edr]*')
            exclude_from_search = os.path.join(user.pw_dir, '.ssh', 'id_[edr]*pub')
            key_files = set(glob.glob(search)) - set(glob.glob(exclude_from_search))
            if not key_files:
                verrors.add(
                    f'{schema}.user',
                    'In order to use rsync over SSH you need a user'
                    ' with a private key (DSA/ECDSA/RSA) set up in their home directory.'
                )
            else:
                for file in glob.glob(search):
                    if '.pub' not in file:
                        # file holds a private key and its permissions should be 600
                        if os.stat(file).st_mode & 0o077 != 0:
                            verrors.add(
                                f'{schema}.user',
                                f'Permissions {oct(os.stat(file).st_mode & 0o777)} for {file} are too open. Please '
                                f'correct them by running chmod 600 {file}'
                            )

            if (
                data.get('validate_rpath') and
                remote_path and
                remote_host and
                remote_port
            ):
                if '@' in remote_host:
                    remote_username, remote_host = remote_host.rsplit('@', 1)
                else:
                    remote_username = username

                try:
                    with (await asyncio.wait_for(asyncssh.connect(
                            remote_host,
                            port=remote_port,
                            username=remote_username,
                            client_keys=key_files,
                            known_hosts=None
                    ), timeout=5)) as conn:

                        await conn.run(f'test -d {shlex.quote(remote_path)}', check=True)

                except asyncio.TimeoutError:

                    verrors.add(
                        f'{schema}.remotehost',
                        'SSH timeout occurred. Remote path cannot be validated.'
                    )

                except OSError as e:

                    if e.errno == 113:
                        verrors.add(
                            f'{schema}.remotehost',
                            f'Connection to the remote host {remote_host} on port {remote_port} failed.'
                        )
                    else:
                        verrors.add(
                            f'{schema}.remotehost',
                            e.__str__()
                        )

                except asyncssh.DisconnectError as e:

                    verrors.add(
                        f'{schema}.remotehost',
                        f'Disconnect Error[ error code {e.code} ] was generated when trying to '
                        f'communicate with remote host {remote_host} and remote user {remote_username}.'
                    )

                except asyncssh.ProcessError as e:

                    if e.code == '1':
                        verrors.add(
                            f'{schema}.remotepath',
                            'The remote path you specified does not exist or is not a directory. '
                            'Either create one yourself on the remote machine or uncheck the '
                            'validate_rpath field.'
                        )
                    else:
                        verrors.add(
                            f'{schema}.remotepath',
                            f'Connection to Remote Host was successful but failed to verify '
                            f'Remote Path. {e.__str__()}'
                        )

                except asyncssh.Error as e:

                    if e.__class__.__name__ in str(e):
                        exception_reason = str(e)
                    else:
                        exception_reason = f'{e.__class__.__name__} {e}'
                    verrors.add(
                        f'{schema}.remotepath',
                        f'Remote Path could not be validated. An exception was raised. {exception_reason}'
                    )
            elif data.get('validate_rpath'):
                verrors.add(
                    f'{schema}.remotepath',
                    'Remote path could not be validated because of missing fields'
                )

        data.pop('validate_rpath', None)

        # Keeping compatibility with legacy UI
        for field in ('mode', 'direction'):
            data[field] = data[field].lower()

        return verrors, data

    @accepts(Dict(
        'rsync_task_create',
        Str('path', required=True),
        Str('user', required=True),
        Str('remotehost'),
        Int('remoteport'),
        Str('mode', enum=['MODULE', 'SSH'], default='MODULE'),
        Str('remotemodule'),
        Str('remotepath'),
        Bool('validate_rpath'),
        Str('direction', enum=['PULL', 'PUSH'], default='PUSH'),
        Str('desc'),
        Cron(
            'schedule',
            defaults={'minute': '00'},
        ),
        Bool('recursive'),
        Bool('times'),
        Bool('compress'),
        Bool('archive'),
        Bool('delete'),
        Bool('quiet'),
        Bool('preserveperm'),
        Bool('preserveattr'),
        Bool('delayupdates'),
        List('extra', items=[Str('extra')]),
        Bool('enabled'),
        register=True,
    ))
    async def do_create(self, data):
        """
        Create a Rsync Task.

        `path` represents the path to pool/dataset.

        `remotehost` is the IP address or hostname of the remote system. If the username differs on the remote host,
        the "username@remote_host" format should be used.

        `mode` represents the different operating mechanisms for rsync, i.e. Rsync Module mode / Rsync SSH mode.

        `remotemodule` is the name of the remote module; this attribute should be specified when `mode` is set to MODULE.

        `remotepath` specifies the path on the remote system.

        `validate_rpath` is a boolean which, when set, validates the existence of the remote path.

        `direction` specifies if data should be PULLED or PUSHED from the remote system.

        `compress` when set reduces the size of the data which is to be transmitted.

        `archive` when set makes rsync run recursively, preserving symlinks, permissions, modification times, group,
        and special files.

        `delete` when set deletes files in the destination directory which do not exist in the source directory.

        `preserveperm` when set preserves original file permissions.

        .. examples(websocket)::

          Create a Rsync Task which pulls data from a remote system at the fifth minute of every hour.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "rsynctask.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "desc": "Test rsync task",
                    "user": "******",
                    "mode": "MODULE",
                    "remotehost": "[email protected]",
                    "compress": true,
                    "archive": true,
                    "direction": "PULL",
                    "path": "/mnt/vol1/rsync_dataset",
                    "remotemodule": "remote_module1"
                }]
            }
        """
        verrors, data = await self.validate_rsync_task(data, 'rsync_task_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )
        await self.middleware.call('service.restart', 'cron')

        return await self._get_instance(data['id'])

    @accepts(
        Int('id', validators=[Range(min=1)]),
        Patch('rsync_task_create', 'rsync_task_update', ('attr', {'update': True}))
    )
    async def do_update(self, id, data):
        """
        Update Rsync Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)], options={'get': True})

        new = old.copy()
        new.update(data)

        verrors, data = await self.validate_rsync_task(new, 'rsync_task_update')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )
        await self.middleware.call('service.restart', 'cron')

        return await self.query(filters=[('id', '=', id)], options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete Rsync Task of `id`.
        """
        res = await self.middleware.call('datastore.delete', self._config.datastore, id)
        await self.middleware.call('service.restart', 'cron')
        return res

    @private
    async def commandline(self, id):
        """
        Helper method to generate the rsync command avoiding code duplication.
        """
        rsync = await self._get_instance(id)
        path = shlex.quote(rsync['path'])

        # lockf serializes runs on the same path: '-t 0' makes it fail
        # immediately if another rsync for this path already holds the lock
        line = [
            '/usr/bin/lockf', '-s', '-t', '0', '-k', path, '/usr/local/bin/rsync'
        ]
        for name, flag in (
            ('archive', '-a'),
            ('compress', '-z'),
            ('delayupdates', '--delay-updates'),
            ('delete', '--delete-delay'),
            ('preserveattr', '-X'),
            ('preserveperm', '-p'),
            ('recursive', '-r'),
            ('times', '-t'),
        ):
            if rsync[name]:
                line.append(flag)
        if rsync['extra']:
            line.append(' '.join(rsync['extra']))

        # Do not use username if one is specified in host field
        # See #5096 for more details
        if '@' in rsync['remotehost']:
            remote = rsync['remotehost']
        else:
            remote = f'"{rsync["user"]}"@{rsync["remotehost"]}'

        if rsync['mode'] == 'MODULE':
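            # rsync argument order encodes direction: source first, destination
            # second; for PULL the pair built for PUSH is simply reversed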
            module_args = [path, f'{remote}::"{rsync["remotemodule"]}"']
            if rsync['direction'] != 'PUSH':
                module_args.reverse()
            line += module_args
        else:
            line += [
                '-e',
                f'ssh -p {rsync["remoteport"]} -o BatchMode=yes -o StrictHostKeyChecking=yes'
            ]
            path_args = [path, f'{remote}:"{shlex.quote(rsync["remotepath"])}"']
            if rsync['direction'] != 'PUSH':
                path_args.reverse()
            line += path_args

        if rsync['quiet']:
            line += ['>', '/dev/null', '2>&1']

        return ' '.join(line)

    @item_method
    @accepts(Int('id'))
    @job(lock=lambda args: args[-1], logs=True)
    def run(self, job, id):
        """
        Job to run rsync task of `id`.

        Output is saved to job log excerpt as well as syslog.
        """
        rsync = self.middleware.call_sync('rsynctask._get_instance', id)
        commandline = self.middleware.call_sync('rsynctask.commandline', id)

        cp = run_command_with_user_context(
            commandline, rsync['user'], lambda v: job.logs_fd.write(v)
        )

        if cp.returncode != 0:
            raise CallError(
                f'rsync command returned {cp.returncode}. Check logs for further information.'
            )
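
A hedged usage sketch (assumes `middlewared.client`; the user, host, module, and path are placeholders): create a push task in MODULE mode and trigger it immediately.

from middlewared.client import Client

# 'rsynctask.run' starts a job; its log excerpt can be followed through the
# job id it returns (e.g. via 'core.get_jobs').
with Client() as c:
    task = c.call('rsynctask.create', {
        'path': '/mnt/tank/docs',
        'user': 'backup',
        'mode': 'MODULE',
        'remotehost': 'rsync.example.com',
        'remotemodule': 'backups',
        'direction': 'PUSH',
        'schedule': {'minute': '00'},
        'enabled': True,
    })
    c.call('rsynctask.run', task['id'])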
Example No. 4
class ReplicationService(CRUDService):
    class Config:
        datastore = "storage.replication"
        datastore_prefix = "repl_"
        datastore_extend = "replication.extend"
        datastore_extend_context = "replication.extend_context"

    @private
    async def extend_context(self, extra):
        return {
            "state": await self.middleware.call("zettarepl.get_state"),
        }

    @private
    async def extend(self, data, context):
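        # Strip the 'task_' datastore prefix from each bound periodic snapshot task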
        data["periodic_snapshot_tasks"] = [{
            k.replace("task_", ""): v
            for k, v in task.items()
        } for task in data["periodic_snapshot_tasks"]]

        for task in data["periodic_snapshot_tasks"]:
            Cron.convert_db_format_to_schedule(task, begin_end=True)

        if data["direction"] == "PUSH":
            data["also_include_naming_schema"] = data["naming_schema"]
            data["naming_schema"] = []
        if data["direction"] == "PULL":
            data["also_include_naming_schema"] = []

        Cron.convert_db_format_to_schedule(data,
                                           "schedule",
                                           key_prefix="schedule_",
                                           begin_end=True)
        Cron.convert_db_format_to_schedule(data,
                                           "restrict_schedule",
                                           key_prefix="restrict_schedule_",
                                           begin_end=True)

        if "error" in context["state"]:
            data["state"] = context["state"]["error"]
        else:
            data["state"] = context["state"]["tasks"].get(
                f"replication_task_{data['id']}", {
                    "state": "PENDING",
                })

        data["job"] = data["state"].pop("job", None)

        return data

    @private
    async def compress(self, data):
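        # Inverse of extend(): fold the UI-level representation back into database form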
        if data["direction"] == "PUSH":
            data["naming_schema"] = data["also_include_naming_schema"]
        del data["also_include_naming_schema"]

        Cron.convert_schedule_to_db_format(data,
                                           "schedule",
                                           key_prefix="schedule_",
                                           begin_end=True)
        Cron.convert_schedule_to_db_format(data,
                                           "restrict_schedule",
                                           key_prefix="restrict_schedule_",
                                           begin_end=True)

        del data["periodic_snapshot_tasks"]

        return data

    @accepts(
        Dict(
            "replication_create",
            Str("name", required=True),
            Str("direction", enum=["PUSH", "PULL"], required=True),
            Str("transport",
                enum=["SSH", "SSH+NETCAT", "LOCAL"],
                required=True),
            Int("ssh_credentials", null=True, default=None),
            Str("netcat_active_side",
                enum=["LOCAL", "REMOTE"],
                null=True,
                default=None),
            Str("netcat_active_side_listen_address", null=True, default=None),
            Int("netcat_active_side_port_min",
                null=True,
                default=None,
                validators=[Port()]),
            Int("netcat_active_side_port_max",
                null=True,
                default=None,
                validators=[Port()]),
            Str("netcat_passive_side_connect_address", null=True,
                default=None),
            List("source_datasets",
                 items=[Path("dataset", empty=False)],
                 required=True,
                 empty=False),
            Path("target_dataset", required=True, empty=False),
            Bool("recursive", required=True),
            List("exclude", items=[Path("dataset", empty=False)], default=[]),
            Bool("properties", default=True),
            Bool("replicate", default=False),
            List("periodic_snapshot_tasks",
                 items=[Int("periodic_snapshot_task")],
                 default=[],
                 validators=[Unique()]),
            List("naming_schema",
                 items=[
                     Str("naming_schema",
                         validators=[ReplicationSnapshotNamingSchema()])
                 ],
                 default=[]),
            List("also_include_naming_schema",
                 items=[
                     Str("naming_schema",
                         validators=[ReplicationSnapshotNamingSchema()])
                 ],
                 default=[]),
            Bool("auto", required=True),
            Cron("schedule",
                 defaults={"minute": "00"},
                 begin_end=True,
                 null=True,
                 default=None),
            Cron("restrict_schedule",
                 defaults={"minute": "00"},
                 begin_end=True,
                 null=True,
                 default=None),
            Bool("only_matching_schedule", default=False),
            Bool("allow_from_scratch", default=False),
            Str("readonly", enum=["SET", "REQUIRE", "IGNORE"], default="SET"),
            Bool("hold_pending_snapshots", default=False),
            Str("retention_policy",
                enum=["SOURCE", "CUSTOM", "NONE"],
                required=True),
            Int("lifetime_value",
                null=True,
                default=None,
                validators=[Range(min=1)]),
            Str("lifetime_unit",
                null=True,
                default=None,
                enum=["HOUR", "DAY", "WEEK", "MONTH", "YEAR"]),
            Str("compression",
                enum=["LZ4", "PIGZ", "PLZIP"],
                null=True,
                default=None),
            Int("speed_limit",
                null=True,
                default=None,
                validators=[Range(min=1)]),
            Bool("large_block", default=True),
            Bool("embed", default=False),
            Bool("compressed", default=True),
            Int("retries", default=5, validators=[Range(min=1)]),
            Str("logging_level",
                enum=["DEBUG", "INFO", "WARNING", "ERROR"],
                null=True,
                default=None),
            Bool("enabled", default=True),
            register=True,
            strict=True,
        ))
    async def do_create(self, data):
        """
        Create a Replication Task

        Create a Replication Task that will push or pull ZFS snapshots to or from a remote host.

        * `name` specifies a name for replication task
        * `direction` specifies whether task will `PUSH` or `PULL` snapshots
        * `transport` is a method of snapshots transfer:
          * `SSH` transfers snapshots via an SSH connection. This method is supported everywhere but does not achieve
            great performance.
            `ssh_credentials` is a required field for this transport (Keychain Credential ID of type `SSH_CREDENTIALS`).
          * `SSH+NETCAT` uses an unencrypted connection for data transfer. This can only be used in trusted networks
            and requires a port (specified by the range from `netcat_active_side_port_min` to `netcat_active_side_port_max`)
            to be open on `netcat_active_side`.
            `ssh_credentials` is also required for the control connection.
          * `LOCAL` replicates to or from localhost
        * `source_datasets` is a non-empty list of datasets to replicate snapshots from
        * `target_dataset` is a dataset to put snapshots into. It must exist on target side
        * `recursive` and `exclude` have the same meaning as for Periodic Snapshot Task
        * `properties` control whether we should send dataset properties along with snapshots
        * `periodic_snapshot_tasks` is a list of periodic snapshot task IDs that are sources of snapshots for this
          replication task. Only push replication tasks can be bound to periodic snapshot tasks.
        * `naming_schema` is a list of naming schemas for pull replication
        * `also_include_naming_schema` is a list of naming schemas for push replication
        * `auto` allows replication to run automatically on schedule or after bound periodic snapshot task
        * `schedule` is a schedule to run replication task. Only `auto` replication tasks without bound periodic
          snapshot tasks can have a schedule
        * `restrict_schedule` restricts when replication task with bound periodic snapshot tasks runs. For example,
          you can have periodic snapshot tasks that run every 15 minutes, but only run replication task every hour.
        * Enabling `only_matching_schedule` will only replicate snapshots that match `schedule` or
          `restrict_schedule`
        * `allow_from_scratch` will destroy all snapshots on target side and replicate everything from scratch if none
          of the snapshots on target side matches source snapshots
        * `readonly` controls destination datasets readonly property:
          * `SET` will set all destination datasets to readonly=on after finishing the replication
          * `REQUIRE` will require all existing destination datasets to have readonly=on property
          * `IGNORE` will avoid this kind of behavior
        * `hold_pending_snapshots` will prevent source snapshots from being deleted by retention if replication fails
          for some reason
        * `retention_policy` specifies how to delete old snapshots on target side:
          * `SOURCE` deletes snapshots that are absent on source side
          * `CUSTOM` deletes snapshots that are older than `lifetime_value` and `lifetime_unit`
          * `NONE` does not delete any snapshots
        * `compression` compresses SSH stream. Available only for SSH transport
        * `speed_limit` limits speed of SSH stream. Available only for SSH transport
        * `large_block`, `embed` and `compressed` are various ZFS stream flags documented in `man zfs send`
        * `retries` specifies number of retries before considering replication failed

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.create",
                "params": [{
                    "name": "Work Backup",
                    "direction": "PUSH",
                    "transport": "SSH",
                    "ssh_credentials": [12],
                    "source_datasets", ["data/work"],
                    "target_dataset": "repl/work",
                    "recursive": true,
                    "periodic_snapshot_tasks": [5],
                    "auto": true,
                    "restrict_schedule": {
                        "minute": "0",
                        "hour": "*/2",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    },
                    "only_matching_schedule": true,
                    "retention_policy": "CUSTOM",
                    "lifetime_value": 1,
                    "lifetime_unit": "WEEK",
                }]
            }
        """

        verrors = ValidationErrors()
        verrors.add_child("replication_create", await self._validate(data))

        if verrors:
            raise verrors

        periodic_snapshot_tasks = data["periodic_snapshot_tasks"]
        await self.compress(data)

        id = await self.middleware.call(
            "datastore.insert", self._config.datastore, data,
            {"prefix": self._config.datastore_prefix})

        await self._set_periodic_snapshot_tasks(id, periodic_snapshot_tasks)

        await self.middleware.call("service.restart", "cron")
        await self.middleware.call("zettarepl.update_tasks")

        return await self._get_instance(id)

    @accepts(Int("id"),
             Patch(
                 "replication_create",
                 "replication_update",
                 ("attr", {
                     "update": True
                 }),
             ))
    async def do_update(self, id, data):
        """
        Update a Replication Task with specific `id`

        See the documentation for `create` method for information on payload contents

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.update",
                "params": [
                    7,
                    {
                        "name": "Work Backup",
                        "direction": "PUSH",
                        "transport": "SSH",
                        "ssh_credentials": [12],
                        "source_datasets", ["data/work"],
                        "target_dataset": "repl/work",
                        "recursive": true,
                        "periodic_snapshot_tasks": [5],
                        "auto": true,
                        "restrict_schedule": {
                            "minute": "0",
                            "hour": "*/2",
                            "dom": "*",
                            "month": "*",
                            "dow": "1,2,3,4,5",
                            "begin": "09:00",
                            "end": "18:00"
                        },
                        "only_matching_schedule": true,
                        "retention_policy": "CUSTOM",
                        "lifetime_value": 1,
                        "lifetime_unit": "WEEK",
                    }
                ]
            }
        """

        old = await self._get_instance(id)

        new = old.copy()
        if new["ssh_credentials"]:
            new["ssh_credentials"] = new["ssh_credentials"]["id"]
        new["periodic_snapshot_tasks"] = [
            task["id"] for task in new["periodic_snapshot_tasks"]
        ]
        new.update(data)

        verrors = ValidationErrors()
        verrors.add_child("replication_update", await self._validate(new, id))

        if verrors:
            raise verrors

        periodic_snapshot_tasks = new["periodic_snapshot_tasks"]
        await self.compress(new)

        new.pop("state", None)
        new.pop("job", None)

        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self._set_periodic_snapshot_tasks(id, periodic_snapshot_tasks)

        await self.middleware.call("service.restart", "cron")
        await self.middleware.call("zettarepl.update_tasks")

        return await self._get_instance(id)

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete a Replication Task with specific `id`

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.delete",
                "params": [
                    1
                ]
            }
        """

        response = await self.middleware.call("datastore.delete",
                                              self._config.datastore, id)

        await self.middleware.call("service.restart", "cron")
        await self.middleware.call("zettarepl.update_tasks")

        return response

    @item_method
    @accepts(Int("id"), Bool("really_run", default=True, hidden=True))
    @job(logs=True)
    async def run(self, job, id, really_run):
        """
        Run Replication Task of `id`.
        """
        if really_run:
            task = await self._get_instance(id)

            if not task["enabled"]:
                raise CallError("Task is not enabled")

            if task["state"]["state"] == "RUNNING":
                raise CallError("Task is already running")

            if task["state"]["state"] == "HOLD":
                raise CallError("Task is on hold")

        await self.middleware.call("zettarepl.run_replication_task", id,
                                   really_run, job)

    async def _validate(self, data, id=None):
        verrors = ValidationErrors()

        await self._ensure_unique(verrors, "", "name", data["name"], id)

        # Direction

        snapshot_tasks = []

        if data["direction"] == "PUSH":
            e, snapshot_tasks = await self._query_periodic_snapshot_tasks(
                data["periodic_snapshot_tasks"])
            verrors.add_child("periodic_snapshot_tasks", e)

            if data["naming_schema"]:
                verrors.add("naming_schema",
                            "This field has no sense for push replication")

            if not snapshot_tasks and not data["also_include_naming_schema"]:
                verrors.add(
                    "periodic_snapshot_tasks",
                    "You must either bind a periodic snapshot task or provide "
                    "\"Also Include Naming Schema\" for a push replication task")

            if data["schedule"]:
                if data["periodic_snapshot_tasks"]:
                    verrors.add(
                        "schedule",
                        "Push replication can't be bound to periodic snapshot task and have "
                        "schedule at the same time")
            else:
                if data["auto"] and not data["periodic_snapshot_tasks"]:
                    verrors.add(
                        "auto",
                        "Push replication that runs automatically must be either "
                        "bound to periodic snapshot task or have schedule")

        if data["direction"] == "PULL":
            if data["schedule"]:
                pass
            else:
                if data["auto"]:
                    verrors.add(
                        "auto",
                        "Pull replication that runs automatically must have schedule"
                    )

            if data["periodic_snapshot_tasks"]:
                verrors.add(
                    "periodic_snapshot_tasks",
                    "Pull replication can't be bound to periodic snapshot task"
                )

            if not data["naming_schema"]:
                verrors.add("naming_schema",
                            "Naming schema is required for pull replication")

            if data["also_include_naming_schema"]:
                verrors.add("also_include_naming_schema",
                            "This field has no sense for pull replication")

            if data["hold_pending_snapshots"]:
                verrors.add(
                    "hold_pending_snapshots",
                    "Pull replication tasks can't hold pending snapshots because "
                    "they don't do source retention")

        # Transport

        if data["transport"] == "SSH+NETCAT":
            if data["netcat_active_side"] is None:
                verrors.add(
                    "netcat_active_side",
                    "You must choose active side for SSH+netcat replication")

            if data["netcat_active_side_port_min"] is not None and data[
                    "netcat_active_side_port_max"] is not None:
                if data["netcat_active_side_port_min"] > data[
                        "netcat_active_side_port_max"]:
                    verrors.add(
                        "netcat_active_side_port_max",
                        "Please specify value greater or equal than netcat_active_side_port_min"
                    )

            if data["compression"] is not None:
                verrors.add(
                    "compression",
                    "Compression is not supported for SSH+netcat replication")

            if data["speed_limit"] is not None:
                verrors.add(
                    "speed_limit",
                    "Speed limit is not supported for SSH+netcat replication")
        else:
            if data["netcat_active_side"] is not None:
                verrors.add(
                    "netcat_active_side",
                    "This field only has sense for SSH+netcat replication")

            for k in [
                    "netcat_active_side_listen_address",
                    "netcat_active_side_port_min",
                    "netcat_active_side_port_max",
                    "netcat_passive_side_connect_address"
            ]:
                if data[k] is not None:
                    verrors.add(
                        k,
                        "This field is only meaningful for SSH+netcat replication")

        if data["transport"] == "LOCAL":
            if data["ssh_credentials"] is not None:
                verrors.add(
                    "ssh_credentials",
                    "Remote credentials have no sense for local replication")

            if data["compression"] is not None:
                verrors.add("compression",
                            "Compression has no sense for local replication")

            if data["speed_limit"] is not None:
                verrors.add("speed_limit",
                            "Speed limit has no sense for local replication")
        else:
            if data["ssh_credentials"] is None:
                verrors.add(
                    "ssh_credentials",
                    "SSH Credentials are required for non-local replication")
            else:
                try:
                    await self.middleware.call(
                        "keychaincredential.get_of_type",
                        data["ssh_credentials"], "SSH_CREDENTIALS")
                except CallError as e:
                    verrors.add("ssh_credentials", str(e))

        # Common for all directions and transports

        for i, source_dataset in enumerate(data["source_datasets"]):
            for snapshot_task in snapshot_tasks:
                if is_child(source_dataset, snapshot_task["dataset"]):
                    if data["recursive"]:
                        for exclude in snapshot_task["exclude"]:
                            if exclude not in data["exclude"]:
                                verrors.add(
                                    "exclude",
                                    f"You should exclude {exclude!r}, as the bound periodic snapshot "
                                    f"task for dataset {snapshot_task['dataset']!r} does"
                                )
                    else:
                        if source_dataset in snapshot_task["exclude"]:
                            verrors.add(
                                f"source_datasets.{i}",
                                f"Dataset {source_dataset!r} is excluded by bound "
                                f"periodic snapshot task for dataset "
                                f"{snapshot_task['dataset']!r}")

        if not data["recursive"] and data["exclude"]:
            verrors.add(
                "exclude",
                "Excluding child datasets is only supported for recursive replication"
            )

        for i, v in enumerate(data["exclude"]):
            if not any(
                    v.startswith(ds + "/") for ds in data["source_datasets"]):
                verrors.add(
                    f"exclude.{i}",
                    "This dataset is not a child of any of source datasets")

        if data["replicate"]:
            if not data["recursive"]:
                verrors.add(
                    "recursive",
                    "This option is required for full filesystem replication")

            if data["exclude"]:
                verrors.add(
                    "exclude",
                    "This option is not supported for full filesystem replication"
                )

            if not data["properties"]:
                verrors.add(
                    "properties",
                    "This option is required for full filesystem replication")

        if data["schedule"]:
            if not data["auto"]:
                verrors.add(
                    "schedule",
                    "You can't have schedule for replication that does not run automatically"
                )
        else:
            if data["only_matching_schedule"]:
                verrors.add(
                    "only_matching_schedule",
                    "You can't have only-matching-schedule without schedule")

        if data["retention_policy"] == "CUSTOM":
            if data["lifetime_value"] is None:
                verrors.add(
                    "lifetime_value",
                    "This field is required for custom retention policy")
            if data["lifetime_unit"] is None:
                verrors.add(
                    "lifetime_value",
                    "This field is required for custom retention policy")
        else:
            if data["lifetime_value"] is not None:
                verrors.add(
                    "lifetime_value",
                    "This field has no sense for specified retention policy")
            if data["lifetime_unit"] is not None:
                verrors.add(
                    "lifetime_unit",
                    "This field has no sense for specified retention policy")

        if data["enabled"]:
            for i, snapshot_task in enumerate(snapshot_tasks):
                if not snapshot_task["enabled"]:
                    verrors.add(
                        f"periodic_snapshot_tasks.{i}",
                        "You can't bind disabled periodic snapshot task to enabled replication task"
                    )

        return verrors

    async def _set_periodic_snapshot_tasks(self, replication_task_id,
                                           periodic_snapshot_tasks_ids):
        await self.middleware.call(
            "datastore.delete",
            "storage.replication_repl_periodic_snapshot_tasks",
            [["replication_id", "=", replication_task_id]])
        for periodic_snapshot_task_id in periodic_snapshot_tasks_ids:
            await self.middleware.call(
                "datastore.insert",
                "storage.replication_repl_periodic_snapshot_tasks",
                {
                    "replication_id": replication_task_id,
                    "task_id": periodic_snapshot_task_id,
                },
            )

    async def _query_periodic_snapshot_tasks(self, ids):
        verrors = ValidationErrors()

        query_result = await self.middleware.call("pool.snapshottask.query",
                                                  [["id", "in", ids]])

        snapshot_tasks = []
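        # Python for/else: the else branch runs only when the inner loop
        # finishes without hitting `break`, i.e. no queried task matched task_id.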
        for i, task_id in enumerate(ids):
            for task in query_result:
                if task["id"] == task_id:
                    snapshot_tasks.append(task)
                    break
            else:
                verrors.add(str(i), "This snapshot task does not exist")

        return verrors, snapshot_tasks

    @accepts(
        Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL"], required=True),
        Int("ssh_credentials", null=True, default=None))
    async def list_datasets(self, transport, ssh_credentials=None):
        """
        List datasets on the remote side

        Accepts `transport` and SSH credentials ID (for non-local transport)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.list_datasets",
                "params": [
                    "SSH",
                    7
                ]
            }
        """

        return await self.middleware.call("zettarepl.list_datasets", transport,
                                          ssh_credentials)

    @accepts(Str("dataset", required=True),
             Str("transport",
                 enum=["SSH", "SSH+NETCAT", "LOCAL"],
                 required=True), Int("ssh_credentials",
                                     null=True,
                                     default=None))
    async def create_dataset(self, dataset, transport, ssh_credentials=None):
        """
        Creates a dataset on the remote side

        Accepts `dataset` name, `transport` and SSH credentials ID (for non-local transport)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.create_dataset",
                "params": [
                    "repl/work",
                    "SSH",
                    7
                ]
            }
        """

        return await self.middleware.call("zettarepl.create_dataset", dataset,
                                          transport, ssh_credentials)

    @accepts()
    async def list_naming_schemas(self):
        """
        List all naming schemas used in periodic snapshot and replication tasks.
        """
        naming_schemas = []
        for snapshottask in await self.middleware.call(
                "pool.snapshottask.query"):
            naming_schemas.append(snapshottask["naming_schema"])
        for replication in await self.middleware.call("replication.query"):
            naming_schemas.extend(replication["naming_schema"])
            naming_schemas.extend(replication["also_include_naming_schema"])
        return sorted(set(naming_schemas))

    @accepts(
        List("datasets", empty=False, items=[
            Path("dataset", empty=False),
        ]),
        List("naming_schema",
             empty=False,
             items=[
                 Str("naming_schema",
                     validators=[ReplicationSnapshotNamingSchema()])
             ]),
        Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL"], required=True),
        Int("ssh_credentials", null=True, default=None),
    )
    async def count_eligible_manual_snapshots(self, datasets, naming_schema,
                                              transport, ssh_credentials):
        """
        Count how many existing snapshots of `datasets` match `naming_schema`.

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.count_eligible_manual_snapshots",
                "params": [
                    "repl/work",
                    ["auto-%Y-%m-%d_%H-%M"],
                    "SSH",
                    4
                ]
            }
        """
        return await self.middleware.call(
            "zettarepl.count_eligible_manual_snapshots", datasets,
            naming_schema, transport, ssh_credentials)

    @accepts(
        Str("direction", enum=["PUSH", "PULL"], required=True),
        List("source_datasets",
             items=[Path("dataset", empty=False)],
             required=True,
             empty=False),
        Path("target_dataset", required=True, empty=False),
        Str("transport",
            enum=["SSH", "SSH+NETCAT", "LOCAL", "LEGACY"],
            required=True),
        Int("ssh_credentials", null=True, default=None),
    )
    async def target_unmatched_snapshots(self, direction, source_datasets,
                                         target_dataset, transport,
                                         ssh_credentials):
        """
        Check if target has any snapshots that do not exist on source.

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.target_unmatched_snapshots",
                "params": [
                    "PUSH",
                    ["repl/work", "repl/games"],
                    "backup",
                    "SSH",
                    4
                ]
            }

        Returns

            {
                "backup/work": ["auto-2019-10-15_13-00", "auto-2019-10-15_09-00"],
                "backup/games": ["auto-2019-10-15_13-00"],
            }
        """
        return await self.middleware.call(
            "zettarepl.target_unmatched_snapshots", direction, source_datasets,
            target_dataset, transport, ssh_credentials)

    @private
    def new_snapshot_name(self, naming_schema):
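        # e.g. a naming_schema of "auto-%Y-%m-%d_%H-%M" yields snapshot names
        # like "auto-2019-10-15_13-00", matching the examples above.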
        return datetime.now().strftime(naming_schema)

    # Legacy pair support
    @private
    @accepts(
        Dict(
            "replication-pair-data",
            Str("hostname", required=True),
            Str("public-key", required=True),
            Str("user", null=True),
        ))
    async def pair(self, data):
        result = await self.middleware.call(
            "keychaincredential.ssh_pair", {
                "remote_hostname": data["hostname"],
                "username": data["user"] or "root",
                "public_key": data["public-key"],
            })
        return {
            "ssh_port": result["port"],
            "ssh_hostkey": result["host_key"],
        }
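
The exclude checks in the validation above lean on an `is_child` helper to test dataset ancestry. A minimal sketch of what such a helper could look like, assuming slash-separated ZFS dataset names (an illustration, not the source's implementation):

def is_child(child: str, parent: str) -> bool:
    # A dataset counts as a child of itself or of anything strictly below it.
    return child == parent or child.startswith(parent + "/")

With this, is_child("tank/work/sub", "tank/work") is True while is_child("tank/workspace", "tank/work") is False, which is exactly the distinction the bound-snapshot-task checks rely on.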
Example no. 5
class ISCSIGlobalService(SystemServiceService):

    class Config:
        datastore_extend = 'iscsi.global.config_extend'
        datastore_prefix = 'iscsi_'
        service = 'iscsitarget'
        service_model = 'iscsitargetglobalconfiguration'
        namespace = 'iscsi.global'

    @private
    def config_extend(self, data):
        data['isns_servers'] = data['isns_servers'].split()
        return data

    @accepts(Dict(
        'iscsiglobal_update',
        Str('basename'),
        List('isns_servers', items=[Str('server')]),
        Int('pool_avail_threshold', validators=[Range(min=1, max=99)], null=True),
        Bool('alua'),
        update=True
    ))
    async def do_update(self, data):
        """
        `alua` is a no-op for FreeNAS.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        servers = data.get('isns_servers') or []
        for server in servers:
            reg = RE_IP_PORT.search(server)
            if reg:
                ip = reg.group(1)
                if ip and ip[0] == '[' and ip[-1] == ']':
                    ip = ip[1:-1]
                try:
                    ip_validator = IpAddress()
                    ip_validator(ip)
                    continue
                except ValueError:
                    pass
            verrors.add('iscsiglobal_update.isns_servers', f'Server "{server}" is not a valid IP address or IP:PORT pair.')

        if verrors:
            raise verrors

        new['isns_servers'] = '\n'.join(servers)

        await self._update_service(old, new)

        if old['alua'] != new['alua']:
            await self.middleware.call('service.start', 'ix-loader')

        return await self.config()

    @filterable
    def sessions(self, filters, options):
        """
        Get a list of currently running iSCSI sessions. This includes initiator and target names
        and the unique connection IDs.
        """
        def transform(tag, text):
            if tag in (
                'target_portal_group_tag', 'max_data_segment_length', 'max_burst_length',
                'first_burst_length',
            ) and text.isdigit():
                return int(text)
            if tag in ('immediate_data', 'iser'):
                return bool(int(text))
            if tag in ('header_digest', 'data_digest', 'offload') and text == 'None':
                return None
            return text

        cp = subprocess.run(['ctladm', 'islist', '-x'], capture_output=True, text=True)
        connections = etree.fromstring(cp.stdout)
        sessions = []
        for connection in connections.xpath("//connection"):
            sessions.append({
                i.tag: transform(i.tag, i.text) for i in connection.iterchildren()
            })
        return filter_list(sessions, filters, options)

    @private
    async def alua_enabled(self):
        """
        Returns whether iSCSI ALUA is enabled or not.
        """
        if await self.middleware.call('system.is_freenas'):
            return False
        if not await self.middleware.call('failover.licensed'):
            return False

        license = (await self.middleware.call('system.info'))['license']
        if license and 'FIBRECHANNEL' in license['features']:
            return True

        return (await self.middleware.call('iscsi.global.config'))['alua']
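
The `sessions` method above flattens each `<connection>` element of the `ctladm islist -x` XML into a dict, coercing numeric and boolean tags along the way. A standalone sketch of that parsing pattern (the sample XML below is hypothetical, not verbatim ctladm output):

from lxml import etree

sample = b"""<ctlislist>
  <connection id="0">
    <initiator>iqn.1994-05.com.example:host1</initiator>
    <target_portal_group_tag>1</target_portal_group_tag>
    <immediate_data>1</immediate_data>
  </connection>
</ctlislist>"""

for connection in etree.fromstring(sample).xpath("//connection"):
    # Same shape the service builds before applying transform()
    print({i.tag: i.text for i in connection.iterchildren()})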
Example no. 6
class VMService(CRUDService, VMSupervisorMixin):
    class Config:
        namespace = 'vm'
        datastore = 'vm.vm'
        datastore_extend = 'vm.extend_vm'
        cli_namespace = 'service.vm'

    @accepts()
    async def bootloader_options(self):
        """
        Supported motherboard firmware options.
        """
        return BOOT_LOADER_OPTIONS

    @private
    async def extend_vm(self, vm):
        vm['devices'] = await self.middleware.call('vm.device.query',
                                                   [('vm', '=', vm['id'])])
        vm['status'] = await self.middleware.call('vm.status', vm['id'])
        if osc.IS_FREEBSD:
            vm.pop('cpu_mode', None)
            vm.pop('cpu_model', None)
        return vm

    @accepts(
        Dict(
            'vm_create',
            Str('cpu_mode',
                default='CUSTOM',
                enum=['CUSTOM', 'HOST-MODEL', 'HOST-PASSTHROUGH']),
            Str('cpu_model', default=None, null=True),
            Str('name', required=True),
            Str('description'),
            Int('vcpus', default=1),
            Int('cores', default=1),
            Int('threads', default=1),
            Int('memory', required=True),
            Str('bootloader',
                enum=list(BOOT_LOADER_OPTIONS.keys()),
                default='UEFI'),
            Str('grubconfig', null=True),
            List('devices',
                 items=[
                     Patch('vmdevice_create', 'vmdevice_update', ('rm', {
                         'name': 'vm'
                     }))
                 ]),
            Bool('autostart', default=True),
            Str('time', enum=['LOCAL', 'UTC'], default='LOCAL'),
            Int('shutdown_timeout',
                default=90,
                validators=[Range(min=5, max=300)]),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a Virtual Machine (VM).

        `grubconfig` may either be a path for the grub.cfg file or the actual content
        of the file to be used with GRUB bootloader.

        `devices` is a list of virtualized hardware to add to the newly created Virtual Machine.
        Failure to attach a device destroys the VM and any resources allocated by the VM devices.

        Maximum of 16 guest virtual CPUs are allowed. By default, every virtual CPU is configured as a
        separate package. Multiple cores can be configured per CPU by specifying `cores` attributes.
        `vcpus` specifies total number of CPU sockets. `cores` specifies number of cores per socket. `threads`
        specifies number of threads per core.

        `shutdown_timeout` indicates the time in seconds the system waits for the VM to cleanly shutdown. During system
        shutdown, if the VM hasn't exited after a hardware shutdown signal has been sent by the system within
        `shutdown_timeout` seconds, system initiates poweroff for the VM to stop it.
        """
        async with LIBVIRT_LOCK:
            await self.middleware.run_in_thread(self._check_setup_connection)

        verrors = ValidationErrors()
        await self.__common_validation(verrors, 'vm_create', data)
        verrors.check()

        devices = data.pop('devices')
        vm_id = await self.middleware.call('datastore.insert', 'vm.vm', data)
        try:
            await self.safe_devices_updates(devices)
        except Exception as e:
            await self.middleware.call('vm.delete', vm_id)
            raise e
        else:
            for device in devices:
                await self.middleware.call('vm.device.create', {
                    'vm': vm_id,
                    **device
                })

        await self.middleware.run_in_thread(self._add, vm_id)

        return await self.get_instance(vm_id)

    @private
    async def safe_devices_updates(self, devices):
        # Filter devices which create resources; if creating any of them
        # fails, destroy the resources that were already created and re-raise.
        created_resources = []
        existing_devices = {
            d['id']: d
            for d in await self.middleware.call('vm.device.query')
        }
        try:
            for device in devices:
                if not await self.middleware.call(
                        'vm.device.create_resource', device,
                        existing_devices.get(device.get('id'))):
                    continue

                created_resources.append(await self.middleware.call(
                    'vm.device.update_device', device,
                    existing_devices.get(device.get('id'))))
        except Exception:
            for created_resource in created_resources:
                try:
                    await self.middleware.call(
                        'vm.device.delete_resource', {
                            'zvol': created_resource['dtype'] == 'DISK',
                            'raw_file': created_resource['dtype'] == 'RAW'
                        }, created_resource)
                except Exception:
                    self.logger.warning(
                        f'Failed to delete {created_resource["dtype"]}',
                        exc_info=True)
            raise

    async def __common_validation(self, verrors, schema_name, data, old=None):
        vcpus = data['vcpus'] * data['cores'] * data['threads']
        if vcpus:
            flags = await self.middleware.call('vm.flags')
            max_vcpus = await self.middleware.call(
                'vm.maximum_supported_vcpus')
            if vcpus > max_vcpus:
                verrors.add(
                    f'{schema_name}.vcpus',
                    f'Maximum {max_vcpus} vcpus are supported. '
                    f'Please ensure the product of "{schema_name}.vcpus", "{schema_name}.cores" and '
                    f'"{schema_name}.threads" is less than {max_vcpus}.')
            elif flags['intel_vmx']:
                if vcpus > 1 and flags['unrestricted_guest'] is False:
                    verrors.add(
                        f'{schema_name}.vcpus',
                        'Only one Virtual CPU is allowed in this system.')
            elif flags['amd_rvi']:
                if vcpus > 1 and flags['amd_asids'] is False:
                    verrors.add(
                        f'{schema_name}.vcpus',
                        'Only one virtual CPU is allowed in this system.')
            elif not await self.middleware.call('vm.supports_virtualization'):
                verrors.add(schema_name,
                            'This system does not support virtualization.')

        if osc.IS_LINUX:
            if data.get('grubconfig'):
                verrors.add(
                    f'{schema_name}.grubconfig',
                    'This attribute is not supported on this platform.')
            if data.get('cpu_mode') != 'CUSTOM' and data.get('cpu_model'):
                verrors.add(
                    f'{schema_name}.cpu_model',
                    'This attribute should not be specified when "cpu_mode" is not "CUSTOM".'
                )
            elif data.get('cpu_model') and data[
                    'cpu_model'] not in await self.middleware.call(
                        'vm.cpu_model_choices'):
                verrors.add(f'{schema_name}.cpu_model',
                            'Please select a valid CPU model.')

        if 'name' in data:
            filters = [('name', '=', data['name'])]
            if old:
                filters.append(('id', '!=', old['id']))
            if await self.middleware.call('vm.query', filters):
                verrors.add(f'{schema_name}.name', 'This name already exists.',
                            errno.EEXIST)
            elif not RE_NAME.search(data['name']):
                verrors.add(f'{schema_name}.name',
                            'Only alphanumeric characters are allowed.')

        devices_ids = {
            d['id']: d
            for d in await self.middleware.call('vm.device.query')
        }
        for i, device in enumerate(data.get('devices') or []):
            try:
                await self.middleware.call('vm.device.validate_device', device,
                                           devices_ids.get(device.get('id')),
                                           data)
                if old:
                    # Enforce the presence of the "vm" attribute in each device so that
                    # it explicitly states which VM it wants to be associated with
                    if device.get('id') and device['id'] not in devices_ids:
                        verrors.add(
                            f'{schema_name}.devices.{i}.{device["id"]}',
                            f'VM device {device["id"]} does not exist.')
                    elif not device.get('vm') or device['vm'] != old['id']:
                        verrors.add(
                            f'{schema_name}.devices.{i}.{device["id"]}',
                            f'Device must be associated with current VM {old["id"]}.'
                        )
            except ValidationErrors as verrs:
                for attribute, errmsg, enumber in verrs:
                    verrors.add(f'{schema_name}.devices.{i}.{attribute}',
                                errmsg, enumber)

        # TODO: Let's please implement PCI express hierarchy as the limit on devices in KVM is quite high
        # with reports of users having thousands of disks
        # Let's validate that the VM has the correct number of slots available to accommodate currently configured devices
        if osc.IS_FREEBSD:
            if not await self.middleware.call('vm.validate_slots', data):
                verrors.add(
                    f'{schema_name}.devices',
                    'Please adjust the number of devices attached to this VM. '
                    f'A maximum of {await self.middleware.call("vm.available_slots")} PCI slots are allowed.'
                )
            if data.get('cpu_mode', 'CUSTOM') != 'CUSTOM':
                verrors.add(
                    f'{schema_name}.cpu_mode',
                    'This attribute is not supported on this platform.')
            if data.get('cpu_model'):
                verrors.add(
                    f'{schema_name}.cpu_model',
                    'This attribute is not supported on this platform.')

            data.pop('cpu_mode', None)
            data.pop('cpu_model', None)

    async def __do_update_devices(self, id, devices):
        # There are 3 cases:
        # 1) "devices" can have new device entries
        # 2) "devices" can have updated existing entries
        # 3) "devices" can have removed exiting entries
        old_devices = await self.middleware.call('vm.device.query',
                                                 [['vm', '=', id]])
        existing_devices = [d.copy() for d in devices if 'id' in d]
        for remove_id in ({d['id']
                           for d in old_devices} -
                          {d['id']
                           for d in existing_devices}):
            await self.middleware.call('vm.device.delete', remove_id)

        for update_device in existing_devices:
            device_id = update_device.pop('id')
            await self.middleware.call('vm.device.update', device_id,
                                       update_device)

        for create_device in filter(lambda v: 'id' not in v, devices):
            await self.middleware.call('vm.device.create', create_device)

    @accepts(Int('id'),
             Patch('vm_create', 'vm_update', ('attr', {
                 'update': True
             }), ('edit', {
                 'name':
                 'devices',
                 'method':
                 lambda v: setattr(v, 'items', [
                     Patch('vmdevice_create', 'vmdevice_update',
                           ('add', {
                               'name': 'id',
                               'type': 'int',
                               'required': False
                           }))
                 ])
             })))
    async def do_update(self, id, data):
        """
        Update all information of a specific VM.

        `devices` is a list of virtualized hardware to attach to the virtual machine. If `devices` is not present,
        no change is made to devices. If either the device list order or data stored by the device changes when the
        attribute is passed, these actions are taken:

        1) If a device that was previously attached to the VM is not present in the `devices` list, it is
           removed from the virtual machine.
        2) Devices are updated in the `devices` list when they contain a valid `id` attribute that corresponds to
           an existing device.
        3) Devices that do not have an `id` attribute are created and attached to the `id` VM.
        """

        old = await self.get_instance(id)
        new = old.copy()
        new.update(data)

        if new['name'] != old['name']:
            await self.middleware.run_in_thread(self._check_setup_connection)
            if old['status']['state'] == 'RUNNING':
                raise CallError(
                    'VM name can only be changed when VM is inactive')

            if old['name'] not in self.vms:
                raise CallError(f'Unable to locate domain for {old["name"]}')

        verrors = ValidationErrors()
        await self.__common_validation(verrors, 'vm_update', new, old=old)
        if verrors:
            raise verrors

        devices = new.pop('devices', [])
        new.pop('status', None)
        if devices != old['devices']:
            await self.safe_devices_updates(devices)
            await self.__do_update_devices(id, devices)

        await self.middleware.call('datastore.update', 'vm.vm', id, new)

        vm_data = await self.get_instance(id)
        if new['name'] != old['name']:
            await self.middleware.run_in_thread(self._rename_domain, old,
                                                vm_data)

        return await self.get_instance(id)

    @accepts(
        Int('id'),
        Dict(
            'vm_delete',
            Bool('zvols', default=False),
            Bool('force', default=False),
        ),
    )
    async def do_delete(self, id, data):
        """
        Delete a VM.
        """
        async with LIBVIRT_LOCK:
            vm = await self.get_instance(id)
            await self.middleware.run_in_thread(self._check_setup_connection)
            status = await self.middleware.call('vm.status', id)
            if status.get('state') == 'RUNNING':
                await self.middleware.call('vm.poweroff', id)
                # Wait at least 7 seconds for the VM to complete its post-VM
                # actions, which might require interaction with its domain
                await asyncio.sleep(7)
            elif status.get('state') == 'ERROR' and not data.get('force'):
                raise CallError(
                    'Unable to retrieve VM status. Failed to destroy VM')

            if data['zvols']:
                devices = await self.middleware.call('vm.device.query',
                                                     [('vm', '=', id),
                                                      ('dtype', '=', 'DISK')])

                for zvol in devices:
                    if not zvol['attributes']['path'].startswith('/dev/zvol/'):
                        continue

                    disk_name = zvol['attributes']['path'].rsplit(
                        '/dev/zvol/')[-1]
                    await self.middleware.call('zfs.dataset.delete', disk_name,
                                               {'recursive': True})

            await self.middleware.run_in_thread(self._undefine_domain,
                                                vm['name'])

            # We remove vm devices first
            for device in vm['devices']:
                await self.middleware.call('vm.device.delete', device['id'])
            result = await self.middleware.call('datastore.delete', 'vm.vm',
                                                id)
            if not await self.middleware.call('vm.query'):
                await self.middleware.call('vm.deinitialize_vms')
                self._clear()
            return result

    @item_method
    @accepts(Int('id'))
    def status(self, id):
        """
        Get the status of `id` VM.

        Returns a dict:
            - state, RUNNING or STOPPED
            - pid, process id if RUNNING
        """
        vm = self.middleware.call_sync('datastore.query', 'vm.vm',
                                       [['id', '=', id]], {'get': True})
        if self._has_domain(vm['name']):
            try:
                # Whatever happens, query shouldn't fail
                return self._status(vm['name'])
            except Exception:
                self.middleware.logger.debug(
                    'Failed to retrieve VM status for %r',
                    vm['name'],
                    exc_info=True)

        return {
            'state': 'ERROR',
            'pid': None,
            'domain_state': 'ERROR',
        }
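
Per the `do_create` docstring above, the guest CPU count is the product of `vcpus` (sockets), `cores` (cores per socket) and `threads` (threads per core). A hedged example payload (field names come from the `vm_create` schema above; the values are hypothetical):

payload = {
    'name': 'devvm01',        # alphanumeric only, enforced by RE_NAME
    'memory': 4096,
    'vcpus': 2,               # CPU sockets
    'cores': 2,               # cores per socket
    'threads': 2,             # threads per core -> 2 * 2 * 2 = 8 guest CPUs
    'bootloader': 'UEFI',
    'shutdown_timeout': 120,  # seconds to wait for a clean shutdown
}
# await middleware.call('vm.create', payload)

The product must not exceed `vm.maximum_supported_vcpus`, or `__common_validation` rejects the request.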
Example no. 7
class SSHService(SystemServiceService):
    class Config:
        service = "ssh"
        service_model = "ssh"
        datastore_prefix = "ssh_"
        cli_namespace = 'service.ssh'

    @accepts()
    def bindiface_choices(self):
        """
        Available choices for the bindiface attribute of SSH service.
        """
        return self.middleware.call_sync('interface.choices')

    @accepts(
        Dict('ssh_update',
             List('bindiface', items=[Str('iface')]),
             Int('tcpport', validators=[Range(min=1, max=65535)]),
             Bool('rootlogin'),
             Bool('passwordauth'),
             Bool('kerberosauth'),
             Bool('tcpfwd'),
             Bool('compression'),
             Str('sftp_log_level',
                 enum=[
                     "", "QUIET", "FATAL", "ERROR", "INFO", "VERBOSE", "DEBUG",
                     "DEBUG2", "DEBUG3"
                 ]),
             Str('sftp_log_facility',
                 enum=[
                     "", "DAEMON", "USER", "AUTH", "LOCAL0", "LOCAL1",
                     "LOCAL2", "LOCAL3", "LOCAL4", "LOCAL5", "LOCAL6", "LOCAL7"
                 ]),
             List('weak_ciphers',
                  items=[Str('cipher', enum=['AES128-CBC', 'NONE'])]),
             Str('options', max_length=None),
             update=True))
    async def do_update(self, data):
        """
        Update settings of SSH daemon service.

        If `bindiface` is empty it will listen on all available addresses.

        .. examples(websocket)::

          Make sshd listen only to igb0 interface.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "ssh.update",
                "params": [{
                    "bindiface": ["igb0"]
                }]
            }

        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        if new['bindiface']:
            verrors = ValidationErrors()
            iface_choices = await self.middleware.call('ssh.bindiface_choices')
            invalid_ifaces = list(
                filter(lambda x: x not in iface_choices, new['bindiface']))
            if invalid_ifaces:
                verrors.add(
                    'ssh_update.bindiface',
                    f'The following interfaces are not valid: {", ".join(invalid_ifaces)}',
                )
            verrors.check()

        await self._update_service(old, new)

        keyfile = "/usr/local/etc/ssh/ssh_host_ecdsa_key.pub"
        if os.path.exists(keyfile):
            with open(keyfile, "rb") as f:
                pubkey = f.read().strip().split(None, 3)[1]
            decoded_key = base64.b64decode(pubkey)
            key_digest = hashlib.sha256(decoded_key).digest()
            ssh_fingerprint = (b"SHA256:" +
                               base64.b64encode(key_digest).replace(
                                   b"=", b"")).decode("utf-8")

            syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_USER)
            syslog.syslog(
                syslog.LOG_ERR,
                'ECDSA Fingerprint of the SSH KEY: ' + ssh_fingerprint)
            syslog.closelog()

        return new

    @private
    def save_keys(self):
        update = {}
        for i in [
                "ssh_host_key",
                "ssh_host_key.pub",
                "ssh_host_dsa_key",
                "ssh_host_dsa_key.pub",
                "ssh_host_dsa_key-cert.pub",
                "ssh_host_ecdsa_key",
                "ssh_host_ecdsa_key.pub",
                "ssh_host_ecdsa_key-cert.pub",
                "ssh_host_rsa_key",
                "ssh_host_rsa_key.pub",
                "ssh_host_rsa_key-cert.pub",
                "ssh_host_ed25519_key",
                "ssh_host_ed25519_key.pub",
                "ssh_host_ed25519_key-cert.pub",
        ]:
            if osc.IS_FREEBSD:
                path = os.path.join("/usr/local/etc/ssh", i)
            else:
                path = os.path.join("/etc/ssh", i)
            if os.path.exists(path):
                with open(path, "rb") as f:
                    data = base64.b64encode(f.read()).decode("ascii")

                column = i.replace(
                    ".",
                    "_",
                ).replace("-", "_")

                update[column] = data

        old = self.middleware.call_sync('ssh.config')
        self.middleware.call_sync('datastore.update', 'services.ssh',
                                  old['id'], update)
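
The fingerprint logged by `do_update` above is the standard OpenSSH SHA256 form: the SHA-256 digest of the base64-decoded key blob, re-encoded as base64 with the padding stripped. An isolated sketch (the public key line is a truncated placeholder, not a real key):

import base64
import hashlib

pubkey_line = b"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA root@host"  # placeholder
key_blob = pubkey_line.split(None, 3)[1]
digest = hashlib.sha256(base64.b64decode(key_blob)).digest()
print((b"SHA256:" + base64.b64encode(digest).replace(b"=", b"")).decode("utf-8"))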
Example no. 8
class PoolScrubService(CRUDService):

    class Config:
        datastore = 'storage.scrub'
        datastore_extend = 'pool.scrub.pool_scrub_extend'
        datastore_prefix = 'scrub_'
        namespace = 'pool.scrub'

    @private
    async def pool_scrub_extend(self, data):
        data['pool'] = data.pop('volume')
        data['pool'] = data['pool']['id']
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        pool_pk = data.get('pool')
        if pool_pk:
            pool_obj = await self.middleware.call(
                'datastore.query',
                'storage.volume',
                [('id', '=', pool_pk)]
            )

            if len(pool_obj) == 0:
                verrors.add(
                    f'{schema}.pool',
                    'The specified volume does not exist'
                )
            elif (
                    'id' not in data.keys() or
                    (
                        'id' in data.keys() and
                        'original_pool_id' in data.keys() and
                        pool_pk != data['original_pool_id']
                    )
            ):
                scrub_obj = await self.query(filters=[('volume_id', '=', pool_pk)])
                if len(scrub_obj) != 0:
                    verrors.add(
                        f'{schema}.pool',
                        'A scrub with this pool already exists'
                    )

        return verrors, data

    @accepts(
        Dict(
            'pool_scrub_create',
            Int('pool', validators=[Range(min=1)], required=True),
            Int('threshold', validators=[Range(min=0)]),
            Str('description'),
            Cron('schedule'),
            Bool('enabled'),
            register=True
        )
    )
    async def do_create(self, data):
        verrors, data = await self.validate_data(data, 'pool_scrub_create')

        if verrors:
            raise verrors

        data['volume'] = data.pop('pool')
        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call(
            'service.restart',
            'cron',
            {'onetime': False}
        )

        return await self.query(filters=[('id', '=', data['id'])], options={'get': True})

    @accepts(
        Int('id', validators=[Range(min=1)]),
        Patch('pool_scrub_create', 'pool_scrub_update', ('attr', {'update': True}))
    )
    async def do_update(self, id, data):
        task_data = await self.query(filters=[('id', '=', id)], options={'get': True})
        original_data = task_data.copy()
        task_data['original_pool_id'] = original_data['pool']
        task_data.update(data)
        verrors, task_data = await self.validate_data(task_data, 'pool_scrub_update')

        if verrors:
            raise verrors

        task_data.pop('original_pool_id')
        Cron.convert_schedule_to_db_format(task_data)
        Cron.convert_schedule_to_db_format(original_data)

        if len(set(task_data.items()) ^ set(original_data.items())) > 0:

            task_data['volume'] = task_data.pop('pool')

            await self.middleware.call(
                'datastore.update',
                self._config.datastore,
                id,
                task_data,
                {'prefix': self._config.datastore_prefix}
            )

            await self.middleware.call(
                'service.restart',
                'cron',
                {'onetime': False}
            )

        return await self.query(filters=[('id', '=', id)], options={'get': True})

    @accepts(
        Int('id')
    )
    async def do_delete(self, id):
        response = await self.middleware.call(
            'datastore.delete',
            self._config.datastore,
            id
        )

        await self.middleware.call(
            'service.restart',
            'cron',
            {'onetime': False}
        )
        return response
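
`do_update` above detects changes via the symmetric difference of the two `items()` sets; this works because `Cron.convert_schedule_to_db_format` first flattens the schedule dict into hashable scalar columns. The idiom in isolation:

old = {'threshold': 35, 'enabled': True, 'minute': '00'}
new = {'threshold': 28, 'enabled': True, 'minute': '00'}
changed = set(new.items()) ^ set(old.items())
print(bool(changed))  # True -- 'threshold' differs between the two dicts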
Example no. 9
class FTPService(SystemServiceService):
    class Config:
        service = "ftp"
        datastore_prefix = "ftp_"

    @accepts(
        Dict('ftp_update',
             Int('port', validators=[Range(min=1, max=65535)]),
             Int('clients', validators=[Range(min=1, max=10000)]),
             Int('ipconnections', validators=[Range(min=0, max=1000)]),
             Int('loginattempt', validators=[Range(min=0, max=1000)]),
             Int('timeout', validators=[Range(min=0, max=10000)]),
             Bool('rootlogin'),
             Bool('onlyanonymous'),
             Dir('anonpath', null=True),
             Bool('onlylocal'),
             Str('banner'),
             Str('filemask', validators=[Match(r"^[0-7]{3}$")]),
             Str('dirmask', validators=[Match(r"^[0-7]{3}$")]),
             Bool('fxp'),
             Bool('resume'),
             Bool('defaultroot'),
             Bool('ident'),
             Bool('reversedns'),
             Str('masqaddress'),
             Int('passiveportsmin',
                 validators=[Or(Exact(0), Range(min=1024, max=65535))]),
             Int('passiveportsmax',
                 validators=[Or(Exact(0), Range(min=1024, max=65535))]),
             Int('localuserbw', validators=[Range(min=0)]),
             Int('localuserdlbw', validators=[Range(min=0)]),
             Int('anonuserbw', validators=[Range(min=0)]),
             Int('anonuserdlbw', validators=[Range(min=0)]),
             Bool('tls'),
             Str('tls_policy',
                 enum=[
                     "on", "off", "data", "!data", "auth", "ctrl", "ctrl+data",
                     "ctrl+!data", "auth+data", "auth+!data"
                 ]),
             Bool('tls_opt_allow_client_renegotiations'),
             Bool('tls_opt_allow_dot_login'),
             Bool('tls_opt_allow_per_user'),
             Bool('tls_opt_common_name_required'),
             Bool('tls_opt_enable_diags'),
             Bool('tls_opt_export_cert_data'),
             Bool('tls_opt_no_cert_request'),
             Bool('tls_opt_no_empty_fragments'),
             Bool('tls_opt_no_session_reuse_required'),
             Bool('tls_opt_stdenvvars'),
             Bool('tls_opt_dns_name_required'),
             Bool('tls_opt_ip_address_required'),
             Int('ssltls_certificate', null=True),
             Str('options'),
             update=True))
    async def do_update(self, data):
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if not ((new["passiveportsmin"] == 0) == (new["passiveportsmax"]
                                                  == 0)):
            verrors.add(
                "passiveportsmin",
                "passiveportsmin and passiveportsmax should be both zero or non-zero"
            )
        if not ((new["passiveportsmin"] == 0 and new["passiveportsmax"] == 0)
                or (new["passiveportsmax"] > new["passiveportsmin"])):
            verrors.add(
                "ftp_update.passiveportsmax",
                "When specified, should be greater than passiveportsmin")

        if new["onlyanonymous"] and not new["anonpath"]:
            verrors.add("ftp_update.anonpath",
                        "This field is required for anonymous login")

        if new["anonpath"]:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   "ftp_update.anonpath",
                                                   new["anonpath"])

        if new["tls"] and new["ssltls_certificate"] == 0:
            verrors.add("ftp_update.ssltls_certificate",
                        "This field is required when TLS is enabled")

        if new["masqaddress"]:
            await resolve_hostname(self.middleware, verrors,
                                   "ftp_update.masqaddress",
                                   new["masqaddress"])

        if verrors:
            raise verrors

        await self._update_service(old, new)

        if not old['tls'] and new['tls']:
            await self.middleware.call('service._start_ssl', 'proftpd')

        return new
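
The passive-port validation in `do_update` reduces to two rules: both ports are zero together, or the maximum strictly exceeds the minimum. A sketch of the rule as a standalone predicate (a hypothetical helper, not part of the service):

def passive_ports_ok(pmin: int, pmax: int) -> bool:
    if (pmin == 0) != (pmax == 0):
        return False  # one port is zero while the other is not
    return pmin == 0 or pmax > pmin

assert passive_ports_ok(0, 0)              # both unset
assert passive_ports_ok(50000, 51000)      # valid range
assert not passive_ports_ok(0, 51000)      # mismatched zero
assert not passive_ports_ok(51000, 50000)  # inverted range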
Example no. 10
class SSHService(SystemServiceService):

    class Config:
        service = "ssh"
        service_model = "ssh"
        datastore_prefix = "ssh_"
        cli_namespace = 'service.ssh'

    ENTRY = Dict(
        'ssh_entry',
        List('bindiface', items=[Str('iface')], required=True),
        Int('tcpport', validators=[Range(min=1, max=65535)], required=True),
        Bool('rootlogin', required=True),
        Bool('passwordauth', required=True),
        Bool('kerberosauth', required=True),
        Bool('tcpfwd', required=True),
        Bool('compression', required=True),
        Str(
            'sftp_log_level', enum=['', 'QUIET', 'FATAL', 'ERROR', 'INFO', 'VERBOSE', 'DEBUG', 'DEBUG2', 'DEBUG3'],
            required=True
        ),
        Str(
            'sftp_log_facility', enum=[
                '', 'DAEMON', 'USER', 'AUTH', 'LOCAL0', 'LOCAL1', 'LOCAL2', 'LOCAL3', 'LOCAL4',
                'LOCAL5', 'LOCAL6', 'LOCAL7'
            ], required=True
        ),
        List('weak_ciphers', items=[Str('cipher', enum=['AES128-CBC', 'NONE'])], required=True),
        Str('options', max_length=None, required=True),
        Str('privatekey', required=True, max_length=None),
        Str('host_dsa_key', required=True, max_length=None, null=True),
        Str('host_dsa_key_pub', required=True, max_length=None, null=True),
        Str('host_dsa_key_cert_pub', required=True, max_length=None, null=True),
        Str('host_ecdsa_key', required=True, max_length=None, null=True),
        Str('host_ecdsa_key_pub', required=True, max_length=None, null=True),
        Str('host_ecdsa_key_cert_pub', required=True, max_length=None, null=True),
        Str('host_ed25519_key', required=True, max_length=None, null=True),
        Str('host_ed25519_key_pub', required=True, max_length=None, null=True),
        Str('host_ed25519_key_cert_pub', required=True, max_length=None, null=True),
        Str('host_key', required=True, max_length=None, null=True),
        Str('host_key_pub', required=True, max_length=None, null=True),
        Str('host_rsa_key', required=True, max_length=None, null=True),
        Str('host_rsa_key_pub', required=True, max_length=None, null=True),
        Str('host_rsa_key_cert_pub', required=True, max_length=None, null=True),
        Int('id', required=True),
    )

    @accepts()
    @returns(Dict('ssh_bind_interfaces_choices', additional_attrs=True))
    def bindiface_choices(self):
        """
        Available choices for the bindiface attribute of SSH service.
        """
        return self.middleware.call_sync('interface.choices')

    @accepts(
        Patch(
            'ssh_entry', 'ssh_update',
            ('rm', {'name': 'id'}),
            ('rm', {'name': 'privatekey'}),
            ('rm', {'name': 'host_dsa_key'}),
            ('rm', {'name': 'host_dsa_key_pub'}),
            ('rm', {'name': 'host_dsa_key_cert_pub'}),
            ('rm', {'name': 'host_ecdsa_key'}),
            ('rm', {'name': 'host_ecdsa_key_pub'}),
            ('rm', {'name': 'host_ecdsa_key_cert_pub'}),
            ('rm', {'name': 'host_ed25519_key'}),
            ('rm', {'name': 'host_ed25519_key_pub'}),
            ('rm', {'name': 'host_ed25519_key_cert_pub'}),
            ('rm', {'name': 'host_key'}),
            ('rm', {'name': 'host_key_pub'}),
            ('rm', {'name': 'host_rsa_key'}),
            ('rm', {'name': 'host_rsa_key_pub'}),
            ('rm', {'name': 'host_rsa_key_cert_pub'}),
            ('attr', {'update': True}),
        )
    )
    async def do_update(self, data):
        """
        Update settings of SSH daemon service.

        If `bindiface` is empty it will listen on all available addresses.

        .. examples(websocket)::

          Make sshd listen only to igb0 interface.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "ssh.update",
                "params": [{
                    "bindiface": ["igb0"]
                }]
            }

        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        if new['bindiface']:
            verrors = ValidationErrors()
            iface_choices = await self.middleware.call('ssh.bindiface_choices')
            invalid_ifaces = list(filter(lambda x: x not in iface_choices, new['bindiface']))
            if invalid_ifaces:
                verrors.add(
                    'ssh_update.bindiface',
                    f'The following interfaces are not valid: {", ".join(invalid_ifaces)}',
                )
            verrors.check()

        await self._update_service(old, new)

        keyfile = "/usr/local/etc/ssh/ssh_host_ecdsa_key.pub"
        if os.path.exists(keyfile):
            with open(keyfile, "rb") as f:
                pubkey = f.read().strip().split(None, 3)[1]
            decoded_key = base64.b64decode(pubkey)
            key_digest = hashlib.sha256(decoded_key).digest()
            ssh_fingerprint = (b"SHA256:" + base64.b64encode(key_digest).replace(b"=", b"")).decode("utf-8")

            syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_USER)
            syslog.syslog(syslog.LOG_ERR, 'ECDSA Fingerprint of the SSH KEY: ' + ssh_fingerprint)
            syslog.closelog()

        return await self.config()

    keys = [
        (
            os.path.join("/etc/ssh", i),
            i.replace(".", "_",).replace("-", "_")
        )
        for i in [
            "ssh_host_key",
            "ssh_host_key.pub",
            "ssh_host_dsa_key",
            "ssh_host_dsa_key.pub",
            "ssh_host_dsa_key-cert.pub",
            "ssh_host_ecdsa_key",
            "ssh_host_ecdsa_key.pub",
            "ssh_host_ecdsa_key-cert.pub",
            "ssh_host_rsa_key",
            "ssh_host_rsa_key.pub",
            "ssh_host_rsa_key-cert.pub",
            "ssh_host_ed25519_key",
            "ssh_host_ed25519_key.pub",
            "ssh_host_ed25519_key-cert.pub",
        ]
    ]

    @private
    def cleanup_keys(self):
        config = self.middleware.call_sync("datastore.config", "services.ssh")
        for path, column in self.keys:
            if not config[column] and os.path.exists(path):
                self.middleware.logger.warning("Removing irrelevant SSH host key %r", path)
                os.unlink(path)

    @private
    def generate_keys(self):
        self.middleware.logger.debug("Generating SSH host keys")
        p = subprocess.run(
            # For each of the key types (rsa, dsa, ecdsa and ed25519) for which host keys do not exist,
            # generate the host keys with the default key file path, an empty passphrase, default bits
            # for the key type, and default comment.
            ["ssh-keygen", "-A"],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            encoding="utf-8",
            errors="ignore"
        )
        if p.returncode != 0:
            self.middleware.logger.error("Error generating SSH host keys: %s", p.stdout)

    @private
    def save_keys(self):
        update = {}
        old = self.middleware.call_sync('datastore.query', 'services.ssh', [], {'get': True})
        for path, column in self.keys:
            if os.path.exists(path):
                with open(path, "rb") as f:
                    data = base64.b64encode(f.read()).decode("ascii")
                    if data != old[column]:
                        update[column] = data

        if update:
            self.middleware.call_sync('datastore.update', 'services.ssh', old['id'], update, {'ha_sync': False})
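
The `keys` table above derives each database column name from the key file name by replacing dots and dashes with underscores, for example:

name = "ssh_host_ed25519_key-cert.pub"
print(name.replace(".", "_").replace("-", "_"))  # ssh_host_ed25519_key_cert_pub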
Example no. 11
class IdmapDomainService(CRUDService):
    class Config:
        datastore = 'directoryservice.idmap_domain'
        datastore_prefix = 'idmap_domain_'
        namespace = 'idmap'
        datastore_extend = f'{namespace}.idmap_extend'

    @private
    async def idmap_extend(self, data):
        if data.get('idmap_backend'):
            data['idmap_backend'] = data['idmap_backend'].upper()

        opt_enums = ['ssl', 'linked_service']
        if data.get('options'):
            for i in opt_enums:
                if data['options'].get(i):
                    data['options'][i] = data['options'][i].upper()

        return data

    @private
    async def idmap_compress(self, data):
        opt_enums = ['ssl', 'linked_service']
        if data.get('options'):
            for i in opt_enums:
                if data['options'].get(i):
                    data['options'][i] = data['options'][i].lower()

        data['idmap_backend'] = data['idmap_backend'].lower()

        return data

    @private
    async def get_next_idmap_range(self):
        """
        Increment next high range by 100,000,000 ids. This number has
        to accommodate the highest available rid value for a domain.
        Configured idmap ranges _must_ not overlap.
        """
        domains = await self.query()
        sorted_idmaps = sorted(domains,
                               key=lambda domain: domain['range_high'])
        low_range = sorted_idmaps[-1]['range_high'] + 1
        high_range = sorted_idmaps[-1]['range_high'] + 100000000
        return (low_range, high_range)

    @private
    async def remove_winbind_idmap_tdb(self):
        sysdataset = (await self.middleware.call(
            'systemdataset.config'))['basename']
        ts = str(datetime.datetime.now(datetime.timezone.utc).timestamp())[:10]
        await self.middleware.call('zfs.snapshot.create', {
            'dataset': f'{sysdataset}/samba4',
            'name': f'wbc-{ts}'
        })
        try:
            os.remove('/var/db/system/samba4/winbindd_idmap.tdb')

        except FileNotFoundError:
            self.logger.trace(
                "winbindd_idmap.tdb does not exist. Skipping removal.")

        except Exception:
            self.logger.debug("Failed to remove winbindd_idmap.tdb.",
                              exc_info=True)

    @private
    async def domain_info(self, domain):
        ret = {}

        if domain == 'DS_TYPE_ACTIVEDIRECTORY':
            domain = (await self.middleware.call('smb.config'))['workgroup']

        wbinfo = await run(['wbinfo', '-D', domain], check=False)
        if wbinfo.returncode != 0:
            raise CallError(f'Failed to get domain info for {domain}: '
                            f'{wbinfo.stderr.decode().strip()}')

        for entry in wbinfo.stdout.splitlines():
            kv = entry.decode().split(':')
            ret.update({kv[0].strip(): kv[1].strip()})

        return ret

    @private
    async def get_sssd_low_range(self,
                                 domain,
                                 sssd_config=None,
                                 seed=0xdeadbeef):
        """
        This is a best-effort attempt at SSSD compatibility. It will allocate the low
        range for the initial slice in the SSSD environment. The SSSD allocation algorithm
        is non-deterministic. The domain SID string is converted to a 32-bit hashed value
        using the murmurhash3 algorithm.

        The modulus of this value with the total number of available slices is used to
        pick the slice. This slice number is then used to calculate the low range for
        RID 0. With the default settings in SSSD this will be deterministic as long as
        the domain has fewer than 200,000 RIDs.
        """
        sid = (await self.domain_info(domain))['SID']
        sssd_config = {} if not sssd_config else sssd_config
        range_size = sssd_config.get('range_size', 200000)
        range_low = sssd_config.get('range_low', 10001)
        range_max = sssd_config.get('range_max', 2000200000)
        max_slices = int((range_max - range_low) / range_size)

        data = bytearray(sid.encode())
        datalen = len(data)
        hash = seed
        data_bytes = data

        c1 = 0xcc9e2d51
        c2 = 0x1b873593
        r1 = 15
        r2 = 13
        n = 0xe6546b64

        while datalen >= 4:
            k = int.from_bytes(data_bytes[:4], byteorder='little') & 0xFFFFFFFF
            self.logger.debug('%d', k)
            data_bytes = data_bytes[4:]
            datalen = datalen - 4
            k = (k * c1) & 0xFFFFFFFF
            k = (k << r1 | k >> 32 - r1) & 0xFFFFFFFF
            k = (k * c2) & 0xFFFFFFFF
            hash ^= k
            hash = (hash << r2 | hash >> 32 - r2) & 0xFFFFFFFF
            hash = (hash * 5 + n) & 0xFFFFFFFF

        if datalen > 0:
            k = 0
            if datalen >= 3:
                k = k | data_bytes[2] << 16
            if datalen >= 2:
                k = k | data_bytes[1] << 8
            if datalen >= 1:
                k = k | data_bytes[0]
                k = (k * c1) & 0xFFFFFFFF
                k = (k << r1 | k >> 32 - r1) & 0xFFFFFFFF
                k = (k * c2) & 0xFFFFFFFF
                hash ^= k

        hash = (hash ^ len(data)) & 0xFFFFFFFF
        hash ^= hash >> 16
        hash = (hash * 0x85ebca6b) & 0xFFFFFFFF
        hash ^= hash >> 13
        hash = (hash * 0xc2b2ae35) & 0xFFFFFFFF
        hash ^= hash >> 16

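        # Worked example with the defaults above and a hypothetical SID whose
        # 32-bit hash works out to 437599309:
        #   max_slices = int((2000200000 - 10001) / 200000) = 10000
        #   slice      = 437599309 % 10000 = 9309
        #   low range  = 9309 * 200000 + 200000 = 1862000000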
        return (hash % max_slices) * range_size + range_size

    @accepts()
    @job(lock='clear_idmap_cache')
    async def clear_idmap_cache(self, job):
        """
        Stop samba, remove the winbindd_cache.tdb file, start samba, flush samba's cache.
        This should be performed after finalizing idmap changes.
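
        .. examples(websocket)::

          Clear the idmap cache

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "idmap.clear_idmap_cache",
                "params": []
            }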
        """
        await self.middleware.call('service.stop', 'cifs')

        try:
            os.remove('/var/db/system/samba4/winbindd_cache.tdb')

        except FileNotFoundError:
            self.logger.debug(
                "winbindd_cache.tdb does not exist. Skipping removal.")

        except Exception:
            self.logger.debug("Failed to remove winbindd_cache.tdb.",
                              exc_info=True)

        await self.middleware.call('etc.generate', 'smb')
        await self.middleware.call('service.start', 'cifs')
        gencache_flush = await run(['net', 'cache', 'flush'], check=False)
        if gencache_flush.returncode != 0:
            raise CallError(
                f'Attempt to flush gencache failed with error: {gencache_flush.stderr.decode().strip()}'
            )

    @private
    async def autodiscover_trusted_domains(self):
        smb = await self.middleware.call('smb.config')

        ad_idmap_backend = (await self.query(
            [('name', '=', 'DS_TYPE_ACTIVEDIRECTORY')],
            {'get': True}))['idmap_backend']
        if ad_idmap_backend == IdmapBackend.AUTORID.name:
            self.logger.trace(
                'Skipping auto-generation of trusted domains due to AutoRID being enabled.'
            )
            return

        wbinfo = await run(['wbinfo', '-m', '--verbose'], check=False)
        if wbinfo.returncode != 0:
            raise CallError(
                f'wbinfo -m failed with error: {wbinfo.stderr.decode().strip()}'
            )

        for entry in wbinfo.stdout.decode().splitlines():
            c = entry.split()
            range_low, range_high = await self.get_next_idmap_range()
            if len(c) == 6 and c[0] != smb['workgroup']:
                await self.middleware.call(
                    'idmap.create', {
                        'name': c[0],
                        'dns_domain_name': c[1],
                        'range_low': range_low,
                        'range_high': range_high,
                        'idmap_backend': 'RID'
                    })

    @accepts()
    async def backend_options(self):
        """
        This returns full information about idmap backend options. Not all
        `options` are valid for every backend.
        """
        return {x.name: x.value for x in IdmapBackend}

    @accepts(
        Str('idmap_backend', enum=[x.name for x in IdmapBackend]))
    async def options_choices(self, backend):
        """
        Returns a list of supported keys for the specified idmap backend.
        """
        return IdmapBackend[backend].supported_keys()

    @accepts()
    async def backend_choices(self):
        """
        Returns array of valid idmap backend choices per directory service.
        """
        return IdmapBackend.ds_choices()

    @private
    async def validate(self, schema_name, data, verrors):
        if data['name'] == DSType.DS_TYPE_LDAP.name:
            if data['idmap_backend'] not in (await
                                             self.backend_choices())['LDAP']:
                verrors.add(
                    f'{schema_name}.idmap_backend',
                    f'idmap backend [{data["idmap_backend"]}] is not appropriate '
                    f'for the system domain type {data["name"]}')

        elif data['name'] == DSType.DS_TYPE_DEFAULT_DOMAIN.name:
            if data['idmap_backend'] != 'TDB':
                verrors.add(
                    f'{schema_name}.idmap_backend',
                    'TDB is the only supported idmap backend for DS_TYPE_DEFAULT_DOMAIN.'
                )

        if data['range_high'] < data['range_low']:
            """
            If we don't exit at this point further range() operations will raise an IndexError.
            """
            verrors.add(
                f'{schema_name}.range_low',
                'Idmap high range must be greater than idmap low range')
            return

        configured_domains = await self.query()
        ldap_enabled = (await self.middleware.call(
            'ldap.get_state')) != 'DISABLED'
        ad_enabled = (await self.middleware.call(
            'activedirectory.get_state')) != 'DISABLED'
        new_range = range(data['range_low'], data['range_high'])
        idmap_backend = data.get('idmap_backend')
        for i in configured_domains:
            # Do not generate validation error comparing to oneself.
            if i['name'] == data['name']:
                continue

            # Do not generate validation errors for overlapping with a disabled DS.
            if not ldap_enabled and i['name'] == 'DS_TYPE_LDAP':
                continue

            if not ad_enabled and i['name'] == 'DS_TYPE_ACTIVEDIRECTORY':
                continue

            # Idmap settings under Services->SMB are ignored when autorid is enabled.
            if idmap_backend == IdmapBackend.AUTORID.name and i[
                    'name'] == 'DS_TYPE_DEFAULT_DOMAIN':
                continue

            # Overlap between ranges defined for 'ad' backend are permitted.
            if idmap_backend == IdmapBackend.AD.name and i[
                    'idmap_backend'] == IdmapBackend.AD.name:
                continue

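            # A non-empty range(max(lows), min(highs) + 1) means the two id ranges overlap.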
            existing_range = range(i['range_low'], i['range_high'])
            if range(max(existing_range[0], new_range[0]),
                     min(existing_range[-1], new_range[-1]) + 1):
                verrors.add(
                    f'{schema_name}.range_low',
                    'new idmap range conflicts with existing range for domain '
                    f'[{i["name"]}].')

    @private
    async def validate_options(self,
                               schema_name,
                               data,
                               verrors,
                               check=['MISSING', 'EXTRA']):
        supported_keys = set(
            IdmapBackend[data['idmap_backend']].supported_keys())
        required_keys = set(
            IdmapBackend[data['idmap_backend']].required_keys())
        provided_keys = set([str(x) for x in data['options'].keys()])

        missing_keys = required_keys - provided_keys
        extra_keys = provided_keys - supported_keys

        if 'MISSING' in check:
            for k in missing_keys:
                verrors.add(
                    f'{schema_name}.options.{k}',
                    f'[{k}] is a required parameter for the [{data["idmap_backend"]}] idmap backend.'
                )

        if 'EXTRA' in check:
            for k in extra_keys:
                verrors.add(
                    f'{schema_name}.options.{k}',
                    f'[{k}] is not a valid parameter for the [{data["idmap_backend"]}] idmap backend.'
                )

    @private
    async def prune_keys(self, data):
        supported_keys = set(
            IdmapBackend[data['idmap_backend']].supported_keys())
        provided_keys = set([str(x) for x in data['options'].keys()])

        for k in (provided_keys - supported_keys):
            data['options'].pop(k)

    @accepts(
        Dict('idmap_domain_create',
             Str('name', required=True),
             Str('dns_domain_name'),
             Int('range_low',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Int('range_high',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Str('idmap_backend', enum=[x.name for x in IdmapBackend]),
             Int('certificate', null=True),
             Dict(
                 'options',
                 Str('schema_mode', enum=['RFC2307', 'SFU', 'SFU20']),
                 Bool('unix_primary_group'),
                 Bool('unix_nss_info'),
                 Int('rangesize',
                     validators=[Range(min=10000, max=1000000000)]),
                 Bool('readonly'),
                 Bool('ignore_builtin'),
                 Str('ldap_base_dn'),
                 Str('ldap_user_dn'),
                 Str('ldap_user_dn_password', private=True),
                 Str('ldap_url'),
                 Str('ssl', enum=[x.value for x in SSL]),
                 Str('linked_service', enum=['LOCAL_ACCOUNT', 'LDAP', 'NIS']),
                 Str('ldap_server'),
                 Bool('ldap_realm'),
                 Str('bind_path_user'),
                 Str('bind_path_group'),
                 Bool('user_cn'),
                 Str('cn_realm'),
                 Str('ldap_domain'),
                 Str('ldap_url'),
                 Bool('sssd_compat'),
             ),
             register=True))
    async def do_create(self, data):
        """
        Create a new IDMAP domain. These domains must be unique. This table
        will be automatically populated after joining an Active Directory domain
        if "allow trusted domains" is set to True in the AD service configuration.
        There are three default system domains: DS_TYPE_ACTIVEDIRECTORY, DS_TYPE_LDAP, DS_TYPE_DEFAULT_DOMAIN.
        The system domains correspond with the idmap settings under Active Directory, LDAP, and SMB
        respectively.

        `name` is the pre-Windows 2000 domain name.

        `dns_domain_name` is the DNS name of the domain.

        `idmap_backend` provides a plugin interface for Winbind to use varying
        backends to store SID/uid/gid mapping tables. The correct setting
        depends on the environment in which the NAS is deployed.

        `range_low` and `range_high` specify the UID and GID range for which this backend is authoritative.

        `certificate` references the ID of the SSL certificate to use for certificate-based
        authentication to a remote LDAP server. This parameter is not supported by all idmap
        backends, as some backends generate SID to ID mappings algorithmically without
        generating network traffic.

        `options` are additional parameters that are backend-dependent:

        `AD` idmap backend options:
        `unix_primary_group` If True, the primary group membership is fetched from the LDAP attributes (gidNumber).
        If False, the primary group membership is calculated via the "primaryGroupID" LDAP attribute.

        `unix_nss_info` if True winbind will retrieve the login shell and home directory from the LDAP attributes.
        If False or if the AD LDAP entry lacks the SFU attributes the smb4.conf parameters `template shell` and `template homedir` are used.

        `schema_mode` Defines the schema that idmap_ad should use when querying Active Directory regarding user and group information.
        This can be either the RFC2307 schema support included in Windows 2003 R2 or the Services for Unix (SFU) schema.
        For SFU 3.0 or 3.5 please choose "SFU", for SFU 2.0 please choose "SFU20". The behavior of primary group membership is
        controlled by the unix_primary_group option.

        `AUTORID` idmap backend options:
        `readonly` sets the module to read-only mode. No new ranges will be allocated and new mappings
        will not be created in the idmap pool.

        `ignore_builtin` ignores mapping requests for the BUILTIN domain.

        `LDAP` idmap backend options:
        `ldap_base_dn` defines the directory base suffix to use for SID/uid/gid mapping entries.

        `ldap_user_dn` defines the user DN to be used for authentication.

        `ldap_url` specifies the LDAP server to use for SID/uid/gid map entries.

        `ssl` specifies whether to encrypt the LDAP transport for the idmap backend.

        `NSS` idmap backend options:
        `linked_service` specifies the auxiliary directory service ID provider.

        `RFC2307` idmap backend options:
        `domain` specifies the domain for which the idmap backend is being created. The numeric id, short-form
        domain name, or long-form DNS domain name of the domain may be specified. The entry must match how it
        appears in `idmap.domain`.

        `range_low` and `range_high` specify the UID and GID range for which this backend is authoritative.

        `ldap_server` defines the type of LDAP server to use. This can either be an LDAP server provided
        by the Active Directory Domain (ad) or a stand-alone LDAP server.

        `bind_path_user` specifies the search base where user objects can be found in the LDAP server.

        `bind_path_group` specifies the search base where group objects can be found in the LDAP server.

        `user_cn` queries the cn attribute instead of the uid attribute for the user name in LDAP.

        `cn_realm` appends @realm to cn for groups (and users if `user_cn` is set) in LDAP queries.

        `ldap_domain` when using the LDAP server in the Active Directory server, this allows one to
        specify the domain in which to access the Active Directory server. This allows using trust
        relationships while keeping all RFC 2307 records in one place. This parameter is optional;
        the default is to access the AD server in the current domain to query LDAP records.

        `ldap_url` when using a stand-alone LDAP server, this parameter specifies the LDAP URL for accessing the LDAP server.

        `ldap_user_dn` defines the user DN to be used for authentication.

        `ldap_user_dn_password` is the password to be used for LDAP authentication.

        `ldap_realm` defines the realm to use in the user and group names. This is only required when using
         `cn_realm` together with a stand-alone LDAP server.

        `RID` backend options:
        `sssd_compat` generates the idmap low range based on the same algorithm that SSSD uses by default.
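
        .. examples(websocket)::

          Create an idmap domain using the RID backend (illustrative values)

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "idmap.create",
                "params": [{
                    "name": "TESTDOM",
                    "range_low": 1000000,
                    "range_high": 1999999,
                    "idmap_backend": "RID"
                }]
            }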
        """
        verrors = ValidationErrors()
        if data['name'] in [x['name'] for x in await self.query()]:
            verrors.add('idmap_domain_create.name',
                        'Domain names must be unique.')

        if data['options'].get('sssd_compat'):
            if await self.middleware.call('activedirectory.get_state'
                                          ) != 'HEALTHY':
                verrors.add(
                    'idmap_domain_create.options',
                    'AD service must be enabled and started to '
                    'generate an SSSD-compatible id range')
                verrors.check()

            data['range_low'] = await self.get_sssd_low_range(data['name'])
            data['range_high'] = data['range_low'] + 100000000

        await self.validate('idmap_domain_create', data, verrors)
        await self.validate_options('idmap_domain_create', data, verrors)
        if data.get('certificate') and not data['options'].get('ssl'):
            verrors.add(
                'idmap_domain_create.certificate',
                f'The {data["idmap_backend"]} idmap backend does not '
                'generate LDAP traffic. Certificates do not apply.')
        verrors.check()

        if data['options'].get('ldap_user_dn_password'):
            try:
                DSType[data["name"]]
                domain = (await
                          self.middleware.call("smb.config"))['workgroup']
            except KeyError:
                domain = data["name"]

            secret = data['options'].pop('ldap_user_dn_password')

            await self.middleware.call("directoryservices.set_ldap_secret",
                                       domain, secret)
            await self.middleware.call("directoryservices.backup_secrets")

        final_options = IdmapBackend[data['idmap_backend']].defaults()
        final_options.update(data['options'])
        data['options'] = final_options
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("idmap_domain_create", "idmap_domain_update",
                   ("attr", {
                       "update": True
                   })))
    async def do_update(self, id, data):
        """
        Update a domain by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        if data.get('idmap_backend'
                    ) and data['idmap_backend'] != old['idmap_backend']:
            """
            Remove options from previous backend because they are almost certainly
            not valid for the new backend.
            """
            new['options'] = {}

        new.update(data)
        tmp = data.copy()
        verrors = ValidationErrors()
        if old['name'] in [x.name
                           for x in DSType] and old['name'] != new['name']:
            verrors.add(
                'idmap_domain_update.name',
                f'Changing name of default domain {old["name"]} is not permitted'
            )

        if new['options'].get(
                'sssd_compat') and not old['options'].get('sssd_compat'):
            if await self.middleware.call('activedirectory.get_state'
                                          ) != 'HEALTHY':
                verrors.add(
                    'idmap_domain_update.options',
                    'AD service must be enabled and started to '
                    'generate an SSSD-compatible id range')
                verrors.check()

            new['range_low'] = await self.get_sssd_low_range(new['name'])
            new['range_high'] = new['range_low'] + 100000000

        if new['idmap_backend'] == 'AUTORID' and new[
                'name'] != 'DS_TYPE_ACTIVEDIRECTORY':
            verrors.add(
                "idmap_domain_update.idmap_backend",
                "AUTORID is only permitted for the default idmap backend for "
                "the active directory directory service (DS_TYPE_ACTIVEDIRECTORY)."
            )

        await self.validate('idmap_domain_update', new, verrors)
        await self.validate_options('idmap_domain_update', new, verrors,
                                    ['MISSING'])
        tmp['idmap_backend'] = new['idmap_backend']
        if data.get('options'):
            await self.validate_options('idmap_domain_update', tmp, verrors,
                                        ['EXTRA'])

        if data.get('certificate') and not data['options'].get('ssl'):
            verrors.add(
                'idmap_domain_update.certificate',
                f'The {new["idmap_backend"]} idmap backend does not '
                'generate LDAP traffic. Certificates do not apply.')
        verrors.check()
        await self.prune_keys(new)
        final_options = IdmapBackend[new['idmap_backend']].defaults()
        final_options.update(new['options'])
        new['options'] = final_options

        if new['options'].get('ldap_user_dn_password'):
            try:
                DSType[new["name"]]
                domain = (await
                          self.middleware.call("smb.config"))['workgroup']
            except KeyError:
                domain = new["name"]

            secret = new['options'].pop('ldap_user_dn_password')
            await self.middleware.call("directoryservices.set_ldap_secret",
                                       domain, secret)
            await self.middleware.call("directoryservices.backup_secrets")

        await self.idmap_compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        cache_job = await self.middleware.call('idmap.clear_idmap_cache')
        await cache_job.wait()
        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete a domain by id. Deletion of default system domains is not permitted.
        """
        if id <= 5:
            entry = await self._get_instance(id)
            raise CallError(
                f'Deleting system idmap domain [{entry["name"]}] is not permitted.',
                errno.EPERM)
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)

    @private
    async def name_to_sid(self, name):
        wb = await run([SMBCmd.WBINFO.value, '--name-to-sid', name],
                       check=False)
        if wb.returncode != 0:
            self.logger.debug("wbinfo failed with error: %s",
                              wb.stderr.decode().strip())

        return wb.stdout.decode().strip()

    @private
    async def sid_to_name(self, sid):
        """
        The last two characters of the name string encode the account type.
        """
        wb = await run([SMBCmd.WBINFO.value, '--sid-to-name', sid],
                       check=False)
        if wb.returncode != 0:
            self.logger.debug("wbinfo failed with error: %s",
                              wb.stderr.decode().strip())

        return wb.stdout.decode().strip()[:-2]

    @private
    async def sid_to_unixid(self, sid_str):
        rv = None
        gid = None
        uid = None
        wb = await run([SMBCmd.WBINFO.value, '--sid-to-gid', sid_str],
                       check=False)
        if wb.returncode == 0:
            gid = int(wb.stdout.decode().strip())

        wb = await run([SMBCmd.WBINFO.value, '--sid-to-uid', sid_str],
                       check=False)
        if wb.returncode == 0:
            uid = int(wb.stdout.decode().strip())

        if gid and (gid == uid):
            rv = {"id_type": "BOTH", "id": gid}
        elif gid:
            rv = {"id_type": "GROUP", "id": gid}
        elif uid:
            rv = {"id_type": "USER", "id": uid}

        return rv

    @private
    async def unixid_to_sid(self, data):
        """
        Samba generates SIDs for local accounts that lack explicit mapping in
        passdb.tdb or group_mapping.tdb with a prefix of S-1-22-1 (users) and
        S-1-22-2 (groups). This is not returned by wbinfo, but for consistency
        with what appears when viewed over SMB protocol we'll do the same here.
        """
        unixid = data.get("id")
        id = IDType[data.get("id_type", "GROUP")]

        if id == IDType.USER:
            wb = await run([SMBCmd.WBINFO.value, '--uid-to-sid',
                            str(unixid)],
                           check=False)
        else:
            wb = await run([SMBCmd.WBINFO.value, '--gid-to-sid',
                            str(unixid)],
                           check=False)

        if wb.returncode != 0:
            self.logger.warning("Could not convert [%d] to SID: %s", unixid,
                                wb.stderr.decode().strip())
            if WBCErr.DOMAIN_NOT_FOUND.err() in wb.stderr.decode():
                is_local = await self.middleware.call(
                    f'{"user" if id == IDType.USER else "group"}.query',
                    [("uid" if id == IDType.USER else "gid", '=', unixid)],
                    {"count": True})
                if is_local:
                    return f'S-1-22-{1 if id == IDType.USER else 2}-{unixid}'

            return None

        return wb.stdout.decode().strip()
Example #12
        return process.returncode == 0

    def _ping6_host(self, host, timeout):
        if osc.IS_LINUX:
            process = run(['ping6', '-w', f'{timeout}', host])
        else:
            process = run(['ping6', '-X', f'{timeout}', host])

        return process.returncode == 0

    @accepts(
        Dict(
            'options',
            Str('type', enum=['ICMP', 'ICMPV4', 'ICMPV6'], default='ICMP'),
            Str('hostname', required=True),
            Int('timeout', validators=[Range(min=1, max=60)], default=4),
        ),
    )
    def ping_remote(self, options):
        """
        Method that will send an ICMP echo request to "hostname"
        and will wait up to "timeout" for a reply.
        """
        ip = None
        ip_found = True
        verrors = ValidationErrors()
        try:
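            # IpAddress() is a validator instance; calling it raises ValueError for invalid IPs.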
            ip = IpAddress()
            ip(options['hostname'])
            ip = options['hostname']
        except ValueError:
Example #13
class NFSService(SystemServiceService):
    class Config:
        service = "nfs"
        service_verb = "restart"
        datastore_prefix = "nfs_srv_"
        datastore_extend = 'nfs.nfs_extend'

    @private
    async def nfs_extend(self, nfs):
        nfs["v4_krb_enabled"] = (nfs["v4_krb"] or await
                                 self.middleware.call("kerberos.keytab.query"))
        nfs["userd_manage_gids"] = nfs.pop("16")
        return nfs

    @private
    async def nfs_compress(self, nfs):
        nfs.pop("v4_krb_enabled")
        nfs["16"] = nfs.pop("userd_manage_gids")
        return nfs

    @accepts()
    async def bindip_choices(self):
        """
        Returns IP choices for the NFS service to use
        """
        return {
            d['address']: d['address']
            for d in await self.middleware.call('interface.ip_in_use', {
                'static': True,
                'any': True
            })
        }

    @private
    async def bindip(self, config):
        bindip = config['bindip']
        if osc.IS_LINUX:
            bindip = bindip[:1]

        if bindip:
            found = False
            for iface in await self.middleware.call('interface.query'):
                for alias in iface['state']['aliases']:
                    if alias['address'] in bindip:
                        found = True
                        break
                if found:
                    break
        else:
            found = True

        if found:
            await self.middleware.call('alert.oneshot_delete',
                                       'NFSBindAddress', None)
            return bindip
        else:
            await self.middleware.call('alert.oneshot_create',
                                       'NFSBindAddress', None)
            return []

    @accepts(
        Dict('nfs_update',
             Int('servers', validators=[Range(min=1, max=256)]),
             Bool('udp'),
             Bool('allow_nonroot'),
             Bool('v4'),
             Bool('v4_v3owner'),
             Bool('v4_krb'),
             Str('v4_domain'),
             List('bindip', items=[IPAddr('ip')]),
             Int('mountd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Int('rpcstatd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Int('rpclockd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Bool('userd_manage_gids'),
             Bool('mountd_log'),
             Bool('statd_lockd_log'),
             update=True))
    async def do_update(self, data):
        """
        Update NFS Service Configuration.

        `servers` represents the number of servers to create.

        When `allow_nonroot` is set, it allows non-root mount requests to be served.

        `bindip` is a list of IPs on which NFS will listen for requests. When it is unset/empty, NFS listens on
        all available addresses.

        `v4` when set means that we switch from NFSv3 to NFSv4.

        `v4_v3owner` when set means that the system will use the NFSv3 ownership model for NFSv4.

        `v4_krb` will force NFS shares to fail if the Kerberos ticket is unavailable.

        `v4_domain` overrides the default DNS domain name for NFSv4.

        `mountd_port` specifies the port mountd(8) binds to.

        `rpcstatd_port` specifies the port rpc.statd(8) binds to.

        `rpclockd_port` specifies the port rpc.lockd(8) binds to.

        .. examples(websocket)::

          Update NFS Service Configuration to listen on 192.168.0.10 and use NFSv4

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.resilver.update",
                "params": [{
                    "bindip": [
                        "192.168.0.10"
                    ],
                    "v4": true
                }]
            }
        """
        if data.get("v4") is False:
            data.setdefault("v4_v3owner", False)

        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        new_v4_krb_enabled = (new["v4_krb"] or await
                              self.middleware.call("kerberos.keytab.query"))

        if new["v4"] and new_v4_krb_enabled and not await self.middleware.call(
                "system.is_freenas"):
            if await self.middleware.call("failover.licensed"):
                gc = await self.middleware.call("datastore.config",
                                                "network.globalconfiguration")
                if not gc["gc_hostname_virtual"] or not gc["gc_domain"]:
                    verrors.add(
                        "nfs_update.v4",
                        "Enabling kerberos authentication on TrueNAS HA requires setting the virtual hostname and "
                        "domain")

        if osc.IS_LINUX:
            if len(new['bindip']) > 1:
                verrors.add(
                    'nfs_update.bindip',
                    'Listening on more than one address is not supported')
        bindip_choices = await self.bindip_choices()
        for i, bindip in enumerate(new['bindip']):
            if bindip not in bindip_choices:
                verrors.add(f'nfs_update.bindip.{i}',
                            'Please provide a valid ip address')

        if new["v4"] and new_v4_krb_enabled and await self.middleware.call(
                'activedirectory.get_state') != "DISABLED":
            """
            In environments with kerberized NFSv4 enabled, we need to tell winbindd to not prefix
            usernames with the short form of the AD domain. Directly update the db and regenerate
            the smb.conf to avoid having a service disruption due to restarting the samba server.
            """
            if await self.middleware.call('smb.get_smb_ha_mode') == 'LEGACY':
                raise ValidationError(
                    'nfs_update.v4',
                    'Enabling kerberos authentication on TrueNAS HA requires '
                    'the system dataset to be located on a data pool.')
            ad = await self.middleware.call('activedirectory.config')
            await self.middleware.call('datastore.update',
                                       'directoryservice.activedirectory',
                                       ad['id'],
                                       {'ad_use_default_domain': True})
            await self.middleware.call('etc.generate', 'smb')
            await self.middleware.call('service.reload', 'cifs')

        if not new["v4"] and new["v4_v3owner"]:
            verrors.add("nfs_update.v4_v3owner",
                        "This option requires enabling NFSv4")

        if new["v4_v3owner"] and new["userd_manage_gids"]:
            verrors.add(
                "nfs_update.userd_manage_gids",
                "This option is incompatible with NFSv3 ownership model for NFSv4"
            )

        if not new["v4"] and new["v4_domain"]:
            verrors.add("nfs_update.v4_domain",
                        "This option does not apply to NFSv3")

        if verrors:
            raise verrors

        await self.nfs_compress(new)

        await self._update_service(old, new)

        await self.nfs_extend(new)

        return new
Example #14
class TwoFactorAuthService(ConfigService):

    class Config:
        datastore = 'system.twofactorauthentication'
        datastore_extend = 'auth.twofactor.two_factor_extend'
        namespace = 'auth.twofactor'

    @private
    async def two_factor_extend(self, data):
        for srv in ['ssh']:
            data['services'].setdefault(srv, False)

        return data

    @accepts(
        Dict(
            'auth_twofactor_update',
            Bool('enabled'),
            Int('otp_digits', validators=[Range(min=6, max=8)]),
            Int('window', validators=[Range(min=0)]),
            Int('interval', validators=[Range(min=5)]),
            Dict(
                'services',
                Bool('ssh', default=False)
            ),
            update=True
        )
    )
    async def do_update(self, data):
        """
        `otp_digits` represents the number of digits in the OTP.

        `window` extends the validity to `window` many counter ticks before and after the current one.

        `interval` is the duration in seconds after which an OTP expires, counted from its creation time.
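
        .. examples(websocket)::

          Enable Two Factor Authentication for SSH (illustrative values)

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "auth.twofactor.update",
                "params": [{
                    "enabled": true,
                    "services": {"ssh": true}
                }]
            }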
        """
        old_config = await self.config()
        config = old_config.copy()

        config.update(data)

        if config['enabled'] and not config['secret']:
            # Only generate a new secret on `enabled` when `secret` is not already set.
            # This spares users from having to set up the secret again on their devices.
            config['secret'] = await self.middleware.run_in_thread(
                self.generate_base32_secret
            )

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            config['id'],
            config
        )

        await self.middleware.call('service.reload', 'ssh')

        return await self.config()

    @accepts(
        Str('token', null=True)
    )
    def verify(self, token):
        """
        Returns boolean true if provided `token` is successfully authenticated.
        """
        config = self.middleware.call_sync(f'{self._config.namespace}.config')
        if not config['enabled']:
            raise CallError('Please enable Two Factor Authentication first.')

        totp = pyotp.totp.TOTP(
            config['secret'], interval=config['interval'], digits=config['otp_digits']
        )
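        # For testing, a matching token can be generated with the same parameters:
        #   pyotp.totp.TOTP(config['secret'], interval=config['interval'], digits=config['otp_digits']).now()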
        return totp.verify(token, valid_window=config['window'])

    @accepts()
    def renew_secret(self):
        """
        Generates a new secret for Two Factor Authentication. Returns boolean true on success.
        """
        config = self.middleware.call_sync(f'{self._config.namespace}.config')
        if not config['enabled']:
            raise CallError('Please enable Two Factor Authentication first.')

        self.middleware.call_sync(
            'datastore.update',
            self._config.datastore,
            config['id'], {
                'secret': self.generate_base32_secret()
            }
        )

        if config['services']['ssh']:
            self.middleware.call_sync('service.reload', 'ssh')

        return True

    @accepts()
    async def provisioning_uri(self):
        """
        Returns the provisioning URI for the OTP. This can then be encoded in a QR Code and used to
        provision an OTP app like Google Authenticator.
        """
        config = await self.middleware.call(f'{self._config.namespace}.config')
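        # The returned URI has the otpauth form (secret elided), e.g.:
        #   otpauth://totp/iXsystems:<hostname>@<product>?secret=...&issuer=iXsystems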
        return pyotp.totp.TOTP(
            config['secret'], interval=config['interval'], digits=config['otp_digits']
        ).provisioning_uri(
            f'{(await self.middleware.call("system.info"))["hostname"]}@'
            f'{await self.middleware.call("system.product_name")}',
            'iXsystems'
        )

    @private
    def generate_base32_secret(self):
        return pyotp.random_base32()
Example #15
class SystemAdvancedService(ConfigService):
    class Config:
        datastore = 'system.advanced'
        datastore_prefix = 'adv_'
        datastore_extend = 'system.advanced.system_advanced_extend'
        namespace = 'system.advanced'
        cli_namespace = 'system.advanced'

    ENTRY = Dict(
        'system_advanced_entry',
        Bool('advancedmode', required=True),
        Bool('autotune', required=True),
        Bool('kdump_enabled', required=True),
        Int('boot_scrub', validators=[Range(min=1)], required=True),
        Bool('consolemenu', required=True),
        Bool('consolemsg', required=True),
        Bool('debugkernel', required=True),
        Bool('fqdn_syslog', required=True),
        Str('motd', required=True),
        Bool('powerdaemon', required=True),
        Bool('serialconsole', required=True),
        Str('serialport', required=True),
        Str('anonstats_token', required=True),
        Str('serialspeed',
            enum=['9600', '19200', '38400', '57600', '115200'],
            required=True),
        Int('swapondrive', validators=[Range(min=0)], required=True),
        Int('overprovision',
            validators=[Range(min=0)],
            null=True,
            required=True),
        Bool('traceback', required=True),
        Bool('uploadcrash', required=True),
        Bool('anonstats', required=True),
        Str('sed_user', enum=['USER', 'MASTER'], required=True),
        Str('sysloglevel',
            enum=[
                'F_EMERG',
                'F_ALERT',
                'F_CRIT',
                'F_ERR',
                'F_WARNING',
                'F_NOTICE',
                'F_INFO',
                'F_DEBUG',
            ],
            required=True),
        Str('syslogserver'),
        Str('syslog_transport', enum=['UDP', 'TCP', 'TLS'], required=True),
        Int('syslog_tls_certificate', null=True, required=True),
        Int('syslog_tls_certificate_authority', null=True, required=True),
        List('isolated_gpu_pci_ids', items=[Str('pci_id')], required=True),
        Str('kernel_extra_options', required=True),
        Int('id', required=True),
    )

    @private
    async def system_advanced_extend(self, data):
        data['consolemsg'] = (
            await
            self.middleware.call('system.general.config'))['ui_consolemsg']

        if data.get('sed_user'):
            data['sed_user'] = data.get('sed_user').upper()

        for k in filter(
                lambda k: data[k],
            ['syslog_tls_certificate_authority', 'syslog_tls_certificate']):
            data[k] = data[k]['id']

        if data['swapondrive'] and (
                await
                self.middleware.call('system.product_type')) == 'ENTERPRISE':
            data['swapondrive'] = 0

        data.pop('sed_passwd')
        data.pop('kmip_uid')

        return data

    async def __validate_fields(self, schema, data):
        verrors = ValidationErrors()

        serial_choice = data.get('serialport')
        if data.get('serialconsole'):
            if not serial_choice:
                verrors.add(
                    f'{schema}.serialport',
                    'Please specify a serial port when the serial console option is checked'
                )
            elif serial_choice not in await self.middleware.call(
                    'system.advanced.serial_port_choices'):
                verrors.add(
                    f'{schema}.serialport',
                    'Serial port specified has not been identified by the system'
                )

        ups_port = (await self.middleware.call('ups.config'))['port']
        if not verrors and os.path.join('/dev', serial_choice
                                        or '') == ups_port:
            verrors.add(
                f'{schema}.serialport',
                'Serial port must be different from the port specified for the UPS Service'
            )

        syslog_server = data.get('syslogserver')
        if syslog_server:
            match = re.match(r"^[\w\.\-]+(\:\d+)?$", syslog_server)
            if not match:
                verrors.add(f'{schema}.syslogserver',
                            'Invalid syslog server format')
            elif ':' in syslog_server:
                port = int(syslog_server.split(':')[-1])
                if port < 0 or port > 65535:
                    verrors.add(f'{schema}.syslogserver',
                                'Port must be in the range of 0 to 65535.')

        if data['syslog_transport'] == 'TLS':
            if not data['syslog_tls_certificate_authority']:
                verrors.add(
                    f'{schema}.syslog_tls_certificate_authority',
                    'This is required when using TLS as syslog transport')
            ca_cert = await self.middleware.call(
                'certificateauthority.query',
                [['id', '=', data['syslog_tls_certificate_authority']]])
            if not ca_cert:
                verrors.add(f'{schema}.syslog_tls_certificate_authority',
                            'Unable to locate specified CA')
            elif ca_cert[0]['revoked']:
                verrors.add(f'{schema}.syslog_tls_certificate_authority',
                            'Specified CA has been revoked')

            if data['syslog_tls_certificate']:
                verrors.extend(await self.middleware.call(
                    'certificate.cert_services_validation',
                    data['syslog_tls_certificate'],
                    f'{schema}.syslog_tls_certificate', False))

        if data['isolated_gpu_pci_ids']:
            available = set()
            critical_gpus = set()
            for gpu in await self.middleware.call('device.get_gpus'):
                available.add(gpu['addr']['pci_slot'])
                if gpu['uses_system_critical_devices']:
                    critical_gpus.add(gpu['addr']['pci_slot'])

            provided = set(data['isolated_gpu_pci_ids'])
            not_available = provided - available
            cannot_isolate = provided & critical_gpus
            if not_available:
                verrors.add(
                    f'{schema}.isolated_gpu_pci_ids',
                    f'{", ".join(not_available)} GPU pci slot(s) are not available or a GPU is not configured.'
                )

            if cannot_isolate:
                verrors.add(
                    f'{schema}.isolated_gpu_pci_ids',
                    f'{", ".join(cannot_isolate)} GPU pci slot(s) consists of devices '
                    'which cannot be isolated from host.')

            if len(available - provided) < 1:
                verrors.add(
                    f'{schema}.isolated_gpu_pci_ids',
                    'A minimum of 1 GPU is required for the host to ensure it functions as desired.'
                )

        for ch in ('\n', '"'):
            if ch in data['kernel_extra_options']:
                verrors.add(f'{schema}.kernel_extra_options',
                            f'{ch!r} not allowed')

        return verrors, data

    @accepts(
        Patch(
            'system_advanced_entry',
            'system_advanced_update',
            ('rm', {
                'name': 'id'
            }),
            ('rm', {
                'name': 'anonstats_token'
            }),
            ('add', Str('sed_passwd', private=True)),
            ('attr', {
                'update': True
            }),
        ))
    async def do_update(self, data):
        """
        Update System Advanced Service Configuration.

        `consolemenu` should be disabled if the menu at console is not desired. It will default to standard login
        in the console if disabled.

        `autotune` when enabled executes the autotune script, which attempts to optimize the system based on the
        installed hardware.

        When `syslogserver` is defined, logs of `sysloglevel` or above are sent.

        `consolemsg` is a deprecated attribute and will be removed in future releases. Please use the `consolemsg`
        attribute in the `system.general` plugin.

        `isolated_gpu_pci_ids` is a list of PCI ids which are isolated from host system.
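
        .. examples(websocket)::

          Update the MOTD (illustrative values)

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "system.advanced.update",
                "params": [{
                    "motd": "Welcome to TrueNAS",
                    "consolemenu": true
                }]
            }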
        """
        consolemsg = None
        if 'consolemsg' in data:
            consolemsg = data.pop('consolemsg')
            warnings.warn(
                "`consolemsg` has been deprecated and moved to `system.general`",
                DeprecationWarning)

        config_data = await self.config()
        config_data['sed_passwd'] = await self.sed_global_password()
        config_data.pop('consolemsg')
        original_data = deepcopy(config_data)
        config_data.update(data)

        verrors, config_data = await self.__validate_fields(
            'advanced_settings_update', config_data)
        if verrors:
            raise verrors

        if config_data != original_data:
            if original_data.get('sed_user'):
                original_data['sed_user'] = original_data['sed_user'].lower()
            if config_data.get('sed_user'):
                config_data['sed_user'] = config_data['sed_user'].lower()
            if not config_data['sed_passwd'] and config_data[
                    'sed_passwd'] != original_data['sed_passwd']:
                # We want to make sure kmip uid is None in this case
                adv_config = await self.middleware.call(
                    'datastore.config', self._config.datastore)
                asyncio.ensure_future(
                    self.middleware.call('kmip.reset_sed_global_password',
                                         adv_config['adv_kmip_uid']))
                config_data['kmip_uid'] = None

            await self.middleware.call(
                'datastore.update', self._config.datastore, config_data['id'],
                config_data, {'prefix': self._config.datastore_prefix})

            if original_data['boot_scrub'] != config_data['boot_scrub']:
                await self.middleware.call('service.restart', 'cron')

            generate_grub = original_data[
                'kernel_extra_options'] != config_data['kernel_extra_options']
            if original_data['motd'] != config_data['motd']:
                await self.middleware.call('etc.generate', 'motd')

            if original_data['powerdaemon'] != config_data['powerdaemon']:
                await self.middleware.call('service.restart', 'powerd')

            if original_data['fqdn_syslog'] != config_data['fqdn_syslog']:
                await self.middleware.call('service.restart', 'syslogd')

            if (original_data['sysloglevel'].lower() !=
                    config_data['sysloglevel'].lower()
                    or original_data['syslogserver'] !=
                    config_data['syslogserver']
                    or original_data['syslog_transport'] !=
                    config_data['syslog_transport']
                    or original_data['syslog_tls_certificate'] !=
                    config_data['syslog_tls_certificate']
                    or original_data['syslog_tls_certificate_authority'] !=
                    config_data['syslog_tls_certificate_authority']):
                await self.middleware.call('service.restart', 'syslogd')

            if config_data['sed_passwd'] and original_data[
                    'sed_passwd'] != config_data['sed_passwd']:
                await self.middleware.call('kmip.sync_sed_keys')

            if config_data['kdump_enabled'] != original_data['kdump_enabled']:
                # kdump changes require a reboot to take effect. So just generating the kdump config
                # should be enough
                await self.middleware.call('etc.generate', 'kdump')
                generate_grub = True

            if original_data['isolated_gpu_pci_ids'] != config_data[
                    'isolated_gpu_pci_ids']:
                await self.middleware.call('boot.update_initramfs')

            await self.middleware.call('system.advanced.configure_tty',
                                       original_data, config_data,
                                       generate_grub)

        if consolemsg is not None:
            await self.middleware.call('system.general.update',
                                       {'ui_consolemsg': consolemsg})

        return await self.config()

    @accepts()
    @returns(Str('sed_global_password'))
    async def sed_global_password(self):
        """
        Returns configured global SED password.
        """
        passwd = (await self.middleware.call(
            'datastore.config', 'system.advanced',
            {'prefix': self._config.datastore_prefix}))['sed_passwd']
        return passwd if passwd else await self.middleware.call(
            'kmip.sed_global_password')
Example #16
class PeriodicSnapshotService(CRUDService):
    class Config:
        datastore = 'storage.task'
        datastore_prefix = 'task_'
        datastore_extend = 'pool.snapshot.periodic_snapshot_extend'
        namespace = 'pool.snapshot'

    @private
    def periodic_snapshot_extend(self, data):
        data['begin'] = str(data['begin'])
        data['end'] = str(data['end'])
        data['ret_unit'] = data['ret_unit'].upper()
        data['dow'] = [int(day) for day in data.pop('byweekday').split(',')]
        data.pop('repeat_unit', None)
        return data

    @private
    async def common_validation(self, data, schema_name):
        verrors = ValidationErrors()

        if not data['dow']:
            verrors.add(f'{schema_name}.dow',
                        'At least one day must be chosen')

        data['ret_unit'] = data['ret_unit'].lower()
        data['begin'] = time(
            *[int(value) for value in data['begin'].split(':')])
        data['end'] = time(*[int(value) for value in data['end'].split(':')])
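        # Convert the `dow` list (e.g. [1, 5]) back to the comma-separated
        # `byweekday` string ("1,5") stored in the datastore.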
        data['byweekday'] = ','.join([str(day) for day in data.pop('dow')])

        return data, verrors

    @accepts(
        Dict('periodic_snapshot_create',
             Bool('enabled', default=True),
             Bool('recursive', default=False),
             Int('interval',
                 enum=[
                     5, 10, 15, 30, 60, 120, 180, 240, 360, 720, 1440, 10080,
                     20160, 40320
                 ],
                 required=True),
             Int('ret_count', required=True),
             List('dow',
                  items=[Int('day', validators=[Range(min=1, max=7)])],
                  required=True),
             Str('begin', validators=[Time()], required=True),
             Str('end', validators=[Time()], required=True),
             Str('filesystem', required=True),
             Str('ret_unit',
                 enum=['HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR'],
                 required=True),
             register=True))
    async def do_create(self, data):

        data, verrors = await self.common_validation(
            data, 'periodic_snapshot_create')

        if data['filesystem'] not in (
                await self.middleware.call('pool.filesystem_choices')):
            verrors.add('periodic_snapshot_create.filesystem',
                        'Invalid ZFS filesystem')

        if verrors:
            raise verrors

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.middleware.call('service.restart', 'cron',
                                   {'onetime': False})

        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch('periodic_snapshot_create', 'periodic_snapshot_update',
                   ('attr', {
                       'update': True
                   })))
    async def do_update(self, id, data):

        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)

        new, verrors = await self.common_validation(
            new, 'periodic_snapshot_update')

        if old['filesystem'] != new['filesystem']:
            if new['filesystem'] not in (
                    await self.middleware.call('pool.filesystem_choices')):
                verrors.add('periodic_snapshot_update.filesystem',
                            'Invalid ZFS filesystem')

        if verrors:
            raise verrors

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self.middleware.call('service.restart', 'cron',
                                   {'onetime': False})

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        return response
Example #17
class SMARTTestService(CRUDService):
    class Config:
        datastore = 'tasks.smarttest'
        datastore_extend = 'smart.test.smart_test_extend'
        datastore_prefix = 'smarttest_'
        namespace = 'smart.test'

    async def smart_test_extend(self, data):
        disks = data.pop('disks')
        data['disks'] = [disk['disk_identifier'] for disk in disks]
        test_type = {
            'L': 'LONG',
            'S': 'SHORT',
            'C': 'CONVEYANCE',
            'O': 'OFFLINE',
        }
        data['type'] = test_type[data.pop('type')]
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        smart_tests = await self.query(filters=[('type', '=', data['type'])])
        configured_disks = [d for test in smart_tests for d in test['disks']]
        disks_dict = {
            disk['identifier']: disk['name']
            for disk in (await self.middleware.call('disk.query'))
        }

        disks = data.get('disks')
        used_disks = []
        invalid_disks = []
        for disk in disks:
            if disk in configured_disks:
                used_disks.append(disks_dict[disk])
            if disk not in disks_dict.keys():
                invalid_disks.append(disk)

        if used_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks already have tests for this type: {", ".join(used_disks)}'
            )

        if invalid_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks are invalid: {", ".join(invalid_disks)}')

        return verrors

    @accepts(
        Dict('smart_task_create',
             Cron('schedule'),
             Str('desc'),
             List('disks', items=[Str('disk')], default=[], required=True),
             Str('type',
                 enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'],
                 required=True),
             register=True))
    async def do_create(self, data):
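        # The datastore stores only the single-letter code ('L', 'S', 'C', 'O').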
        data['type'] = data.pop('type')[0]
        verrors = await self.validate_data(data, 'smart_test_create')

        if not data.get('disks'):
            verrors.add('smart_test_create.disks', 'This field is required')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self._service_change('smartd', 'restart')

        return data

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('smart_task_create', 'smart_task_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        old = await self.query(filters=[('id', '=', id)],
                               options={'get': True})
        new = old.copy()
        new.update(data)

        new['type'] = new.pop('type')[0]
        old['type'] = old.pop('type')[0]
        new_disks = [disk for disk in new['disks'] if disk not in old['disks']]
        deleted_disks = [
            disk for disk in old['disks'] if disk not in new['disks']
        ]
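        # When the test type is unchanged, validate only the newly added disks so
        # disks kept from the old configuration do not trip the duplicate-test check.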
        if old['type'] == new['type']:
            new['disks'] = new_disks
        verrors = await self.validate_data(new, 'smart_test_update')

        new['disks'] = [
            disk for disk in chain(new_disks, old['disks'])
            if disk not in deleted_disks
        ]

        if not new.get('disks'):
            verrors.add('smart_test_update.disks', 'This field is required')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self._service_change('smartd', 'restart')

        return await self.query(filters=[('id', '=', id)],
                                options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        await self._service_change('smartd', 'restart')

        return response
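
A minimal usage sketch for the service above, assuming the standard middlewared websocket client (middlewared.client.Client); the disk identifier and schedule values below are hypothetical:

from middlewared.client import Client

# Create a weekly LONG S.M.A.R.T. test; keys follow the 'smart_task_create'
# schema above, values are illustrative only.
with Client() as c:
    task = c.call('smart.test.create', {
        'disks': ['{serial}WD-ABC123'],
        'type': 'LONG',
        'schedule': {'minute': '0', 'hour': '3', 'dow': '7'},
        'desc': 'Weekly long self-test',
    })
    print(task['id'])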
Example #18
class ReportingService(ConfigService):
    class Config:
        datastore = 'system.reporting'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__rrds = {}
        for name, klass in RRD_PLUGINS.items():
            self.__rrds[name] = klass(self.middleware)

    @accepts(
        Dict('reporting_update',
             Bool('cpu_in_percentage'),
             Str('graphite'),
             Int('graph_age', validators=[Range(min=1)]),
             Int('graph_points', validators=[Range(min=1)]),
             Bool('confirm_rrd_destroy'),
             update=True))
    async def do_update(self, data):
        """
        Configure Reporting Database settings.

        If `cpu_in_percentage` is `true`, collectd reports CPU usage in percentage instead of "jiffies".

        `graphite` specifies a destination hostname or IP for collectd data sent by the Graphite plugin.

        `graph_age` specifies the maximum age of stored graphs in months. `graph_points` is the number of points for
        each hourly, daily, weekly, etc. graph. Changing these requires destroying the current reporting database,
        so when these fields are changed, an additional `confirm_rrd_destroy: true` flag must be present.

        .. examples(websocket)::

          Update reporting settings

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "reporting.update",
                "params": [{
                    "cpu_in_percentage": false,
                    "graphite": "",
                }]
            }

          Recreate reporting database with new settings

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "reporting.update",
                "params": [{
                    "graph_age": 12,
                    "graph_points": 1200,
                    "confirm_rrd_destroy": true,
                }]
            }
        """

        confirm_rrd_destroy = data.pop('confirm_rrd_destroy', False)

        old = await self.config()

        new = copy.deepcopy(old)
        new.update(data)

        verrors = ValidationErrors()

        destroy_database = False
        for k in ['graph_age', 'graph_points']:
            if old[k] != new[k]:
                destroy_database = True

                if not confirm_rrd_destroy:
                    verrors.add(
                        f'reporting_update.{k}',
                        _('Changing this option requires destroying the reporting database. This action must be '
                          'confirmed by setting the confirm_rrd_destroy flag'),
                    )

        if verrors:
            raise verrors

        await self.middleware.call('datastore.update', self._config.datastore,
                                   old['id'], new,
                                   {'prefix': self._config.datastore_prefix})

        if destroy_database:
            await self.middleware.call('service.stop', 'collectd')
            await self.middleware.call('service.stop', 'rrdcached')
            await run('sh', '-c', 'rm -rf /var/db/collectd/rrd/*', check=False)
            await self.middleware.call('reporting.setup')
            await self.middleware.call('service.start', 'rrdcached')

        await self.middleware.call('service.restart', 'collectd')

        return await self.config()

    @private
    def setup(self):
        is_freenas = self.middleware.call_sync('system.is_freenas')
        # If not is_freenas, remove the rc.conf cache. rc.conf.local runs again with the correct collectd_enable.
        # See issue #5019
        if not is_freenas:
            try:
                os.remove('/var/tmp/freenas_config.md5')
            except FileNotFoundError:
                pass

        systemdatasetconfig = self.middleware.call_sync('systemdataset.config')
        if not systemdatasetconfig['path']:
            self.middleware.logger.error('System dataset is not mounted')
            return False

        rrd_mount = f'{systemdatasetconfig["path"]}/rrd-{systemdatasetconfig["uuid"]}'
        if not os.path.exists(rrd_mount):
            self.middleware.logger.error(
                f'{rrd_mount} does not exist or is not a directory')
            return False

        # Ensure that collectd working path is a symlink to system dataset
        pwd = '/var/db/collectd/rrd'
        if os.path.exists(pwd) and (not os.path.isdir(pwd)
                                    or not os.path.islink(pwd)):
            shutil.move(pwd, f'{pwd}.{time.strftime("%Y%m%d%H%M%S")}')
        if not os.path.exists(pwd):
            os.symlink(rrd_mount, pwd)

        # Migrate legacy RAMDisk
        persist_file = '/data/rrd_dir.tar.bz2'
        if os.path.isfile(persist_file):
            with tarfile.open(persist_file) as tar:
                if 'collectd/rrd' in tar.getnames():
                    tar.extractall(pwd, get_members(tar, 'collectd/rrd/'))

            os.unlink('/data/rrd_dir.tar.bz2')

        hostname = self.middleware.call_sync('system.info')['hostname']
        if not hostname:
            hostname = self.middleware.call_sync(
                'network.configuration.config')['hostname']

        # Migrate from old version, where `hostname` was a real directory and `localhost` was a symlink.
        # Skip the case where `hostname` is "localhost", so symlink was not (and is not) needed.
        if (hostname != 'localhost'
                and os.path.isdir(os.path.join(pwd, hostname))
                and not os.path.islink(os.path.join(pwd, hostname))):
            if os.path.exists(os.path.join(pwd, 'localhost')):
                if os.path.islink(os.path.join(pwd, 'localhost')):
                    os.unlink(os.path.join(pwd, 'localhost'))
                else:
                    # This should not happen, but just in case
                    shutil.move(
                        os.path.join(pwd, 'localhost'),
                        os.path.join(
                            pwd,
                            f'localhost.bak.{time.strftime("%Y%m%d%H%M%S")}'))
            shutil.move(os.path.join(pwd, hostname),
                        os.path.join(pwd, 'localhost'))

        # Remove all directories except "localhost" and its backups (that may be erroneously created by
        # running collectd before this script)
        to_remove_dirs = [
            os.path.join(pwd, d) for d in os.listdir(pwd)
            if not d.startswith('localhost')
            and os.path.isdir(os.path.join(pwd, d))
        ]
        for r_dir in to_remove_dirs:
            subprocess.run(['rm', '-rf', r_dir])

        # Remove all symlinks (that are stale if hostname was changed).
        to_remove_symlinks = [
            os.path.join(pwd, l) for l in os.listdir(pwd)
            if os.path.islink(os.path.join(pwd, l))
        ]
        for r_symlink in to_remove_symlinks:
            os.unlink(r_symlink)

        # Create "localhost" directory if it does not exist
        if not os.path.exists(os.path.join(pwd, 'localhost')):
            os.makedirs(os.path.join(pwd, 'localhost'))

        # Create "${hostname}" -> "localhost" symlink if necessary
        if hostname != 'localhost':
            os.symlink(os.path.join(pwd, 'localhost'),
                       os.path.join(pwd, hostname))

        # Let's return a positive value to indicate that necessary collectd operations were performed successfully
        return True

    @filterable
    def graphs(self, filters, options):
        return filter_list([i.__getstate__() for i in self.__rrds.values()],
                           filters, options)

    def __rquery_to_start_end(self, query):
        unit = query.get('unit')
        if unit:
            verrors = ValidationErrors()
            for i in ('start', 'end'):
                if i in query:
                    verrors.add(
                        f'reporting_query.{i}',
                        f'{i!r} should only be used if "unit" attribute is not provided.',
                    )
            verrors.check()
        else:
            if 'start' not in query:
                unit = 'HOURLY'
            else:
                starttime = query['start']
                endtime = query.get('end') or 'now'

        if unit:
            unit = unit[0].lower()
            page = query['page']
            starttime = f'end-{page + 1}{unit}'
            if not page:
                endtime = 'now'
            else:
                endtime = f'now-{page}{unit}'
        return starttime, endtime
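
    # Worked example of the paging arithmetic in __rquery_to_start_end above
    # (hypothetical queries):
    # {'unit': 'HOUR', 'page': 0} -> starttime 'end-1h', endtime 'now'
    # {'unit': 'HOUR', 'page': 2} -> starttime 'end-3h', endtime 'now-2h'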

    @accepts(
        List('graphs',
             items=[
                 Dict(
                     'graph',
                     Str('name', required=True),
                     Str('identifier', default=None, null=True),
                 ),
             ],
             empty=False),
        Dict(
            'reporting_query',
            Str('unit', enum=['HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR']),
            Int('page', default=0),
            Str('start', empty=False),
            Str('end', empty=False),
            Bool('aggregate', default=True),
            register=True,
        ))
    def get_data(self, graphs, query):
        """
        Get reporting data for given graphs.

        List of possible graphs can be retrieved using `reporting.graphs` call.

        For the time period of the graph either `unit` and `page` OR `start` and `end` should be
        used, not both.

        `aggregate` will return aggregate available data for each graph (e.g. min, max, mean).

        .. examples(websocket)::

          Get graph data of "nfsstat" from the last hour.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "reporting.get_data",
                "params": [
                    [{"name": "nfsstat"}],
                    {"unit": "HOURLY"},
                ]
            }

        """
        starttime, endtime = self.__rquery_to_start_end(query)
        rv = []
        for i in graphs:
            try:
                rrd = self.__rrds[i['name']]
            except KeyError:
                raise CallError(f'Graph {i["name"]!r} not found.',
                                errno.ENOENT)
            rv.append(
                rrd.export(i['identifier'],
                           starttime,
                           endtime,
                           aggregate=query['aggregate']))
        return rv

    @private
    @accepts(Ref('reporting_query'))
    def get_all(self, query):
        starttime, endtime = self.__rquery_to_start_end(query)
        rv = []
        for rrd in self.__rrds.values():
            idents = rrd.get_identifiers()
            if idents is None:
                idents = [None]
            for ident in idents:
                rv.append(
                    rrd.export(ident,
                               starttime,
                               endtime,
                               aggregate=query['aggregate']))
        return rv

    @private
    def get_plugin_and_rrd_types(self, name_idents):
        rv = []
        for name, identifier in name_idents:
            rrd = self.__rrds[name]
            rv.append(((name, identifier), rrd.plugin,
                       rrd.get_rrd_types(identifier)))
        return rv
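
A minimal sketch of querying reporting data through the middlewared websocket client; the graph name 'cpu' is an assumption, use reporting.graphs to list the names available on a given system:

from middlewared.client import Client

# Fetch the last hour of CPU data; 'unit'/'page' select the window and are
# mutually exclusive with 'start'/'end', as documented above.
with Client() as c:
    data = c.call('reporting.get_data',
                  [{'name': 'cpu'}],
                  {'unit': 'HOUR', 'page': 0})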
Example #19
class NFSService(SystemServiceService):
    class Config:
        service = "nfs"
        datastore_prefix = "nfs_srv_"
        datastore_extend = 'nfs.nfs_extend'

    @private
    def nfs_extend(self, nfs):
        nfs["userd_manage_gids"] = nfs.pop("16")
        return nfs

    @private
    def nfs_compress(self, nfs):
        nfs["16"] = nfs.pop("userd_manage_gids")
        return nfs

    @accepts(
        Dict('nfs_update',
             Int('servers', validators=[Range(min=1, max=256)]),
             Bool('udp'),
             Bool('allow_nonroot'),
             Bool('v4'),
             Bool('v4_v3owner'),
             Bool('v4_krb'),
             List('bindip', items=[IPAddr('ip')]),
             Int('mountd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Int('rpcstatd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Int('rpclockd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Bool('userd_manage_gids'),
             Bool('mountd_log'),
             Bool('statd_lockd_log'),
             update=True))
    async def do_update(self, data):
        """
        Update NFS Service Configuration.

        `servers` represents number of servers to create.

        When `allow_nonroot` is set, it allows non-root mount requests to be served.

        `bindip` is a list of IPs on which NFS will listen for requests. When it is unset/empty, NFS listens on
        all available addresses.

        `v4` when set means that we switch from NFSv3 to NFSv4.

        `v4_v3owner` when set means that the system will use the NFSv3 ownership model for NFSv4.

        `mountd_port` specifies the port mountd(8) binds to.

        `rpcstatd_port` specifies the port rpc.statd(8) binds to.

        `rpclockd_port` specifies the port rpc.lockd(8) binds to.

        .. examples(websocket)::

          Update NFS Service Configuration to listen on 192.168.0.10 and use NFSv4

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.resilver.update",
                "params": [{
                    "bindip": [
                        "192.168.0.10"
                    ],
                    "v4": true
                }]
            }
        """
        if data.get("v4") is False:
            data.setdefault("v4_v3owner", False)

        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if new["v4"] and new["v4_krb"] and not await self.middleware.call(
                "system.is_freenas"):
            if await self.middleware.call("failover.licensed"):
                gc = await self.middleware.call("datastore.config",
                                                "network.globalconfiguration")
                if not gc["gc_hostname_virtual"] or gc["gc_domain"]:
                    verrors.add(
                        "nfs_update.v4",
                        "Enabling kerberos authentication on TrueNAS HA requires setting the virtual hostname and "
                        "domain")

        if not new["v4"] and new["v4_v3owner"]:
            verrors.add("nfs_update.v4_v3owner",
                        "This option requires enabling NFSv4")

        if new["v4_v3owner"] and new["userd_manage_gids"]:
            verrors.add(
                "nfs_update.userd_manage_gids",
                "This option is incompatible with NFSv3 ownership model for NFSv4"
            )

        if verrors:
            raise verrors

        self.nfs_compress(new)

        await self._update_service(old, new)

        self.nfs_extend(new)

        return new
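
A minimal sketch of the same update through the middlewared websocket client, mirroring the docstring example; the address is illustrative:

from middlewared.client import Client

# Bind NFS to a single address and enable NFSv4.
with Client() as c:
    c.call('nfs.update', {
        'bindip': ['192.168.0.10'],
        'v4': True,
    })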
Example #20
class OpenVPNServerService(SystemServiceService):
    class Config:
        namespace = 'openvpn.server'
        service = 'openvpn_server'
        service_model = 'openvpnserver'
        service_verb = 'restart'
        datastore_extend = 'openvpn.server.server_extend'

    @private
    async def server_extend(self, data):
        data['server_certificate'] = data['server_certificate']['id'] if data['server_certificate'] else None
        data['root_ca'] = data['root_ca']['id'] if data['root_ca'] else None
        data['tls_crypt_auth_enabled'] = bool(data['tls_crypt_auth'])
        return data

    @private
    async def config_valid(self):
        config = await self.config()
        if not config['root_ca']:
            raise CallError('Please configure root_ca first.')
        else:
            if not await self.middleware.call(
                    'certificateauthority.query',
                [['id', '=', config['root_ca']], ['revoked', '=', False]]):
                raise CallError(
                    'Root CA has been revoked. Please select another Root CA.')

        if not config['server_certificate']:
            raise CallError('Please configure server certificate first.')
        else:
            if not await self.middleware.call('certificate.query', [[
                    'id', '=', config['server_certificate']
            ], ['revoked', '=', False]]):
                raise CallError(
                    'Server certificate has been revoked. Please select another Server certificate.'
                )

        if not await self.validate_nobind(config):
            raise CallError(
                'Please enable "nobind" on OpenVPN Client to concurrently run OpenVPN Server/Client '
                'on the same local port without any issues.')

    @accepts()
    async def authentication_algorithm_choices(self):
        """
        Returns a dictionary of valid authentication algorithms which can be used with OpenVPN server.
        """
        return OpenVPN.digests()

    @accepts()
    async def cipher_choices(self):
        """
        Returns a dictionary of valid ciphers which can be used with OpenVPN server.
        """
        return OpenVPN.ciphers()

    @private
    async def validate(self, data, schema_name):
        verrors, data = await OpenVPN.common_validation(
            self.middleware, data, schema_name, 'server')

        if not await self.validate_nobind(data):
            verrors.add(
                f'{schema_name}.nobind',
                'Please enable "nobind" on OpenVPN Client to concurrently run OpenVPN Server/Client '
                'on the same local port without any issues.')

        if ipaddress.ip_address(
                data['server']).version == 4 and data['netmask'] > 32:
            verrors.add(
                f'{schema_name}.netmask',
                'For IPv4 server addresses please provide a netmask value from 0-32.'
            )

        verrors.check()

        return data

    @private
    async def validate_nobind(self, config):
        client_config = await self.middleware.call('openvpn.client.config')
        if (await self.middleware.call('service.started', 'openvpn_client')
                and config['port'] == client_config['port']
                and not client_config['nobind']):
            return False
        else:
            return True

    @private
    async def generate_static_key(self):
        keyfile = tempfile.NamedTemporaryFile(mode='w+', dir='/tmp/')
        await run(['openvpn', '--genkey', '--secret', keyfile.name])
        keyfile.seek(0)
        key = keyfile.read()
        keyfile.close()
        return key.strip()

    @accepts()
    async def renew_static_key(self):
        """
        Reset OpenVPN server's TLS static key which will be used to encrypt/authenticate control channel packets.
        """
        return await self.update({
            'tls_crypt_auth': await self.generate_static_key(),
            'tls_crypt_auth_enabled': True,
        })

    @accepts(Int('client_certificate_id'), Str('server_address', null=True))
    async def client_configuration_generation(self,
                                              client_certificate_id,
                                              server_address=None):
        """
        Returns a configuration for an OpenVPN client which can be used with any client to connect to a
        FreeNAS/TrueNAS OpenVPN server.

        `client_certificate_id` should be a valid certificate issued for use with OpenVPN client service.

        `server_address`, if specified, auto-fills the remote directive in the OpenVPN configuration, enabling the
        end user to connect to the OpenVPN server without editing the file.
        """
        await self.config_valid()
        config = await self.config()
        root_ca = await self.middleware.call('certificateauthority.query',
                                             [['id', '=', config['root_ca']]],
                                             {'get': True})
        client_cert = await self.middleware.call(
            'certificate.query',
            [['id', '=', client_certificate_id], ['revoked', '=', False]])
        if not client_cert:
            raise CallError(
                'Please provide a client certificate id for a certificate which exists on '
                'the system and hasn\'t been marked as revoked.')
        else:
            client_cert = client_cert[0]
            if (await OpenVPN.common_validation(
                    self.middleware, {
                        **config, 'client_certificate': client_certificate_id
                    }, '', 'client'))[0]:
                raise CallError(
                    'Please ensure provided client certificate exists in Root CA chain '
                    'and has necessary extensions set.')

        client_config = [
            'client',
            f'dev {config["device_type"].lower()}',
            f'proto {config["protocol"].lower()}',
            f'port {config["port"]}',
            f'remote "{server_address or "PLEASE FILL OUT SERVER DOMAIN/IP HERE"}"',
            'user nobody',
            'group nobody',
            'persist-key',
            'persist-tun',
            '<ca>',
            f'{root_ca["certificate"]}',
            '</ca>',
            '<cert>',
            client_cert['certificate'],
            '</cert>',
            '<key>',
            client_cert['privatekey'],
            '</key>',
            'verb 3',
            'remote-cert-tls server',
            f'compress {config["compression"].lower()}'
            if config['compression'] else None,
            f'auth {config["authentication_algorithm"]}'
            if config['authentication_algorithm'] else None,
            f'cipher {config["cipher"]}' if config['cipher'] else None,
        ]

        if config['tls_crypt_auth_enabled']:
            client_config.extend(
                ['<tls-crypt>', config['tls_crypt_auth'], '</tls-crypt>'])

        return '\n'.join(filter(bool, client_config)).strip()

    @accepts(
        Dict('openvpn_server_update',
             Bool('tls_crypt_auth_enabled'),
             Int('netmask', validators=[Range(min=0, max=128)]),
             Int('server_certificate'),
             Int('port', validators=[Port()]),
             Int('root_ca'),
             IPAddr('server'),
             Str('additional_parameters'),
             Str('authentication_algorithm', null=True),
             Str('cipher', null=True),
             Str('compression', null=True, enum=['LZO', 'LZ4']),
             Str('device_type', enum=['TUN', 'TAP']),
             Str('protocol', enum=PROTOCOLS),
             Str('tls_crypt_auth', null=True),
             Str('topology', null=True, enum=['NET30', 'P2P', 'SUBNET']),
             update=True))
    async def do_update(self, data):
        """
        Update OpenVPN Server configuration.

        When `tls_crypt_auth_enabled` is enabled and `tls_crypt_auth` not provided, a static key is automatically
        generated to be used with OpenVPN server.
        """
        old_config = await self.config()
        config = old_config.copy()

        config.update(data)

        # If tls_crypt_auth_enabled is set and we don't have a tls_crypt_auth key,
        # let's generate one please
        if config['tls_crypt_auth_enabled'] and not config['tls_crypt_auth']:
            config['tls_crypt_auth'] = await self.generate_static_key()

        config = await self.validate(config, 'openvpn_server_update')

        await self._update_service(old_config, config)

        return await self.config()
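
A minimal sketch for generating a client configuration from the server above, assuming a certificate with id 1 exists and is not revoked (both the id and the address are hypothetical):

from middlewared.client import Client

# Returns the .ovpn file body as a string, with the 'remote' directive
# pre-filled from the given server address.
with Client() as c:
    ovpn = c.call('openvpn.server.client_configuration_generation',
                  1, 'vpn.example.com')
    print(ovpn)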
Example #21
class NFSService(SystemServiceService):
    class Config:
        service = "nfs"
        datastore_prefix = "nfs_srv_"
        datastore_extend = 'nfs.nfs_extend'

    @private
    def nfs_extend(self, nfs):
        nfs["userd_manage_gids"] = nfs.pop("16")
        return nfs

    @private
    def nfs_compress(self, nfs):
        nfs["16"] = nfs.pop("userd_manage_gids")
        return nfs

    @accepts(
        Dict('nfs_update',
             Int('servers', validators=[Range(min=1, max=256)]),
             Bool('udp'),
             Bool('allow_nonroot'),
             Bool('v4'),
             Bool('v4_v3owner'),
             Bool('v4_krb'),
             List('bindip', items=[IPAddr('ip')]),
             Int('mountd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Int('rpcstatd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Int('rpclockd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Bool('userd_manage_gids'),
             Bool('mountd_log'),
             Bool('statd_lockd_log'),
             update=True))
    async def do_update(self, data):
        if data.get("v4") is False:
            data.setdefault("v4_v3owner", False)

        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if not new["v4"] and new["v4_v3owner"]:
            verrors.add("nfs_update.v4_v3owner",
                        "This option requires enabling NFSv4")

        if new["v4_v3owner"] and new["userd_manage_gids"]:
            verrors.add(
                "nfs_update.userd_manage_gids",
                "This option is incompatible with NFSv3 ownership model for NFSv4"
            )

        if verrors:
            raise verrors

        self.nfs_compress(new)

        await self._update_service(old, new)

        self.nfs_extend(new)

        return new
Example #22
class S3Service(SystemServiceService):
    class Config:
        service = "s3"
        datastore_prefix = "s3_"
        datastore_extend = "s3.config_extend"

    @private
    async def config_extend(self, s3):
        s3['storage_path'] = s3.pop('disks', None)
        s3.pop('mode', None)
        if s3.get('certificate'):
            s3['certificate'] = s3['certificate']['id']
        return s3

    @accepts(
        Dict(
            's3_update',
            Str('bindip'),
            Int('bindport', validators=[Range(min=1, max=65535)]),
            Str('access_key', validators=[Match(r"^\w+$")]),
            Str('secret_key', validators=[Match(r"^\w+$")]),
            Bool('browser'),
            Str('storage_path'),
            Int('certificate', null=True),
            update=True,
        ))
    async def do_update(self, data):
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        for attr, minlen, maxlen in (
            ('access_key', 5, 20),
            ('secret_key', 8, 40),
        ):
            curlen = len(new.get(attr, ''))
            if curlen < minlen or curlen > maxlen:
                verrors.add(
                    f's3_update.{attr}',
                    f'Attribute should be {minlen} to {maxlen} characters in length')

        if not new['storage_path']:
            verrors.add('s3_update.storage_path', 'Storage path is required')
        else:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   's3_update.storage_path',
                                                   new['storage_path'])

            if not verrors:
                if new['storage_path'].rstrip('/').count('/') < 3:
                    verrors.add(
                        's3_update.storage_path',
                        'Top level datasets are not allowed. For example, /mnt/tank/dataset is allowed'
                    )
                else:
                    # If the storage_path does not exist, let's create it
                    if not os.path.exists(new['storage_path']):
                        os.makedirs(new['storage_path'])

        if new['certificate']:
            verrors.extend(
                (await
                 self.middleware.call('certificate.cert_services_validation',
                                      new['certificate'],
                                      's3_update.certificate', False)))

        if verrors:
            raise verrors

        new['disks'] = new.pop('storage_path')

        await self._update_service(old, new)

        if (await self.middleware.call('filesystem.stat',
                                       new['disks']))['user'] != 'minio':
            await self.middleware.call('notifier.winacl_reset', new['disks'],
                                       'minio', 'minio')

        return await self.config()
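
A minimal sketch for the S3 update above; the keys and path are placeholders and must satisfy the length and dataset-depth checks in do_update:

from middlewared.client import Client

# storage_path must sit below a top-level dataset, e.g. /mnt/tank/s3data.
with Client() as c:
    c.call('s3.update', {
        'bindip': '0.0.0.0',
        'bindport': 9000,
        'access_key': 'minio',
        'secret_key': 'minio123',
        'storage_path': '/mnt/tank/s3data',
    })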
Example #23
class ISCSIPortalService(CRUDService):

    class Config:
        datastore = 'services.iscsitargetportal'
        datastore_extend = 'iscsi.portal.config_extend'
        datastore_prefix = 'iscsi_target_portal_'
        namespace = 'iscsi.portal'

    @private
    async def config_extend(self, data):
        data['listen'] = []
        for portalip in await self.middleware.call(
            'datastore.query',
            'services.iscsitargetportalip',
            [('portal', '=', data['id'])],
            {'prefix': 'iscsi_target_portalip_'}
        ):
            data['listen'].append({
                'ip': portalip['ip'],
                'port': portalip['port'],
            })
        data['discovery_authmethod'] = AUTHMETHOD_LEGACY_MAP.get(
            data.pop('discoveryauthmethod')
        )
        data['discovery_authgroup'] = data.pop('discoveryauthgroup')
        return data

    async def __validate(self, verrors, data, schema, old=None):
        if not data['listen']:
            verrors.add(f'{schema}.listen', 'At least one listen entry is required.')
        else:
            system_ips = [
                ip['address'] for ip in await self.middleware.call('interface.ip_in_use')
            ]
            system_ips.extend(['0.0.0.0', '::'])
            new_ips = set(i['ip'] for i in data['listen']) - set(i['ip'] for i in old['listen']) if old else set()
            for i in data['listen']:
                filters = [
                    ('iscsi_target_portalip_ip', '=', i['ip']),
                    ('iscsi_target_portalip_port', '=', i['port']),
                ]
                if schema == 'iscsiportal_update':
                    filters.append(('iscsi_target_portalip_portal', '!=', data['id']))
                if await self.middleware.call(
                    'datastore.query', 'services.iscsitargetportalip', filters
                ):
                    verrors.add(f'{schema}.listen', f'{i["ip"]}:{i["port"]} already in use.')

                if (
                    (i['ip'] in new_ips or not new_ips) and
                    i['ip'] not in system_ips
                ):
                    verrors.add(f'{schema}.listen', f'IP {i["ip"]} not configured on this system.')

        if data['discovery_authgroup']:
            if not await self.middleware.call(
                'datastore.query', 'services.iscsitargetauthcredential',
                [('iscsi_target_auth_tag', '=', data['discovery_authgroup'])]
            ):
                verrors.add(
                    f'{schema}.discovery_authgroup',
                    f'Auth Group "{data["discovery_authgroup"]}" not found.',
                    errno.ENOENT,
                )
        elif data['discovery_authmethod'] in ('CHAP', 'CHAP_MUTUAL'):
            verrors.add(f'{schema}.discovery_authgroup', 'This field is required if discovery method is '
                                                         'set to CHAP or CHAP Mutual.')

    @accepts(Dict(
        'iscsiportal_create',
        Str('comment'),
        Str('discovery_authmethod', default='NONE', enum=['NONE', 'CHAP', 'CHAP_MUTUAL']),
        Int('discovery_authgroup', default=None, null=True),
        List('listen', required=True, items=[
            Dict(
                'listen',
                IPAddr('ip', required=True),
                Int('port', default=3260, validators=[Range(min=1, max=65535)]),
            ),
        ], default=[]),
        register=True,
    ))
    async def do_create(self, data):
        """
        Create a new iSCSI Portal.

        `discovery_authgroup` is required for CHAP and CHAP_MUTUAL.
        """
        verrors = ValidationErrors()
        await self.__validate(verrors, data, 'iscsiportal_create')
        if verrors:
            raise verrors

        # tag attribute increments sequentially
        data['tag'] = (await self.middleware.call(
            'datastore.query', self._config.datastore, [], {'count': True}
        )) + 1

        listen = data.pop('listen')
        data['discoveryauthgroup'] = data.pop('discovery_authgroup', None)
        data['discoveryauthmethod'] = AUTHMETHOD_LEGACY_MAP.inv.get(data.pop('discovery_authmethod'), 'None')
        pk = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix}
        )
        try:
            await self.__save_listen(pk, listen)
        except Exception as e:
            await self.middleware.call('datastore.delete', self._config.datastore, pk)
            raise e

        await self._service_change('iscsitarget', 'reload')

        return await self._get_instance(pk)

    async def __save_listen(self, pk, new, old=None):
        """
        Update the database with a new set of listen IP:PORT tuples.
        Addresses that no longer exist are deleted and new ones added.
        """
        new_listen_set = set([tuple(i.items()) for i in new])
        old_listen_set = set([tuple(i.items()) for i in old]) if old else set()
        for i in new_listen_set - old_listen_set:
            i = dict(i)
            await self.middleware.call(
                'datastore.insert',
                'services.iscsitargetportalip',
                {'portal': pk, 'ip': i['ip'], 'port': i['port']},
                {'prefix': 'iscsi_target_portalip_'}
            )

        for i in old_listen_set - new_listen_set:
            i = dict(i)
            portalip = await self.middleware.call(
                'datastore.query',
                'services.iscsitargetportalip',
                [('portal', '=', pk), ('ip', '=', i['ip']), ('port', '=', i['port'])],
                {'prefix': 'iscsi_target_portalip_'}
            )
            if portalip:
                await self.middleware.call(
                    'datastore.delete', 'services.iscsitargetportalip', portalip[0]['id']
                )

    @accepts(
        Int('id'),
        Patch(
            'iscsiportal_create',
            'iscsiportal_update',
            ('attr', {'update': True})
        )
    )
    async def do_update(self, pk, data):
        """
        Update iSCSI Portal `id`.
        """

        old = await self._get_instance(pk)

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()
        await self.__validate(verrors, new, 'iscsiportal_update', old)
        if verrors:
            raise verrors

        listen = new.pop('listen')
        new['discoveryauthgroup'] = new.pop('discovery_authgroup', None)
        new['discoveryauthmethod'] = AUTHMETHOD_LEGACY_MAP.inv.get(new.pop('discovery_authmethod'), 'None')

        await self.__save_listen(pk, listen, old['listen'])

        await self.middleware.call(
            'datastore.update', self._config.datastore, pk, new,
            {'prefix': self._config.datastore_prefix}
        )

        await self._service_change('iscsitarget', 'reload')

        return await self._get_instance(pk)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete iSCSI Portal `id`.
        """
        result = await self.middleware.call('datastore.delete', self._config.datastore, id)

        for i, portal in enumerate(await self.middleware.call('iscsi.portal.query', [], {'order_by': ['tag']})):
            await self.middleware.call(
                'datastore.update', self._config.datastore, portal['id'], {'tag': i + 1},
                {'prefix': self._config.datastore_prefix}
            )

        await self._service_change('iscsitarget', 'reload')

        return result
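
A minimal sketch for creating a portal with the service above; the wildcard address passes the system IP check because '0.0.0.0' and '::' are always accepted:

from middlewared.client import Client

# One listen entry on the default iSCSI port; 'tag' is assigned sequentially.
with Client() as c:
    portal = c.call('iscsi.portal.create', {
        'comment': 'Default portal',
        'listen': [{'ip': '0.0.0.0', 'port': 3260}],
    })
    print(portal['tag'])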
Example #24
class NFSService(SystemServiceService):
    class Config:
        service = "nfs"
        service_verb = "restart"
        datastore_prefix = "nfs_srv_"
        datastore_extend = 'nfs.nfs_extend'

    @private
    def nfs_extend(self, nfs):
        nfs["userd_manage_gids"] = nfs.pop("16")
        return nfs

    @private
    def nfs_compress(self, nfs):
        nfs["16"] = nfs.pop("userd_manage_gids")
        return nfs

    @accepts(
        Dict('nfs_update',
             Int('servers', validators=[Range(min=1, max=256)]),
             Bool('udp'),
             Bool('allow_nonroot'),
             Bool('v4'),
             Bool('v4_v3owner'),
             Bool('v4_krb'),
             List('bindip', items=[IPAddr('ip')]),
             Int('mountd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Int('rpcstatd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Int('rpclockd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Bool('userd_manage_gids'),
             Bool('mountd_log'),
             Bool('statd_lockd_log'),
             update=True))
    async def do_update(self, data):
        """
        Update NFS Service Configuration.

        `servers` represents number of servers to create.

        When `allow_nonroot` is set, it allows non-root mount requests to be served.

        `bindip` is a list of IPs on which NFS will listen for requests. When it is unset/empty, NFS listens on
        all available addresses.

        `v4` when set means that we switch from NFSv3 to NFSv4.

        `v4_v3owner` when set means that the system will use the NFSv3 ownership model for NFSv4.

        `mountd_port` specifies the port mountd(8) binds to.

        `rpcstatd_port` specifies the port rpc.statd(8) binds to.

        `rpclockd_port` specifies the port rpc.lockd(8) binds to.

        .. examples(websocket)::

          Update NFS Service Configuration to listen on 192.168.0.10 and use NFSv4

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.resilver.update",
                "params": [{
                    "bindip": [
                        "192.168.0.10"
                    ],
                    "v4": true
                }]
            }
        """
        if data.get("v4") is False:
            data.setdefault("v4_v3owner", False)

        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if new["v4"] and new["v4_krb"] and not await self.middleware.call(
                "system.is_freenas"):
            if await self.middleware.call("failover.licensed"):
                gc = await self.middleware.call("datastore.config",
                                                "network.globalconfiguration")
                if not gc["gc_hostname_virtual"] or gc["gc_domain"]:
                    verrors.add(
                        "nfs_update.v4",
                        "Enabling kerberos authentication on TrueNAS HA requires setting the virtual hostname and "
                        "domain")

        if new["v4"] and new["v4_krb"] and await self.middleware.call(
                'activedirectory.get_state') != "DISABLED":
            """
            In environments with kerberized NFSv4 enabled, we need to tell winbindd to not prefix
            usernames with the short form of the AD domain. Directly update the db and regenerate
            the smb.conf to avoid having a service disruption due to restarting the samba server.
            """
            if await self.middleware.call('smb.get_smb_ha_mode') == 'LEGACY':
                raise ValidationError(
                    'nfs_update.v4_krb',
                    'Enabling kerberos authentication on TrueNAS HA requires '
                    'the system dataset to be located on a data pool.')
            ad = await self.middleware.call('activedirectory.config')
            await self.middleware.call('datastore.update',
                                       'directoryservice.activedirectory',
                                       ad['id'],
                                       {'ad_use_default_domain': True})
            await self.middleware.call('etc.generate', 'smb')
            await self.middleware.call('service.reload', 'cifs')

        if not new["v4"] and new["v4_v3owner"]:
            verrors.add("nfs_update.v4_v3owner",
                        "This option requires enabling NFSv4")

        if new["v4_v3owner"] and new["userd_manage_gids"]:
            verrors.add(
                "nfs_update.userd_manage_gids",
                "This option is incompatible with NFSv3 ownership model for NFSv4"
            )

        if verrors:
            raise verrors

        self.nfs_compress(new)

        await self._update_service(old, new)

        self.nfs_extend(new)

        return new

    @private
    def setup_v4(self):
        config = self.middleware.call_sync("nfs.config")

        if config["v4_krb"]:
            subprocess.run(["service", "gssd", "onerestart"],
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL)
        else:
            subprocess.run(["service", "gssd", "forcestop"],
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL)

        if config["v4"]:
            sysctl.filter("vfs.nfsd.server_max_nfsvers")[0].value = 4
            if config["v4_v3owner"]:
                # Per RFC7530, sending NFSv3 style UID/GIDs across the wire is now allowed
                # You must have both of these sysctl"s set to allow the desired functionality
                sysctl.filter("vfs.nfsd.enable_stringtouid")[0].value = 1
                sysctl.filter("vfs.nfs.enable_uidtostring")[0].value = 1
                subprocess.run(["service", "nfsuserd", "forcestop"],
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL)
            else:
                sysctl.filter("vfs.nfsd.enable_stringtouid")[0].value = 0
                sysctl.filter("vfs.nfs.enable_uidtostring")[0].value = 0
                subprocess.run(["service", "nfsuserd", "onerestart"],
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL)
        else:
            sysctl.filter("vfs.nfsd.server_max_nfsvers")[0].value = 3
            if config["userd_manage_gids"]:
                subprocess.run(["service", "nfsuserd", "onerestart"],
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL)
            else:
                subprocess.run(["service", "nfsuserd", "forcestop"],
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL)
Example #25
class iSCSITargetExtentService(CRUDService):

    class Config:
        namespace = 'iscsi.extent'
        datastore = 'services.iscsitargetextent'
        datastore_prefix = 'iscsi_target_extent_'
        datastore_extend = 'iscsi.extent.extend'

    @accepts(Dict(
        'iscsi_extent_create',
        Str('name', required=True),
        Str('type', enum=['DISK', 'FILE'], default='DISK'),
        Str('disk', default=None, null=True),
        Str('serial', default=None, null=True),
        Str('path', default=None, null=True),
        Int('filesize', default=0),
        Int('blocksize', enum=[512, 1024, 2048, 4096], default=512),
        Bool('pblocksize'),
        Int('avail_threshold', validators=[Range(min=1, max=99)], null=True),
        Str('comment'),
        Bool('insecure_tpc', default=True),
        Bool('xen'),
        Str('rpm', enum=['UNKNOWN', 'SSD', '5400', '7200', '10000', '15000'],
            default='SSD'),
        Bool('ro'),
        register=True
    ))
    async def do_create(self, data):
        verrors = ValidationErrors()
        await self.compress(data)
        await self.validate(data)
        await self.clean(data, 'iscsi_extent_create', verrors)

        if verrors:
            raise verrors

        await self.save(data, 'iscsi_extent_create', verrors)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix}
        )

        return await self._get_instance(data['id'])

    @accepts(
        Int('id'),
        Patch(
            'iscsi_extent_create',
            'iscsi_extent_update',
            ('attr', {'update': True})
        )
    )
    async def do_update(self, id, data):
        verrors = ValidationErrors()
        old = await self._get_instance(id)

        new = old.copy()
        new.update(data)

        await self.compress(new)
        await self.validate(new)
        await self.clean(
            new, 'iscsi_extent_update', verrors, old=old
        )

        if verrors:
            raise verrors

        await self.save(new, 'iscsi_extent_update', verrors)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )

        return await self._get_instance(id)

    @accepts(
        Int('id'),
        Bool('remove', default=False),
    )
    async def do_delete(self, id, remove):
        data = await self._get_instance(id)

        if remove:
            await self.compress(data)
            delete = await self.remove_extent_file(data)

            if delete is not True:
                raise CallError('Failed to remove extent file')

        for target_to_extent in await self.middleware.call('iscsi.targetextent.query', [['extent', '=', id]]):
            await self.middleware.call('iscsi.targetextent.delete', target_to_extent['id'])

        return await self.middleware.call(
            'datastore.delete', self._config.datastore, id
        )

    @private
    async def validate(self, data):
        data['serial'] = await self.extent_serial(data['serial'])
        data['naa'] = self.extent_naa(data.get('naa'))

    @private
    async def compress(self, data):
        extent_type = data['type']
        extent_rpm = data['rpm']

        if extent_type == 'DISK':
            extent_disk = data['disk']

            if extent_disk.startswith('zvol'):
                data['type'] = 'ZVOL'
            elif extent_disk.startswith('hast'):
                data['type'] = 'HAST'
            else:
                data['type'] = 'Disk'
        elif extent_type == 'FILE':
            data['type'] = 'File'

        if extent_rpm == 'UNKNOWN':
            data['rpm'] = 'Unknown'

        return data

    @private
    async def extend(self, data):
        extent_type = data['type'].upper()
        extent_rpm = data['rpm'].upper()

        if extent_type != 'FILE':
            # ZVOL and HAST are type DISK
            extent_type = 'DISK'
            # If the extent is a disk (not ZVOL or HAST), reflect this in the output

            disk = await self.middleware.call('disk.query', [['identifier', '=', data['path']]])
            if disk:
                data['disk'] = disk[0]['name']
        else:
            extent_size = data['filesize']

            # Legacy Compat for having 2[KB, MB, GB, etc] in database
            if not str(extent_size).isdigit():
                suffixes = {
                    'PB': 1125899906842624,
                    'TB': 1099511627776,
                    'GB': 1073741824,
                    'MB': 1048576,
                    'KB': 1024,
                    'B': 1
                }
                for x in suffixes.keys():
                    if str(extent_size).upper().endswith(x):
                        extent_size = str(extent_size).upper().strip(x)
                        extent_size = int(extent_size) * suffixes[x]

                        data['filesize'] = extent_size

        data['rpm'] = extent_rpm
        data['type'] = extent_type

        return data
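
    # Example of the legacy size handling above: a stored filesize of '2GB'
    # becomes 2 * 1073741824 = 2147483648 bytes; plain integers pass through.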

    @private
    async def clean(self, data, schema_name, verrors, old=None):
        await self.clean_name(data, schema_name, verrors, old=old)
        await self.clean_type_and_path(data, schema_name, verrors)
        await self.clean_size(data, schema_name, verrors)

    @private
    async def clean_name(self, data, schema_name, verrors, old=None):
        name = data['name']
        old = old['name'] if old is not None else None
        serial = data['serial']
        name_filters = [('name', '=', name)]

        if '"' in name:
            verrors.add(f'{schema_name}.name', 'Double quotes are not allowed')

        if '"' in serial:
            verrors.add(f'{schema_name}.serial', 'Double quotes are not allowed')

        if name != old or old is None:
            name_result = await self.middleware.call(
                'datastore.query', self._config.datastore,
                name_filters,
                {'prefix': self._config.datastore_prefix})

            if name_result:
                verrors.add(f'{schema_name}.name',
                            'Extent name must be unique')

    @private
    async def clean_type_and_path(self, data, schema_name, verrors):
        extent_type = data['type']
        disk = data['disk']
        path = data['path']

        if extent_type is None:
            return data

        if extent_type == 'Disk':
            if not disk:
                verrors.add(f'{schema_name}.disk', 'This field is required')
        elif extent_type == 'ZVOL':
            if disk.startswith('zvol') and not os.path.exists(f'/dev/{disk}'):
                verrors.add(f'{schema_name}.disk',
                            f'ZVOL {disk} does not exist')
        elif extent_type == 'File':
            if not path:
                verrors.add(f'{schema_name}.path', 'This field is required')
                raise verrors  # They need this for anything else

            if '/iocage' in path:
                verrors.add(
                    f'{schema_name}.path',
                    'You need to specify a filepath outside of a jail root'
                )

            if (os.path.exists(path) and not
                    os.path.isfile(path)) or path[-1] == '/':
                verrors.add(f'{schema_name}.path',
                            'You need to specify a filepath not a directory')

            await check_path_resides_within_volume(
                verrors, self.middleware, f'{schema_name}.path', path
            )

        return data

    @private
    async def clean_size(self, data, schema_name, verrors):
        extent_type = data['type']
        path = data['path']
        size = data['filesize']
        blocksize = data['blocksize']

        if extent_type != 'FILE':
            return data

        if (
            size == 0 and path and (not os.path.exists(path) or (
                os.path.exists(path) and not
                os.path.isfile(path)
            ))
        ):
            verrors.add(
                f'{schema_name}.path',
                'The file must exist if the extent size is set to auto (0)')
        elif extent_type == 'FILE' and not path:
            verrors.add(f'{schema_name}.path', 'This field is required')

        if size and size != 0 and blocksize:
            if float(size) % blocksize:
                verrors.add(f'{schema_name}.filesize',
                            'File size must be a multiple of block size')

        return data

    @private
    async def extent_serial(self, serial):
        # TODO Just ported, let's do something different later? - Brandon
        if serial is None:
            try:
                nic = (await self.middleware.call('interface.query',
                                                  [['name', 'rnin', 'vlan'],
                                                   ['name', 'rnin', 'lagg'],
                                                   ['name', 'rnin', 'epair'],
                                                   ['name', 'rnin', 'vnet'],
                                                   ['name', 'rnin', 'bridge']])
                       )[0]
                mac = nic['link_address'].replace(':', '')

                ltg = await self.query()
                if len(ltg) > 0:
                    lid = ltg[0]['id']
                else:
                    lid = 0
                return f'{mac.strip()}{lid:02}'
            except Exception:
                return '10000001'
        else:
            return serial

    @private
    def extent_naa(self, naa):
        if naa is None:
            return '0x6589cfc000000' + hashlib.sha256(str(uuid.uuid4()).encode()).hexdigest()[0:19]
        else:
            return naa

    @accepts(List('exclude', default=[]))
    async def disk_choices(self, exclude):
        """
        `exclude` removes the given paths from the used_zvols list,
        allowing the user to keep the same item on update.
        """
        diskchoices = {}
        disk_query = await self.query([('type', '=', 'Disk')])

        diskids = [i['path'] for i in disk_query]
        used_disks = [d['name'] for d in await self.middleware.call(
            'disk.query', [('identifier', 'in', diskids)])]

        zvol_query_filters = [('type', '=', 'ZVOL')]

        for e in exclude:
            if e:
                zvol_query_filters.append(('path', '!=', e))

        zvol_query = await self.query(zvol_query_filters)

        used_zvols = [i['path'] for i in zvol_query]

        async for pdisk in await self.middleware.call('pool.get_disks'):
            used_disks.append(pdisk)

        zfs_snaps = await self.middleware.call(
            'zfs.snapshot.query', [], {'select': ['name'], 'order_by': ['name']}
        )

        zvols = await self.middleware.call(
            'pool.dataset.query',
            [('type', '=', 'VOLUME')]
        )

        zvol_list = [ds['name'] for ds in zvols]

        for zvol in zvols:
            zvol_name = zvol['name']
            zvol_size = zvol['volsize']['value']
            if f'zvol/{zvol_name}' not in used_zvols:
                diskchoices[f'zvol/{zvol_name}'] = f'{zvol_name} ({zvol_size})'

        for snap in zfs_snaps:
            ds_name, snap_name = snap['name'].rsplit('@', 1)
            if ds_name in zvol_list:
                diskchoices[f'zvol/{snap["name"]}'] = f'{snap["name"]} [ro]'

        notifier_disks = await self.middleware.call('notifier.get_disks')
        for name, disk in notifier_disks.items():
            if name in used_disks:
                continue
            size = await self.middleware.call('notifier.humanize_size',
                                              disk['capacity'])
            diskchoices[name] = f'{name} ({size})'

        return diskchoices
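
    # The returned mapping looks like (illustrative):
    # {'zvol/tank/vol1': 'tank/vol1 (10G)', 'ada3': 'ada3 (2.0 TB)'}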

    @private
    async def save(self, data, schema_name, verrors):

        extent_type = data['type']
        disk = data.pop('disk', None)

        if extent_type == 'File':
            path = data['path']
            dirs = '/'.join(path.split('/')[:-1])

            if not os.path.exists(dirs):
                try:
                    os.makedirs(dirs)
                except Exception as e:
                    self.logger.error(
                        f'Unable to create dirs for extent file: {e}')

            if not os.path.exists(path):
                extent_size = data['filesize']

                await run(['truncate', '-s', str(extent_size), path])

            await self._service_change('iscsitarget', 'reload')
        else:
            data['path'] = disk

            if disk.startswith('multipath'):
                await self.middleware.call('disk.unlabel', disk)
                await self.middleware.call('disk.label', disk, f'extent_{disk}')
            elif not disk.startswith('hast') and not disk.startswith('zvol'):
                disk_filters = [('name', '=', disk), ('expiretime', '=', None)]
                try:
                    disk_object = (await self.middleware.call('disk.query',
                                                              disk_filters))[0]
                    disk_identifier = disk_object.get('identifier', None)
                    data['path'] = disk_identifier

                    if disk_identifier.startswith('{devicename}') or disk_identifier.startswith(
                        '{uuid}'
                    ):
                        try:
                            await self.middleware.call('disk.label', disk, f'extent_{disk}')
                        except Exception as e:
                            verrors.add(
                                f'{schema_name}.disk',
                                f'Serial not found and glabel failed for {disk}: {str(e)}'
                            )

                            if verrors:
                                raise verrors
                        await self.middleware.call(
                            'disk.sync', disk.replace('/dev/', '')
                        )
                except IndexError:
                    # It's not a disk, but a ZVOL
                    pass

    @private
    async def remove_extent_file(self, data):
        if data['type'] == 'File':
            try:
                os.unlink(data['path'])
            except Exception as e:
                return e

        return True
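
A minimal, standalone sketch of the serial and NAA derivation used by extent_serial and extent_naa above, with the middleware calls stubbed out; the sample MAC address is made up, the '10000001' fallback mirrors the hard-coded value in the service, and the helper names are illustrative:

import hashlib
import uuid


def sketch_extent_serial(mac, last_id, fallback='10000001'):
    # MAC with colons stripped plus a zero-padded extent id,
    # e.g. '0c:c4:7a:b3:4d:9a' + 1 -> '0cc47ab34d9a01'
    if mac is None:
        return fallback
    return f"{mac.replace(':', '').strip()}{last_id:02}"


def sketch_extent_naa():
    # '0x6589cfc000000' prefix plus 19 hex chars of a hashed random UUID,
    # matching extent_naa above
    return '0x6589cfc000000' + hashlib.sha256(str(uuid.uuid4()).encode()).hexdigest()[0:19]


print(sketch_extent_serial('0c:c4:7a:b3:4d:9a', 1))
print(sketch_extent_naa())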
Example no. 26
class SMARTTestService(CRUDService):
    class Config:
        datastore = 'tasks.smarttest'
        datastore_extend = 'smart.test.smart_test_extend'
        datastore_prefix = 'smarttest_'
        namespace = 'smart.test'

    @private
    async def smart_test_extend(self, data):
        disks = data.pop('disks')
        data['disks'] = [disk['disk_identifier'] for disk in disks]
        test_type = {
            'L': 'LONG',
            'S': 'SHORT',
            'C': 'CONVEYANCE',
            'O': 'OFFLINE',
        }
        data['type'] = test_type[data.pop('type')]
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        smart_tests = await self.query(filters=[('type', '=', data['type'])])
        configured_disks = [d for test in smart_tests for d in test['disks']]
        disks_dict = {
            disk['identifier']: disk['name']
            for disk in (await self.middleware.call('disk.query'))
        }

        disks = data.get('disks')
        used_disks = []
        invalid_disks = []
        for disk in disks:
            if disk in configured_disks:
                used_disks.append(disks_dict[disk])
            if disk not in disks_dict.keys():
                invalid_disks.append(disk)

        if used_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks already have tests for this type: {", ".join(used_disks)}'
            )

        if invalid_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks are invalid: {", ".join(invalid_disks)}')

        return verrors

    @accepts(
        Dict('smart_task_create',
             Cron('schedule', exclude=['minute']),
             Str('desc'),
             Bool('all_disks', default=False),
             List('disks', items=[Str('disk')], default=[]),
             Str('type',
                 enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'],
                 required=True),
             register=True))
    async def do_create(self, data):
        """
        Create a SMART Test Task.

        `disks` is a list of valid disks which should be monitored in this task.

        `type` is specified to represent the type of SMART test to be executed.

        `all_disks` when enabled sets the task to cover all disks, in which case `disks` is not required.

        .. examples(websocket)::

          Create a SMART Test Task which executes at the 30th minute of every hour.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "smart.test.create",
                "params": [{
                    "schedule": {
                        "minute": "30",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "all_disks": true,
                    "type": "OFFLINE",
                    "disks": []
                }]
            }
        """
        data['type'] = data.pop('type')[0]
        verrors = await self.validate_data(data, 'smart_test_create')

        if data['all_disks']:
            if data.get('disks'):
                verrors.add('smart_test_create.disks',
                            'This test is already enabled for all disks')
        else:
            if not data.get('disks'):
                verrors.add('smart_test_create.disks',
                            'This field is required')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return data

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('smart_task_create', 'smart_task_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update SMART Test Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)],
                               options={'get': True})
        new = old.copy()
        new.update(data)

        new['type'] = new.pop('type')[0]
        old['type'] = old.pop('type')[0]
        new_disks = [disk for disk in new['disks'] if disk not in old['disks']]
        deleted_disks = [
            disk for disk in old['disks'] if disk not in new['disks']
        ]
        if old['type'] == new['type']:
            new['disks'] = new_disks
        verrors = await self.validate_data(new, 'smart_test_update')

        new['disks'] = [
            disk for disk in chain(new_disks, old['disks'])
            if disk not in deleted_disks
        ]

        if new['all_disks']:
            if new.get('disks'):
                verrors.add('smart_test_update.disks',
                            'This test is already enabled for all disks')
        else:
            if not new.get('disks'):
                verrors.add('smart_test_update.disks',
                            'This field is required')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return await self.query(filters=[('id', '=', id)],
                                options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete SMART Test Task of `id`.
        """
        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return response

    @filterable
    async def results(self, filters, options):
        """
        Get S.M.A.R.T. test results for disk(s).

        .. examples(websocket)::

          Get test results for all disks

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "smart.test.results",
                "params": []
            }

            returns

            :::javascript

            [
              // ATA disk
              {
                "disk": "ada0",
                "tests": [
                  {
                    "num": 1,
                    "description": "Short offline",
                    "status": "SUCCESS",
                    "status_verbose": "Completed without error",
                    "remaining": 0.0,
                    "lifetime": 16590,
                    "lba_of_first_error": null
                  }
                ]
              },
              // SCSI disk
              {
                "disk": "ada1",
                "tests": [
                  {
                    "num": 1,
                    "description": "Background long",
                    "status": "FAILED",
                    "status_verbose": "Completed, segment failed",
                    "segment_number": null,
                    "lifetime": 3943,
                    "lba_of_first_error": null
                  }
                ]
              }
            ]

          Get specific disk test results

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "smart.test.results",
                "params": [
                  [["disk", "=", "ada0"]],
                  {"get": true}
                ]
            }

            returns

            :::javascript

            {
              "disk": "ada0",
              "tests": [
                {
                  "num": 1,
                  "description": "Short offline",
                  "status": "SUCCESS",
                  "status_verbose": "Completed without error",
                  "remaining": 0.0,
                  "lifetime": 16590,
                  "lba_of_first_error": null
                }
              ]
            }
        """

        get = (options or {}).pop("get", False)

        disks = filter_list(
            [{
                "disk": disk["name"]
            } for disk in await self.middleware.call("disk.query")],
            filters,
            options,
        )

        devices = await camcontrol_list()
        return filter_list(
            list(
                filter(
                    None, await asyncio_map(
                        functools.partial(annotate_disk_smart_tests,
                                          self.middleware, devices), disks,
                        16))),
            [],
            {"get": get},
        )
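
A hedged, standalone sketch of the single-letter type encoding that smart_test_extend and do_create round-trip through the datastore; the helper names here are illustrative, not part of the service:

# Round-trip sketch of the 'L'/'S'/'C'/'O' type encoding used above.
DB_TO_API = {'L': 'LONG', 'S': 'SHORT', 'C': 'CONVEYANCE', 'O': 'OFFLINE'}


def to_db(api_type):
    # do_create keeps only the first character: 'LONG' -> 'L'
    return api_type[0]


def to_api(db_type):
    # smart_test_extend expands the stored letter back to the full name
    return DB_TO_API[db_type]


assert to_api(to_db('CONVEYANCE')) == 'CONVEYANCE'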
Example no. 27
class ACLBase(ServicePartBase):

    @accepts(
        Dict(
            'filesystem_acl',
            Str('path', required=True),
            Int('uid', null=True, default=None, validators=[Range(min=-1, max=2147483647)]),
            Int('gid', null=True, default=None, validators=[Range(min=-1, max=2147483647)]),
            OROperator(
                List(
                    'nfs4_acl',
                    items=[Dict(
                        'nfs4_ace',
                        Str('tag', enum=['owner@', 'group@', 'everyone@', 'USER', 'GROUP']),
                        Int('id', null=True, validators=[Range(min=-1, max=2147483647)]),
                        Str('type', enum=['ALLOW', 'DENY']),
                        Dict(
                            'perms',
                            Bool('READ_DATA'),
                            Bool('WRITE_DATA'),
                            Bool('APPEND_DATA'),
                            Bool('READ_NAMED_ATTRS'),
                            Bool('WRITE_NAMED_ATTRS'),
                            Bool('EXECUTE'),
                            Bool('DELETE_CHILD'),
                            Bool('READ_ATTRIBUTES'),
                            Bool('WRITE_ATTRIBUTES'),
                            Bool('DELETE'),
                            Bool('READ_ACL'),
                            Bool('WRITE_ACL'),
                            Bool('WRITE_OWNER'),
                            Bool('SYNCHRONIZE'),
                            Str('BASIC', enum=['FULL_CONTROL', 'MODIFY', 'READ', 'TRAVERSE']),
                        ),
                        Dict(
                            'flags',
                            Bool('FILE_INHERIT'),
                            Bool('DIRECTORY_INHERIT'),
                            Bool('NO_PROPAGATE_INHERIT'),
                            Bool('INHERIT_ONLY'),
                            Bool('INHERITED'),
                            Str('BASIC', enum=['INHERIT', 'NOINHERIT']),
                        ),
                        register=True
                    )],
                    register=True
                ),
                List(
                    'posix1e_acl',
                    items=[Dict(
                        'posix1e_ace',
                        Bool('default', default=False),
                        Str('tag', enum=['USER_OBJ', 'GROUP_OBJ', 'USER', 'GROUP', 'OTHER', 'MASK']),
                        Int('id', default=-1, validators=[Range(min=-1, max=2147483647)]),
                        Dict(
                            'perms',
                            Bool('READ', default=False),
                            Bool('WRITE', default=False),
                            Bool('EXECUTE', default=False),
                        ),
                        register=True
                    )],
                    register=True
                ),
                name='dacl',
            ),
            Dict(
                'nfs41_flags',
                Bool('autoinherit', default=False),
                Bool('protected', default=False),
            ),
            Str('acltype', enum=[x.name for x in ACLType], null=True),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
                Bool('canonicalize', default=True)
            )
        )
    )
    @returns()
    @job(lock="perm_change")
    def setacl(self, job, data):
        """
        Set ACL of a given path. Takes the following parameters:
        `path` full path to directory or file.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to set an ACL on the
        directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.

        `dacl` ACL entries. Formatting depends on the underlying `acltype`. NFS4ACL requires
        NFSv4 entries. POSIX1e requires POSIX1e entries.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `recursive` applies the ACL recursively.

        `traverse` traverses filesystem boundaries (ZFS datasets).

        `stripacl` converts the ACL to a trivial one. An ACL is trivial if it can be expressed as a file mode
        without losing any access rules.

        `canonicalize` reorders ACL entries so that they are in canonical form as described
        in the Microsoft documentation MS-DTYP 2.4.5 (ACL). This only applies to NFSv4 ACLs.

        In the case of NFSv4 ACLs, the tags USER_OBJ, GROUP_OBJ, and EVERYONE are replaced with owner@, group@,
        and everyone@ for consistency with getfacl and setfacl. If one of the aforementioned special tags is used,
        'id' must be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled.
        """

    @accepts(
        Str('path'),
        Bool('simplified', default=True),
        Bool('resolve_ids', default=False),
    )
    @returns(Dict(
        'truenas_acl',
        Str('path'),
        Bool('trivial'),
        Str('acltype', enum=[x.name for x in ACLType], null=True),
        OROperator(
            Ref('nfs4_acl'),
            Ref('posix1e_acl'),
            name='acl'
        )
    ))
    def getacl(self, path, simplified, resolve_ids):
        """
        Return the ACL of a given path. This may return a POSIX1e ACL or an NFSv4 ACL. The ACL type is
        indicated by the `acltype` key.

        `simplified` - the effect of this depends on the ACL type of the underlying filesystem. In the case of
        NFSv4 ACLs, simplified permissions and flags are returned for ACL entries where applicable; see the
        NFSv4 errata below. In the case of POSIX1E ACLs, this setting has no impact on the returned ACL.

        `resolve_ids` - adds an additional `who` key to each ACL entry that resolves the numeric id to
        a user name or group name. In the case of owner@ and group@ (NFSv4) or USER_OBJ and GROUP_OBJ
        (POSIX1E), st_uid or st_gid from the file's stat() output will be resolved. In the case of
        MASK (POSIX1E), OTHER (POSIX1E), and everyone@ (NFSv4), the `who` key will be included but set to null.
        If the id cannot be resolved to a name, `who` will be set to null. This option should
        only be used if resolving ids to names is required.

        Errata about ACLType NFSv4:

        `simplified` returns a shortened form of the ACL permset and flags where applicable. If permissions
        have been simplified, then the `perms` object will contain only a single `BASIC` key with a string
        describing the underlying permissions set.

        `TRAVERSE` sufficient rights to traverse a directory, but not read contents.

        `READ` sufficient rights to traverse a directory, and read file contents.

        `MODIFY` sufficient rights to traverse, read, write, and modify a file.

        `FULL_CONTROL` all permissions.

        If the permissions do not fit within one of the pre-defined simplified permission types, then
        the full ACL entry will be returned.
        """

    @accepts(
        Dict(
            'filesystem_ownership',
            Str('path', required=True),
            Int('uid', null=True, default=None, validators=[Range(min=-1, max=2147483647)]),
            Int('gid', null=True, default=None, validators=[Range(min=-1, max=2147483647)]),
            Dict(
                'options',
                Bool('recursive', default=False),
                Bool('traverse', default=False)
            )
        )
    )
    @returns()
    @job(lock="perm_change")
    def chown(self, job, data):
        """
        Change owner or group of file at `path`.

        `uid` and `gid` specify new owner of the file. If either
        key is absent or None, then existing value on the file is not
        changed.

        `recursive` performs action recursively, but does
        not traverse filesystem mount points.

        If `traverse` and `recursive` are specified, then the chown
        operation will traverse filesystem mount points.
        """

    @accepts(
        Dict(
            'filesystem_permission',
            Str('path', required=True),
            UnixPerm('mode', null=True),
            Int('uid', null=True, default=None, validators=[Range(min=-1, max=2147483647)]),
            Int('gid', null=True, default=None, validators=[Range(min=-1, max=2147483647)]),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
            )
        )
    )
    @returns()
    @job(lock="perm_change")
    def setperm(self, job, data):
        """
        Set unix permissions on given `path`.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to set permissions on
        the directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.

        If `mode` is specified then the mode will be applied to the
        path and files and subdirectories depending on which `options` are
        selected. Mode should be formatted as a string representation of
        octal permission bits.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `stripacl` setperm will fail if an extended ACL is present on `path`,
        unless `stripacl` is set to True.

        `recursive` remove ACLs recursively, but do not traverse dataset
        boundaries.

        `traverse` remove ACLs from child datasets.

        If no `mode` is set, and `stripacl` is True, then non-trivial ACLs
        will be converted to trivial ACLs. An ACL is trivial if it can be
        expressed as a file mode without losing any access rules.

        """

    @accepts(Str('path', required=False, default=''))
    @returns(List('acl_choices', items=[Str("choice")]))
    async def default_acl_choices(self, path):
        """
        `DEPRECATED`
        Returns list of names of ACL templates. Wrapper around
        filesystem.acltemplate.query.
        """

    @accepts(
        Str('acl_type', default='POSIX_OPEN'),
        Str('share_type', default='NONE', enum=['NONE', 'SMB', 'NFS']),
    )
    @returns(OROperator(Ref('nfs4_acl'), Ref('posix1e_acl'), name='acl'))
    async def get_default_acl(self, acl_type, share_type):
        """
Example no. 28
class CloudSyncService(TaskPathService):

    local_fs_lock_manager = FsLockManager()
    remote_fs_lock_manager = FsLockManager()
    share_task_type = 'CloudSync'

    class Config:
        datastore = "tasks.cloudsync"
        datastore_extend = "cloudsync.extend"
        cli_namespace = "task.cloud_sync"

    @filterable
    async def query(self, filters, options):
        """
        Query all Cloud Sync Tasks with `query-filters` and `query-options`.
        """
        tasks_or_task = await super().query(filters, options)

        jobs = {}
        for j in await self.middleware.call("core.get_jobs", [('OR', [("method", "=", "cloudsync.sync"),
                                                                      ("method", "=", "cloudsync.restore")])],
                                            {"order_by": ["id"]}):
            try:
                task_id = int(j["arguments"][0])
            except (IndexError, ValueError):
                continue

            if task_id in jobs and jobs[task_id]["state"] == "RUNNING":
                continue

            jobs[task_id] = j

        if isinstance(tasks_or_task, list):
            for task in tasks_or_task:
                task["job"] = jobs.get(task["id"])
        else:
            tasks_or_task["job"] = jobs.get(tasks_or_task["id"])

        return tasks_or_task

    @private
    async def extend(self, cloud_sync):
        cloud_sync["credentials"] = cloud_sync.pop("credential")

        Cron.convert_db_format_to_schedule(cloud_sync)

        return cloud_sync

    @private
    async def _compress(self, cloud_sync):
        cloud_sync["credential"] = cloud_sync.pop("credentials")

        Cron.convert_schedule_to_db_format(cloud_sync)

        cloud_sync.pop('job', None)
        cloud_sync.pop(self.locked_field, None)

        return cloud_sync

    @private
    async def _get_credentials(self, credentials_id):
        try:
            return await self.middleware.call("datastore.query", "system.cloudcredentials",
                                              [("id", "=", credentials_id)], {"get": True})
        except IndexError:
            return None

    @private
    async def _basic_validate(self, verrors, name, data):
        if data["encryption"]:
            if not data["encryption_password"]:
                verrors.add(f"{name}.encryption_password", "This field is required when encryption is enabled")

        credentials = await self._get_credentials(data["credentials"])
        if not credentials:
            verrors.add(f"{name}.credentials", "Invalid credentials")

        try:
            shlex.split(data["args"])
        except ValueError as e:
            verrors.add(f"{name}.args", f"Parse error: {e.args[0]}")

        if verrors:
            raise verrors

        provider = REMOTES[credentials["provider"]]

        schema = []

        if provider.buckets:
            schema.append(Str("bucket", required=True, empty=False))

        schema.append(Str("folder", required=True))

        schema.extend(provider.task_schema)

        schema.extend(self.common_task_schema(provider))

        attributes_verrors = validate_attributes(schema, data, additional_attrs=True)

        if not attributes_verrors:
            await provider.pre_save_task(data, credentials, verrors)

        verrors.add_child(f"{name}.attributes", attributes_verrors)

    @private
    async def _validate(self, verrors, name, data):
        await self._basic_validate(verrors, name, data)

        for i, (limit1, limit2) in enumerate(zip(data["bwlimit"], data["bwlimit"][1:])):
            if limit1["time"] >= limit2["time"]:
                verrors.add(f"{name}.bwlimit.{i + 1}.time", f"Invalid time order: {limit1['time']}, {limit2['time']}")

        await self.validate_path_field(data, name, verrors)

        if data["snapshot"]:
            if data["direction"] != "PUSH":
                verrors.add(f"{name}.snapshot", "This option can only be enabled for PUSH tasks")
            if data["transfer_mode"] == "MOVE":
                verrors.add(f"{name}.snapshot", "This option can not be used for MOVE transfer mode")
            if await self.middleware.call("pool.dataset.query",
                                          [["name", "^", os.path.relpath(data["path"], "/mnt") + "/"],
                                           ["type", "=", "FILESYSTEM"]]):
                verrors.add(f"{name}.snapshot", "This option is only available for datasets that have no further "
                                                "nesting")

    @private
    async def _validate_folder(self, verrors, name, data):
        if data["direction"] == "PULL":
            folder = data["attributes"]["folder"].rstrip("/")
            if folder:
                folder_parent = os.path.normpath(os.path.join(folder, ".."))
                if folder_parent == ".":
                    folder_parent = ""
                folder_basename = os.path.basename(folder)
                ls = await self.list_directory(dict(
                    credentials=data["credentials"],
                    encryption=data["encryption"],
                    filename_encryption=data["filename_encryption"],
                    encryption_password=data["encryption_password"],
                    encryption_salt=data["encryption_salt"],
                    attributes=dict(data["attributes"], folder=folder_parent),
                    args=data["args"],
                ))
                for item in ls:
                    if item["Name"] == folder_basename:
                        if not item["IsDir"]:
                            verrors.add(f"{name}.attributes.folder", "This is not a directory")
                        break
                else:
                    verrors.add(f"{name}.attributes.folder", "Directory does not exist")

        if data["direction"] == "PUSH":
            credentials = await self._get_credentials(data["credentials"])

            provider = REMOTES[credentials["provider"]]

            if provider.readonly:
                verrors.add(f"{name}.direction", "This remote is read-only")

    @accepts(Dict(
        "cloud_sync_create",
        Str("description", default=""),
        Str("direction", enum=["PUSH", "PULL"], required=True),
        Str("transfer_mode", enum=["SYNC", "COPY", "MOVE"], required=True),
        Str("path", required=True),
        Int("credentials", required=True),
        Bool("encryption", default=False),
        Bool("filename_encryption", default=False),
        Str("encryption_password", default=""),
        Str("encryption_salt", default=""),
        Cron(
            "schedule",
            defaults={"minute": "00"},
            required=True
        ),
        Bool("follow_symlinks", default=False),
        Int("transfers", null=True, default=None, validators=[Range(min=1)]),
        List("bwlimit", items=[Dict("cloud_sync_bwlimit",
                                    Str("time", validators=[Time()]),
                                    Int("bandwidth", validators=[Range(min=1)], null=True))]),
        List("exclude", items=[Str("path", empty=False)]),
        Dict("attributes", additional_attrs=True, required=True),
        Bool("snapshot", default=False),
        Str("pre_script", default="", max_length=None),
        Str("post_script", default="", max_length=None),
        Str("args", default="", max_length=None),
        Bool("enabled", default=True),
        register=True,
    ))
    async def do_create(self, cloud_sync):
        """
        Creates a new cloud_sync entry.

        .. examples(websocket)::

          Create a new cloud_sync task using Amazon S3 attributes, scheduled to run every hour.

            :::javascript
            {
              "id": "6841f242-840a-11e6-a437-00e04d680384",
              "msg": "method",
              "method": "cloudsync.create",
              "params": [{
                "description": "s3 sync",
                "path": "/mnt/tank",
                "credentials": 1,
                "minute": "00",
                "hour": "*",
                "daymonth": "*",
                "month": "*",
                "attributes": {
                  "bucket": "mybucket",
                  "folder": ""
                },
                "enabled": true
              }]
            }
        """

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        cloud_sync["id"] = await self.middleware.call("datastore.insert", "tasks.cloudsync", cloud_sync)
        await self.middleware.call("service.restart", "cron")

        cloud_sync = await self.extend(cloud_sync)
        return cloud_sync

    @accepts(Int("id"), Patch("cloud_sync_create", "cloud_sync_update", ("attr", {"update": True})))
    async def do_update(self, id, data):
        """
        Updates the cloud_sync entry `id` with `data`.
        """
        cloud_sync = await self.get_instance(id)

        # credentials is a foreign key for now
        if cloud_sync["credentials"]:
            cloud_sync["credentials"] = cloud_sync["credentials"]["id"]

        cloud_sync.update(data)

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        await self.middleware.call("datastore.update", "tasks.cloudsync", id, cloud_sync)
        await self.middleware.call("service.restart", "cron")

        return await self.get_instance(id)

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Deletes cloud_sync entry `id`.
        """
        await self.middleware.call("cloudsync.abort", id)
        await self.middleware.call("datastore.delete", "tasks.cloudsync", id)
        await self.middleware.call("alert.oneshot_delete", "CloudSyncTaskFailed", id)
        await self.middleware.call("service.restart", "cron")

    @accepts(Int("credentials_id"))
    async def list_buckets(self, credentials_id):
        credentials = await self._get_credentials(credentials_id)
        if not credentials:
            raise CallError("Invalid credentials")

        provider = REMOTES[credentials["provider"]]

        if not provider.buckets:
            raise CallError("This provider does not use buckets")

        return await self.ls({"credentials": credentials}, "")

    @accepts(Dict(
        "cloud_sync_ls",
        Int("credentials", required=True),
        Bool("encryption", default=False),
        Bool("filename_encryption", default=False),
        Str("encryption_password", default=""),
        Str("encryption_salt", default=""),
        Dict("attributes", required=True, additional_attrs=True),
        Str("args", default=""),
    ))
    async def list_directory(self, cloud_sync):
        """
        List contents of a remote bucket / directory.

        If the remote supports buckets, the path is constructed from the "bucket" and "folder" keys in `attributes`.
        If the remote does not support buckets, the path is constructed from the "folder" key alone.
        "folder" is the directory name and "bucket" is the bucket name on the remote.

        Path examples:

        S3 Service
        `bucketname/directory/name`

        Dropbox Service
        `directory/name`

        `credentials` is a valid id of a Cloud Sync Credential which will be used to connect to the provider.
        """
        verrors = ValidationErrors()

        await self._basic_validate(verrors, "cloud_sync", dict(cloud_sync))

        if verrors:
            raise verrors

        credentials = await self._get_credentials(cloud_sync["credentials"])

        path = get_remote_path(REMOTES[credentials["provider"]], cloud_sync["attributes"])

        return await self.ls(dict(cloud_sync, credentials=credentials), path)

    @private
    async def ls(self, config, path):
        await self.middleware.call("network.general.will_perform_activity", "cloud_sync")

        decrypt_filenames = config.get("encryption") and config.get("filename_encryption")
        async with RcloneConfig(config) as config:
            proc = await run(["rclone", "--config", config.config_path, "lsjson", "remote:" + path],
                             check=False, encoding="utf8", errors="ignore")
            if proc.returncode == 0:
                result = json.loads(proc.stdout)

                if decrypt_filenames:
                    if result:
                        decrypted_names = {}
                        proc = await run((["rclone", "--config", config.config_path, "cryptdecode", "encrypted:"] +
                                         [item["Name"] for item in result]),
                                         check=False, encoding="utf8", errors="ignore")
                        for line in proc.stdout.splitlines():
                            try:
                                encrypted, decrypted = line.rstrip("\r\n").split(" \t ", 1)
                            except ValueError:
                                continue

                            if decrypted != "Failed to decrypt":
                                decrypted_names[encrypted] = decrypted

                        for item in result:
                            if item["Name"] in decrypted_names:
                                item["Decrypted"] = decrypted_names[item["Name"]]

                return result
            else:
                raise CallError(proc.stderr, extra={"excerpt": lsjson_error_excerpt(proc.stderr)})

    @item_method
    @accepts(
        Int("id"),
        Dict(
            "cloud_sync_sync_options",
            Bool("dry_run", default=False),
            register=True,
        )
    )
    @job(lock=lambda args: "cloud_sync:{}".format(args[-1]), lock_queue_size=1, logs=True)
    async def sync(self, job, id, options):
        """
        Run the cloud_sync job `id`, syncing the local data to remote.
        """

        cloud_sync = await self.get_instance(id)
        if cloud_sync['locked']:
            await self.middleware.call('cloudsync.generate_locked_alert', id)
            return

        await self._sync(cloud_sync, options, job)

    @accepts(
        Patch("cloud_sync_create", "cloud_sync_sync_onetime"),
        Patch("cloud_sync_sync_options", "cloud_sync_sync_onetime_options"),
    )
    @job(logs=True)
    async def sync_onetime(self, job, cloud_sync, options):
        """
        Run cloud sync task without creating it.
        """
        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync_sync_onetime", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync_sync_onetime", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync["credentials"] = await self._get_credentials(cloud_sync["credentials"])

        await self._sync(cloud_sync, options, job)

    async def _sync(self, cloud_sync, options, job):
        credentials = cloud_sync["credentials"]

        local_path = cloud_sync["path"]
        local_direction = FsLockDirection.READ if cloud_sync["direction"] == "PUSH" else FsLockDirection.WRITE

        remote_path = get_remote_path(REMOTES[credentials["provider"]], cloud_sync["attributes"])
        remote_direction = FsLockDirection.READ if cloud_sync["direction"] == "PULL" else FsLockDirection.WRITE

        directions = {
            FsLockDirection.READ: "reading",
            FsLockDirection.WRITE: "writing",
        }

        job.set_progress(0, f"Locking local path {local_path!r} for {directions[local_direction]}")
        async with self.local_fs_lock_manager.lock(local_path, local_direction):
            job.set_progress(0, f"Locking remote path {remote_path!r} for {directions[remote_direction]}")
            async with self.remote_fs_lock_manager.lock(f"{credentials['id']}/{remote_path}", remote_direction):
                job.set_progress(0, "Starting")
                try:
                    await rclone(self.middleware, job, cloud_sync, options["dry_run"])
                    if "id" in cloud_sync:
                        await self.middleware.call("alert.oneshot_delete", "CloudSyncTaskFailed", cloud_sync["id"])
                except Exception:
                    if "id" in cloud_sync:
                        await self.middleware.call("alert.oneshot_create", "CloudSyncTaskFailed", {
                            "id": cloud_sync["id"],
                            "name": cloud_sync["description"],
                        })
                    raise

    @item_method
    @accepts(Int("id"))
    async def abort(self, id):
        """
        Aborts cloud sync task.
        """

        cloud_sync = await self._get_instance(id)

        if cloud_sync["job"] is None:
            return False

        if cloud_sync["job"]["state"] not in ["WAITING", "RUNNING"]:
            return False

        await self.middleware.call("core.job_abort", cloud_sync["job"]["id"])
        return True

    @accepts()
    async def providers(self):
        """
        Returns a list of dictionaries of supported providers for Cloud Sync Tasks.

        `credentials_schema` is JSON schema for credentials attributes.

        `task_schema` is JSON schema for task attributes.

        `buckets` is a boolean value which is set to `true` if the provider supports buckets.

        Example of a single provider:

        [
            {
                "name": "AMAZON_CLOUD_DRIVE",
                "title": "Amazon Cloud Drive",
                "credentials_schema": [
                    {
                        "property": "client_id",
                        "schema": {
                            "title": "Amazon Application Client ID",
                            "_required_": true,
                            "type": "string"
                        }
                    },
                    {
                        "property": "client_secret",
                        "schema": {
                            "title": "Application Key",
                            "_required_": true,
                            "type": "string"
                        }
                    }
                ],
                "credentials_oauth": null,
                "buckets": false,
                "bucket_title": "Bucket",
                "task_schema": []
            }
        ]
        """
        return sorted(
            [
                {
                    "name": provider.name,
                    "title": provider.title,
                    "credentials_schema": [
                        {
                            "property": field.name,
                            "schema": field.to_json_schema()
                        }
                        for field in provider.credentials_schema
                    ],
                    "credentials_oauth": (
                        f"{OAUTH_URL}/{(provider.credentials_oauth_name or provider.name.lower())}"
                        if provider.credentials_oauth else None
                    ),
                    "buckets": provider.buckets,
                    "bucket_title": provider.bucket_title,
                    "task_schema": [
                        {
                            "property": field.name,
                            "schema": field.to_json_schema()
                        }
                        for field in provider.task_schema + self.common_task_schema(provider)
                    ],
                }
                for provider in REMOTES.values()
            ],
            key=lambda provider: provider["title"].lower()
        )

    def common_task_schema(self, provider):
        schema = []

        if provider.fast_list:
            schema.append(Bool("fast_list", default=False, title="Use --fast-list", description=textwrap.dedent("""\
                Use fewer transactions in exchange for more RAM. This may also speed up or slow down your
                transfer. See [rclone documentation](https://rclone.org/docs/#fast-list) for more details.
            """).rstrip()))

        return schema
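
The bandwidth-limit validation in _validate above walks consecutive schedule entries by zipping the list with itself shifted by one; below is a minimal standalone sketch of that strictly-increasing-time check. The helper name and sample data are illustrative, and the zero-padded "HH:MM" strings compare correctly as plain strings:

def bwlimit_time_errors(bwlimit):
    # Zipping a list with its own tail yields consecutive pairs,
    # mirroring zip(data["bwlimit"], data["bwlimit"][1:]) above.
    errors = []
    for i, (a, b) in enumerate(zip(bwlimit, bwlimit[1:])):
        if a['time'] >= b['time']:
            errors.append((i + 1, a['time'], b['time']))
    return errors


schedule = [{'time': '08:00', 'bandwidth': 1024},
            {'time': '18:00', 'bandwidth': None}]
assert bwlimit_time_errors(schedule) == []
assert bwlimit_time_errors(list(reversed(schedule))) == [(1, '18:00', '08:00')]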
Example no. 29
class UPSService(SystemServiceService):
    try:
        DRIVERS_AVAILABLE = set(os.listdir(DRIVER_BIN_DIR))
    except FileNotFoundError:
        DRIVERS_AVAILABLE = set()

    class Config:
        datastore = 'services.ups'
        datastore_prefix = 'ups_'
        datastore_extend = 'ups.ups_config_extend'
        service = 'ups'
        service_verb = 'restart'
        cli_namespace = 'service.ups'

    @private
    async def ups_config_extend(self, data):
        data['mode'] = data['mode'].upper()
        data['shutdown'] = data['shutdown'].upper()
        data['toemail'] = [v for v in data['toemail'].split(';') if v]
        host = 'localhost' if data['mode'] == 'MASTER' else data['remotehost']
        data['complete_identifier'] = f'{data["identifier"]}@{host}:{data["remoteport"]}'
        return data

    @accepts()
    async def port_choices(self):
        ports = [x for x in glob.glob('/dev/cua*') if x.find('.') == -1]
        ports.extend(glob.glob('/dev/ugen*'))
        ports.extend(glob.glob('/dev/uhid*'))
        ports.append('auto')
        return ports

    @accepts()
    def driver_choices(self):
        """
        Returns choices of UPS drivers supported by the system.
        """
        ups_choices = {}
        if osc.IS_LINUX:
            driver_list = '/usr/share/nut/driver.list'
        else:
            driver_list = '/conf/base/etc/local/nut/driver.list'
        if os.path.exists(driver_list):
            with open(driver_list, 'rb') as f:
                d = f.read().decode('utf8', 'ignore')
            r = io.StringIO()
            for line in re.sub(r'[ \t]+', ' ', d, flags=re.M).split('\n'):
                r.write(line.strip() + '\n')
            r.seek(0)
            reader = csv.reader(r, delimiter=' ', quotechar='"')
            for row in reader:
                if len(row) == 0 or row[0].startswith('#'):
                    continue
                if row[-2] == '#':
                    last = -3
                else:
                    last = -1
                driver_str = row[last]
                driver_annotation = ''
                # e.g. "blazer_usb (USB ID 0665:5161)"
                m = re.match(r'(.+) \((.+)\)', driver_str)
                if m:
                    driver_str, driver_annotation = m.group(1), m.group(2)
                # the driver field can be "blazer_ser or blazer_usb"
                for driver in driver_str.split(' or '):
                    driver = driver.strip()
                    if driver not in self.DRIVERS_AVAILABLE:
                        continue
                    key = '$'.join([driver, row[3]])
                    title = ' '.join(filter(None, row[0:last]))
                    annotation = ', '.join(filter(None, [driver, driver_annotation]))
                    ups_choices[key] = f'{title} ({annotation})'
        return ups_choices

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        driver = data.get('driver')
        if driver:
            if driver not in (
                    await self.middleware.call('ups.driver_choices')).keys():
                verrors.add(
                    f'{schema}.driver',
                    'Driver selected does not match local machine\'s driver list'
                )

        identifier = data['identifier']
        if identifier:
            if not re.search(r'^[a-z0-9\.\-_]+$', identifier, re.I):
                verrors.add(f'{schema}.identifier',
                            'Use alphanumeric characters, ".", "-" and "_"')

        for field in ['monpwd', 'monuser']:
            if not data.get(field):
                verrors.add(f'{schema}.{field}', 'This field is required.')
            elif re.search(r'[ #]', data[field], re.I):
                verrors.add(f'{schema}.{field}',
                            'Spaces or number signs are not allowed.')

        mode = data.get('mode')
        if mode == 'MASTER':
            for field in filter(lambda f: not data[f], ['port', 'driver']):
                verrors.add(f'{schema}.{field}', 'This field is required')
        else:
            if not data.get('remotehost'):
                verrors.add(f'{schema}.remotehost', 'This field is required')

        to_emails = data.get('toemail')
        if to_emails:
            data['toemail'] = ';'.join(to_emails)
        else:
            data['toemail'] = ''

        data['mode'] = data['mode'].lower()
        data['shutdown'] = data['shutdown'].lower()

        return verrors, data

    @accepts(
        Dict('ups_update',
             Bool('emailnotify'),
             Bool('powerdown'),
             Bool('rmonitor'),
             Int('nocommwarntime', null=True),
             Int('remoteport', validators=[Port()]),
             Int('shutdowntimer'),
             Int('hostsync', validators=[Range(min=0)]),
             Str('description'),
             Str('driver'),
             Str('extrausers', max_length=None),
             Str('identifier', empty=False),
             Str('mode', enum=['MASTER', 'SLAVE']),
             Str('monpwd', empty=False),
             Str('monuser', empty=False),
             Str('options', max_length=None),
             Str('optionsupsd', max_length=None),
             Str('port'),
             Str('remotehost'),
             Str('shutdown', enum=['LOWBATT', 'BATT']),
             Str('shutdowncmd', null=True),
             Str('subject'),
             List('toemail', items=[Str('email', validators=[Email()])]),
             update=True))
    async def do_update(self, data):
        """
        Update UPS Service Configuration.

        `emailnotify` when enabled, sends out notifications of different UPS events via email.

        `powerdown` when enabled, sets UPS to power off after shutting down the system.

        `nocommwarntime` is a value in seconds which makes the UPS service wait the specified number of seconds
        before alerting that it cannot reach the configured UPS.

        `shutdowntimer` is a value in seconds which tells the service to wait the specified number of seconds for
        the UPS before initiating a shutdown. This only applies when `shutdown` is set to "BATT".

        `shutdowncmd` is the command which is executed to initiate a shutdown. It defaults to "poweroff".

        `toemail` is a list of valid email addresses to which notification emails are sent.
        """
        config = await self.config()
        config.pop('complete_identifier')
        old_config = config.copy()
        config.update(data)
        verrors, config = await self.validate_data(config, 'ups_update')
        if verrors:
            raise verrors

        old_config['mode'] = old_config['mode'].lower()
        old_config['shutdown'] = old_config['shutdown'].lower()
        old_config['toemail'] = ';'.join(
            old_config['toemail']) if old_config['toemail'] else ''

        if len(set(old_config.items()) ^ set(config.items())) > 0:
            if config['identifier'] != old_config['identifier']:
                await self.dismiss_alerts()

            await self._update_service(old_config, config)

        return await self.config()

    @private
    async def alerts_mapping(self):
        return {
            'LOWBATT': 'UPSBatteryLow',
            'COMMBAD': 'UPSCommbad',
            'COMMOK': 'UPSCommok',
            'ONBATT': 'UPSOnBattery',
            'ONLINE': 'UPSOnline',
            'REPLBATT': 'UPSReplbatt'
        }

    @private
    async def dismiss_alerts(self):
        config = await self.config()

        for alert in (await self.alerts_mapping()).values():
            await self.middleware.call('alert.oneshot_delete', alert,
                                       {'ups': config['identifier']})

    @private
    @accepts(Str('notify_type'))
    async def upssched_event(self, notify_type):
        config = await self.config()
        upsc_identifier = config['complete_identifier']
        cp = await run('upsc', upsc_identifier, check=False)
        if cp.returncode:
            stats_output = ''
            self.logger.error('Failed to retrieve ups information: %s',
                              cp.stderr.decode())
        else:
            stats_output = cp.stdout.decode()

        if RE_TEST_IN_PROGRESS.search(stats_output):
            self.logger.debug(
                'Self test is in progress and %r notify event should be ignored',
                notify_type)
            return

        if notify_type.lower() == 'shutdown':
            # Before we start FSD with upsmon, let's ensure that the UPS is not ONLINE (OL).
            # There are cases where battery/charger issues can result in ups.status being "OL LB" at the
            # same time. This ensures that we don't initiate a shutdown if the UPS is OL.
            ups_status = RE_UPS_STATUS.findall(stats_output)
            if ups_status and 'ol' in ups_status[0].lower():
                self.middleware.logger.debug(
                    f'Shutdown not initiated as ups.status ({ups_status[0]}) indicates '
                    f'{config["identifier"]} is ONLINE (OL).')
            else:
                # if we shutdown the active node while the passive is still online
                # then we're just going to cause a failover event. Shut the passive down
                # first and then shut the active node down
                if await self.middleware.call('failover.licensed'):
                    if await self.middleware.call('failover.status') == 'MASTER':
                        syslog.syslog(
                            syslog.LOG_NOTICE,
                            'upssched-cmd "issuing shutdown" for passive node')
                        try:
                            await self.middleware.call('failover.call_remote',
                                                       'ups.upssched_event',
                                                       'shutdown')
                        except Exception as e:
                            syslog.syslog(
                                syslog.LOG_ERR,
                                f'failed shutting down passive node with error {e}'
                            )

                syslog.syslog(syslog.LOG_NOTICE,
                              'upssched-cmd "issuing shutdown"')
                await run('upsmon', '-c', 'fsd', check=False)

        elif 'notify' in notify_type.lower():
            # notify_type is expected to be of the format
            # NOTIFY-EVENT, e.g. NOTIFY-LOWBATT
            notify_type = notify_type.split('-')[-1]

            # We would like to send alerts for the following events
            alert_mapping = await self.alerts_mapping()

            await self.dismiss_alerts()

            if notify_type in alert_mapping:
                await self.middleware.call('alert.oneshot_create',
                                           alert_mapping[notify_type],
                                           {'ups': config['identifier']})

            if config['emailnotify']:
                # Email user with the notification event and details
                # We send the email in the following format ( inclusive line breaks )

                # NOTIFICATION: 'LOWBATT'
                # UPS: 'ups'
                #
                # Statistics recovered:
                #
                # 1) Battery charge (percent)
                # battery.charge: 5
                #
                # 2) Remaining battery level when UPS switches to LB (percent)
                # battery.charge.low: 10
                #
                # 3) Battery runtime (seconds)
                # battery.runtime: 1860
                #
                # 4) Remaining battery runtime when UPS switches to LB (seconds)
                # battery.runtime.low: 900

                ups_name = config['identifier']
                hostname = socket.gethostname()
                current_time = datetime.datetime.now(
                    tz=dateutil.tz.tzlocal()).strftime('%a %b %d %H:%M:%S %Z %Y')
                ups_subject = config['subject'].replace(
                    '%d', current_time).replace('%h', hostname)
                body = f'NOTIFICATION: {notify_type!r}\n\nUPS: {ups_name!r}\n\n'

                # Let's gather following stats
                data_points = {
                    'battery.charge':
                    'Battery charge (percent)',
                    'battery.charge.low':
                    'Battery level remaining (percent) when UPS switches to Low Battery (LB)',
                    'battery.charge.status':
                    'Battery charge status',
                    'battery.runtime':
                    'Battery runtime (seconds)',
                    'battery.runtime.low':
                    'Battery runtime remaining (seconds) when UPS switches to Low Battery (LB)',
                    'battery.runtime.restart':
                    'Minimum battery runtime (seconds) to allow UPS restart after power-off',
                }

                stats_output = (await run('upsc', upsc_identifier,
                                          check=False)).stdout
                recovered_stats = re.findall(
                    fr'({"|".join(data_points)}): (.*)',
                    '' if not stats_output else stats_output.decode())

                if recovered_stats:
                    body += 'Statistics recovered:\n\n'
                    # recovered_stats is expected to be a list in this format
                    # [('battery.charge', '5'), ('battery.charge.low', '10'), ('battery.runtime', '1860')]
                    for index, stat in enumerate(recovered_stats):
                        body += f'{index + 1}) {data_points[stat[0]]}\n  {stat[0]}: {stat[1]}\n\n'

                else:
                    body += 'Statistics could not be recovered\n'

                # Subject and body defined, send email
                job = await self.middleware.call('mail.send', {
                    'subject': ups_subject,
                    'text': body,
                    'to': config['toemail']
                })

                await job.wait()
                if job.error:
                    self.middleware.logger.debug(
                        f'Failed to send UPS status email: {job.error}')

        else:
            self.middleware.logger.debug(
                f'Unrecognized UPS notification event: {notify_type}')
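The statistics-recovery step above is a plain regular-expression scan over upsc(8) output: one alternation built from the data_points keys, applied with re.findall. Below is a minimal, self-contained sketch of the same technique; the sample output and names are illustrative, not taken from the service above.

import re

# Illustrative upsc(8) output; real values depend on the driver and UPS model.
SAMPLE_OUTPUT = '''battery.charge: 5
battery.charge.low: 10
battery.runtime: 1860
battery.runtime.low: 900
ups.status: OB LB
'''

DATA_POINTS = {
    'battery.charge': 'Battery charge (percent)',
    'battery.charge.low': 'Remaining battery level when UPS switches to LB (percent)',
    'battery.runtime': 'Battery runtime (seconds)',
    'battery.runtime.low': 'Remaining battery runtime when UPS switches to LB (seconds)',
}

# findall with two capture groups returns (key, value) tuples, the same shape
# the email body builder above consumes. As in the service, the '.' in each
# key is left as an unescaped regex dot.
recovered = re.findall(fr'({"|".join(DATA_POINTS)}): (.*)', SAMPLE_OUTPUT)

body = ''
for index, (key, value) in enumerate(recovered, start=1):
    body += f'{index}) {DATA_POINTS[key]}\n  {key}: {value}\n\n'
print(body)
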
Example No. 30
0
class SystemAdvancedService(ConfigService):
    class Config:
        datastore = 'system.advanced'
        datastore_prefix = 'adv_'
        datastore_extend = 'system.advanced.system_advanced_extend'
        namespace = 'system.advanced'

    @accepts()
    async def serial_port_choices(self):
        """
        Get available choices for `serialport` attribute in `system.advanced.update`.
        """
        if (not await self.middleware.call('system.is_freenas') and
                await self.middleware.call('failover.hardware') == 'ECHOSTREAM'):
            ports = {'0x3f8': '0x3f8'}
        else:
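            # devinfo(8) lists all I/O port assignments; the grep/sed pipeline
            # keeps only base addresses attributed to uart devices (e.g. 0x3f8).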
            pipe = await Popen(
                "/usr/sbin/devinfo -u | grep -A 99999 '^I/O ports:' | "
                "sed -En 's/ *([0-9a-fA-Fx]+).*\(uart[0-9]+\)/\\1/p'",
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=True)
            ports = {
                y: y
                for y in (await pipe.communicate()
                          )[0].decode().strip().strip('\n').split('\n') if y
            }

        if not ports or (await self.config())['serialport'] == '0x2f8':
            # Always offer 0x2f8 when no ports were detected or when the current
            # value is the database default, i.e. 0x2f8
            ports['0x2f8'] = '0x2f8'

        return ports

    @private
    async def system_advanced_extend(self, data):

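        # The datastore stores sed_user in lowercase (see do_update below);
        # expose it in uppercase to match the schema enum ('USER', 'MASTER').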
        if data.get('sed_user'):
            data['sed_user'] = data.get('sed_user').upper()

        return data

    async def __validate_fields(self, schema, data):
        verrors = ValidationErrors()

        user = data.get('periodic_notifyuser')
        if user:
            if not (await self.middleware.call('notifier.get_user_object',
                                               user)):
                verrors.add(f'{schema}.periodic_notifyuser',
                            'Specified user does not exist')

        serial_choice = data.get('serialport')
        if data.get('serialconsole'):

            if not serial_choice:
                verrors.add(
                    f'{schema}.serialport',
                    'Please specify a serial port when the serial console option is checked'
                )
            elif serial_choice not in await self.serial_port_choices():
                verrors.add(
                    f'{schema}.serialport',
                    'The specified serial port was not identified by the system'
                )

        elif not serial_choice:
            # TODO: THIS CHECK CAN BE REMOVED WHEN WE DISALLOW NONE VALUES IN THE SCHEMA LAYER

            verrors.add(f'{schema}.serialport',
                        'Empty serial port is not allowed')

        return verrors, data

    @accepts(
        Dict('system_advanced_update',
             Bool('advancedmode'),
             Bool('autotune'),
             Int('boot_scrub', validators=[Range(min=1)]),
             Bool('consolemenu'),
             Bool('consolemsg'),
             Bool('debugkernel'),
             Bool('fqdn_syslog'),
             Str('motd'),
             Str('periodic_notifyuser'),
             Bool('powerdaemon'),
             Bool('serialconsole'),
             Str('serialport'),
             Str('serialspeed',
                 enum=['9600', '19200', '38400', '57600', '115200']),
             Int('swapondrive', validators=[Range(min=0)]),
             Bool('traceback'),
             Bool('uploadcrash'),
             Bool('anonstats'),
             Str('sed_user', enum=['USER', 'MASTER']),
             Str('sed_passwd', private=True),
             update=True))
    async def do_update(self, data):
        config_data = await self.config()
        original_data = config_data.copy()
        config_data.update(data)

        verrors, config_data = await self.__validate_fields(
            'advanced_settings_update', config_data)
        if verrors:
            raise verrors

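        # Only write to the datastore and restart services when at least one
        # setting actually changed (non-empty symmetric difference of item sets).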
        if len(set(config_data.items()) ^ set(original_data.items())) > 0:
            if original_data.get('sed_user'):
                original_data['sed_user'] = original_data['sed_user'].lower()
            if config_data.get('sed_user'):
                config_data['sed_user'] = config_data['sed_user'].lower()

            # PASSWORD ENCRYPTION FOR SED IS BEING DONE IN THE MODEL ITSELF

            await self.middleware.call(
                'datastore.update', self._config.datastore, config_data['id'],
                config_data, {'prefix': self._config.datastore_prefix})

            if original_data['boot_scrub'] != config_data['boot_scrub']:
                await self.middleware.call('service.restart', 'cron')

            loader_reloaded = False
            if original_data['motd'] != config_data['motd']:
                await self.middleware.call('service.start', 'motd',
                                           {'onetime': False})

            if original_data['consolemenu'] != config_data['consolemenu']:
                await self.middleware.call('service.start', 'ttys',
                                           {'onetime': False})

            if original_data['powerdaemon'] != config_data['powerdaemon']:
                await self.middleware.call('service.restart', 'powerd',
                                           {'onetime': False})

            if original_data['serialconsole'] != config_data['serialconsole']:
                await self.middleware.call('service.start', 'ttys',
                                           {'onetime': False})
                if not loader_reloaded:
                    await self.middleware.call('service.reload', 'loader',
                                               {'onetime': False})
                    loader_reloaded = True
            elif (original_data['serialspeed'] != config_data['serialspeed']
                  or original_data['serialport'] != config_data['serialport']):
                if not loader_reloaded:
                    await self.middleware.call('service.reload', 'loader',
                                               {'onetime': False})
                    loader_reloaded = True

            if original_data['autotune'] != config_data['autotune']:
                if not loader_reloaded:
                    await self.middleware.call('service.reload', 'loader',
                                               {'onetime': False})
                    loader_reloaded = True
                await self.middleware.call('system.advanced.autotune',
                                           'loader')
                await self.middleware.call('system.advanced.autotune',
                                           'sysctl')

            if (original_data['debugkernel'] != config_data['debugkernel']
                    and not loader_reloaded):
                await self.middleware.call('service.reload', 'loader',
                                           {'onetime': False})

            if original_data['periodic_notifyuser'] != config_data[
                    'periodic_notifyuser']:
                await self.middleware.call('service.start', 'ix-periodic',
                                           {'onetime': False})

            if original_data['fqdn_syslog'] != config_data['fqdn_syslog']:
                await self.middleware.call('service.restart', 'syslogd',
                                           {'onetime': False})

        return await self.config()

    @private
    def autotune(self, conf='loader'):
        if self.middleware.call_sync('system.is_freenas'):
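            # FreeNAS defaults (bytes): 1 GiB reserved for the kernel,
            # ~2.25 GiB for userland.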
            kernel_reserved = 1073741824
            userland_reserved = 2417483648
        else:
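            # TrueNAS defaults (bytes): 6 GiB reserved for the kernel,
            # 4.5 GiB for userland.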
            kernel_reserved = 6442450944
            userland_reserved = 4831838208
        cp = subprocess.run([
            'autotune', '-o', f'--kernel-reserved={kernel_reserved}',
            f'--userland-reserved={userland_reserved}', '--conf', conf
        ],
                            capture_output=True)
        return cp.returncode
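
do_update above gates the datastore write on the symmetric difference of the two settings dicts viewed as item sets. The following is a minimal sketch of that idiom with hypothetical settings; it assumes every value is hashable, which holds for the flat config above.

original = {'motd': 'welcome', 'serialconsole': False, 'serialport': '0x2f8'}
updated = dict(original, serialconsole=True, serialport='0x3f8')

# Every (key, value) pair that appears in exactly one of the two dicts,
# i.e. every setting whose value changed.
changed = set(updated.items()) ^ set(original.items())
if changed:
    changed_keys = sorted({key for key, _ in changed})
    print(f'changed settings: {changed_keys}')  # ['serialconsole', 'serialport']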