Example #1
class TFTPService(SystemServiceService):

    class Config:
        service = "tftp"
        datastore_prefix = "tftp_"

    @accepts(Dict(
        'tftp_update',
        Bool('newfiles'),
        Dir('directory'),
        Str('host', validators=[IpAddress()]),
        Int('port', validators=[Port()]),
        Str('options'),
        Str('umask', validators=[Match(r"^[0-7]{3}$")]),
        Str('username'),
        update=True
    ))
    async def do_update(self, data):
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if new["directory"]:
            await check_path_resides_within_volume(verrors, self.middleware, "tftp_update.directory", new["directory"])

        if verrors:
            raise verrors

        await self._update_service(old, new)

        return await self.config()
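
A minimal usage sketch (not part of the original example): assuming the code runs on the NAS itself with the local middleware socket available, the `tftp.update` method above could be called through the Python middleware client roughly like this. All values below are placeholders.

from middlewared.client import Client

# Illustrative only; Client() connects to the local middleware socket by default.
with Client() as c:
    # update=True schema: only the fields being changed need to be supplied.
    result = c.call('tftp.update', {
        'directory': '/mnt/tank/tftproot',  # hypothetical dataset path
        'host': '0.0.0.0',
        'port': 69,
        'umask': '022',
        'newfiles': True,
    })
    print(result)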
Example #2
class TFTPService(SystemServiceService):
    class Config:
        service = "tftp"
        datastore_prefix = "tftp_"
        cli_namespace = "service.tftp"

    ENTRY = Dict(
        'tftp_entry',
        Bool('newfiles', required=True),
        Str('directory', required=True),
        Str('host', validators=[IpAddress()], required=True),
        Int('port', validators=[Port()], required=True),
        Str('options', required=True),
        Str('umask', required=True, validators=[Match(r'^[0-7]{3}$')]),
        Str('username', required=True),
        Int('id', required=True),
    )

    @accepts(
        Patch(
            'tftp_entry',
            'tftp_update',
            ('rm', {
                'name': 'id'
            }),
            ('replace', Dir('directory')),
            ('attr', {
                'update': True
            }),
        ))
    async def do_update(self, data):
        """
        Update TFTP Service Configuration.

        `newfiles` when set enables network devices to send files to the system.

        `username` sets the user account which will be used to access `directory`. Ensure that `username` has access
        to `directory`.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if new["directory"]:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   "tftp_update.directory",
                                                   new["directory"])

        if verrors:
            raise verrors

        await self._update_service(old, new)

        return await self.config()
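
For reference, a rough hand-written expansion of the `Patch('tftp_entry', 'tftp_update', ...)` schema above (illustrative only, not generated by the framework, and assuming the usual middlewared.schema / middlewared.validators imports): the ('rm', ...) operation drops `id`, ('replace', ...) swaps the `directory` field for a `Dir` type, and ('attr', {'update': True}) turns the result into a partial-update schema in which the remaining fields are no longer mandatory.

from middlewared.schema import Bool, Dict, Dir, Int, Str
from middlewared.validators import IpAddress, Match, Port

# Approximate equivalent of the Patch above -- for illustration only.
tftp_update = Dict(
    'tftp_update',
    Bool('newfiles'),
    Dir('directory'),                                 # swapped in by ('replace', Dir('directory'))
    Str('host', validators=[IpAddress()]),
    Int('port', validators=[Port()]),
    Str('options'),
    Str('umask', validators=[Match(r'^[0-7]{3}$')]),
    Str('username'),
    # 'id' removed by ('rm', {'name': 'id'})
    update=True,                                      # set by ('attr', {'update': True})
)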
Example #3
class OpenVPNClientService(SystemServiceService):
    class Config:
        namespace = 'openvpn.client'
        service = 'openvpn_client'
        service_model = 'openvpnclient'
        service_verb = 'restart'
        datastore_extend = 'openvpn.client.client_extend'

    @private
    async def client_extend(self, data):
        data.update({
            'client_certificate':
            None if not data['client_certificate'] else
            data['client_certificate']['id'],
            'root_ca':
            None if not data['root_ca'] else data['root_ca']['id'],
            'tls_crypt_auth_enabled':
            bool(data['tls_crypt_auth']),
            'interface':
            'openvpn-client',
        })
        return data

    @accepts()
    async def authentication_algorithm_choices(self):
        """
        Returns a dictionary of valid authentication algorithms which can be used with the OpenVPN client.
        """
        return OpenVPN.digests()

    @accepts()
    async def cipher_choices(self):
        """
        Returns a dictionary of valid ciphers which can be used with the OpenVPN client.
        """
        return OpenVPN.ciphers()

    @private
    async def validate(self, data, schema_name):
        verrors, data = await OpenVPN.common_validation(
            self.middleware, data, schema_name, 'client')

        if not data.get('remote'):
            verrors.add(f'{schema_name}.remote', 'This field is required.')

        if not await self.validate_nobind(data):
            verrors.add(
                f'{schema_name}.nobind',
                'Please enable this to concurrently run OpenVPN Server/Client on the same local port.'
            )

        verrors.check()

        return data

    @private
    async def validate_nobind(self, config):
        if (await self.middleware.call('service.started', 'openvpn_server')
                and config['port'] == (
                    await self.middleware.call('openvpn.server.config')
                )['port']
                and not config['nobind']):
            return False
        else:
            return True

    @private
    async def config_valid(self):
        config = await self.config()
        if not config['root_ca']:
            raise CallError('Please configure root_ca first.')
        else:
            if not await self.middleware.call(
                    'certificateauthority.query',
                [['id', '=', config['root_ca']], ['revoked', '=', False]]):
                raise CallError(
                    'Root CA has been revoked. Please select another Root CA.')

        if not config['client_certificate']:
            raise CallError('Please configure client certificate first.')
        else:
            if not await self.middleware.call('certificate.query', [[
                    'id', '=', config['client_certificate']
            ], ['revoked', '=', False]]):
                raise CallError(
                    'Client certificate has been revoked. Please select another Client certificate.'
                )

        if not config['remote']:
            raise CallError('Please configure remote first.')

        if not await self.validate_nobind(config):
            raise CallError(
                'Please enable "nobind" to concurrently run OpenVPN Server/Client on the same local port.'
            )

    @accepts(
        Dict('openvpn_client_update',
             Bool('nobind'),
             Bool('tls_crypt_auth_enabled'),
             Int('client_certificate', null=True),
             Int('root_ca', null=True),
             Int('port', validators=[Port()]),
             Str('additional_parameters'),
             Str('authentication_algorithm', null=True),
             Str('cipher', null=True),
             Str('compression', null=True, enum=['LZO', 'LZ4']),
             Str('device_type', enum=['TUN', 'TAP']),
             Str('protocol', enum=PROTOCOLS),
             Str('remote'),
             Str('tls_crypt_auth', null=True),
             update=True))
    async def do_update(self, data):
        """
        Update OpenVPN Client configuration.

        `remote` can be a valid IP address or domain name which OpenVPN will try to connect to.

        `nobind` must be enabled if OpenVPN client / server are to run concurrently.
        """
        old_config = await self.config()
        old_config.pop('interface')
        config = old_config.copy()

        config.update(data)

        config = await self.validate(config, 'openvpn_client_update')

        await self._update_service(old_config, config)

        return await self.config()
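
A minimal usage sketch for the client service above (not part of the original example): it queries the advertised cipher and digest choices and then performs a partial update. All values below are placeholders; `openvpn.client.update` maps to the `do_update` method shown.

from middlewared.client import Client

with Client() as c:
    ciphers = c.call('openvpn.client.cipher_choices')
    digests = c.call('openvpn.client.authentication_algorithm_choices')

    # update=True schema: only the fields being changed need to be supplied.
    config = c.call('openvpn.client.update', {
        'remote': 'vpn.example.com',                    # hypothetical OpenVPN server
        'port': 1194,
        'nobind': True,                                 # needed if server/client share a local port
        'cipher': next(iter(ciphers)),                  # pick any advertised cipher
        'authentication_algorithm': next(iter(digests)),
    })
    print(config)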
Example #4
class OpenVPNServerService(SystemServiceService):
    class Config:
        namespace = 'openvpn.server'
        service = 'openvpn_server'
        service_model = 'openvpnserver'
        service_verb = 'restart'
        datastore_extend = 'openvpn.server.server_extend'

    @private
    async def server_extend(self, data):
        data.update({
            'server_certificate':
            None if not data['server_certificate'] else
            data['server_certificate']['id'],
            'root_ca':
            None if not data['root_ca'] else data['root_ca']['id'],
            'tls_crypt_auth_enabled':
            bool(data['tls_crypt_auth']),
            'interface':
            'openvpn-server',
        })
        return data

    @private
    async def config_valid(self):
        config = await self.config()
        if not config['root_ca']:
            raise CallError('Please configure root_ca first.')
        else:
            if not await self.middleware.call(
                    'certificateauthority.query',
                [['id', '=', config['root_ca']], ['revoked', '=', False]]):
                raise CallError(
                    'Root CA has been revoked. Please select another Root CA.')

        if not config['server_certificate']:
            raise CallError('Please configure server certificate first.')
        else:
            if not await self.middleware.call('certificate.query', [[
                    'id', '=', config['server_certificate']
            ], ['revoked', '=', False]]):
                raise CallError(
                    'Server certificate has been revoked. Please select another Server certificate.'
                )

        if not await self.validate_nobind(config):
            raise CallError(
                'Please enable "nobind" on OpenVPN Client to concurrently run OpenVPN Server/Client '
                'on the same local port without any issues.')

    @accepts()
    async def authentication_algorithm_choices(self):
        """
        Returns a dictionary of valid authentication algorithms which can be used with OpenVPN server.
        """
        return OpenVPN.digests()

    @accepts()
    async def cipher_choices(self):
        """
        Returns a dictionary of valid ciphers which can be used with OpenVPN server.
        """
        return OpenVPN.ciphers()

    @private
    async def validate(self, data, schema_name):
        verrors, data = await OpenVPN.common_validation(
            self.middleware, data, schema_name, 'server')

        if not await self.validate_nobind(data):
            verrors.add(
                f'{schema_name}.nobind',
                'Please enable "nobind" on OpenVPN Client to concurrently run OpenVPN Server/Client '
                'on the same local port without any issues.')

        if ipaddress.ip_address(
                data['server']).version == 4 and data['netmask'] > 32:
            verrors.add(
                f'{schema_name}.netmask',
                'For IPv4 server addresses please provide a netmask value from 0-32.'
            )

        verrors.check()

        return data

    @private
    async def validate_nobind(self, config):
        client_config = await self.middleware.call('openvpn.client.config')
        if (await self.middleware.call('service.started', 'openvpn_client')
                and config['port'] == client_config['port']
                and not client_config['nobind']):
            return False
        else:
            return True

    @private
    async def generate_static_key(self):
        keyfile = tempfile.NamedTemporaryFile(mode='w+', dir='/tmp/')
        await run(['openvpn', '--genkey', '--secret', keyfile.name])
        keyfile.seek(0)
        key = keyfile.read()
        keyfile.close()
        return key.strip()

    @accepts()
    async def renew_static_key(self):
        """
        Reset OpenVPN server's TLS static key which will be used to encrypt/authenticate control channel packets.
        """
        return await self.update({
            'tls_crypt_auth': (await self.generate_static_key()),
            'tls_crypt_auth_enabled':
            True
        })

    @accepts(Int('client_certificate_id'), Str('server_address', null=True))
    async def client_configuration_generation(self,
                                              client_certificate_id,
                                              server_address=None):
        """
        Returns a configuration for OpenVPN client which can be used with any client to connect to FN/TN OpenVPN
        server.

        `client_certificate_id` should be a valid certificate issued for use with OpenVPN client service.

        `server_address`, if specified, auto-fills the remote directive in the OpenVPN configuration, enabling the end
        user to use the file without any edits to connect to the OpenVPN server.
        """
        await self.config_valid()
        config = await self.config()
        root_ca = await self.middleware.call('certificateauthority.query',
                                             [['id', '=', config['root_ca']]],
                                             {'get': True})
        client_cert = await self.middleware.call(
            'certificate.query',
            [['id', '=', client_certificate_id], ['revoked', '=', False]])
        if not client_cert:
            raise CallError(
                'Please provide a client certificate id for a certificate which exists on '
                'the system and hasn\'t been marked as revoked.')
        else:
            client_cert = client_cert[0]
            if (await OpenVPN.common_validation(
                    self.middleware, {
                        **config, 'client_certificate': client_certificate_id
                    }, '', 'client'))[0]:
                raise CallError(
                    'Please ensure provided client certificate exists in Root CA chain '
                    'and has necessary extensions set.')

        client_config = [
            'client',
            f'dev {config["device_type"].lower()}',
            f'proto {config["protocol"].lower()}',
            f'port {config["port"]}',
            f'remote "{server_address or "PLEASE FILL OUT SERVER DOMAIN/IP HERE"}"',
            'user nobody',
            'group nobody',
            'persist-key',
            'persist-tun',
            '<ca>',
            f'{root_ca["certificate"]}',
            '</ca>',
            '<cert>',
            client_cert['certificate'],
            '</cert>',
            '<key>',
            client_cert['privatekey'],
            '</key>',
            'verb 3',
            'remote-cert-tls server',
            f'compress {config["compression"].lower()}'
            if config['compression'] else None,
            f'auth {config["authentication_algorithm"]}'
            if config['authentication_algorithm'] else None,
            f'cipher {config["cipher"]}' if config['cipher'] else None,
        ]

        if config['tls_crypt_auth_enabled']:
            client_config.extend(
                ['<tls-crypt>', config['tls_crypt_auth'], '</tls-crypt>'])

        return '\n'.join(filter(bool, client_config)).strip()

    @accepts(
        Dict('openvpn_server_update',
             Bool('tls_crypt_auth_enabled'),
             Int('netmask', validators=[Range(min=0, max=128)]),
             Int('server_certificate', null=True),
             Int('port', validators=[Port()]),
             Int('root_ca', null=True),
             IPAddr('server'),
             Str('additional_parameters'),
             Str('authentication_algorithm', null=True),
             Str('cipher', null=True),
             Str('compression', null=True, enum=['LZO', 'LZ4']),
             Str('device_type', enum=['TUN', 'TAP']),
             Str('protocol', enum=PROTOCOLS),
             Str('tls_crypt_auth', null=True),
             Str('topology', null=True, enum=['NET30', 'P2P', 'SUBNET']),
             update=True))
    async def do_update(self, data):
        """
        Update OpenVPN Server configuration.

        When `tls_crypt_auth_enabled` is enabled and `tls_crypt_auth` not provided, a static key is automatically
        generated to be used with OpenVPN server.
        """
        old_config = await self.config()
        old_config.pop('interface')
        config = old_config.copy()

        config.update(data)

        # If tls_crypt_auth_enabled is set and we don't have a tls_crypt_auth key,
        # let's generate one please
        if config['tls_crypt_auth_enabled'] and not config['tls_crypt_auth']:
            config['tls_crypt_auth'] = await self.generate_static_key()

        config = await self.validate(config, 'openvpn_server_update')

        await self._update_service(old_config, config)

        return await self.config()
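
A minimal usage sketch for the server service above (not part of the original example), rotating the static key and producing a client configuration file. The certificate id and server address are placeholders.

from middlewared.client import Client

with Client() as c:
    # Rotate the TLS static key used to encrypt/authenticate control channel packets.
    c.call('openvpn.server.renew_static_key')

    # Generate a ready-to-use client configuration for an existing client certificate.
    ovpn_text = c.call('openvpn.server.client_configuration_generation', 5, '192.0.2.10')
    with open('client.ovpn', 'w') as f:
        f.write(ovpn_text)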
Example #5
class VCenterService(ConfigService):

    PRIVATE_GROUP_NAME = 'iXSystems'

    class Config:
        datastore = 'vcp.vcenterconfiguration'
        datastore_prefix = 'vc_'
        datastore_extend = 'vcenter.vcenter_extend'

    @private
    async def common_validation(self, data, schema_name):
        verrors = ValidationErrors()

        ip = data.get('ip')
        if ip:
            await resolve_hostname(self.middleware, verrors,
                                   f'{schema_name}.ip', ip)

        management_ip = data.get('management_ip')
        if management_ip and management_ip not in (
                await self.get_management_ip_choices()):
            verrors.add(f'{schema_name}.management_ip',
                        'Please select a valid IP for your TrueNAS system')

        action = data.get('action')
        if action and action != 'UNINSTALL':
            if (not (await self.middleware.call('vcenteraux.config'))['enable_https']
                    and (await self.middleware.call('system.general.config'))['ui_httpsredirect']):
                verrors.add(f'{schema_name}.action',
                            'Please enable vCenter plugin over HTTPS')

        return verrors

    @private
    async def vcenter_extend(self, data):
        data['password'] = await self.middleware.call('notifier.pwenc_decrypt',
                                                      data['password'])
        data['port'] = int(
            data['port']) if data['port'] else 443  # Defaulting to 443
        return data

    @accepts(
        Dict(
            'vcenter_update_dict',
            Int('port', validators=[Port()]),
            Str('action',
                enum=['INSTALL', 'REPAIR', 'UNINSTALL', 'UPGRADE'],
                required=True),
            Str('management_ip'),
            Str('ip'),  # HOST IP
            Str('password', password=True),
            Str('username'),
        ))
    async def do_update(self, data):
        old = await self.config()
        new = old.copy()
        new.update(data)

        schema_name = 'vcenter_update'
        verrors = await self.common_validation(new, schema_name)
        if verrors:
            raise verrors

        action = new.pop('action')
        system_general = await self.middleware.call('system.general.config')
        ui_protocol = 'https' if system_general['ui_httpsredirect'] else 'http'
        ui_port = (system_general['ui_port'] if ui_protocol.lower() != 'https'
                   else system_general['ui_httpsport'])
        fingerprint = await self.middleware.call(
            'certificate.get_host_certificates_thumbprint',
            new['management_ip'], new['port'])
        plugin_file_name = await self.middleware.run_in_thread(
            self.get_plugin_file_name)
        # TODO: URL will change once the plugin file's location is shifted
        management_addr = f'{ui_protocol}://{new["management_ip"]}:{ui_port}/legacy/static/{plugin_file_name}'

        install_dict = {
            'port': new['port'],
            'fingerprint': fingerprint,
            'management_ip': management_addr,
            'ip': new['ip'],
            'password': new['password'],
            'username': new['username']
        }

        if action == 'INSTALL':

            if new['installed']:
                verrors.add(f'{schema_name}.action',
                            'Plugin is already installed')
            else:

                for r_key in ('management_ip', 'ip', 'password', 'port',
                              'username'):
                    if not new[r_key]:
                        verrors.add(
                            f'{schema_name}.{r_key}',
                            'This field is required to install the plugin')

                if verrors:
                    raise verrors

                try:
                    await self.middleware.run_in_thread(
                        self.__install_vcenter_plugin, install_dict)
                except ValidationError as e:
                    verrors.add_validation_error(e)
                else:
                    new['version'] = await self.middleware.run_in_thread(
                        self.get_plugin_version)
                    new['installed'] = True

        elif action == 'REPAIR':

            if not new['installed']:
                verrors.add(
                    f'{schema_name}.action',
                    'Plugin is not installed. Please install it first')
            else:

                # Repair is used when the database reports the plugin as installed
                # but vCenter fails to recognize the plugin extension.

                try:
                    credential_dict = install_dict.copy()
                    credential_dict.pop('management_ip')
                    credential_dict.pop('fingerprint')

                    found_plugin = await self.middleware.run_in_thread(
                        self._find_plugin, credential_dict)
                    if found_plugin:
                        verrors.add(f'{schema_name}.action',
                                    'Plugin repair is not required')
                except ValidationError as e:
                    verrors.add_validation_error(e)
                else:

                    if verrors:
                        raise verrors

                    try:
                        repair_dict = install_dict.copy()
                        repair_dict['install_mode'] = 'REPAIR'
                        await self.middleware.run_in_thread(
                            self.__install_vcenter_plugin, repair_dict)
                    except ValidationError as e:
                        verrors.add_validation_error(e)

        elif action == 'UNINSTALL':

            if not new['installed']:
                verrors.add(f'{schema_name}.action',
                            'Plugin is not installed on the system')
            else:

                try:
                    uninstall_dict = install_dict.copy()
                    uninstall_dict.pop('management_ip')
                    uninstall_dict.pop('fingerprint')
                    await self.middleware.run_in_thread(
                        self.__uninstall_vcenter_plugin, uninstall_dict)
                except ValidationError as e:
                    verrors.add_validation_error(e)
                else:
                    new['installed'] = False
                    new['port'] = 443
                    for key in new:
                        # flushing existing object with empty values
                        if key not in ('installed', 'id', 'port'):
                            new[key] = ''

        else:

            if not new['installed']:
                verrors.add(f'{schema_name}.action', 'Plugin not installed')
            elif not (await self.is_update_available()):
                verrors.add(f'{schema_name}.action',
                            'No update is available for vCenter plugin')
            else:

                try:
                    await self.middleware.run_in_thread(
                        self.__upgrade_vcenter_plugin, install_dict)
                except ValidationError as e:
                    verrors.add_validation_error(e)
                else:
                    new['version'] = await self.middleware.run_in_thread(
                        self.get_plugin_version)

        if verrors:
            raise verrors

        new['password'] = await self.middleware.call('notifier.pwenc_encrypt',
                                                     new['password'])

        await self.middleware.call('datastore.update', self._config.datastore,
                                   new['id'], new,
                                   {'prefix': self._config.datastore_prefix})

        return await self.config()

    @private
    async def is_update_available(self):
        latest_version = await self.middleware.run_in_thread(
            self.get_plugin_version)
        current_version = (await self.config())['version']
        return (
            latest_version
            if current_version and parse_version(latest_version) > parse_version(current_version)
            else None
        )

    @private
    async def plugin_root_path(self):
        return await self.middleware.call('notifier.gui_static_root')

    @private
    async def get_management_ip_choices(self):
        ip_list = await self.middleware.call('interfaces.ip_in_use',
                                             {'ipv4': True})

        return [ip_dict['address'] for ip_dict in ip_list]

    @private
    def get_plugin_file_name(self):
        # TODO: The path to the plugin should be moved over to middlewared from django
        root_path = self.middleware.call_sync('vcenter.plugin_root_path')
        return next(v for v in os.listdir(root_path)
                    if 'plugin' in v and '.zip' in v)

    @private
    def get_plugin_version(self):
        file_name = self.get_plugin_file_name()
        return file_name.split('_')[1]

    @private
    async def property_file_path(self):
        return os.path.join(
            (await self.middleware.call('notifier.gui_base_path')),
            'vcp/Extensionconfig.ini.dist')

    @private
    async def resource_folder_path(self):
        return os.path.join(
            (await self.middleware.call('notifier.gui_base_path')),
            'vcp/vcp_locales')

    @private
    def create_event_keyvalue_pairs(self):
        try:

            eri_list = []
            resource_folder_path = self.middleware.call_sync(
                'vcenter.resource_folder_path')
            for file_name in os.listdir(resource_folder_path):
                eri = vim.Extension.ResourceInfo()

                # Read locale file from vcp_locales
                eri.module = file_name.split("_")[0]
                with open(os.path.join(resource_folder_path, file_name),
                          'r') as locale_file:
                    for line in locale_file:
                        if len(line) > 2 and '=' in line:
                            if 'locale' in line:
                                eri.locale = line.split(
                                    '=')[1].lstrip().rstrip()
                            else:
                                prop = line.split('=')
                                key_val = vim.KeyValue()
                                key_val.key = prop[0].lstrip().rstrip()
                                key_val.value = prop[1].lstrip().rstrip()
                                eri.data.append(key_val)
                eri_list.append(eri)
            return eri_list
        except Exception as e:
            raise ValidationError('vcenter_update.create_event_keyvalue_pairs',
                                  f'Cannot read locales: {e}')

    @private
    def get_extension_key(self):
        cp = configparser.ConfigParser()
        cp.read(self.middleware.call_sync('vcenter.property_file_path'))
        return cp.get('RegisterParam', 'key')

    @accepts(
        Dict(
            'install_vcenter_plugin',
            Int('port', required=True),
            Str('fingerprint', required=True),
            Str('management_ip', required=True),
            Str('install_mode',
                enum=['NEW', 'REPAIR'],
                required=False,
                default='NEW'),
            Str('ip', required=True),  # HOST IP
            Str('password', password=True,
                required=True),  # Password should be decrypted
            Str('username', required=True),
            register=True))
    def __install_vcenter_plugin(self, data):

        encrypted_password = self.middleware.call_sync(
            'notifier.pwenc_encrypt', data['password'])

        update_zipfile_dict = data.copy()
        update_zipfile_dict.pop('management_ip')
        update_zipfile_dict.pop('fingerprint')
        update_zipfile_dict['password'] = encrypted_password
        update_zipfile_dict['plugin_version_old'] = 'null'
        update_zipfile_dict['plugin_version_new'] = self.get_plugin_version()
        self.__update_plugin_zipfile(update_zipfile_dict)

        data.pop('install_mode')

        try:
            ext = self.get_extension(data['management_ip'],
                                     data['fingerprint'])

            data.pop('fingerprint')
            data.pop('management_ip')
            si = self.__check_credentials(data)

            si.RetrieveServiceContent().extensionManager.RegisterExtension(ext)

        except vim.fault.NoPermission:
            raise ValidationError(
                'vcenter_update.username',
                'VCenter user has no permission to install the plugin')

    @accepts(
        Patch('install_vcenter_plugin',
              'uninstall_vcenter_plugin', ('rm', {
                  'name': 'fingerprint'
              }), ('rm', {
                  'name': 'install_mode'
              }), ('rm', {
                  'name': 'management_ip'
              }),
              register=True))
    def __uninstall_vcenter_plugin(self, data):
        try:
            extkey = self.get_extension_key()

            si = self.__check_credentials(data)
            si.RetrieveServiceContent().extensionManager.UnregisterExtension(
                extkey)

        except vim.fault.NoPermission:
            raise ValidationError(
                'vcenter_update.username',
                'VCenter user does not have necessary permission to uninstall the plugin'
            )

    @accepts(
        Patch('install_vcenter_plugin', 'upgrade_vcenter_plugin',
              ('rm', {
                  'name': 'install_mode'
              })))
    def __upgrade_vcenter_plugin(self, data):

        update_zipfile_dict = data.copy()
        update_zipfile_dict.pop('management_ip')
        update_zipfile_dict.pop('fingerprint')
        update_zipfile_dict['install_mode'] = 'UPGRADE'
        update_zipfile_dict['password'] = self.middleware.call_sync(
            'notifier.pwenc_encrypt', data['password'])
        update_zipfile_dict['plugin_version_old'] = str(
            (self.middleware.call_sync('vcenter.config'))['version'])
        update_zipfile_dict['plugin_version_new'] = self.middleware.call_sync(
            'vcenter.get_plugin_version')
        self.__update_plugin_zipfile(update_zipfile_dict)

        try:
            ext = self.get_extension(data['management_ip'],
                                     data['fingerprint'])

            data.pop('fingerprint')
            data.pop('management_ip')
            si = self.__check_credentials(data)

            si.RetrieveServiceContent().extensionManager.UpdateExtension(ext)

        except vim.fault.NoPermission:
            raise ValidationError(
                'vcenter_update.username',
                'VCenter user has no permission to upgrade the plugin')

    @accepts(Ref('uninstall_vcenter_plugin'))
    def _find_plugin(self, data):
        try:
            si = self.__check_credentials(data)

            extkey = self.get_extension_key()
            ext = si.RetrieveServiceContent().extensionManager.FindExtension(
                extkey)

            if ext is None:
                return False
            else:
                return f'TrueNAS System : {ext.client[0].url.split("/")[2]}'
        except vim.fault.NoPermission:
            raise ValidationError(
                'vcenter_update.username',
                'VCenter user has no permission to find the plugin on this system'
            )

    @accepts(Ref('uninstall_vcenter_plugin'))
    def __check_credentials(self, data):
        try:
            si = SmartConnect("https",
                              data['ip'],
                              data['port'],
                              data['username'],
                              data['password'],
                              sslContext=get_context_object())

            if si:
                return si

        except (socket.gaierror, TimeoutError):
            raise ValidationError(
                'vcenter_update.ip',
                'Provided vCenter Hostname/IP or port are not valid')
        except vim.fault.InvalidLogin:
            raise ValidationError(
                'vcenter_update.username',
                'Provided vCenter credentials are not valid ( username or password )'
            )
        except vim.fault.NoPermission:
            raise ValidationError(
                'vcenter_update.username',
                'vCenter user does not have permission to perform this operation'
            )
        except Exception as e:

            if 'not a vim server' in str(e).lower():
                # If the provided IP belongs to a server that is not a VIM server, an
                # Exception is raised with text like:
                # Exception: 10.XX.XX.XX:443 is not a VIM server

                raise ValidationError(
                    'vcenter_update.ip',
                    'Provided Hostname/IP is not a VIM server')

            else:
                raise e

    @private
    def get_extension(self, vcp_url, fingerprint):
        try:
            cp = configparser.ConfigParser()
            cp.read(self.middleware.call_sync('vcenter.property_file_path'))
            version = self.middleware.call_sync('vcenter.get_plugin_version')

            description = vim.Description()
            description.label = cp.get('RegisterParam', 'label')
            description.summary = cp.get('RegisterParam', 'description')

            ext = vim.Extension()
            ext.company = cp.get('RegisterParam', 'company')
            ext.version = version
            ext.key = cp.get('RegisterParam', 'key')
            ext.description = description
            ext.lastHeartbeatTime = datetime.now()

            server_info = vim.Extension.ServerInfo()
            server_info.serverThumbprint = fingerprint
            server_info.type = vcp_url.split(':')[0].upper()  # sysgui protocol
            server_info.url = vcp_url
            server_info.description = description
            server_info.company = cp.get('RegisterParam', 'company')
            server_info.adminEmail = ['ADMIN EMAIL']
            ext.server = [server_info]

            client = vim.Extension.ClientInfo()
            client.url = vcp_url
            client.company = cp.get('RegisterParam', 'company')
            client.version = version
            client.description = description
            client.type = "vsphere-client-serenity"
            ext.client = [client]

            event_info = []
            for e in cp.get('RegisterParam', 'events').split(","):
                ext_event_type_info = vim.Extension.EventTypeInfo()
                ext_event_type_info.eventID = e
                event_info.append(ext_event_type_info)

            task_info = []
            for t in cp.get('RegisterParam', 'tasks').split(","):
                ext_type_info = vim.Extension.TaskTypeInfo()
                ext_type_info.taskID = t
                task_info.append(ext_type_info)

            # Register custom privileges required for vcp RBAC
            priv_info = []
            for priv in cp.get('RegisterParam', 'auth').split(","):
                ext_type_info = vim.Extension.PrivilegeInfo()
                ext_type_info.privID = priv
                ext_type_info.privGroupName = self.PRIVATE_GROUP_NAME
                priv_info.append(ext_type_info)

            ext.taskList = task_info
            ext.eventList = event_info
            ext.privilegeList = priv_info

            resource_list = self.create_event_keyvalue_pairs()
            ext.resourceList = resource_list

            return ext
        except configparser.NoOptionError as e:
            raise ValidationError('vcenter_update.get_extension',
                                  f'Property Missing : {e}')

    @private
    def extract_zip(self, src_path, dest_path):
        if not os.path.exists(dest_path):
            os.makedirs(dest_path)
        with zipfile.ZipFile(src_path) as zip_f:
            zip_f.extractall(dest_path)

    @private
    def zipdir(self, src_path, dest_path):

        assert os.path.isdir(src_path)
        with closing(zipfile.ZipFile(dest_path, "w")) as z:

            for root, dirs, files in os.walk(src_path):
                for fn in files:
                    absfn = os.path.join(root, fn)
                    zfn = absfn[len(src_path) + len(os.sep):]
                    z.write(absfn, zfn)

    @private
    def remove_directory(self, dest_path):
        if os.path.exists(dest_path):
            shutil.rmtree(dest_path)

    @accepts(
        Dict(
            'update_vcp_plugin_zipfile',
            Int('port', required=True),
            Str('ip', required=True, validators=[IpAddress()]),
            Str('install_mode',
                enum=['NEW', 'REPAIR', 'UPGRADE'],
                required=True),
            Str('plugin_version_old', required=True),
            Str('plugin_version_new', required=True),
            Str('password', required=True,
                password=True),  # should be encrypted
            Str('username', required=True),
            register=True))
    def __update_plugin_zipfile(self, data):
        file_name = self.middleware.call_sync('vcenter.get_plugin_file_name')
        plugin_root_path = self.middleware.call_sync(
            'vcenter.plugin_root_path')

        self.extract_zip(os.path.join(plugin_root_path, file_name),
                         os.path.join(plugin_root_path, 'plugin'))
        self.extract_zip(
            os.path.join(plugin_root_path,
                         'plugin/plugins/ixsystems-vcp-service.jar'),
            os.path.join(plugin_root_path,
                         'plugin/plugins/ixsystems-vcp-service'))

        data['fpath'] = os.path.join(
            plugin_root_path,
            'plugin/plugins/ixsystems-vcp-service/META-INF/config/install.properties'
        )

        self.__create_property_file(data)
        self.zipdir(
            os.path.join(plugin_root_path,
                         'plugin/plugins/ixsystems-vcp-service'),
            os.path.join(plugin_root_path,
                         'plugin/plugins/ixsystems-vcp-service.jar'))
        self.remove_directory(
            os.path.join(plugin_root_path,
                         'plugin/plugins/ixsystems-vcp-service'))

        shutil.make_archive(os.path.join(plugin_root_path, file_name[0:-4]),
                            'zip', os.path.join(plugin_root_path, 'plugin'))

        self.remove_directory(os.path.join(plugin_root_path, 'plugin'))

    @accepts(
        Patch(
            'update_vcp_plugin_zipfile',
            '__create_property_file',
            ('add', {
                'name': 'fpath',
                'type': 'str'
            }),
        ))
    def __create_property_file(self, data):
        # Password encrypted using notifier.pwenc_encrypt

        config = configparser.ConfigParser()
        with open(data['fpath'], 'w') as config_file:
            config.add_section('installation_parameter')
            config.set('installation_parameter', 'ip', data['ip'])
            config.set('installation_parameter', 'username', data['username'])
            config.set('installation_parameter', 'port', str(data['port']))
            config.set('installation_parameter', 'password', data['password'])
            config.set('installation_parameter', 'install_mode',
                       data['install_mode'])
            config.set('installation_parameter', 'plugin_version_old',
                       data['plugin_version_old'])
            config.set('installation_parameter', 'plugin_version_new',
                       data['plugin_version_new'])
            config.write(config_file)
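
A minimal usage sketch for the vCenter plugin service above (not part of the original example). All values are placeholders; `management_ip` must be an IP address actually configured on the TrueNAS system, and `vcenter.update` maps to the `do_update` method shown.

from middlewared.client import Client

with Client() as c:
    c.call('vcenter.update', {
        'action': 'INSTALL',
        'ip': '192.0.2.50',                        # vCenter host
        'port': 443,
        'username': 'administrator@vsphere.local',
        'password': 'secret',
        'management_ip': '192.0.2.20',             # TrueNAS-side IP the plugin will point at
    })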
Example #6
class ReplicationService(CRUDService):
    class Config:
        datastore = "storage.replication"
        datastore_prefix = "repl_"
        datastore_extend = "replication.extend"
        datastore_extend_context = "replication.extend_context"
        cli_namespace = "task.replication"

    @private
    async def extend_context(self, rows, extra):
        return {
            "state": await self.middleware.call("zettarepl.get_state"),
        }

    @private
    async def extend(self, data, context):
        data["periodic_snapshot_tasks"] = [{
            k.replace("task_", ""): v
            for k, v in task.items()
        } for task in data["periodic_snapshot_tasks"]]

        for task in data["periodic_snapshot_tasks"]:
            Cron.convert_db_format_to_schedule(task, begin_end=True)

        if data["direction"] == "PUSH":
            data["also_include_naming_schema"] = data["naming_schema"]
            data["naming_schema"] = []
        if data["direction"] == "PULL":
            data["also_include_naming_schema"] = []

        Cron.convert_db_format_to_schedule(data,
                                           "schedule",
                                           key_prefix="schedule_",
                                           begin_end=True)
        Cron.convert_db_format_to_schedule(data,
                                           "restrict_schedule",
                                           key_prefix="restrict_schedule_",
                                           begin_end=True)

        if "error" in context["state"]:
            data["state"] = context["state"]["error"]
        else:
            data["state"] = context["state"]["tasks"].get(
                f"replication_task_{data['id']}", {
                    "state": "PENDING",
                })

        data["job"] = data["state"].pop("job", None)

        return data

    @private
    async def compress(self, data):
        if data["direction"] == "PUSH":
            data["naming_schema"] = data["also_include_naming_schema"]
        del data["also_include_naming_schema"]

        Cron.convert_schedule_to_db_format(data,
                                           "schedule",
                                           key_prefix="schedule_",
                                           begin_end=True)
        Cron.convert_schedule_to_db_format(data,
                                           "restrict_schedule",
                                           key_prefix="restrict_schedule_",
                                           begin_end=True)

        del data["periodic_snapshot_tasks"]

        return data

    @accepts(
        Dict(
            "replication_create",
            Str("name", required=True),
            Str("direction", enum=["PUSH", "PULL"], required=True),
            Str("transport",
                enum=["SSH", "SSH+NETCAT", "LOCAL"],
                required=True),
            Int("ssh_credentials", null=True, default=None),
            Str("netcat_active_side",
                enum=["LOCAL", "REMOTE"],
                null=True,
                default=None),
            Str("netcat_active_side_listen_address", null=True, default=None),
            Int("netcat_active_side_port_min",
                null=True,
                default=None,
                validators=[Port()]),
            Int("netcat_active_side_port_max",
                null=True,
                default=None,
                validators=[Port()]),
            Str("netcat_passive_side_connect_address", null=True,
                default=None),
            List("source_datasets", items=[Dataset("dataset")], empty=False),
            Dataset("target_dataset", required=True),
            Bool("recursive", required=True),
            List("exclude", items=[Dataset("dataset")]),
            Bool("properties", default=True),
            List("properties_exclude", items=[Str("property", empty=False)]),
            Dict("properties_override", additional_attrs=True),
            Bool("replicate", default=False),
            Bool("encryption", default=False),
            Str("encryption_key", null=True, default=None),
            Str("encryption_key_format",
                enum=["HEX", "PASSPHRASE"],
                null=True,
                default=None),
            Str("encryption_key_location", null=True, default=None),
            List("periodic_snapshot_tasks",
                 items=[Int("periodic_snapshot_task")],
                 validators=[Unique()]),
            List("naming_schema",
                 items=[
                     Str("naming_schema",
                         validators=[ReplicationSnapshotNamingSchema()])
                 ]),
            List("also_include_naming_schema",
                 items=[
                     Str("naming_schema",
                         validators=[ReplicationSnapshotNamingSchema()])
                 ]),
            Bool("auto", required=True),
            Cron("schedule",
                 defaults={"minute": "00"},
                 begin_end=True,
                 null=True,
                 default=None),
            Cron("restrict_schedule",
                 defaults={"minute": "00"},
                 begin_end=True,
                 null=True,
                 default=None),
            Bool("only_matching_schedule", default=False),
            Bool("allow_from_scratch", default=False),
            Str("readonly", enum=["SET", "REQUIRE", "IGNORE"], default="SET"),
            Bool("hold_pending_snapshots", default=False),
            Str("retention_policy",
                enum=["SOURCE", "CUSTOM", "NONE"],
                required=True),
            Int("lifetime_value",
                null=True,
                default=None,
                validators=[Range(min=1)]),
            Str("lifetime_unit",
                null=True,
                default=None,
                enum=["HOUR", "DAY", "WEEK", "MONTH", "YEAR"]),
            Str("compression",
                enum=["LZ4", "PIGZ", "PLZIP"],
                null=True,
                default=None),
            Int("speed_limit",
                null=True,
                default=None,
                validators=[Range(min=1)]),
            Bool("large_block", default=True),
            Bool("embed", default=False),
            Bool("compressed", default=True),
            Int("retries", default=5, validators=[Range(min=1)]),
            Str("logging_level",
                enum=["DEBUG", "INFO", "WARNING", "ERROR"],
                null=True,
                default=None),
            Bool("enabled", default=True),
            register=True,
            strict=True,
        ))
    async def do_create(self, data):
        """
        Create a Replication Task

        Create a Replication Task that will push or pull ZFS snapshots to or from a remote host.

        * `name` specifies a name for replication task
        * `direction` specifies whether task will `PUSH` or `PULL` snapshots
        * `transport` is a method of snapshots transfer:
          * `SSH` transfers snapshots via SSH connection. This method is supported everywhere but does not achieve
            great performance
            `ssh_credentials` is a required field for this transport (Keychain Credential ID of type `SSH_CREDENTIALS`)
          * `SSH+NETCAT` uses unencrypted connection for data transfer. This can only be used in trusted networks
            and requires a port (specified by range from `netcat_active_side_port_min` to `netcat_active_side_port_max`)
            to be open on `netcat_active_side`
            `ssh_credentials` is also required for control connection
          * `LOCAL` replicates to or from localhost
        * `source_datasets` is a non-empty list of datasets to replicate snapshots from
        * `target_dataset` is a dataset to put snapshots into. It must exist on target side
        * `recursive` and `exclude` have the same meaning as for Periodic Snapshot Task
        * `properties` control whether we should send dataset properties along with snapshots
        * `periodic_snapshot_tasks` is a list of periodic snapshot task IDs that are sources of snapshots for this
          replication task. Only push replication tasks can be bound to periodic snapshot tasks.
        * `naming_schema` is a list of naming schemas for pull replication
        * `also_include_naming_schema` is a list of naming schemas for push replication
        * `auto` allows replication to run automatically on schedule or after bound periodic snapshot task
        * `schedule` is a schedule to run replication task. Only `auto` replication tasks without bound periodic
          snapshot tasks can have a schedule
        * `restrict_schedule` restricts when replication task with bound periodic snapshot tasks runs. For example,
          you can have periodic snapshot tasks that run every 15 minutes, but only run replication task every hour.
        * Enabling `only_matching_schedule` will only replicate snapshots that match `schedule` or
          `restrict_schedule`
        * `allow_from_scratch` will destroy all snapshots on target side and replicate everything from scratch if none
          of the snapshots on target side matches source snapshots
        * `readonly` controls destination datasets readonly property:
          * `SET` will set all destination datasets to readonly=on after finishing the replication
          * `REQUIRE` will require all existing destination datasets to have readonly=on property
          * `IGNORE` will avoid this kind of behavior
        * `hold_pending_snapshots` will prevent source snapshots from being deleted by retention if replication fails
          for some reason
        * `retention_policy` specifies how to delete old snapshots on target side:
          * `SOURCE` deletes snapshots that are absent on source side
          * `CUSTOM` deletes snapshots that are older than `lifetime_value` and `lifetime_unit`
          * `NONE` does not delete any snapshots
        * `compression` compresses SSH stream. Available only for SSH transport
        * `speed_limit` limits speed of SSH stream. Available only for SSH transport
        * `large_block`, `embed` and `compressed` are various ZFS stream flags documented in `man zfs send`
        * `retries` specifies number of retries before considering replication failed

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.create",
                "params": [{
                    "name": "Work Backup",
                    "direction": "PUSH",
                    "transport": "SSH",
                    "ssh_credentials": [12],
                    "source_datasets", ["data/work"],
                    "target_dataset": "repl/work",
                    "recursive": true,
                    "periodic_snapshot_tasks": [5],
                    "auto": true,
                    "restrict_schedule": {
                        "minute": "0",
                        "hour": "*/2",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    },
                    "only_matching_schedule": true,
                    "retention_policy": "CUSTOM",
                    "lifetime_value": 1,
                    "lifetime_unit": "WEEK",
                }]
            }
        """

        verrors = ValidationErrors()
        verrors.add_child("replication_create", await self._validate(data))

        if verrors:
            raise verrors

        periodic_snapshot_tasks = data["periodic_snapshot_tasks"]
        await self.compress(data)

        id = await self.middleware.call(
            "datastore.insert", self._config.datastore, data,
            {"prefix": self._config.datastore_prefix})

        await self._set_periodic_snapshot_tasks(id, periodic_snapshot_tasks)

        await self.middleware.call("zettarepl.update_tasks")

        return await self._get_instance(id)

    @accepts(Int("id"),
             Patch(
                 "replication_create",
                 "replication_update",
                 ("attr", {
                     "update": True
                 }),
             ))
    async def do_update(self, id, data):
        """
        Update a Replication Task with specific `id`

        See the documentation for `create` method for information on payload contents

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.update",
                "params": [
                    7,
                    {
                        "name": "Work Backup",
                        "direction": "PUSH",
                        "transport": "SSH",
                        "ssh_credentials": [12],
                        "source_datasets", ["data/work"],
                        "target_dataset": "repl/work",
                        "recursive": true,
                        "periodic_snapshot_tasks": [5],
                        "auto": true,
                        "restrict_schedule": {
                            "minute": "0",
                            "hour": "*/2",
                            "dom": "*",
                            "month": "*",
                            "dow": "1,2,3,4,5",
                            "begin": "09:00",
                            "end": "18:00"
                        },
                        "only_matching_schedule": true,
                        "retention_policy": "CUSTOM",
                        "lifetime_value": 1,
                        "lifetime_unit": "WEEK",
                    }
                ]
            }
        """

        old = await self._get_instance(id)

        new = old.copy()
        if new["ssh_credentials"]:
            new["ssh_credentials"] = new["ssh_credentials"]["id"]
        new["periodic_snapshot_tasks"] = [
            task["id"] for task in new["periodic_snapshot_tasks"]
        ]
        new.update(data)

        verrors = ValidationErrors()
        verrors.add_child("replication_update", await self._validate(new, id))

        if verrors:
            raise verrors

        periodic_snapshot_tasks = new["periodic_snapshot_tasks"]
        await self.compress(new)

        new.pop("state", None)
        new.pop("job", None)

        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self._set_periodic_snapshot_tasks(id, periodic_snapshot_tasks)

        await self.middleware.call("zettarepl.update_tasks")

        return await self._get_instance(id)

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete a Replication Task with specific `id`

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.delete",
                "params": [
                    1
                ]
            }
        """

        response = await self.middleware.call("datastore.delete",
                                              self._config.datastore, id)

        await self.middleware.call("zettarepl.update_tasks")

        return response

    @item_method
    @accepts(Int("id"), Bool("really_run", default=True, hidden=True))
    @job(logs=True)
    async def run(self, job, id, really_run):
        """
        Run Replication Task of `id`.
        """
        if really_run:
            task = await self._get_instance(id)

            if not task["enabled"]:
                raise CallError("Task is not enabled")

            if task["state"]["state"] == "RUNNING":
                raise CallError("Task is already running")

            if task["state"]["state"] == "HOLD":
                raise CallError("Task is on hold")

        await self.middleware.call("zettarepl.run_replication_task", id,
                                   really_run, job)

    @accepts(
        Patch(
            "replication_create",
            "replication_run_onetime",
            ("rm", {
                "name": "name"
            }),
            ("rm", {
                "name": "auto"
            }),
            ("rm", {
                "name": "schedule"
            }),
            ("rm", {
                "name": "only_matching_schedule"
            }),
            ("rm", {
                "name": "enabled"
            }),
        ), )
    @job(logs=True)
    async def run_onetime(self, job, data):
        """
        Run replication task without creating it.
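
        An illustrative sketch of a one-time local replication call; the dataset names, naming schema and
        retention policy shown are assumptions, not recommended values:

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.run_onetime",
                "params": [{
                    "direction": "PUSH",
                    "transport": "LOCAL",
                    "source_datasets": ["data/work"],
                    "target_dataset": "backup/work",
                    "recursive": true,
                    "also_include_naming_schema": ["auto-%Y-%m-%d_%H-%M"],
                    "retention_policy": "NONE"
                }]
            }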
        """

        data["name"] = f"Temporary replication task for job {job.id}"
        data["schedule"] = None
        data["only_matching_schedule"] = False
        data["auto"] = False
        data["enabled"] = True

        verrors = ValidationErrors()
        verrors.add_child("replication_run_onetime", await
                          self._validate(data))

        if verrors:
            raise verrors

        await self.middleware.call("zettarepl.run_onetime_replication_task",
                                   job, data)

    async def _validate(self, data, id=None):
        verrors = ValidationErrors()

        await self._ensure_unique(verrors, "", "name", data["name"], id)

        # Direction

        snapshot_tasks = []

        if data["direction"] == "PUSH":
            e, snapshot_tasks = await self._query_periodic_snapshot_tasks(
                data["periodic_snapshot_tasks"])
            verrors.add_child("periodic_snapshot_tasks", e)

            if data["naming_schema"]:
                verrors.add("naming_schema",
                            "This field has no sense for push replication")

            if not snapshot_tasks and not data["also_include_naming_schema"]:
                verrors.add(
                    "periodic_snapshot_tasks",
                    "You must at least either bind a periodic snapshot task or provide "
                    "\"Also Include Naming Schema\" for push replication task")

            if data["schedule"] is None and data[
                    "auto"] and not data["periodic_snapshot_tasks"]:
                verrors.add(
                    "auto",
                    "Push replication that runs automatically must be either "
                    "bound to a periodic snapshot task or have a schedule")

        if data["direction"] == "PULL":
            if data["schedule"] is None and data["auto"]:
                verrors.add(
                    "auto",
                    "Pull replication that runs automatically must have a schedule"
                )

            if data["periodic_snapshot_tasks"]:
                verrors.add(
                    "periodic_snapshot_tasks",
                    "Pull replication can't be bound to a periodic snapshot task"
                )

            if not data["naming_schema"]:
                verrors.add("naming_schema",
                            "Naming schema is required for pull replication")

            if data["also_include_naming_schema"]:
                verrors.add("also_include_naming_schema",
                            "This field has no sense for pull replication")

            if data["hold_pending_snapshots"]:
                verrors.add(
                    "hold_pending_snapshots",
                    "Pull replication tasks can't hold pending snapshots because "
                    "they don't do source retention")

        # Transport

        if data["transport"] == "SSH+NETCAT":
            if data["netcat_active_side"] is None:
                verrors.add(
                    "netcat_active_side",
                    "You must choose active side for SSH+netcat replication")

            if data["netcat_active_side_port_min"] is not None and data[
                    "netcat_active_side_port_max"] is not None:
                if data["netcat_active_side_port_min"] > data[
                        "netcat_active_side_port_max"]:
                    verrors.add(
                        "netcat_active_side_port_max",
                        "Please specify value greater or equal than netcat_active_side_port_min"
                    )

            if data["compression"] is not None:
                verrors.add(
                    "compression",
                    "Compression is not supported for SSH+netcat replication")

            if data["speed_limit"] is not None:
                verrors.add(
                    "speed_limit",
                    "Speed limit is not supported for SSH+netcat replication")
        else:
            if data["netcat_active_side"] is not None:
                verrors.add(
                    "netcat_active_side",
                    "This field only has sense for SSH+netcat replication")

            for k in [
                    "netcat_active_side_listen_address",
                    "netcat_active_side_port_min",
                    "netcat_active_side_port_max",
                    "netcat_passive_side_connect_address"
            ]:
                if data[k] is not None:
                    verrors.add(
                        k,
                        "This field only has sense for SSH+netcat replication")

        if data["transport"] == "LOCAL":
            if data["ssh_credentials"] is not None:
                verrors.add(
                    "ssh_credentials",
                    "Remote credentials have no sense for local replication")

            if data["compression"] is not None:
                verrors.add("compression",
                            "Compression has no sense for local replication")

            if data["speed_limit"] is not None:
                verrors.add("speed_limit",
                            "Speed limit has no sense for local replication")
        else:
            if data["ssh_credentials"] is None:
                verrors.add(
                    "ssh_credentials",
                    "SSH Credentials are required for non-local replication")
            else:
                try:
                    await self.middleware.call(
                        "keychaincredential.get_of_type",
                        data["ssh_credentials"], "SSH_CREDENTIALS")
                except CallError as e:
                    verrors.add("ssh_credentials", str(e))

        # Common for all directions and transports

        for i, source_dataset in enumerate(data["source_datasets"]):
            for snapshot_task in snapshot_tasks:
                if is_child(source_dataset, snapshot_task["dataset"]):
                    if data["recursive"]:
                        for exclude in snapshot_task["exclude"]:
                            if is_child(exclude, source_dataset
                                        ) and exclude not in data["exclude"]:
                                verrors.add(
                                    "exclude",
                                    f"You should exclude {exclude!r} as bound periodic snapshot "
                                    f"task dataset {snapshot_task['dataset']!r} does"
                                )
                    else:
                        if source_dataset in snapshot_task["exclude"]:
                            verrors.add(
                                f"source_datasets.{i}",
                                f"Dataset {source_dataset!r} is excluded by bound "
                                f"periodic snapshot task for dataset "
                                f"{snapshot_task['dataset']!r}")

        if not data["recursive"] and data["exclude"]:
            verrors.add(
                "exclude",
                "Excluding child datasets is only supported for recursive replication"
            )

        for i, v in enumerate(data["exclude"]):
            if not any(
                    v.startswith(ds + "/") for ds in data["source_datasets"]):
                verrors.add(
                    f"exclude.{i}",
                    "This dataset is not a child of any of source datasets")

        if data["replicate"]:
            if not data["recursive"]:
                verrors.add(
                    "recursive",
                    "This option is required for full filesystem replication")

            if data["exclude"]:
                verrors.add(
                    "exclude",
                    "This option is not supported for full filesystem replication"
                )

            if not data["properties"]:
                verrors.add(
                    "properties",
                    "This option is required for full filesystem replication")

        if data["encryption"]:
            for k in [
                    "encryption_key", "encryption_key_format",
                    "encryption_key_location"
            ]:
                if data[k] is None:
                    verrors.add(
                        k,
                        "This property is required when remote dataset encryption is enabled"
                    )

        if data["schedule"]:
            if not data["auto"]:
                verrors.add(
                    "schedule",
                    "You can't have schedule for replication that does not run automatically"
                )
        else:
            if data["only_matching_schedule"]:
                verrors.add(
                    "only_matching_schedule",
                    "You can't have only-matching-schedule without schedule")

        if data["retention_policy"] == "CUSTOM":
            if data["lifetime_value"] is None:
                verrors.add(
                    "lifetime_value",
                    "This field is required for custom retention policy")
            if data["lifetime_unit"] is None:
                verrors.add(
                    "lifetime_value",
                    "This field is required for custom retention policy")
        else:
            if data["lifetime_value"] is not None:
                verrors.add(
                    "lifetime_value",
                    "This field has no sense for specified retention policy")
            if data["lifetime_unit"] is not None:
                verrors.add(
                    "lifetime_unit",
                    "This field has no sense for specified retention policy")

        if data["enabled"]:
            for i, snapshot_task in enumerate(snapshot_tasks):
                if not snapshot_task["enabled"]:
                    verrors.add(
                        f"periodic_snapshot_tasks.{i}",
                        "You can't bind disabled periodic snapshot task to enabled replication task"
                    )

        return verrors

    async def _set_periodic_snapshot_tasks(self, replication_task_id,
                                           periodic_snapshot_tasks_ids):
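        # Replace the join-table links for this replication task: delete any existing links, then
        # insert one row per bound periodic snapshot task.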
        await self.middleware.call(
            "datastore.delete",
            "storage.replication_repl_periodic_snapshot_tasks",
            [["replication_id", "=", replication_task_id]])
        for periodic_snapshot_task_id in periodic_snapshot_tasks_ids:
            await self.middleware.call(
                "datastore.insert",
                "storage.replication_repl_periodic_snapshot_tasks",
                {
                    "replication_id": replication_task_id,
                    "task_id": periodic_snapshot_task_id,
                },
            )

    async def _query_periodic_snapshot_tasks(self, ids):
        verrors = ValidationErrors()

        query_result = await self.middleware.call("pool.snapshottask.query",
                                                  [["id", "in", ids]])

        snapshot_tasks = []
        for i, task_id in enumerate(ids):
            for task in query_result:
                if task["id"] == task_id:
                    snapshot_tasks.append(task)
                    break
            else:
                verrors.add(str(i), "This snapshot task does not exist")

        return verrors, snapshot_tasks

    @accepts(
        Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL"], required=True),
        Int("ssh_credentials", null=True, default=None))
    async def list_datasets(self, transport, ssh_credentials):
        """
        List datasets on the remote side

        Accepts `transport` and SSH credentials ID (for non-local transport)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.list_datasets",
                "params": [
                    "SSH",
                    7
                ]
            }
        """

        return await self.middleware.call("zettarepl.list_datasets", transport,
                                          ssh_credentials)

    @accepts(Str("dataset", required=True),
             Str("transport",
                 enum=["SSH", "SSH+NETCAT", "LOCAL"],
                 required=True), Int("ssh_credentials",
                                     null=True,
                                     default=None))
    async def create_dataset(self, dataset, transport, ssh_credentials):
        """
        Creates a dataset on the remote side

        Accepts `dataset` name, `transport` and SSH credentials ID (for non-local transport)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.create_dataset",
                "params": [
                    "repl/work",
                    "SSH",
                    7
                ]
            }
        """

        return await self.middleware.call("zettarepl.create_dataset", dataset,
                                          transport, ssh_credentials)

    @accepts()
    async def list_naming_schemas(self):
        """
        List all naming schemas used in periodic snapshot and replication tasks.
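
        An illustrative call sketch (the method takes no arguments):

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.list_naming_schemas",
                "params": []
            }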
        """
        naming_schemas = []
        for snapshottask in await self.middleware.call(
                "pool.snapshottask.query"):
            naming_schemas.append(snapshottask["naming_schema"])
        for replication in await self.middleware.call("replication.query"):
            naming_schemas.extend(replication["naming_schema"])
            naming_schemas.extend(replication["also_include_naming_schema"])
        return sorted(set(naming_schemas))

    @accepts(
        List("datasets", empty=False, items=[Dataset("dataset")]),
        List("naming_schema",
             empty=False,
             items=[
                 Str("naming_schema",
                     validators=[ReplicationSnapshotNamingSchema()])
             ]),
        Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL"], required=True),
        Int("ssh_credentials", null=True, default=None),
    )
    async def count_eligible_manual_snapshots(self, datasets, naming_schema,
                                              transport, ssh_credentials):
        """
        Count how many existing snapshots of `datasets` match `naming_schema`.

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.count_eligible_manual_snapshots",
                "params": [
                    "repl/work",
                    ["auto-%Y-%m-%d_%H-%M"],
                    "SSH",
                    4
                ]
            }
        """
        return await self.middleware.call(
            "zettarepl.count_eligible_manual_snapshots", datasets,
            naming_schema, transport, ssh_credentials)

    @accepts(
        Str("direction", enum=["PUSH", "PULL"], required=True),
        List("source_datasets",
             items=[Dataset("dataset")],
             required=True,
             empty=False),
        Dataset("target_dataset", required=True),
        Str("transport",
            enum=["SSH", "SSH+NETCAT", "LOCAL", "LEGACY"],
            required=True),
        Int("ssh_credentials", null=True, default=None),
    )
    async def target_unmatched_snapshots(self, direction, source_datasets,
                                         target_dataset, transport,
                                         ssh_credentials):
        """
        Check if target has any snapshots that do not exist on source.

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.target_unmatched_snapshots",
                "params": [
                    "PUSH",
                    ["repl/work", "repl/games"],
                    "backup",
                    "SSH",
                    4
                ]
            }

        Returns

            {
                "backup/work": ["auto-2019-10-15_13-00", "auto-2019-10-15_09-00"],
                "backup/games": ["auto-2019-10-15_13-00"],
            }
        """
        return await self.middleware.call(
            "zettarepl.target_unmatched_snapshots", direction, source_datasets,
            target_dataset, transport, ssh_credentials)

    @private
    def new_snapshot_name(self, naming_schema):
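        # For example, a naming schema like "auto-%Y-%m-%d_%H-%M" (illustrative) yields names such as
        # "auto-2019-10-15_13-00".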
        return datetime.now().strftime(naming_schema)

    # Legacy pair support
    @private
    @accepts(
        Dict(
            "replication-pair-data",
            Str("hostname", required=True),
            Str("public-key", required=True),
            Str("user", null=True),
        ))
    async def pair(self, data):
        result = await self.middleware.call(
            "keychaincredential.ssh_pair", {
                "remote_hostname": data["hostname"],
                "username": data["user"] or "root",
                "public_key": data["public-key"],
            })
        return {
            "ssh_port": result["port"],
            "ssh_hostkey": result["host_key"],
        }
Exemple #7
0
class UPSService(SystemServiceService):
    DRIVERS_AVAILABLE = set(os.listdir(DRIVER_BIN_DIR))

    class Config:
        datastore = 'services.ups'
        datastore_prefix = 'ups_'
        datastore_extend = 'ups.ups_config_extend'
        service = 'ups'
        service_verb = 'restart'

    @private
    async def ups_config_extend(self, data):
        data['mode'] = data['mode'].upper()
        data['shutdown'] = data['shutdown'].upper()
        data['toemail'] = [v for v in data['toemail'].split(';') if v]
        host = 'localhost' if data['mode'] == 'MASTER' else data['remotehost']
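        # The resulting format is "<identifier>@<host>:<remoteport>", e.g. "myups@localhost:3493" (illustrative values).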
        data['complete_identifier'] = f'{data["identifier"]}@{host}:{data["remoteport"]}'
        return data

    @accepts()
    async def port_choices(self):
        ports = [x for x in glob.glob('/dev/cua*') if x.find('.') == -1]
        ports.extend(glob.glob('/dev/ugen*'))
        ports.extend(glob.glob('/dev/uhid*'))
        ports.append('auto')
        return ports

    @accepts()
    def driver_choices(self):
        """
        Returns choices of UPS drivers supported by the system.
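
        The result maps a `driver$model` key to a human readable description, for example (illustrative entry):
        `'blazer_ser$CPM-800': 'WinPower ups 2 CPM-800 (blazer_ser)'`.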
        """
        ups_choices = {}
        if os.path.exists("/conf/base/etc/local/nut/driver.list"):
            with open('/conf/base/etc/local/nut/driver.list', 'rb') as f:
                d = f.read().decode('utf8', 'ignore')
            r = io.StringIO()
            for line in re.sub(r'[ \t]+', ' ', d, flags=re.M).split('\n'):
                r.write(line.strip() + '\n')
            r.seek(0)
            reader = csv.reader(r, delimiter=' ', quotechar='"')
            for row in reader:
                if len(row) == 0 or row[0].startswith('#'):
                    continue
                if row[-2] == '#':
                    last = -3
                else:
                    last = -1
                driver_str = row[last]
                driver_annotation = ''
                m = re.match(r'(.+) \((.+)\)', driver_str)  # "blazer_usb (USB ID 0665:5161)"
                if m:
                    driver_str, driver_annotation = m.group(1), m.group(2)
                for driver in driver_str.split(' or '):  # can be "blazer_ser or blazer_usb"
                    driver = driver.strip()
                    if driver not in self.DRIVERS_AVAILABLE:
                        continue
                    for i, field in enumerate(list(row)):
                        row[i] = field
                    ups_choices['$'.join([driver, row[3]])] = '%s (%s)' % (
                        ' '.join(filter(None, row[0:last])),
                        ', '.join(filter(None, [driver, driver_annotation]))
                    )
        return ups_choices

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        driver = data.get('driver')
        if driver:
            if driver not in (await self.middleware.call('ups.driver_choices')).keys():
                verrors.add(
                    f'{schema}.driver',
                    'Driver selected does not match local machine\'s driver list'
                )

        identifier = data['identifier']
        if identifier:
            if not re.search(r'^[a-z0-9\.\-_]+$', identifier, re.I):
                verrors.add(
                    f'{schema}.identifier',
                    'Use alphanumeric characters, ".", "-" and "_"'
                )

        for field in [field for field in ['monpwd', 'monuser'] if data.get(field)]:
            if re.search(r'[ #]', data[field], re.I):
                verrors.add(
                    f'{schema}.{field}',
                    'Spaces or number signs are not allowed'
                )

        mode = data.get('mode')
        if mode == 'MASTER':
            for field in filter(
                lambda f: not data[f],
                ['port', 'driver']
            ):
                verrors.add(
                    f'{schema}.{field}',
                    'This field is required'
                )
        else:
            if not data.get('remotehost'):
                verrors.add(
                    f'{schema}.remotehost',
                    'This field is required'
                )

        to_emails = data.get('toemail')
        if to_emails:
            data['toemail'] = ';'.join(to_emails)
        else:
            data['toemail'] = ''

        data['mode'] = data['mode'].lower()
        data['shutdown'] = data['shutdown'].lower()

        return verrors, data

    @accepts(
        Dict(
            'ups_update',
            Bool('emailnotify'),
            Bool('powerdown'),
            Bool('rmonitor'),
            Int('nocommwarntime', null=True),
            Int('remoteport', validators=[Port()]),
            Int('shutdowntimer'),
            Int('hostsync', validators=[Range(min=0)]),
            Str('description'),
            Str('driver'),
            Str('extrausers', max_length=None),
            Str('identifier', empty=False),
            Str('mode', enum=['MASTER', 'SLAVE']),
            Str('monpwd', empty=False),
            Str('monuser', empty=False),
            Str('options', max_length=None),
            Str('optionsupsd', max_length=None),
            Str('port'),
            Str('remotehost'),
            Str('shutdown', enum=['LOWBATT', 'BATT']),
            Str('shutdowncmd', null=True),
            Str('subject'),
            List('toemail', items=[Str('email', validators=[Email()])]),
            update=True
        )
    )
    async def do_update(self, data):
        """
        Update UPS Service Configuration.

        `emailnotify`, when enabled, sends out notifications of different UPS events via email.

        `powerdown`, when enabled, sets the UPS to power off after shutting down the system.

        `nocommwarntime` is the number of seconds the UPS Service waits before alerting that it cannot reach the
        configured UPS.

        `shutdowntimer` is the number of seconds the Service waits for the UPS before initiating a shutdown. This
        only applies when `shutdown` is set to "BATT".

        `shutdowncmd` is the command which is executed to initiate a shutdown. It defaults to "poweroff".

        `toemail` is a list of email addresses to which notification emails are sent.
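
        An illustrative call sketch; all field values shown are assumptions rather than recommended settings:

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "ups.update",
                "params": [{
                    "emailnotify": true,
                    "toemail": ["admin@example.com"],
                    "shutdown": "BATT",
                    "shutdowntimer": 120
                }]
            }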
        """
        config = await self.config()
        config.pop('complete_identifier')
        old_config = config.copy()
        config.update(data)
        verrors, config = await self.validate_data(config, 'ups_update')
        if verrors:
            raise verrors

        old_config['mode'] = old_config['mode'].lower()
        old_config['shutdown'] = old_config['shutdown'].lower()
        old_config['toemail'] = ';'.join(old_config['toemail']) if old_config['toemail'] else ''

        if len(set(old_config.items()) ^ set(config.items())) > 0:
            if config['identifier'] != old_config['identifier']:
                await self.dismiss_alerts()

            await self._update_service(old_config, config)

        return await self.config()

    @private
    async def alerts_mapping(self):
        return {
            'LOWBATT': 'UPSBatteryLow',
            'COMMBAD': 'UPSCommbad',
            'COMMOK': 'UPSCommok',
            'ONBATT': 'UPSOnBattery',
            'ONLINE': 'UPSOnline',
            'REPLBATT': 'UPSReplbatt'
        }

    @private
    async def dismiss_alerts(self):
        config = await self.config()

        for alert in (await self.alerts_mapping()).values():
            await self.middleware.call('alert.oneshot_delete', alert, {'ups': config['identifier']})

    @private
    @accepts(
        Str('notify_type')
    )
    async def upssched_event(self, notify_type):
        config = await self.config()
        upsc_identifier = config['complete_identifier']
        if notify_type.lower() == 'shutdown':
            # Before we start FSD with upsmon, lets ensure that ups is not ONLINE (OL).
            # There are cases where battery/charger issues can result in ups.status being "OL LB" at the
            # same time. This will ensure that we don't initiate a shutdown if ups is OL.
            stats_output = (
                await run(
                    '/usr/local/bin/upsc', upsc_identifier,
                    check=False
                )
            ).stdout

            ups_status = re.findall(
                fr'ups.status: (.*)',
                '' if not stats_output else stats_output.decode()
            )
            if ups_status and 'ol' in ups_status[0].lower():
                self.middleware.logger.debug(
                    f'Shutdown not initiated as ups.status ({ups_status[0]}) indicates '
                    f'{config["identifier"]} is ONLINE (OL).'
                )
            else:
                syslog.syslog(syslog.LOG_NOTICE, 'upssched-cmd "issuing shutdown"')
                await run('/usr/local/sbin/upsmon', '-c', 'fsd', check=False)
        elif 'notify' in notify_type.lower():
            # notify_type is expected to be of the following format
            # NOTIFY-EVENT i.e NOTIFY-LOWBATT
            notify_type = notify_type.split('-')[-1]

            # We would like to send alerts for the following events
            alert_mapping = await self.alerts_mapping()

            await self.dismiss_alerts()

            if notify_type in alert_mapping:
                await self.middleware.call(
                    'alert.oneshot_create', alert_mapping[notify_type], {'ups': config['identifier']}
                )

            if config['emailnotify']:
                # Email user with the notification event and details
                # We send the email in the following format ( inclusive line breaks )

                # NOTIFICATION: 'LOWBATT'
                # UPS: 'ups'
                #
                # Statistics recovered:
                #
                # 1) Battery charge (percent)
                # battery.charge: 5
                #
                # 2) Remaining battery level when UPS switches to LB (percent)
                # battery.charge.low: 10
                #
                # 3) Battery runtime (seconds)
                # battery.runtime: 1860
                #
                # 4) Remaining battery runtime when UPS switches to LB (seconds)
                # battery.runtime.low: 900

                ups_name = config['identifier']
                hostname = (await self.middleware.call('system.info'))['hostname']
                current_time = datetime.datetime.now(tz=dateutil.tz.tzlocal()).strftime('%a %b %d %H:%M:%S %Z %Y')
                ups_subject = config['subject'].replace('%d', current_time).replace('%h', hostname)
                body = f'NOTIFICATION: {notify_type!r}<br>UPS: {ups_name!r}<br><br>'

                # Let's gather following stats
                data_points = {
                    'battery.charge': 'Battery charge (percent)',
                    'battery.charge.low': 'Battery level remaining (percent) when UPS switches to Low Battery (LB)',
                    'battery.charge.status': 'Battery charge status',
                    'battery.runtime': 'Battery runtime (seconds)',
                    'battery.runtime.low': 'Battery runtime remaining (seconds) when UPS switches to Low Battery (LB)',
                    'battery.runtime.restart': 'Minimum battery runtime (seconds) to allow UPS restart after power-off',
                }

                stats_output = (
                    await run('/usr/local/bin/upsc', upsc_identifier, check=False)
                ).stdout
                recovered_stats = re.findall(
                    fr'({"|".join(data_points)}): (.*)',
                    '' if not stats_output else stats_output.decode()
                )

                if recovered_stats:
                    body += 'Statistics recovered:<br><br>'
                    # recovered_stats is expected to be a list in this format
                    # [('battery.charge', '5'), ('battery.charge.low', '10'), ('battery.runtime', '1860')]
                    for index, stat in enumerate(recovered_stats):
                        body += f'{index + 1}) {data_points[stat[0]]}<br>  {stat[0]}: {stat[1]}<br><br>'

                else:
                    body += 'Statistics could not be recovered<br>'

                # Subject and body defined, send email
                job = await self.middleware.call(
                    'mail.send', {
                        'subject': ups_subject,
                        'text': body,
                        'to': config['toemail']
                    }
                )

                await job.wait()
                if job.error:
                    self.middleware.logger.debug(f'Failed to send UPS status email: {job.error}')

        else:
            self.middleware.logger.debug(f'Unrecognized UPS notification event: {notify_type}')
Exemple #8
0
class UPSService(SystemServiceService):

    ENTRY = Dict(
        'ups_entry',
        Bool('powerdown', required=True),
        Bool('rmonitor', required=True),
        Int('id', required=True),
        Int('nocommwarntime', null=True, required=True),
        Int('remoteport', validators=[Port()], required=True),
        Int('shutdowntimer', required=True),
        Int('hostsync', validators=[Range(min=0)], required=True),
        Str('description', required=True),
        Str('driver', required=True),
        Str('extrausers', max_length=None, required=True),
        Str('identifier', empty=False, required=True),
        Str('mode', enum=['MASTER', 'SLAVE'], required=True),
        Str('monpwd', required=True),
        Str('monuser', empty=False, required=True),
        Str('options', max_length=None, required=True),
        Str('optionsupsd', max_length=None, required=True),
        Str('port', required=True),
        Str('remotehost', required=True),
        Str('shutdown', enum=['LOWBATT', 'BATT'], required=True),
        Str('shutdowncmd', null=True, required=True),
        Str('complete_identifier', required=True),
    )

    class Config:
        datastore = 'services.ups'
        datastore_prefix = 'ups_'
        datastore_extend = 'ups.ups_config_extend'
        service = 'ups'
        service_verb = 'restart'
        cli_namespace = 'service.ups'

    @private
    async def ups_config_extend(self, data):
        data['mode'] = data['mode'].upper()
        data['shutdown'] = data['shutdown'].upper()
        host = 'localhost' if data['mode'] == 'MASTER' else data['remotehost']
        data[
            'complete_identifier'] = f'{data["identifier"]}@{host}:{data["remoteport"]}'
        return data

    @accepts()
    @returns(List(items=[Str('port_choice')]))
    async def port_choices(self):
        ports = [
            os.path.join('/dev', port['name'])
            for port in await self.middleware.call('device.get_serials')
        ]
        ports.extend(glob.glob('/dev/uhid*'))
        ports.append('auto')
        return ports

    @private
    def normalize_driver_string(self, driver_str):
        driver = driver_str.split('$')[0]
        driver = driver.split('(')[0]  # "blazer_usb (USB ID 0665:5161)"
        driver = driver.split(' or ')[0]  # "blazer_ser or blazer_usb"
        driver = driver.replace(' ', '\n\t')  # "genericups upstype=16"
        return f'driver = {driver}'

    @accepts()
    @returns(
        Dict(additional_attrs=True,
             example={
                 'blazer_ser$CPM-800': 'WinPower ups 2 CPM-800 (blazer_ser)'
             }))
    def driver_choices(self):
        """
        Returns choices of UPS drivers supported by the system.
        """
        ups_choices = {}
        driver_list = '/usr/share/nut/driver.list'
        if os.path.exists(driver_list):
            with open(driver_list, 'r') as f:
                d = f.read()
            r = io.StringIO()
            for line in re.sub(r'[ \t]+', ' ', d, flags=re.M).split('\n'):
                r.write(line.strip() + '\n')
            r.seek(0)
            reader = csv.reader(r, delimiter=' ', quotechar='"')
            for row in reader:
                if len(row) == 0 or row[0].startswith('#'):
                    continue
                if row[-2] == '#':
                    last = -3
                else:
                    last = -1
                driver_str = row[last]
                driver_options = ''
                driver_annotation = ''
                # We want to match following strings
                # genericups upstype=1
                # powerman-pdu (experimental)
                m = RE_DRIVER_CHOICE.match(driver_str)
                if m:
                    driver_str = m.group(1)
                    driver_options = m.group(2) or ''
                    driver_annotation = m.group(3) or ''
                for driver in driver_str.split(
                        ' or '):  # can be "blazer_ser or blazer_usb"
                    driver = driver.strip()
                    if driver not in drivers_available():
                        continue
                    for i, field in enumerate(list(row)):
                        row[i] = field
                    key = '$'.join([
                        driver +
                        (f' {driver_options}' if driver_options else ''),
                        row[3]
                    ])
                    val = f'{ups_choices[key]} / ' if key in ups_choices else ''
                    ups_choices[key] = val + '%s (%s)' % (' '.join(
                        filter(None, row[0:last])), ', '.join(
                            filter(None, [driver, driver_annotation])))
        return ups_choices

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        driver = data.get('driver')
        if driver:
            if driver not in (
                    await self.middleware.call('ups.driver_choices')).keys():
                verrors.add(
                    f'{schema}.driver',
                    'Driver selected does not match local machine\'s driver list'
                )

        port = data['port']
        if port:
            serial_port = os.path.join(
                '/dev',
                (await
                 self.middleware.call('system.advanced.config'))['serialport'])
            if serial_port == port:
                verrors.add(
                    f'{schema}.port',
                    'UPS port must be different from the port specified for '
                    'the serial console in system advanced settings')

        identifier = data['identifier']
        if identifier:
            if not re.search(r'^[a-z0-9\.\-_]+$', identifier, re.I):
                verrors.add(f'{schema}.identifier',
                            'Use alphanumeric characters, ".", "-" and "_"')

        for field in ['monpwd', 'monuser']:
            if not data.get(field):
                verrors.add(f'{schema}.{field}', 'This field is required.')
            elif re.search(r'[ #]', data[field], re.I):
                verrors.add(f'{schema}.{field}',
                            'Spaces or number signs are not allowed.')

        mode = data.get('mode')
        if mode == 'MASTER':
            for field in filter(lambda f: not data[f], ['port', 'driver']):
                verrors.add(f'{schema}.{field}', 'This field is required')
        else:
            if not data.get('remotehost'):
                verrors.add(f'{schema}.remotehost', 'This field is required')

        data['mode'] = data['mode'].lower()
        data['shutdown'] = data['shutdown'].lower()

        verrors.check()
        return data

    @accepts(
        Patch(
            'ups_entry',
            'ups_update',
            ('rm', {
                'name': 'id'
            }),
            ('rm', {
                'name': 'complete_identifier'
            }),
            ('edit', {
                'name': 'monpwd',
                'method': lambda x: setattr(x, 'empty', False)
            }),
            ('attr', {
                'update': True
            }),
        ), )
    async def do_update(self, data):
        """
        Update UPS Service Configuration.

        `powerdown`, when enabled, sets the UPS to power off after shutting down the system.

        `nocommwarntime` is the number of seconds the UPS Service waits before alerting that it cannot reach the
        configured UPS.

        `shutdowntimer` is the number of seconds the Service waits for the UPS before initiating a shutdown. This
        only applies when `shutdown` is set to "BATT".

        `shutdowncmd` is the command which is executed to initiate a shutdown. It defaults to "poweroff".
        """
        config = await self.config()
        config.pop('complete_identifier')
        old_config = config.copy()
        config.update(data)
        config = await self.validate_data(config, 'ups_update')

        old_config['mode'] = old_config['mode'].lower()
        old_config['shutdown'] = old_config['shutdown'].lower()

        if len(set(old_config.items()) ^ set(config.items())) > 0:
            if config['identifier'] != old_config['identifier']:
                await self.dismiss_alerts()

            await self._update_service(old_config, config)

        return await self.config()

    @private
    async def alerts_mapping(self):
        return {
            'LOWBATT': 'UPSBatteryLow',
            'COMMBAD': 'UPSCommbad',
            'COMMOK': 'UPSCommok',
            'ONBATT': 'UPSOnBattery',
            'ONLINE': 'UPSOnline',
            'REPLBATT': 'UPSReplbatt'
        }

    @private
    async def dismiss_alerts(self):
        for alert in (await self.alerts_mapping()).values():
            await self.middleware.call('alert.oneshot_delete', alert)

    @private
    @accepts(Str('notify_type'))
    async def upssched_event(self, notify_type):
        config = await self.config()
        upsc_identifier = config['complete_identifier']
        cp = await run('upsc', upsc_identifier, check=False)
        if cp.returncode:
            stats_output = ''
            self.logger.error('Failed to retrieve ups information: %s',
                              cp.stderr.decode())
        else:
            stats_output = cp.stdout.decode()

        if RE_TEST_IN_PROGRESS.search(stats_output):
            self.logger.debug(
                'Self test is in progress and %r notify event should be ignored',
                notify_type)
            return

        if notify_type.lower() == 'shutdown':
            # Before we start FSD with upsmon, lets ensure that ups is not ONLINE (OL).
            # There are cases where battery/charger issues can result in ups.status being "OL LB" at the
            # same time. This will ensure that we don't initiate a shutdown if ups is OL.
            ups_status = RE_UPS_STATUS.findall(stats_output)
            if ups_status and 'ol' in ups_status[0].lower():
                self.middleware.logger.debug(
                    f'Shutdown not initiated as ups.status ({ups_status[0]}) indicates '
                    f'{config["identifier"]} is ONLINE (OL).')
            else:
                # if we shutdown the active node while the passive is still online
                # then we're just going to cause a failover event. Shut the passive down
                # first and then shut the active node down
                if await self.middleware.call('failover.licensed'):
                    if await self.middleware.call('failover.status'
                                                  ) == 'MASTER':
                        syslog.syslog(
                            syslog.LOG_NOTICE,
                            'upssched-cmd "issuing shutdown" for passive node')
                        try:
                            await self.middleware.call('failover.call_remote',
                                                       'ups.upssched_event',
                                                       'shutdown')
                        except Exception as e:
                            syslog.syslog(
                                syslog.LOG_ERR,
                                f'failed shutting down passive node with error {e}'
                            )

                syslog.syslog(syslog.LOG_NOTICE,
                              'upssched-cmd "issuing shutdown"')
                await run('upsmon', '-c', 'fsd', check=False)

        elif 'notify' in notify_type.lower():
            # notify_type is expected to be of the following format
            # NOTIFY-EVENT i.e NOTIFY-LOWBATT
            notify_type = notify_type.split('-')[-1]

            # We would like to send alerts for the following events
            alert_mapping = await self.alerts_mapping()

            await self.dismiss_alerts()

            if notify_type in alert_mapping:
                # Notify the user of the notification event and details
                # We send the email in the following format ( inclusive line breaks )

                # UPS Statistics: 'ups'
                #
                # Statistics recovered:
                #
                # 1) Battery charge (percent)
                # battery.charge: 5
                #
                # 2) Remaining battery level when UPS switches to LB (percent)
                # battery.charge.low: 10
                #
                # 3) Battery runtime (seconds)
                # battery.runtime: 1860
                #
                # 4) Remaining battery runtime when UPS switches to LB (seconds)
                # battery.runtime.low: 900
                body = f'<br><br>UPS Statistics: {config["identifier"]!r}<br><br>'

                # Let's gather following stats
                data_points = {
                    'battery.charge':
                    'Battery charge (percent)',
                    'battery.charge.low':
                    'Battery level remaining (percent) when UPS switches to Low Battery (LB)',
                    'battery.charge.status':
                    'Battery charge status',
                    'battery.runtime':
                    'Battery runtime (seconds)',
                    'battery.runtime.low':
                    'Battery runtime remaining (seconds) when UPS switches to Low Battery (LB)',
                    'battery.runtime.restart':
                    'Minimum battery runtime (seconds) to allow UPS restart after power-off',
                }

                stats_output = (await run('upsc', upsc_identifier,
                                          check=False)).stdout
                recovered_stats = re.findall(
                    fr'({"|".join(data_points)}): (.*)',
                    '' if not stats_output else stats_output.decode())

                if recovered_stats:
                    body += 'Statistics recovered:<br><br>'
                    # recovered_stats is expected to be a list in this format
                    # [('battery.charge', '5'), ('battery.charge.low', '10'), ('battery.runtime', '1860')]
                    for index, stat in enumerate(recovered_stats):
                        body += f'{index + 1}) {data_points[stat[0]]}<br> ' \
                                f'&nbsp;&nbsp;&nbsp; {stat[0]}: {stat[1]}<br><br>'
                else:
                    body += 'Statistics could not be recovered<br>'

                await self.middleware.call('alert.oneshot_create',
                                           alert_mapping[notify_type], {
                                               'ups': config['identifier'],
                                               'body': body
                                           })
        else:
            self.middleware.logger.debug(
                f'Unrecognized UPS notification event: {notify_type}')
Exemple #9
0
class ReplicationService(CRUDService):
    class Config:
        datastore = "storage.replication"
        datastore_prefix = "repl_"
        datastore_extend = "replication.extend"
        datastore_extend_context = "replication.extend_context"

    @private
    async def extend_context(self):
        legacy_result, legacy_result_datetime = await self.middleware.run_in_thread(
            self._legacy_extend_context)

        return {
            "state": await self.middleware.call("zettarepl.get_state"),
            "legacy_result": legacy_result,
            "legacy_result_datetime": legacy_result_datetime,
        }

    def _legacy_extend_context(self):
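        # The legacy replication engine wrote its last results to /tmp/.repl-result as a pickle;
        # fall back to an empty result if that file is missing or unreadable.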
        try:
            with open("/tmp/.repl-result", "rb") as f:
                data = f.read()
                legacy_result = pickle.loads(data)
                legacy_result_datetime = datetime.fromtimestamp(
                    os.stat("/tmp/.repl-result").st_mtime)
        except Exception:
            legacy_result = defaultdict(dict)
            legacy_result_datetime = None

        return legacy_result, legacy_result_datetime

    @private
    async def extend(self, data, context):
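        # Convert raw datastore rows into the API representation: un-prefix the bound periodic snapshot
        # task fields, expand cron schedules and attach the current task state from `context`.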
        data["periodic_snapshot_tasks"] = [{
            k.replace("task_", ""): v
            for k, v in task.items()
        } for task in data["periodic_snapshot_tasks"]]

        for task in data["periodic_snapshot_tasks"]:
            Cron.convert_db_format_to_schedule(task, begin_end=True)

        if data["direction"] == "PUSH":
            data["also_include_naming_schema"] = data["naming_schema"]
            data["naming_schema"] = []
        if data["direction"] == "PULL":
            data["also_include_naming_schema"] = []

        Cron.convert_db_format_to_schedule(data,
                                           "schedule",
                                           key_prefix="schedule_",
                                           begin_end=True)
        Cron.convert_db_format_to_schedule(data,
                                           "restrict_schedule",
                                           key_prefix="restrict_schedule_",
                                           begin_end=True)

        if data["transport"] == 'LEGACY':
            if data["id"] in context["legacy_result"]:
                legacy_result = context["legacy_result"][data["id"]]

                msg = legacy_result.get("msg")
                if msg == "Running":
                    state = "RUNNING"
                elif msg in ["Succeeded", "Up to date"]:
                    state = "FINISHED"
                else:
                    state = "ERROR"

                data["state"] = {
                    "datetime": context["legacy_result_datetime"],
                    "state": state,
                    "last_snapshot": legacy_result.get("last_snapshot"),
                }

                if state == "ERROR":
                    data["state"]["error"] = msg
            else:
                data["state"] = {
                    "state": "PENDING",
                }
        else:
            data["state"] = context["state"].get(
                f"replication_task_{data['id']}", {
                    "state": "PENDING",
                })

        return data

    @private
    async def compress(self, data):
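        # Inverse of `extend`: collapse the API representation back into the flat database format
        # before the record is written to the datastore.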
        if data["direction"] == "PUSH":
            data["naming_schema"] = data["also_include_naming_schema"]
        del data["also_include_naming_schema"]

        Cron.convert_schedule_to_db_format(data,
                                           "schedule",
                                           key_prefix="schedule_",
                                           begin_end=True)
        Cron.convert_schedule_to_db_format(data,
                                           "restrict_schedule",
                                           key_prefix="restrict_schedule_",
                                           begin_end=True)

        del data["periodic_snapshot_tasks"]

        return data

    @accepts(
        Dict(
            "replication_create",
            Str("name", required=True),
            Str("direction", enum=["PUSH", "PULL"], required=True),
            Str("transport",
                enum=["SSH", "SSH+NETCAT", "LOCAL", "LEGACY"],
                required=True),
            Int("ssh_credentials", null=True, default=None),
            Str("netcat_active_side",
                enum=["LOCAL", "REMOTE"],
                null=True,
                default=None),
            Str("netcat_active_side_listen_address", null=True, default=None),
            Int("netcat_active_side_port_min",
                null=True,
                default=None,
                validators=[Port()]),
            Int("netcat_active_side_port_max",
                null=True,
                default=None,
                validators=[Port()]),
            Str("netcat_passive_side_connect_address", null=True,
                default=None),
            List("source_datasets",
                 items=[Path("dataset", empty=False)],
                 required=True,
                 empty=False),
            Path("target_dataset", required=True, empty=False),
            Bool("recursive", required=True),
            List("exclude", items=[Path("dataset", empty=False)], default=[]),
            List("periodic_snapshot_tasks",
                 items=[Int("periodic_snapshot_task")],
                 default=[],
                 validators=[Unique()]),
            List("naming_schema",
                 items=[
                     Str("naming_schema",
                         validators=[ReplicationSnapshotNamingSchema()])
                 ],
                 default=[]),
            List("also_include_naming_schema",
                 items=[
                     Str("naming_schema",
                         validators=[ReplicationSnapshotNamingSchema()])
                 ],
                 default=[]),
            Bool("auto", required=True),
            Cron("schedule",
                 defaults={"minute": "00"},
                 begin_end=True,
                 null=True,
                 default=None),
            Cron("restrict_schedule",
                 defaults={"minute": "00"},
                 begin_end=True,
                 null=True,
                 default=None),
            Bool("only_matching_schedule", default=False),
            Bool("allow_from_scratch", default=False),
            Bool("hold_pending_snapshots", default=False),
            Str("retention_policy",
                enum=["SOURCE", "CUSTOM", "NONE"],
                required=True),
            Int("lifetime_value",
                null=True,
                default=None,
                validators=[Range(min=1)]),
            Str("lifetime_unit",
                null=True,
                default=None,
                enum=["HOUR", "DAY", "WEEK", "MONTH", "YEAR"]),
            Str("compression",
                enum=["LZ4", "PIGZ", "PLZIP"],
                null=True,
                default=None),
            Int("speed_limit",
                null=True,
                default=None,
                validators=[Range(min=1)]),
            Bool("dedup", default=True),
            Bool("large_block", default=True),
            Bool("embed", default=True),
            Bool("compressed", default=True),
            Int("retries", default=5, validators=[Range(min=1)]),
            Str("logging_level",
                enum=["DEBUG", "INFO", "WARNING", "ERROR"],
                null=True,
                default=None),
            Bool("enabled", default=True),
            register=True,
            strict=True,
        ))
    async def do_create(self, data):
        """
        Create a Replication Task

        Create a Replication Task that will push or pull ZFS snapshots to or from a remote host.

        * `name` specifies a name for replication task
        * `direction` specifies whether task will `PUSH` or `PULL` snapshots
        * `transport` is the method of snapshot transfer:
          * `SSH` transfers snapshots via an SSH connection. This method is supported everywhere but does not achieve
            great performance.
            `ssh_credentials` is a required field for this transport (Keychain Credential ID of type `SSH_CREDENTIALS`)
          * `SSH+NETCAT` uses an unencrypted connection for data transfer. This can only be used in trusted networks
            and requires a port (specified by range from `netcat_active_side_port_min` to `netcat_active_side_port_max`)
            to be open on `netcat_active_side`.
            `ssh_credentials` is also required for the control connection
          * `LOCAL` replicates to or from localhost
          * `LEGACY` uses the legacy replication engine from FreeNAS versions prior to 11.3
        * `source_datasets` is a non-empty list of datasets to replicate snapshots from
        * `target_dataset` is a dataset to put snapshots into. It must exist on the target side
        * `recursive` and `exclude` have the same meaning as for Periodic Snapshot Task
        * `periodic_snapshot_tasks` is a list of periodic snapshot task IDs that are sources of snapshots for this
          replication task. Only push replication tasks can be bound to periodic snapshot tasks.
        * `naming_schema` is a list of naming schemas for pull replication
        * `also_include_naming_schema` is a list of naming schemas for push replication
        * `auto` allows replication to run automatically on schedule or after bound periodic snapshot task
        * `schedule` is a schedule to run replication task. Only `auto` replication tasks without bound periodic
          snapshot tasks can have a schedule
        * `restrict_schedule` restricts when replication task with bound periodic snapshot tasks runs. For example,
          you can have periodic snapshot tasks that run every 15 minutes, but only run replication task every hour.
        * Enabling `only_matching_schedule` will only replicate snapshots that match `schedule` or
          `restrict_schedule`
        * `allow_from_scratch` will destroy all snapshots on target side and replicate everything from scratch if none
          of the snapshots on target side matches source snapshots
        * `hold_pending_snapshots` will prevent source snapshots from being deleted by retention if replication fails
          for some reason
        * `retention_policy` specifies how to delete old snapshots on target side:
          * `SOURCE` deletes snapshots that are absent on source side
          * `CUSTOM` deletes snapshots that are older than `lifetime_value` and `lifetime_unit`
          * `NONE` does not delete any snapshots
        * `compression` compresses SSH stream. Available only for SSH transport
        * `speed_limit` limits speed of SSH stream. Available only for SSH transport
        * `dedup`, `large_block`, `embed` and `compressed` are various ZFS stream flags documented in `man zfs send`
        * `retries` specifies number of retries before considering replication failed

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.create",
                "params": [{
                    "name": "Work Backup",
                    "direction": "PUSH",
                    "transport": "SSH",
                    "ssh_credentials": [12],
                    "source_datasets", ["data/work"],
                    "target_dataset": "repl/work",
                    "recursive": true,
                    "periodic_snapshot_tasks": [5],
                    "auto": true,
                    "restrict_schedule": {
                        "minute": "0",
                        "hour": "*/2",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    },
                    "only_matching_schedule": true,
                    "retention_policy": "CUSTOM",
                    "lifetime_value": 1,
                    "lifetime_unit": "WEEK",
                }]
            }
        """

        verrors = ValidationErrors()
        verrors.add_child("replication_create", await self._validate(data))

        if verrors:
            raise verrors

        periodic_snapshot_tasks = data["periodic_snapshot_tasks"]
        await self.compress(data)

        id = await self.middleware.call(
            "datastore.insert", self._config.datastore, data,
            {"prefix": self._config.datastore_prefix})

        await self._set_periodic_snapshot_tasks(id, periodic_snapshot_tasks)

        await self.middleware.call("service.restart", "cron")
        await self.middleware.call("zettarepl.update_tasks")

        return await self._get_instance(id)

    @accepts(Int("id"),
             Patch(
                 "replication_create",
                 "replication_update",
                 ("attr", {
                     "update": True
                 }),
             ))
    async def do_update(self, id, data):
        """
        Update a Replication Task with specific `id`

        See the documentation for `create` method for information on payload contents

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.update",
                "params": [
                    7,
                    {
                        "name": "Work Backup",
                        "direction": "PUSH",
                        "transport": "SSH",
                        "ssh_credentials": [12],
                        "source_datasets", ["data/work"],
                        "target_dataset": "repl/work",
                        "recursive": true,
                        "periodic_snapshot_tasks": [5],
                        "auto": true,
                        "restrict_schedule": {
                            "minute": "0",
                            "hour": "*/2",
                            "dom": "*",
                            "month": "*",
                            "dow": "1,2,3,4,5",
                            "begin": "09:00",
                            "end": "18:00"
                        },
                        "only_matching_schedule": true,
                        "retention_policy": "CUSTOM",
                        "lifetime_value": 1,
                        "lifetime_unit": "WEEK",
                    }
                ]
            }
        """

        old = await self._get_instance(id)

        new = old.copy()
        if new["ssh_credentials"]:
            new["ssh_credentials"] = new["ssh_credentials"]["id"]
        new["periodic_snapshot_tasks"] = [
            task["id"] for task in new["periodic_snapshot_tasks"]
        ]
        new.update(data)

        verrors = ValidationErrors()
        verrors.add_child("replication_update", await self._validate(new))

        if verrors:
            raise verrors

        periodic_snapshot_tasks = new["periodic_snapshot_tasks"]
        await self.compress(new)

        new.pop('state', None)

        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self._set_periodic_snapshot_tasks(id, periodic_snapshot_tasks)

        await self.middleware.call("service.restart", "cron")
        await self.middleware.call("zettarepl.update_tasks")

        return await self._get_instance(id)

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete a Replication Task with specific `id`

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.delete",
                "params": [
                    1
                ]
            }
        """

        response = await self.middleware.call("datastore.delete",
                                              self._config.datastore, id)

        await self.middleware.call("service.restart", "cron")
        await self.middleware.call("zettarepl.update_tasks")

        return response

    @item_method
    @accepts(Int("id"))
    async def run(self, id):
        """
        Run Replication Task of `id`.
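
        An illustrative call; the task `id` value below is hypothetical:

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.run",
                "params": [
                    1
                ]
            }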
        """
        task = await self._get_instance(id)

        if not task["enabled"]:
            raise CallError("Task is not enabled")

        await self.middleware.call("zettarepl.run_replication_task",
                                   task["id"])

    async def _validate(self, data):
        verrors = ValidationErrors()

        # Direction

        snapshot_tasks = []

        if data["direction"] == "PUSH":
            e, snapshot_tasks = await self._query_periodic_snapshot_tasks(
                data["periodic_snapshot_tasks"])
            verrors.add_child("periodic_snapshot_tasks", e)

            if data["naming_schema"]:
                verrors.add("naming_schema",
                            "This field has no sense for push replication")

            if data["schedule"]:
                if data["periodic_snapshot_tasks"]:
                    verrors.add(
                        "schedule",
                        "Push replication can't be bound to periodic snapshot task and have "
                        "schedule at the same time")
            else:
                if data["auto"] and not data[
                        "periodic_snapshot_tasks"] and data[
                            "transport"] != "LEGACY":
                    verrors.add(
                        "auto",
                        "Push replication that runs automatically must be either "
                        "bound to periodic snapshot task or have schedule")

        if data["direction"] == "PULL":
            if data["schedule"]:
                pass
            else:
                if data["auto"]:
                    verrors.add(
                        "auto",
                        "Pull replication that runs automatically must have schedule"
                    )

            if data["periodic_snapshot_tasks"]:
                verrors.add(
                    "periodic_snapshot_tasks",
                    "Pull replication can't be bound to periodic snapshot task"
                )

            if not data["naming_schema"]:
                verrors.add("naming_schema",
                            "Naming schema is required for pull replication")

            if data["also_include_naming_schema"]:
                verrors.add("also_include_naming_schema",
                            "This field has no sense for pull replication")

            if data["hold_pending_snapshots"]:
                verrors.add(
                    "hold_pending_snapshots",
                    "Pull replication tasks can't hold pending snapshots because "
                    "they don't do source retention")

        # Transport

        if data["transport"] == "SSH+NETCAT":
            if data["netcat_active_side"] is None:
                verrors.add(
                    "netcat_active_side",
                    "You must choose active side for SSH+netcat replication")

            if data["netcat_active_side_port_min"] is not None and data[
                    "netcat_active_side_port_max"] is not None:
                if data["netcat_active_side_port_min"] > data[
                        "netcat_active_side_port_max"]:
                    verrors.add(
                        "netcat_active_side_port_max",
                        "Please specify value greater or equal than netcat_active_side_port_min"
                    )

            if data["compression"] is not None:
                verrors.add(
                    "compression",
                    "Compression is not supported for SSH+netcat replication")

            if data["speed_limit"] is not None:
                verrors.add(
                    "speed_limit",
                    "Speed limit is not supported for SSH+netcat replication")
        else:
            if data["netcat_active_side"] is not None:
                verrors.add(
                    "netcat_active_side",
                    "This field only has sense for SSH+netcat replication")

            for k in [
                    "netcat_active_side_listen_address",
                    "netcat_active_side_port_min",
                    "netcat_active_side_port_max",
                    "netcat_passive_side_connect_address"
            ]:
                if data[k] is not None:
                    verrors.add(
                        k,
                        "This field only has sense for SSH+netcat replication")

        if data["transport"] == "LOCAL":
            if data["ssh_credentials"] is not None:
                verrors.add(
                    "ssh_credentials",
                    "Remote credentials have no sense for local replication")

            if data["compression"] is not None:
                verrors.add("compression",
                            "Compression has no sense for local replication")

            if data["speed_limit"] is not None:
                verrors.add("speed_limit",
                            "Speed limit has no sense for local replication")
        else:
            if data["ssh_credentials"] is None:
                verrors.add(
                    "ssh_credentials",
                    "SSH Credentials are required for non-local replication")
            else:
                try:
                    await self.middleware.call(
                        "keychaincredential.get_of_type",
                        data["ssh_credentials"], "SSH_CREDENTIALS")
                except CallError as e:
                    verrors.add("ssh_credentials", str(e))

        if data["transport"] == "LEGACY":
            for should_be_true in ["auto", "allow_from_scratch"]:
                if not data[should_be_true]:
                    verrors.add(
                        should_be_true,
                        "Legacy replication does not support disabling this option"
                    )

            for should_be_false in [
                    "exclude", "periodic_snapshot_tasks", "naming_schema",
                    "also_include_naming_schema", "only_matching_schedule",
                    "dedup", "large_block", "embed", "compressed"
            ]:
                if data[should_be_false]:
                    verrors.add(
                        should_be_false,
                        "Legacy replication does not support this option")

            if data["direction"] != "PUSH":
                verrors.add(
                    "direction",
                    "Only push application is allowed for Legacy transport")

            if len(data["source_datasets"]) != 1:
                verrors.add(
                    "source_datasets",
                    "You can only have one source dataset for legacy replication"
                )

            if data["retries"] != 1:
                verrors.add("retries",
                            "This value should be 1 for legacy replication")

        # Common for all directions and transports

        for i, source_dataset in enumerate(data["source_datasets"]):
            for snapshot_task in snapshot_tasks:
                if is_child(source_dataset, snapshot_task["dataset"]):
                    if data["recursive"]:
                        for exclude in snapshot_task["exclude"]:
                            if exclude not in data["exclude"]:
                                verrors.add(
                                    "exclude",
                                    f"You should exclude {exclude!r} as bound periodic snapshot "
                                    f"task dataset {snapshot_task['dataset']!r} does"
                                )
                    else:
                        if source_dataset in snapshot_task["exclude"]:
                            verrors.add(
                                f"source_datasets.{i}",
                                f"Dataset {source_dataset!r} is excluded by bound "
                                f"periodic snapshot task for dataset "
                                f"{snapshot_task['dataset']!r}")

        if not data["recursive"] and data["exclude"]:
            verrors.add(
                "exclude",
                "Excluding child datasets is only supported for recursive replication"
            )

        for i, v in enumerate(data["exclude"]):
            if not any(
                    v.startswith(ds + "/") for ds in data["source_datasets"]):
                verrors.add(
                    f"exclude.{i}",
                    "This dataset is not a child of any of source datasets")

        if data["schedule"]:
            if not data["auto"]:
                verrors.add(
                    "schedule",
                    "You can't have schedule for replication that does not run automatically"
                )
        else:
            if data["only_matching_schedule"]:
                verrors.add(
                    "only_matching_schedule",
                    "You can't have only-matching-schedule without schedule")

        if data["retention_policy"] == "CUSTOM":
            if data["lifetime_value"] is None:
                verrors.add(
                    "lifetime_value",
                    "This field is required for custom retention policy")
            if data["lifetime_unit"] is None:
                verrors.add(
                    "lifetime_value",
                    "This field is required for custom retention policy")
        else:
            if data["lifetime_value"] is not None:
                verrors.add(
                    "lifetime_value",
                    "This field has no sense for specified retention policy")
            if data["lifetime_unit"] is not None:
                verrors.add(
                    "lifetime_unit",
                    "This field has no sense for specified retention policy")

        if data["enabled"]:
            for i, snapshot_task in enumerate(snapshot_tasks):
                if not snapshot_task["enabled"]:
                    verrors.add(
                        f"periodic_snapshot_tasks.{i}",
                        "You can't bind disabled periodic snapshot task to enabled replication task"
                    )

        return verrors

    async def _set_periodic_snapshot_tasks(self, replication_task_id,
                                           periodic_snapshot_tasks_ids):
        await self.middleware.call(
            "datastore.delete",
            "storage.replication_repl_periodic_snapshot_tasks",
            [["replication_id", "=", replication_task_id]])
        for periodic_snapshot_task_id in periodic_snapshot_tasks_ids:
            await self.middleware.call(
                "datastore.insert",
                "storage.replication_repl_periodic_snapshot_tasks",
                {
                    "replication_id": replication_task_id,
                    "task_id": periodic_snapshot_task_id,
                },
            )

    async def _query_periodic_snapshot_tasks(self, ids):
        verrors = ValidationErrors()

        query_result = await self.middleware.call("pool.snapshottask.query",
                                                  [["id", "in", ids]])

        snapshot_tasks = []
        for i, task_id in enumerate(ids):
            for task in query_result:
                if task["id"] == task_id:
                    snapshot_tasks.append(task)
                    break
            else:
                verrors.add(str(i), "This snapshot task does not exist")

        return verrors, snapshot_tasks

    @accepts(
        Str("transport",
            enum=["SSH", "SSH+NETCAT", "LOCAL", "LEGACY"],
            required=True), Int("ssh_credentials", null=True, default=None))
    async def list_datasets(self, transport, ssh_credentials=None):
        """
        List datasets on remote side

        Accepts `transport` and SSH credentials ID (for non-local transport)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.list_datasets",
                "params": [
                    "SSH",
                    7
                ]
            }
        """

        return await self.middleware.call("zettarepl.list_datasets", transport,
                                          ssh_credentials)

    @accepts(Str("dataset", required=True),
             Str("transport",
                 enum=["SSH", "SSH+NETCAT", "LOCAL", "LEGACY"],
                 required=True), Int("ssh_credentials",
                                     null=True,
                                     default=None))
    async def create_dataset(self, dataset, transport, ssh_credentials=None):
        """
        Creates dataset on remote side

        Accepts `dataset` name, `transport` and SSH credentials ID (for non-local transport)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.create_dataset",
                "params": [
                    "repl/work",
                    "SSH",
                    7
                ]
            }
        """

        return await self.middleware.call("zettarepl.create_dataset", dataset,
                                          transport, ssh_credentials)

    # Legacy pair support
    @private
    @accepts(
        Dict(
            "replication-pair-data",
            Str("hostname", required=True),
            Str("public-key", required=True),
            Str("user"),
        ))
    async def pair(self, data):
        result = await self.middleware.call(
            "keychaincredential.ssh_pair", {
                "remote_hostname": data["hostname"],
                "username": data["user"],
                "public_key": data["public-key"],
            })
        return {
            "ssh_port": result["port"],
            "ssh_hostkey": result["host_key"],
        }
Exemple #10
0
class KMIPService(ConfigService):
    class Config:
        datastore = 'system_kmip'
        datastore_extend = 'kmip.kmip_extend'
        cli_namespace = 'system.kmip'

    ENTRY = Dict(
        'kmip_entry',
        Int('id', required=True),
        Bool('enabled', required=True),
        Bool('manage_sed_disks', required=True),
        Bool('manage_zfs_keys', required=True),
        Int('certificate', null=True, required=True),
        Int('certificate_authority', null=True, required=True),
        Int('port', validators=[Port()], required=True),
        Str('server', required=True, null=True),
        Str('ssl_version', required=True, enum=SUPPORTED_SSL_VERSIONS),
    )

    @private
    async def kmip_extend(self, data):
        for k in filter(lambda v: data[v],
                        ('certificate', 'certificate_authority')):
            data[k] = data[k]['id']
        return data

    @accepts()
    @returns(Dict(*[Str(i, enum=[i]) for i in SUPPORTED_SSL_VERSIONS]))
    async def ssl_version_choices(self):
        """
        Retrieve valid SSL version choices to be used when configuring kmip service.
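
        An illustrative call, assuming the service is exposed under the `kmip` namespace (no parameters are required):

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "kmip.ssl_version_choices",
                "params": []
            }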
        """
        return {k: k for k in SUPPORTED_SSL_VERSIONS}

    @accepts(
        Patch(
            'kmip_entry',
            'kmip_update',
            ('rm', {
                'name': 'id'
            }),
            ('add', Bool('enabled')),
            ('add', Bool('force_clear')),
            ('add', Bool('change_server')),
            ('add', Bool('validate')),
            ('attr', {
                'update': True
            }),
        ))
    @job(lock='kmip_update')
    async def do_update(self, job, data):
        """
        Update KMIP Server Configuration.

        The system currently authenticates the connection with the remote KMIP server via a TLS handshake. `certificate` and
        `certificate_authority` determine the certs which will be used to initiate the TLS handshake with `server`.

        `validate` is enabled by default. When enabled, the system will test the connection to `server`, making sure
        it is reachable.

        `manage_zfs_keys`/`manage_sed_disks`, when enabled, will sync keys from the local database to the remote KMIP
        server. When disabled, if there are any keys left to be retrieved from the KMIP server,
        it will sync them back to the local database.

        If `enabled` is true, it cannot be disabled while there are existing keys pending to be synced. However, users
        can still perform this action by enabling `force_clear`.

        `ssl_version` can be specified to match the SSL configuration being used by the KMIP server.

        `change_server` is a boolean field which allows users to migrate data between two KMIP servers. The system
        will first migrate keys from the old KMIP server to the local database and then migrate the keys from the local
        database to the new KMIP server. If it is unable to retrieve all the keys from the old server, this will fail.
        Users can bypass this by enabling `force_clear`.

        `force_clear` is a boolean option which, when enabled, removes all keys pending sync from the database.
        It should be used with extreme caution, as users may end up without ZFS dataset or SED disk keys, leaving them
        locked forever. It is disabled by default.
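
        An illustrative update call, assuming the `kmip` namespace; the server address, port and certificate ids below
        are hypothetical:

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "kmip.update",
                "params": [{
                    "enabled": true,
                    "server": "192.168.0.50",
                    "port": 5696,
                    "certificate": 1,
                    "certificate_authority": 1,
                    "manage_zfs_keys": true,
                    "manage_sed_disks": false
                }]
            }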
        """
        old = await self.config()
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()

        if not new['server'] and new['enabled']:
            verrors.add('kmip_update.server',
                        'Please specify a valid hostname or an IPv4 address')

        if new['enabled']:
            verrors.extend(
                (await
                 self.middleware.call('certificate.cert_services_validation',
                                      new['certificate'],
                                      'kmip_update.certificate', False)))

        ca = await self.middleware.call(
            'certificateauthority.query',
            [['id', '=', new['certificate_authority']]])
        if ca and not verrors:
            ca = ca[0]
            if not await self.middleware.call(
                    'cryptokey.validate_cert_with_chain',
                (await self.middleware.call(
                    'certificate.get_instance',
                    new['certificate']))['certificate'], [ca['certificate']]):
                verrors.add(
                    'kmip_update.certificate_authority',
                    'Certificate chain could not be verified with specified certificate authority.'
                )
        elif not ca and new['enabled']:
            verrors.add('kmip_update.certificate_authority',
                        'Please specify a valid id.')

        if new.pop('validate', True) and new['enabled'] and not verrors:
            if not await self.middleware.call('kmip.test_connection', new):
                verrors.add(
                    'kmip_update.server',
                    f'Unable to connect to {new["server"]}:{new["port"]} KMIP server.'
                )

        change_server = new.pop('change_server', False)
        if change_server and new['server'] == old['server']:
            verrors.add(
                'kmip_update.change_server',
                'Please update server field to reflect the new server.')
        if change_server and not new['enabled']:
            verrors.add('kmip_update.enabled',
                        'Must be enabled when change server is enabled.')

        force_clear = new.pop('force_clear', False)
        clear_keys = force_clear if change_server else False
        sync_error = 'KMIP sync is pending, please make sure database and KMIP server ' \
                     'are in sync before proceeding with this operation.'
        if old['enabled'] != new['enabled'] and await self.middleware.call(
                'kmip.kmip_sync_pending'):
            if force_clear:
                clear_keys = True
            else:
                verrors.add('kmip_update.enabled', sync_error)

        verrors.check()

        job.set_progress(30, 'Initial Validation complete')

        if clear_keys:
            await self.middleware.call('kmip.clear_sync_pending_keys')
            job.set_progress(50, 'Cleared keys pending sync')

        if change_server:
            # We will first migrate all the keys to the local database - once done with that,
            # we will proceed with pushing them to the new server:
            # old server -> db
            # db -> new server
            # The first step can be skipped if the old server is not reachable and we want to clear keys
            job.set_progress(
                55, 'Starting migration from existing server to new server')
            await self.middleware.call('datastore.update',
                                       self._config.datastore, old['id'], {
                                           'manage_zfs_keys': False,
                                           'manage_sed_disks': False
                                       })
            job.set_progress(
                60, 'Syncing keys from existing server to local database')
            sync_jobs = [(await self.middleware.call(f'kmip.{i}'))
                         for i in ('sync_zfs_keys', 'sync_sed_keys')]
            errors = []
            for sync_job in sync_jobs:
                await sync_job.wait()
                if sync_job.error:
                    errors.append(sync_job.error)
                elif sync_job.result:
                    errors.append(
                        f'Failed to sync {",".join(sync_job.result)}')

            if errors:
                await self.middleware.call('datastore.update',
                                           self._config.datastore, old['id'],
                                           old)
                # We do this because it's possible a few datasets/disks got synced to db and few didn't - this is
                # to push all the data of interest back to the KMIP server from db
                await self.middleware.call('kmip.sync_keys')
                errors = '\n'.join(errors)
                raise CallError(
                    f'Failed to sync keys from {old["server"]} to host: {errors}'
                )

            if await self.middleware.call('kmip.kmip_sync_pending'):
                raise CallError(sync_error)

            job.set_progress(
                80,
                'Successfully synced keys from existing server to local database'
            )

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            old['id'],
            new,
        )

        await self.middleware.call('service.start', 'kmip')
        if new['enabled'] and old['enabled'] != new['enabled']:
            await self.middleware.call('kmip.initialize_keys')
        if any(old[k] != new[k]
               for k in ('enabled', 'manage_zfs_keys',
                         'manage_sed_disks')) or change_server:
            job.set_progress(
                90,
                'Starting sync between local database and configured KMIP server'
            )
            await self.middleware.call('kmip.sync_keys')

        return await self.config()
Exemple #11
0
class NetDataService(SystemServiceService):
    class Config:
        service = 'netdata'
        service_model = 'netdataglobalsettings'
        service_verb = 'restart'
        datastore_extend = 'netdata.netdata_extend'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._alarms = {}
        self._initialize_alarms()

    @private
    async def netdata_extend(self, data):
        # We get data alarms as a dict e.g
        # {"alarms": {"alarm1": {"enabled": True}, "alarm2": {"enabled": True}}}
        alarms = copy.deepcopy(self._alarms)
        alarms.update(data['alarms'])
        data['alarms'] = alarms
        for alarm in data['alarms']:
            # Remove conf file paths
            data['alarms'][alarm].pop('path', None)
        return data

    @private
    async def list_alarms(self):
        alarms = copy.deepcopy(self._alarms)
        config = await self.config()
        for alarm in config['alarms']:
            if alarm not in alarms:
                # An unlikely case when a previously configured alarm does not exist in conf files anymore
                alarms[alarm] = {}
            alarms[alarm]['enabled'] = config['alarms'][alarm]['enabled']

        return alarms

    @private
    def _initialize_alarms(self):
        path = '/usr/local/etc/netdata/health.d/'
        pattern = re.compile(r'alarm: +(.*)(?:[\s\S]*?os: +(.*)\n)?')

        for file in [f for f in os.listdir(path) if 'sample' not in f]:
            with open(path + file, 'r') as f:
                for alarm in re.findall(pattern, f.read()):
                    # By default all alarms are enabled in netdata
                    # When we list alarms, alarms which have been configured by user to be disabled
                    # will show up as disabled only
                    if 'freebsd' in alarm[1] or not alarm[1]:
                        self._alarms[alarm[0].strip()] = {
                            'path': path + file,
                            'enabled': True
                        }

    @private
    async def validate_attrs(self, data):
        verrors = ValidationErrors()

        additional_params = data.get('additional_params')
        if additional_params:
            # Let's be very generic here and introduce very basic validation
            # Expected format is as following
            # [ipv6.icmpneighbor]
            #   history = 86400
            #   enabled = yes
            #
            # While we are here, we will also introduce basic formatting to the file to ensure
            # that we can make it as compliant as possible

            param_str = ''
            for i in additional_params.split('\n'):
                i = i.strip()
                if not i:
                    continue
                if i.startswith('#'):
                    # Let's not validate this
                    if i.replace('#', '').startswith('['):
                        param_str += f'\n\n{i}'
                    else:
                        param_str += f'\n\t{i}'

                    continue

                if i.startswith('[') and not i.endswith(']'):
                    verrors.add(
                        'netdata_update.additional_params',
                        f'Please correct format for {i}. i.e [system.intr]')
                elif not i.startswith('[') and '=' not in i:
                    verrors.add(
                        'netdata_update.additional_params',
                        f'Please correct format for {i}. i.e enabled = yes')

                if i.startswith('['):
                    param_str += f'\n\n{i}'
                else:
                    param_str += f'\n\t{i}'

            data['additional_params'] = param_str + '\n'

        bind_to_ips = data.get('bind')
        if bind_to_ips:
            valid_ips = [
                ip['address']
                for ip in await self.middleware.call('interfaces.ip_in_use')
            ]
            valid_ips.extend(['127.0.0.1', '::1', '0.0.0.0', '::'])

            for bind_ip in bind_to_ips:
                if bind_ip not in valid_ips:
                    verrors.add('netdata_update.bind',
                                f'Invalid {bind_ip} bind IP')
        else:
            verrors.add('netdata_update.bind', 'This field is required')

        update_alarms = data.pop('update_alarms', {})
        valid_alarms = self._alarms
        if update_alarms:
            for alarm in update_alarms:
                if alarm not in valid_alarms:
                    verrors.add('netdata_update.alarms',
                                f'{alarm} not a valid alarm')

            verrors.extend(
                validate_attributes([
                    Dict(key, Bool('enabled', required=True))
                    for key in update_alarms
                ], {'attributes': update_alarms}))

        # Validating streaming metrics now
        stream_mode = data.get('stream_mode')
        if stream_mode == 'SLAVE':
            for key in ('api_key', 'destination'):
                if not data.get(key):
                    verrors.add(
                        f'netdata_update.{key}',
                        f'{key} is required with stream mode as SLAVE')

            destinations = data.get('destination')
            if destinations:
                ip_addr = IpAddress()
                port = Port()
                for dest in destinations:
                    ip = dest.split(':')[0]
                    try:
                        ip_addr(ip)
                    except ValueError as e:
                        verrors.add('netdata_update.destination', str(e))
                    else:
                        if ':' in dest:
                            try:
                                port(dest.split(':')[1])
                            except ValueError as e:
                                verrors.add('netdata_update.destination',
                                            f'Not a valid port: {e}')
        elif stream_mode == 'MASTER':
            for key in ('allow_from', 'api_key'):
                if not data.get(key):
                    verrors.add(
                        f'netdata_update.{key}',
                        f'{key} is required with stream mode as MASTER')

        verrors.check()

        data['alarms'].update(update_alarms)

        return data

    @accepts(
        Dict('netdata_update',
             Str('additional_params'),
             Dict('alarms', additional_attrs=True),
             List('allow_from', items=[Str('pattern')]),
             Str('api_key', validators=[UUID()]),
             List('bind', validators=[Unique()], items=[Str('bind_ip')]),
             Int('port', validators=[Port()]),
             List('destination',
                  validators=[Unique()],
                  items=[Str('destination')]),
             Int('history'),
             Int('http_port_listen_backlog'),
             Str('stream_mode', enum=['NONE', 'MASTER', 'SLAVE']),
             Int('update_every'),
             update=True))
    async def do_update(self, data):
        """
        Update Netdata Service Configuration

        `alarms` is a dictionary where each key is an alarm name and each value is a dictionary of the schema
        "{'enabled': True}", indicating whether the alarm should be enabled or not.
        Middlewared supports interacting with (changing) alarms in the /usr/local/etc/netdata/health.d/ directory.

        `allow_from` is used when the netdata service is expected to act as a master. It defaults to "['*']". This field
        expects a list of Netdata patterns which Netdata will use to restrict incoming connections from slaves
        accordingly.

        `api_key` is a valid UUID which can be generated on the command line with `uuidgen`.

        `destination` is used when the netdata service is expected to act as a slave. Destination is a list of potential
        destinations to which netdata should stream metrics. The expected format is IP:PORT (the port is optional).
        The first working destination is used by the Netdata service.

        `history` is the number of entries the netdata daemon will keep in memory for each chart dimension.
        It defaults to 86400.

        .. examples(websocket)::

          Update Netdata Service Configuration

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "netdata.update",
                "params": [{
                    "history": 80000,
                    "alarms": {
                        "used_swap": {"enabled": true},
                        "ram_in_swap": {"enabled": true}
                    }
                }]
            }
        """
        old = await self.config()
        new = old.copy()
        # We separate alarms we have in db and the ones user supplies
        new['update_alarms'] = data.pop('alarms', {})
        new.update(data)

        new = await self.validate_attrs(new)

        # If port value is updated, we don't restart nginx, that has to be done manually
        await self._update_service(old, new)

        return await self.config()
Exemple #13
0
class TFTPService(SystemServiceService):
    class Config:
        service = "tftp"
        datastore_prefix = "tftp_"
        cli_namespace = "service.tftp"

    ENTRY = Dict(
        'tftp_entry',
        Bool('newfiles', required=True),
        Str('directory', required=True),
        Str('host', validators=[IpAddress()], required=True),
        Int('port', validators=[Port()], required=True),
        Str('options', required=True),
        Str('umask', required=True, validators=[Match(r'^[0-7]{3}$')]),
        Str('username', required=True),
        Int('id', required=True),
    )

    @accepts()
    @returns(Dict('tftp_host_choices', additional_attrs=True))
    async def host_choices(self):
        """
        Return host choices for TFTP service to use.
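
        An illustrative call, assuming the service is exposed under the `tftp` namespace (no parameters are required):

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "tftp.host_choices",
                "params": []
            }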
        """
        return {
            d['address']: d['address']
            for d in await self.middleware.call('interface.ip_in_use', {
                'static': True,
                'any': True
            })
        }

    @accepts(
        Patch(
            'tftp_entry',
            'tftp_update',
            ('rm', {
                'name': 'id'
            }),
            ('replace', Dir('directory')),
            ('attr', {
                'update': True
            }),
        ))
    async def do_update(self, data):
        """
        Update TFTP Service Configuration.

        `newfiles`, when set, enables network devices to send files to the system.

        `username` sets the user account which will be used to access `directory`. It should be ensured that `username`
        has access to `directory`.
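
        An illustrative update call, assuming the `tftp` namespace; the directory, host and username values below are
        hypothetical:

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "tftp.update",
                "params": [{
                    "directory": "/mnt/tank/tftproot",
                    "host": "192.168.0.10",
                    "port": 69,
                    "newfiles": false,
                    "username": "nobody",
                    "umask": "022",
                    "options": ""
                }]
            }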
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if new["directory"]:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   "tftp_update.directory",
                                                   new["directory"])

        if new['host'] not in await self.host_choices():
            verrors.add('tftp_update.host',
                        'Please provide a valid ip address')

        if verrors:
            raise verrors

        await self._update_service(old, new)

        return await self.config()