Example #1
    def test_read_write_configuration(self, read_file, write_file, chown,
                                      chmod):
        sample_path = Mock()
        sample_owner = Mock()
        sample_group = Mock()
        sample_codec = MagicMock()
        sample_requires_root = Mock()

        manager = ConfigurationManager(sample_path,
                                       sample_owner,
                                       sample_group,
                                       sample_codec,
                                       requires_root=sample_requires_root)

        manager.parse_configuration()
        read_file.assert_called_with(sample_path, codec=sample_codec)

        with patch.object(manager,
                          'parse_configuration',
                          return_value={
                              'key1': 'v1',
                              'key2': 'v2'
                          }):
            self.assertEqual('v1', manager.get_value('key1'))
            self.assertIsNone(manager.get_value('key3'))

        sample_contents = Mock()
        manager.save_configuration(sample_contents)
        write_file.assert_called_with(sample_path,
                                      sample_contents,
                                      as_root=sample_requires_root)

        chown.assert_called_with(sample_path,
                                 sample_owner,
                                 sample_group,
                                 as_root=sample_requires_root)
        chmod.assert_called_with(sample_path,
                                 FileMode.ADD_READ_ALL,
                                 as_root=sample_requires_root)

        sample_options = Mock()
        with patch.object(manager, 'save_configuration') as save_config:
            manager.render_configuration(sample_options)
            save_config.assert_called_once_with(
                sample_codec.serialize.return_value)
            sample_codec.serialize.assert_called_once_with(sample_options)

        with patch('trove.guestagent.common.configuration.'
                   'ConfigurationOverrideStrategy') as mock_strategy:
            manager.set_override_strategy(mock_strategy)
            manager._current_revision = 3
            manager.save_configuration(sample_contents)
            mock_strategy.remove_last.assert_called_once_with(
                manager._current_revision + 1)
            write_file.assert_called_with(sample_path,
                                          sample_contents,
                                          as_root=sample_requires_root)
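
Note: the mock arguments in these test signatures imply a decorator stack that the excerpts omit. A plausible reconstruction, inside the TestCase class, is sketched below (the patch targets and import paths are assumptions, not taken from the excerpt; decorators apply bottom-up, so the innermost patch binds to the first argument):

    from unittest.mock import patch
    from trove.guestagent.common import operating_system

    @patch.object(operating_system, 'chmod')
    @patch.object(operating_system, 'chown')
    @patch.object(operating_system, 'write_file')
    @patch.object(operating_system, 'read_file')
    def test_read_write_configuration(self, read_file, write_file, chown,
                                      chmod):
        ...
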
Example #2
    def test_read_write_configuration(self, read_file, write_file, chown,
                                      chmod):
        sample_path = Mock()
        sample_owner = Mock()
        sample_group = Mock()
        sample_codec = MagicMock()
        sample_requires_root = Mock()
        sample_strategy = MagicMock()
        sample_strategy.configure = Mock()
        sample_strategy.parse_updates = Mock(return_value={})

        manager = ConfigurationManager(sample_path,
                                       sample_owner,
                                       sample_group,
                                       sample_codec,
                                       requires_root=sample_requires_root,
                                       override_strategy=sample_strategy)

        manager.parse_configuration()
        read_file.assert_called_with(sample_path,
                                     codec=sample_codec,
                                     as_root=sample_requires_root)

        with patch.object(manager,
                          'parse_configuration',
                          return_value={
                              'key1': 'v1',
                              'key2': 'v2'
                          }):
            self.assertEqual('v1', manager.get_value('key1'))
            self.assertIsNone(manager.get_value('key3'))

        sample_contents = Mock()
        manager.save_configuration(sample_contents)
        write_file.assert_called_with(sample_path,
                                      sample_contents,
                                      as_root=sample_requires_root)

        chown.assert_called_with(sample_path,
                                 sample_owner,
                                 sample_group,
                                 as_root=sample_requires_root)
        chmod.assert_called_with(sample_path,
                                 FileMode.ADD_READ_ALL,
                                 as_root=sample_requires_root)

        sample_data = {}
        manager.apply_system_override(sample_data)
        manager.apply_user_override(sample_data)
        manager.apply_system_override(sample_data, change_id='sys1')
        manager.apply_user_override(sample_data, change_id='usr1')
        sample_strategy.apply.assert_has_calls([
            call(manager.SYSTEM_GROUP, manager.DEFAULT_CHANGE_ID, sample_data),
            call(manager.USER_GROUP, manager.DEFAULT_CHANGE_ID, sample_data),
            call(manager.SYSTEM_GROUP, 'sys1', sample_data),
            call(manager.USER_GROUP, 'usr1', sample_data)
        ])
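
The call assertions in these tests rely on unittest.mock: assert_has_calls verifies that the recorded calls contain the given call(...) sequence and raises AssertionError otherwise. A minimal, self-contained illustration (the names are made up for the demo):

    from unittest.mock import MagicMock, call

    strategy = MagicMock()
    strategy.apply('system', 'sys1', {'key': 'v1'})
    strategy.apply('user', 'usr1', {'key': 'v1'})

    # Passes: both calls were recorded, in this order.
    strategy.apply.assert_has_calls([
        call('system', 'sys1', {'key': 'v1'}),
        call('user', 'usr1', {'key': 'v1'}),
    ])
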
Example #3
    def test_read_write_configuration(self, read_file, write_file,
                                      chown, chmod):
        sample_path = Mock()
        sample_owner = Mock()
        sample_group = Mock()
        sample_codec = MagicMock()
        sample_requires_root = Mock()
        sample_strategy = MagicMock()
        sample_strategy.configure = Mock()
        sample_strategy.parse_updates = Mock(return_value={})

        manager = ConfigurationManager(
            sample_path, sample_owner, sample_group, sample_codec,
            requires_root=sample_requires_root,
            override_strategy=sample_strategy)

        manager.parse_configuration()
        read_file.assert_called_with(sample_path, codec=sample_codec,
                                     as_root=sample_requires_root)

        with patch.object(manager, 'parse_configuration',
                          return_value={'key1': 'v1', 'key2': 'v2'}):
            self.assertEqual('v1', manager.get_value('key1'))
            self.assertIsNone(manager.get_value('key3'))

        sample_contents = Mock()
        manager.save_configuration(sample_contents)
        write_file.assert_called_with(
            sample_path, sample_contents, as_root=sample_requires_root)

        chown.assert_called_with(sample_path, sample_owner, sample_group,
                                 as_root=sample_requires_root)
        chmod.assert_called_with(
            sample_path, FileMode.ADD_READ_ALL, as_root=sample_requires_root)

        sample_data = {}
        manager.apply_system_override(sample_data)
        manager.apply_user_override(sample_data)
        manager.apply_system_override(sample_data, change_id='sys1')
        manager.apply_user_override(sample_data, change_id='usr1')
        manager.apply_system_override(sample_data, change_id='sys2',
                                      pre_user=True)
        sample_strategy.apply.assert_has_calls([
            call(manager.SYSTEM_POST_USER_GROUP,
                 manager.DEFAULT_CHANGE_ID, sample_data),
            call(manager.USER_GROUP, manager.DEFAULT_CHANGE_ID, sample_data),
            call(manager.SYSTEM_POST_USER_GROUP,
                 'sys1', sample_data),
            call(manager.USER_GROUP, 'usr1', sample_data),
            call(manager.SYSTEM_PRE_USER_GROUP,
                 'sys2', sample_data),
        ])
Example #4
    def test_read_write_configuration(self, read_file, write_file, chown, chmod):
        sample_path = Mock()
        sample_owner = Mock()
        sample_group = Mock()
        sample_codec = MagicMock()
        sample_requires_root = Mock()
        sample_strategy = MagicMock()
        sample_strategy.configure = Mock()
        sample_strategy.parse_updates = Mock(return_value={})

        manager = ConfigurationManager(
            sample_path,
            sample_owner,
            sample_group,
            sample_codec,
            requires_root=sample_requires_root,
            override_strategy=sample_strategy,
        )

        manager.parse_configuration()
        read_file.assert_called_with(sample_path, codec=sample_codec)

        with patch.object(manager, "parse_configuration", return_value={"key1": "v1", "key2": "v2"}):
            self.assertEqual("v1", manager.get_value("key1"))
            self.assertIsNone(manager.get_value("key3"))

        sample_contents = Mock()
        manager.save_configuration(sample_contents)
        write_file.assert_called_with(sample_path, sample_contents, as_root=sample_requires_root)

        chown.assert_called_with(sample_path, sample_owner, sample_group, as_root=sample_requires_root)
        chmod.assert_called_with(sample_path, FileMode.ADD_READ_ALL, as_root=sample_requires_root)

        sample_data = {}
        manager.apply_system_override(sample_data)
        manager.apply_user_override(sample_data)
        manager.apply_system_override(sample_data, change_id="sys1")
        manager.apply_user_override(sample_data, change_id="usr1")
        sample_strategy.apply.assert_has_calls(
            [
                call(manager.SYSTEM_GROUP, manager.DEFAULT_CHANGE_ID, sample_data),
                call(manager.USER_GROUP, manager.DEFAULT_CHANGE_ID, sample_data),
                call(manager.SYSTEM_GROUP, "sys1", sample_data),
                call(manager.USER_GROUP, "usr1", sample_data),
            ]
        )
Example #5
    def test_read_write_configuration(self, read_file, write_file,
                                      chown, chmod):
        sample_path = Mock()
        sample_owner = Mock()
        sample_group = Mock()
        sample_codec = MagicMock()
        sample_requires_root = Mock()

        manager = ConfigurationManager(
            sample_path, sample_owner, sample_group, sample_codec,
            requires_root=sample_requires_root)

        manager.parse_configuration()
        read_file.assert_called_with(sample_path, codec=sample_codec)

        with patch.object(manager, 'parse_configuration',
                          return_value={'key1': 'v1', 'key2': 'v2'}):
            self.assertEqual('v1', manager.get_value('key1'))
            self.assertIsNone(manager.get_value('key3'))

        sample_contents = Mock()
        manager.save_configuration(sample_contents)
        write_file.assert_called_with(
            sample_path, sample_contents, as_root=sample_requires_root)

        chown.assert_called_with(sample_path, sample_owner, sample_group,
                                 as_root=sample_requires_root)
        chmod.assert_called_with(
            sample_path, FileMode.ADD_READ_ALL, as_root=sample_requires_root)

        sample_options = Mock()
        with patch.object(manager, 'save_configuration') as save_config:
            manager.render_configuration(sample_options)
            save_config.assert_called_once_with(
                sample_codec.serialize.return_value)
            sample_codec.serialize.assert_called_once_with(sample_options)

        with patch('trove.guestagent.common.configuration.'
                   'ConfigurationOverrideStrategy') as mock_strategy:
            manager.set_override_strategy(mock_strategy)
            manager._current_revision = 3
            manager.save_configuration(sample_contents)
            mock_strategy.remove_last.assert_called_once_with(
                manager._current_revision + 1)
            write_file.assert_called_with(
                sample_path, sample_contents, as_root=sample_requires_root)
Example #6
class PgSqlApp(object):

    OS = operating_system.get_os()
    LISTEN_ADDRESSES = ['*']  # Listen on all available IP (v4/v6) interfaces.
    ADMIN_USER = '******'  # Trove's administrative user.

    def __init__(self):
        super(PgSqlApp, self).__init__()

        self._current_admin_user = None
        self.status = PgSqlAppStatus(self.pgsql_extra_bin_dir)

        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(self.pgsql_config),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        self.configuration_manager = ConfigurationManager(
            self.pgsql_config, self.pgsql_owner, self.pgsql_owner,
            PropertiesCodec(
                delimiter='=',
                string_mappings={'on': True, 'off': False, "''": None}),
            requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))

    @property
    def service_candidates(self):
        return ['postgresql']

    @property
    def pgsql_owner(self):
        return 'postgres'

    @property
    def default_superuser_name(self):
        return "postgres"

    @property
    def pgsql_base_data_dir(self):
        return '/var/lib/postgresql/'

    @property
    def pgsql_pid_file(self):
        return guestagent_utils.build_file_path(self.pgsql_run_dir,
                                                'postgresql.pid')

    @property
    def pgsql_run_dir(self):
        return '/var/run/postgresql/'

    @property
    def pgsql_extra_bin_dir(self):
        """Redhat and Ubuntu packages for PgSql do not place 'extra' important
        binaries in /usr/bin, but rather in a directory like /usr/pgsql-9.4/bin
        in the case of PostgreSQL 9.4 for RHEL/CentOS
        """
        return {
            operating_system.DEBIAN: '/usr/lib/postgresql/%s/bin/',
            operating_system.REDHAT: '/usr/pgsql-%s/bin/',
            operating_system.SUSE: '/usr/bin/'
        }[self.OS] % self.pg_version[1]

    @property
    def pgsql_config(self):
        return self._find_config_file('postgresql.conf')

    @property
    def pgsql_hba_config(self):
        return self._find_config_file('pg_hba.conf')

    @property
    def pgsql_ident_config(self):
        return self._find_config_file('pg_ident.conf')

    def _find_config_file(self, name_pattern):
        version_base = guestagent_utils.build_file_path(self.pgsql_config_dir,
                                                        self.pg_version[1])
        return sorted(operating_system.list_files_in_directory(
            version_base, recursive=True, pattern=name_pattern,
            as_root=True), key=len)[0]

    @property
    def pgsql_config_dir(self):
        return {
            operating_system.DEBIAN: '/etc/postgresql/',
            operating_system.REDHAT: '/var/lib/postgresql/',
            operating_system.SUSE: '/var/lib/pgsql/'
        }[self.OS]

    @property
    def pgsql_log_dir(self):
        return "/var/log/postgresql/"

    def build_admin(self):
        return PgSqlAdmin(self.get_current_admin_user())

    def update_overrides(self, context, overrides, remove=False):
        if remove:
            self.configuration_manager.remove_user_override()
        elif overrides:
            self.configuration_manager.apply_user_override(overrides)

    def set_current_admin_user(self, user):
        self._current_admin_user = user

    def get_current_admin_user(self):
        if self._current_admin_user is not None:
            return self._current_admin_user

        if self.status.is_installed:
            return models.PostgreSQLUser(self.ADMIN_USER)

        return models.PostgreSQLUser(self.default_superuser_name)

    def apply_overrides(self, context, overrides):
        self.reload_configuration()

    def reload_configuration(self):
        """Send a signal to the server, causing configuration files to be
        reloaded by all server processes.
        Active queries or connections to the database will not be
        interrupted.

        NOTE: Do not use the 'SET' command as it only affects the current
        session.
        """
        self.build_admin().psql(
            "SELECT pg_reload_conf()")

    def reset_configuration(self, context, configuration):
        """Reset the PgSql configuration to the one given.
        """
        config_contents = configuration['config_contents']
        self.configuration_manager.save_configuration(config_contents)

    def start_db_with_conf_changes(self, context, config_contents):
        """Starts the PgSql instance with a new configuration."""
        if self.status.is_running:
            raise RuntimeError(_("The service is still running."))

        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration()
        self.start_db()

    def apply_initial_guestagent_configuration(self):
        """Update guestagent-controlled configuration properties.
        """
        LOG.debug("Applying initial guestagent configuration.")
        file_locations = {
            'data_directory': self._quote(self.pgsql_data_dir),
            'hba_file': self._quote(self.pgsql_hba_config),
            'ident_file': self._quote(self.pgsql_ident_config),
            'external_pid_file': self._quote(self.pgsql_pid_file),
            'unix_socket_directories': self._quote(self.pgsql_run_dir),
            'listen_addresses': self._quote(','.join(self.LISTEN_ADDRESSES)),
            'port': cfg.get_configuration_property('postgresql_port')}
        self.configuration_manager.apply_system_override(file_locations)
        self._apply_access_rules()

    @staticmethod
    def _quote(value):
        return "'%s'" % value

    def _apply_access_rules(self):
        LOG.debug("Applying database access rules.")

        # Connections to all resources are granted.
        #
        # Local access from administrative users is implicitly trusted.
        #
        # Remote access from the Trove's account is always rejected as
        # it is not needed and could be used by malicious users to hijack the
        # instance.
        #
        # Connections from other accounts always require a double-MD5-hashed
        # password.
        #
        # Make the rules readable only by the Postgres service.
        #
        # NOTE: The order of entries is important.
        # The first failure to authenticate stops the lookup.
        # That is why the 'local' connections validate first.
        # The OrderedDict is necessary to guarantee the iteration order.
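        # Illustrative pg_hba.conf lines produced from the rules below
        # (the Trove admin account is shown as a placeholder):
        #   local  all   postgres,<admin>                  trust
        #   host   all   <admin>            0.0.0.0/0      reject
        #   host   all   all                0.0.0.0/0      md5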
        local_admins = ','.join([self.default_superuser_name, self.ADMIN_USER])
        remote_admins = self.ADMIN_USER
        access_rules = OrderedDict(
            [('local', [['all', local_admins, None, 'trust'],
                        ['replication', local_admins, None, 'trust'],
                        ['all', 'all', None, 'md5']]),
             ('host', [['all', local_admins, '127.0.0.1/32', 'trust'],
                       ['all', local_admins, '::1/128', 'trust'],
                       ['all', local_admins, 'localhost', 'trust'],
                       ['all', remote_admins, '0.0.0.0/0', 'reject'],
                       ['all', remote_admins, '::/0', 'reject'],
                       ['all', 'all', '0.0.0.0/0', 'md5'],
                       ['all', 'all', '::/0', 'md5']])
             ])
        operating_system.write_file(self.pgsql_hba_config, access_rules,
                                    PropertiesCodec(
                                        string_mappings={'\t': None}),
                                    as_root=True)
        operating_system.chown(self.pgsql_hba_config,
                               self.pgsql_owner, self.pgsql_owner,
                               as_root=True)
        operating_system.chmod(self.pgsql_hba_config, FileMode.SET_USR_RO,
                               as_root=True)

    def disable_backups(self):
        """Reverse overrides applied by PgBaseBackup strategy"""
        if not self.configuration_manager.has_system_override(
                BACKUP_CFG_OVERRIDE):
            return
        LOG.info("Removing configuration changes for backups")
        self.configuration_manager.remove_system_override(BACKUP_CFG_OVERRIDE)
        self.remove_wal_archive_dir()
        self.restart()

    def enable_backups(self):
        """Apply necessary changes to config to enable WAL-based backups
        if we are using the PgBaseBackup strategy
        """
        LOG.info("Checking if we need to apply changes to WAL config")
        if 'PgBaseBackup' not in self.backup_strategy:
            return
        if self.configuration_manager.has_system_override(BACKUP_CFG_OVERRIDE):
            return

        LOG.info("Applying changes to WAL config for use by base backups")
        arch_cmd = "'test ! -f {wal_arch}/%f && cp %p {wal_arch}/%f'".format(
            wal_arch=self.wal_archive_location
        )
        opts = {
            # FIXME(atomic77) These spaces after the options are needed until
            # DBAAS-949 is fixed
            'wal_level ': 'hot_standby',
            'archive_mode ': 'on',
            'max_wal_senders': 8,
            # 'checkpoint_segments ': 8,
            'wal_keep_segments': 8,
            'archive_command': arch_cmd
        }
        if self.pg_version[1] in ('9.4', '9.5'):
            opts['wal_log_hints'] = 'on'

        self.configuration_manager.apply_system_override(opts,
                                                         BACKUP_CFG_OVERRIDE)
        # self.enable_debugging(level=1)
        self.restart()

    def disable_debugging(self, level=1):
        """Enable debug-level logging in postgres"""
        self.configuration_manager.remove_system_override(DEBUG_MODE_OVERRIDE)

    def enable_debugging(self, level=1):
        """Enable debug-level logging in postgres"""
        opt = {'log_min_messages': 'DEBUG%s' % level}
        self.configuration_manager.apply_system_override(opt,
                                                         DEBUG_MODE_OVERRIDE)

    def install(self, context, packages):
        """Install one or more packages that postgresql needs to run.

        The packages parameter is a string representing the package names that
        should be given to the system's package manager.
        """

        LOG.debug(
            "{guest_id}: Beginning PgSql package installation.".format(
                guest_id=CONF.guest_id
            )
        )
        self.recreate_wal_archive_dir()

        packager = pkg.Package()
        if not packager.pkg_is_installed(packages):
            try:
                LOG.info(
                    _("{guest_id}: Installing ({packages}).").format(
                        guest_id=CONF.guest_id,
                        packages=packages,
                    )
                )
                packager.pkg_install(packages, {}, 1000)
            except (pkg.PkgAdminLockError, pkg.PkgPermissionError,
                    pkg.PkgPackageStateError, pkg.PkgNotFoundError,
                    pkg.PkgTimeout, pkg.PkgScriptletError,
                    pkg.PkgDownloadError, pkg.PkgSignError,
                    pkg.PkgBrokenError):
                LOG.exception(
                    "{guest_id}: There was a package manager error while "
                    "trying to install ({packages}).".format(
                        guest_id=CONF.guest_id,
                        packages=packages,
                    )
                )
                raise
            except Exception:
                LOG.exception(
                    "{guest_id}: The package manager encountered an unknown "
                    "error while trying to install ({packages}).".format(
                        guest_id=CONF.guest_id,
                        packages=packages,
                    )
                )
                raise
            else:
                self.start_db()
                LOG.debug(
                    "{guest_id}: Completed package installation.".format(
                        guest_id=CONF.guest_id,
                    )
                )

    @property
    def pgsql_recovery_config(self):
        return os.path.join(self.pgsql_data_dir, "recovery.conf")

    @property
    def pgsql_data_dir(self):
        return os.path.dirname(self.pg_version[0])

    @property
    def pg_version(self):
        """Find the database version file stored in the data directory.

        :returns: A tuple with the path to the version file
                  (in the root of the data directory) and the version string.
        """
        version_files = operating_system.list_files_in_directory(
            self.pgsql_base_data_dir, recursive=True, pattern='PG_VERSION',
            as_root=True)
        version_file = sorted(version_files, key=len)[0]
        version = operating_system.read_file(version_file, as_root=True)
        return version_file, version.strip()

    def restart(self):
        self.status.restart_db_service(
            self.service_candidates, CONF.state_change_wait_time)

    def start_db(self, enable_on_boot=True, update_db=False):
        self.status.start_db_service(
            self.service_candidates, CONF.state_change_wait_time,
            enable_on_boot=enable_on_boot, update_db=update_db)

    def stop_db(self, do_not_start_on_reboot=False, update_db=False):
        self.status.stop_db_service(
            self.service_candidates, CONF.state_change_wait_time,
            disable_on_boot=do_not_start_on_reboot, update_db=update_db)

    def secure(self, context):
        """Create an administrative user for Trove.
        Force password encryption.
        Also disable the built-in superuser
        """
        password = utils.generate_random_password()

        os_admin_db = models.PostgreSQLSchema(self.ADMIN_USER)
        os_admin = models.PostgreSQLUser(self.ADMIN_USER, password)
        os_admin.databases.append(os_admin_db.serialize())

        postgres = models.PostgreSQLUser(self.default_superuser_name)
        admin = PgSqlAdmin(postgres)
        admin._create_database(context, os_admin_db)
        admin._create_admin_user(context, os_admin,
                                 encrypt_password=True)

        PgSqlAdmin(os_admin).alter_user(context, postgres, None,
                                        'NOSUPERUSER', 'NOLOGIN')

        self.set_current_admin_user(os_admin)

    def pg_current_xlog_location(self):
        """Wrapper for pg_current_xlog_location()
        Cannot be used against a running slave
        """
        r = self.build_admin().query("SELECT pg_current_xlog_location()")
        return r[0][0]

    def pg_last_xlog_replay_location(self):
        """Wrapper for pg_last_xlog_replay_location()
         For use on standby servers
         """
        r = self.build_admin().query("SELECT pg_last_xlog_replay_location()")
        return r[0][0]

    def pg_is_in_recovery(self):
        """Wrapper for pg_is_in_recovery() for detecting a server in
        standby mode
        """
        r = self.build_admin().query("SELECT pg_is_in_recovery()")
        return r[0][0]

    def pg_primary_host(self):
        """There seems to be no way to programmatically determine this
        on a hot standby, so grab what we have written to the recovery
        file
        """
        r = operating_system.read_file(self.pgsql_recovery_config,
                                       as_root=True)
        regexp = re.compile("host=(\d+.\d+.\d+.\d+) ")
        m = regexp.search(r)
        return m.group(1)

    def recreate_wal_archive_dir(self):
        wal_archive_dir = self.wal_archive_location
        operating_system.remove(wal_archive_dir, force=True, recursive=True,
                                as_root=True)
        operating_system.create_directory(wal_archive_dir,
                                          user=self.pgsql_owner,
                                          group=self.pgsql_owner,
                                          force=True, as_root=True)

    def remove_wal_archive_dir(self):
        wal_archive_dir = self.wal_archive_location
        operating_system.remove(wal_archive_dir, force=True, recursive=True,
                                as_root=True)

    def is_root_enabled(self, context):
        """Return True if there is a superuser account enabled.
        """
        results = self.build_admin().query(
            pgsql_query.UserQuery.list_root(),
            timeout=30,
        )

        # There should be only one superuser (Trove's administrative account).
        return len(results) > 1 or (results[0][0] != self.ADMIN_USER)

    def enable_root(self, context, root_password=None):
        """Create a superuser user or reset the superuser password.

        The default PostgreSQL administration account is 'postgres'.
        This account always exists and cannot be removed.
        Its attributes and access can however be altered.

        Clients can connect from the localhost or remotely via TCP/IP:

        Local clients (e.g. psql) can connect from a preset *system* account
        called 'postgres'.
        This system account has no password and is *locked* by default,
        so that it can be used by *local* users only.
        It should *never* be enabled (or its password set)!
        That would just open up a new attack vector on the system account.

        Remote clients should use a built-in *database* account of the same
        name. Its password can be changed using the "ALTER USER" statement.

        Access to this account is disabled by Trove and exposed only once
        superuser access is requested.
        Trove itself creates its own administrative account.

            {"_name": "postgres", "_password": "******"}
        """
        user = self.build_root_user(root_password)
        self.build_admin().alter_user(
            context, user, None, *PgSqlAdmin.ADMIN_OPTIONS)
        return user.serialize()

    def build_root_user(self, password=None):
        return models.PostgreSQLRootUser(password=password)

    def pg_start_backup(self, backup_label):
        r = self.build_admin().query(
            "SELECT pg_start_backup('%s', true)" % backup_label)
        return r[0][0]

    def pg_xlogfile_name(self, start_segment):
        r = self.build_admin().query(
            "SELECT pg_xlogfile_name('%s')" % start_segment)
        return r[0][0]

    def pg_stop_backup(self):
        r = self.build_admin().query("SELECT pg_stop_backup()")
        return r[0][0]

    def disable_root(self, context):
        """Generate a new random password for the public superuser account.
        Do not disable its access rights. Once enabled the account should
        stay that way.
        """
        self.enable_root(context)

    def enable_root_with_password(self, context, root_password=None):
        return self.enable_root(context, root_password)

    @property
    def wal_archive_location(self):
        return cfg.get_configuration_property('wal_archive_location')

    @property
    def backup_strategy(self):
        return cfg.get_configuration_property('backup_strategy')

    def save_files_pre_upgrade(self, mount_point):
        LOG.debug('Saving files pre-upgrade.')
        mnt_etc_dir = os.path.join(mount_point, 'save_etc')
        if self.OS != operating_system.REDHAT:
            # No need to store the config files away for Redhat because
            # they are already stored in the data volume.
            operating_system.remove(mnt_etc_dir, force=True, as_root=True)
            operating_system.copy(self.pgsql_config_dir, mnt_etc_dir,
                                  preserve=True, recursive=True, as_root=True)
        return {'save_etc': mnt_etc_dir}

    def restore_files_post_upgrade(self, upgrade_info):
        LOG.debug('Restoring files post-upgrade.')
        if self.OS != operating_system.REDHAT:
            # No need to restore the config files for Redhat because
            # they are already in the data volume.
            operating_system.copy('%s/.' % upgrade_info['save_etc'],
                                  self.pgsql_config_dir,
                                  preserve=True, recursive=True,
                                  force=True, as_root=True)
            operating_system.remove(upgrade_info['save_etc'], force=True,
                                    as_root=True)
Example #7
class MongoDBApp(object):
    """Prepares DBaaS on a Guest container."""
    def __init__(self):
        self.state_change_wait_time = CONF.state_change_wait_time

        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(CONFIG_FILE),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        self.configuration_manager = ConfigurationManager(
            CONFIG_FILE,
            system.MONGO_USER,
            system.MONGO_USER,
            SafeYamlCodec(default_flow_style=False),
            requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))

        self.is_query_router = False
        self.is_cluster_member = False
        self.status = MongoDBAppStatus()

    def install_if_needed(self, packages):
        """Prepare the guest machine with a MongoDB installation."""
        LOG.info(_("Preparing Guest as MongoDB."))
        if not system.PACKAGER.pkg_is_installed(packages):
            LOG.debug("Installing packages: %s." % str(packages))
            system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT)
        LOG.info(_("Finished installing MongoDB server."))

    def _get_service_candidates(self):
        if self.is_query_router:
            return system.MONGOS_SERVICE_CANDIDATES
        return system.MONGOD_SERVICE_CANDIDATES

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        self.status.stop_db_service(self._get_service_candidates(),
                                    self.state_change_wait_time,
                                    disable_on_boot=do_not_start_on_reboot,
                                    update_db=update_db)

    def restart(self):
        self.status.restart_db_service(self._get_service_candidates(),
                                       self.state_change_wait_time)

    def start_db(self, update_db=False):
        self.status.start_db_service(self._get_service_candidates(),
                                     self.state_change_wait_time,
                                     enable_on_boot=True,
                                     update_db=update_db)

    def update_overrides(self, context, overrides, remove=False):
        if overrides:
            self.configuration_manager.apply_user_override(overrides)

    def remove_overrides(self):
        self.configuration_manager.remove_user_override()

    def start_db_with_conf_changes(self, config_contents):
        LOG.info(_('Starting MongoDB with configuration changes.'))
        if self.status.is_running:
            format = 'Cannot start_db_with_conf_changes because status is %s.'
            LOG.debug(format, self.status)
            raise RuntimeError(format % self.status)
        LOG.info(_("Initiating config."))
        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration(
            None, mount_point=system.MONGODB_MOUNT_POINT)
        self.start_db(True)

    def apply_initial_guestagent_configuration(self,
                                               cluster_config,
                                               mount_point=None):
        LOG.debug("Applying initial configuration.")

        # Mongodb init scripts assume the PID-file path is writable by the
        # database service.
        # See: https://jira.mongodb.org/browse/SERVER-20075
        self._initialize_writable_run_dir()

        self.configuration_manager.apply_system_override({
            'processManagement.fork':
            False,
            'processManagement.pidFilePath':
            system.MONGO_PID_FILE,
            'systemLog.destination':
            'file',
            'systemLog.path':
            system.MONGO_LOG_FILE,
            'systemLog.logAppend':
            True
        })
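        # With the YAML codec, the flattened keys above correspond roughly to
        # (illustrative; assumes dotted keys are expanded into nested
        # sections):
        #   processManagement:
        #     fork: false
        #     pidFilePath: <MONGO_PID_FILE>
        #   systemLog:
        #     destination: file
        #     path: <MONGO_LOG_FILE>
        #     logAppend: true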

        if mount_point:
            self.configuration_manager.apply_system_override(
                {'storage.dbPath': mount_point})

        if cluster_config is not None:
            self._configure_as_cluster_instance(cluster_config)
        else:
            self._configure_network(MONGODB_PORT)

    def _initialize_writable_run_dir(self):
        """Create a writable directory for Mongodb's runtime data
        (e.g. PID-file).
        """
        mongodb_run_dir = os.path.dirname(system.MONGO_PID_FILE)
        LOG.debug("Initializing a runtime directory: %s" % mongodb_run_dir)
        operating_system.create_directory(mongodb_run_dir,
                                          user=system.MONGO_USER,
                                          group=system.MONGO_USER,
                                          force=True,
                                          as_root=True)

    def _configure_as_cluster_instance(self, cluster_config):
        """Configure this guest as a cluster instance and return its
        new status.
        """
        if cluster_config['instance_type'] == "query_router":
            self._configure_as_query_router()
        elif cluster_config["instance_type"] == "config_server":
            self._configure_as_config_server()
        elif cluster_config["instance_type"] == "member":
            self._configure_as_cluster_member(
                cluster_config['replica_set_name'])
        else:
            LOG.error(
                _("Bad cluster configuration; instance type "
                  "given as %s.") % cluster_config['instance_type'])
            return ds_instance.ServiceStatuses.FAILED

        if 'key' in cluster_config:
            self._configure_cluster_security(cluster_config['key'])

    def _configure_as_query_router(self):
        LOG.info(_("Configuring instance as a cluster query router."))
        self.is_query_router = True

        # FIXME(pmalik): We should really have a separate configuration
        # template for the 'mongos' process.
        # Remove all storage configurations from the template.
        # They apply only to 'mongod' processes.
        # Already applied overrides will be integrated into the base file and
        # their current groups removed.
        config = guestagent_utils.expand_dict(
            self.configuration_manager.parse_configuration())
        if 'storage' in config:
            LOG.debug("Removing 'storage' directives from the configuration "
                      "template.")
            del config['storage']
            self.configuration_manager.save_configuration(
                guestagent_utils.flatten_dict(config))

        # Apply 'mongos' configuration.
        self._configure_network(MONGODB_PORT)
        self.configuration_manager.apply_system_override(
            {'sharding.configDB': ''}, CNF_CLUSTER)

    def _configure_as_config_server(self):
        LOG.info(_("Configuring instance as a cluster config server."))
        self._configure_network(CONFIGSVR_PORT)
        self.configuration_manager.apply_system_override(
            {'sharding.clusterRole': 'configsvr'}, CNF_CLUSTER)

    def _configure_as_cluster_member(self, replica_set_name):
        LOG.info(_("Configuring instance as a cluster member."))
        self.is_cluster_member = True
        self._configure_network(MONGODB_PORT)
        # we don't want these thinking they are in a replica set yet
        # as that would prevent us from creating the admin user,
        # so start mongo before updating the config.
        # mongo will be started by the cluster taskmanager
        self.start_db()
        self.configuration_manager.apply_system_override(
            {'replication.replSetName': replica_set_name}, CNF_CLUSTER)

    def _configure_cluster_security(self, key_value):
        """Force cluster key-file-based authentication.

        This will enable RBAC.
        """
        # Store the cluster member authentication key.
        self.store_key(key_value)

        self.configuration_manager.apply_system_override(
            {
                'security.clusterAuthMode': 'keyFile',
                'security.keyFile': self.get_key_file()
            }, CNF_CLUSTER)

    def _configure_network(self, port=None):
        """Make the service accessible at a given (or default if not) port.
        """
        instance_ip = netutils.get_my_ipv4()
        bind_interfaces_string = ','.join([instance_ip, '127.0.0.1'])
        options = {'net.bindIp': bind_interfaces_string}
        if port is not None:
            guestagent_utils.update_dict({'net.port': port}, options)

        self.configuration_manager.apply_system_override(options)
        self.status.set_host(instance_ip, port=port)

    def clear_storage(self):
        mount_point = "/var/lib/mongodb/*"
        LOG.debug("Clearing storage at %s." % mount_point)
        try:
            operating_system.remove(mount_point, force=True, as_root=True)
        except exception.ProcessExecutionError:
            LOG.exception(_("Error clearing storage."))

    def _has_config_db(self):
        value_string = self.configuration_manager.get_value('sharding',
                                                            {}).get('configDB')

        return value_string is not None

    # FIXME(pmalik): This method should really be called 'set_config_servers'.
    # The current name suggests it adds more config servers, but it
    # rather replaces the existing ones.
    def add_config_servers(self, config_server_hosts):
        """Set config servers on a query router (mongos) instance.
        """
        config_servers_string = ','.join(
            ['%s:%s' % (host, CONFIGSVR_PORT) for host in config_server_hosts])
        LOG.info(_("Setting config servers: %s") % config_servers_string)
        self.configuration_manager.apply_system_override(
            {'sharding.configDB': config_servers_string}, CNF_CLUSTER)
        self.start_db(True)

    def add_shard(self, replica_set_name, replica_set_member):
        """
        This method is used by query router (mongos) instances.
        """
        url = "%(rs)s/%(host)s:%(port)s"\
              % {'rs': replica_set_name,
                 'host': replica_set_member,
                 'port': MONGODB_PORT}
        MongoDBAdmin().add_shard(url)

    def add_members(self, members):
        """
        This method is used by a replica-set member instance.
        """
        def check_initiate_status():
            """
            This method is used to verify replica-set status.
            """
            status = MongoDBAdmin().get_repl_status()

            if ((status["ok"] == 1)
                    and (status["members"][0]["stateStr"] == "PRIMARY")
                    and (status["myState"] == 1)):
                return True
            else:
                return False

        def check_rs_status():
            """
            This method is used to verify replica-set status.
            """
            status = MongoDBAdmin().get_repl_status()
            primary_count = 0

            if status["ok"] != 1:
                return False
            if len(status["members"]) != (len(members) + 1):
                return False
            for rs_member in status["members"]:
                if rs_member["state"] not in [1, 2, 7]:
                    return False
                if rs_member["health"] != 1:
                    return False
                if rs_member["state"] == 1:
                    primary_count += 1

            return primary_count == 1

        MongoDBAdmin().rs_initiate()
        # TODO(ramashri) see if hardcoded values can be removed
        utils.poll_until(check_initiate_status,
                         sleep_time=30,
                         time_out=CONF.mongodb.add_members_timeout)

        # add replica-set members
        MongoDBAdmin().rs_add_members(members)
        # TODO(ramashri) see if hardcoded values can be removed
        utils.poll_until(check_rs_status,
                         sleep_time=10,
                         time_out=CONF.mongodb.add_members_timeout)

    def _set_localhost_auth_bypass(self, enabled):
        """When active, the localhost exception allows connections from the
        localhost interface to create the first user on the admin database.
        The exception applies only when there are no users created in the
        MongoDB instance.
        """
        self.configuration_manager.apply_system_override(
            {'setParameter': {
                'enableLocalhostAuthBypass': enabled
            }})

    def list_all_dbs(self):
        return MongoDBAdmin().list_database_names()

    def db_data_size(self, db_name):
        schema = models.MongoDBSchema(db_name)
        return MongoDBAdmin().db_stats(schema.serialize())['dataSize']

    def admin_cmd_auth_params(self):
        return MongoDBAdmin().cmd_admin_auth_params

    def get_key_file(self):
        return system.MONGO_KEY_FILE

    def get_key(self):
        return operating_system.read_file(system.MONGO_KEY_FILE,
                                          as_root=True).rstrip()

    def store_key(self, key):
        """Store the cluster key."""
        LOG.debug('Storing key for MongoDB cluster.')
        operating_system.write_file(system.MONGO_KEY_FILE, key, as_root=True)
        operating_system.chmod(system.MONGO_KEY_FILE,
                               operating_system.FileMode.SET_USR_RO,
                               as_root=True)
        operating_system.chown(system.MONGO_KEY_FILE,
                               system.MONGO_USER,
                               system.MONGO_USER,
                               as_root=True)

    def store_admin_password(self, password):
        LOG.debug('Storing admin password.')
        creds = MongoDBCredentials(username=system.MONGO_ADMIN_NAME,
                                   password=password)
        creds.write(system.MONGO_ADMIN_CREDS_FILE)
        return creds

    def create_admin_user(self, password):
        """Create the admin user while the localhost exception is active."""
        LOG.debug('Creating the admin user.')
        creds = self.store_admin_password(password)
        user = models.MongoDBUser(name='admin.%s' % creds.username,
                                  password=creds.password)
        user.roles = system.MONGO_ADMIN_ROLES
        # the driver engine is already cached, but we need to change it
        with MongoDBClient(None, host='localhost',
                           port=MONGODB_PORT) as client:
            MongoDBAdmin().create_validated_user(user, client=client)
        # now revert to the normal engine
        self.status.set_host(host=netutils.get_my_ipv4(), port=MONGODB_PORT)
        LOG.debug('Created admin user.')

    def secure(self):
        """Create the Trove admin user.

        The service should not be running at this point.
        This will enable role-based access control (RBAC) by default.
        """
        if self.status.is_running:
            raise RuntimeError(
                _("Cannot secure the instance. "
                  "The service is still running."))

        try:
            self.configuration_manager.apply_system_override(
                {'security.authorization': 'enabled'})
            self._set_localhost_auth_bypass(True)
            self.start_db(update_db=False)
            password = utils.generate_random_password()
            self.create_admin_user(password)
            LOG.debug("MongoDB secure complete.")
        finally:
            self._set_localhost_auth_bypass(False)
            self.stop_db()

    def get_configuration_property(self, name, default=None):
        """Return the value of a MongoDB configuration property.
        """
        return self.configuration_manager.get_value(name, default)

    def prep_primary(self):
        # Prepare the primary member of a replica set.
        password = utils.generate_random_password()
        self.create_admin_user(password)
        self.restart()

    @property
    def replica_set_name(self):
        return MongoDBAdmin().get_repl_status()['set']

    @property
    def admin_password(self):
        creds = MongoDBCredentials()
        creds.read(system.MONGO_ADMIN_CREDS_FILE)
        return creds.password

    def is_shard_active(self, replica_set_name):
        shards = MongoDBAdmin().list_active_shards()
        if replica_set_name in [shard['_id'] for shard in shards]:
            LOG.debug('Replica set %s is active.' % replica_set_name)
            return True
        else:
            LOG.debug('Replica set %s is not active.' % replica_set_name)
            return False
Example #8
class MongoDBApp(object):
    """Prepares DBaaS on a Guest container."""

    @classmethod
    def _init_overrides_dir(cls):
        """Initialize a directory for configuration overrides.
        """
        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(CONFIG_FILE), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR
        )

        if not os.path.exists(revision_dir):
            operating_system.create_directory(
                revision_dir, user=system.MONGO_USER, group=system.MONGO_USER, force=True, as_root=True
            )

        return revision_dir

    def __init__(self):
        self.state_change_wait_time = CONF.state_change_wait_time

        revision_dir = self._init_overrides_dir()
        self.configuration_manager = ConfigurationManager(
            CONFIG_FILE,
            system.MONGO_USER,
            system.MONGO_USER,
            SafeYamlCodec(default_flow_style=False),
            requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir),
        )

        self.is_query_router = False
        self.is_cluster_member = False
        self.status = MongoDBAppStatus()

    def install_if_needed(self, packages):
        """Prepare the guest machine with a MongoDB installation."""
        LOG.info(_("Preparing Guest as MongoDB."))
        if not system.PACKAGER.pkg_is_installed(packages):
            LOG.debug("Installing packages: %s." % str(packages))
            system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT)
        LOG.info(_("Finished installing MongoDB server."))

    def _get_service(self):
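        # service_discovery is expected to return a dict of shell commands
        # keyed as 'cmd_start', 'cmd_stop', 'cmd_enable' and 'cmd_disable'
        # (used below), e.g. {'cmd_start': 'sudo systemctl start mongod', ...}
        # -- the exact commands depend on the guest OS (illustrative).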
        if self.is_query_router:
            return operating_system.service_discovery(system.MONGOS_SERVICE_CANDIDATES)
        else:
            return operating_system.service_discovery(system.MONGOD_SERVICE_CANDIDATES)

    def _enable_db_on_boot(self):
        LOG.info(_("Enabling MongoDB on boot."))
        try:
            mongo_service = self._get_service()
            utils.execute_with_timeout(mongo_service["cmd_enable"], shell=True)
        except KeyError:
            raise RuntimeError(_("MongoDB service is not discovered."))

    def _disable_db_on_boot(self):
        LOG.info(_("Disabling MongoDB on boot."))
        try:
            mongo_service = self._get_service()
            utils.execute_with_timeout(mongo_service["cmd_disable"], shell=True)
        except KeyError:
            raise RuntimeError("MongoDB service is not discovered.")

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        LOG.info(_("Stopping MongoDB."))
        if do_not_start_on_reboot:
            self._disable_db_on_boot()

        try:
            mongo_service = self._get_service()
            # TODO(ramashri) see if hardcoded values can be removed
            utils.execute_with_timeout(mongo_service["cmd_stop"], shell=True, timeout=100)
        except KeyError:
            raise RuntimeError(_("MongoDB service is not discovered."))

        if not self.status.wait_for_real_status_to_change_to(
            ds_instance.ServiceStatuses.SHUTDOWN, self.state_change_wait_time, update_db
        ):
            LOG.error(_("Could not stop MongoDB."))
            self.status.end_install_or_restart()
            raise RuntimeError(_("Could not stop MongoDB"))

    def restart(self):
        LOG.info(_("Restarting MongoDB."))
        try:
            self.status.begin_restart()
            self.stop_db()
            self.start_db()
        finally:
            self.status.end_install_or_restart()

    def start_db(self, update_db=False):
        LOG.info(_("Starting MongoDB."))

        self._enable_db_on_boot()

        try:
            mongo_service = self._get_service()
            utils.execute_with_timeout(mongo_service["cmd_start"], shell=True)
        except ProcessExecutionError:
            pass
        except KeyError:
            raise RuntimeError("MongoDB service is not discovered.")
        self.wait_for_start(update_db=update_db)

    def wait_for_start(self, update_db=False):
        LOG.debug("Waiting for MongoDB to start.")
        if not self.status.wait_for_real_status_to_change_to(
            ds_instance.ServiceStatuses.RUNNING, self.state_change_wait_time, update_db
        ):
            LOG.error(_("Start up of MongoDB failed."))
            # If it won't start, but won't die either, kill it by hand so we
            # don't let a rogue process wander around.
            try:
                out, err = utils.execute_with_timeout(system.FIND_PID, shell=True)
                pid = "".join(out.split(" ")[1:2])
                utils.execute_with_timeout(system.MONGODB_KILL % pid, shell=True)
            except exception.ProcessExecutionError:
                LOG.exception(_("Error killing MongoDB start command."))
                # There's nothing more we can do...
            self.status.end_install_or_restart()
            raise RuntimeError("Could not start MongoDB.")
        LOG.debug("MongoDB started successfully.")

    def complete_install_or_restart(self):
        self.status.end_install_or_restart()

    def update_overrides(self, context, overrides, remove=False):
        if overrides:
            self.configuration_manager.apply_user_override(overrides)

    def remove_overrides(self):
        self.configuration_manager.remove_user_override()

    def start_db_with_conf_changes(self, config_contents):
        LOG.info(_("Starting MongoDB with configuration changes."))
        if self.status.is_running:
            format = "Cannot start_db_with_conf_changes because status is %s."
            LOG.debug(format, self.status)
            raise RuntimeError(format % self.status)
        LOG.info(_("Initiating config."))
        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration(None, mount_point=system.MONGODB_MOUNT_POINT)
        self.start_db(True)

    def reset_configuration(self, configuration):
        LOG.info(_("Resetting configuration."))
        config_contents = configuration["config_contents"]
        self.configuration_manager.save_configuration(config_contents)

    def apply_initial_guestagent_configuration(self, cluster_config, mount_point=None):
        LOG.debug("Applying initial configuration.")

        # Mongodb init scripts assume the PID-file path is writable by the
        # database service.
        # See: https://jira.mongodb.org/browse/SERVER-20075
        self._initialize_writable_run_dir()

        # todo mvandijk: enable authorization.
        # 'security.authorization': True
        self.configuration_manager.apply_system_override(
            {
                "processManagement.fork": False,
                "processManagement.pidFilePath": system.MONGO_PID_FILE,
                "systemLog.destination": "file",
                "systemLog.path": system.MONGO_LOG_FILE,
                "systemLog.logAppend": True,
            }
        )

        if mount_point:
            self.configuration_manager.apply_system_override({"storage.dbPath": mount_point})

        if cluster_config is not None:
            self._configure_as_cluster_instance(cluster_config)
        else:
            self._configure_network(MONGODB_PORT)

    def _initialize_writable_run_dir(self):
        """Create a writable directory for Mongodb's runtime data
        (e.g. PID-file).
        """
        mongodb_run_dir = os.path.dirname(system.MONGO_PID_FILE)
        LOG.debug("Initializing a runtime directory: %s" % mongodb_run_dir)
        operating_system.create_directory(
            mongodb_run_dir, user=system.MONGO_USER, group=system.MONGO_USER, force=True, as_root=True
        )

    def _configure_as_cluster_instance(self, cluster_config):
        """Configure this guest as a cluster instance and return its
        new status.
        """
        if cluster_config["instance_type"] == "query_router":
            self._configure_as_query_router()
        elif cluster_config["instance_type"] == "config_server":
            self._configure_as_config_server()
        elif cluster_config["instance_type"] == "member":
            self._configure_as_cluster_member(cluster_config["replica_set_name"])
        else:
            LOG.error(_("Bad cluster configuration; instance type " "given as %s.") % cluster_config["instance_type"])
            return ds_instance.ServiceStatuses.FAILED

        if "key" in cluster_config:
            self._configure_cluster_security(cluster_config["key"])

    def _configure_as_query_router(self):
        LOG.info(_("Configuring instance as a cluster query router."))
        self.is_query_router = True

        # Write the 'mongos' upstart script.
        # FIXME(pmalik): The control script should really be written in the
        # elements.
        # The guestagent will choose the right daemon ('mongod' or 'mongos')
        # based on the 'cluster_config' values.
        upstart_contents = system.MONGOS_UPSTART_CONTENTS.format(config_file_placeholder=CONFIG_FILE)
        operating_system.write_file(system.MONGOS_UPSTART, upstart_contents, as_root=True)

        # FIXME(pmalik): We should really have a separate configuration
        # template for the 'mongos' process.
        # Remove all storage configurations from the template.
        # They apply only to 'mongod' processes.
        # Already applied overrides will be integrated into the base file and
        # their current groups removed.
        config = guestagent_utils.expand_dict(self.configuration_manager.parse_configuration())
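        # Illustration with hypothetical values: expand_dict turns flat dotted
        # keys such as {"storage.dbPath": "/mnt/data"} into nested dicts like
        # {"storage": {"dbPath": "/mnt/data"}}; flatten_dict below reverses it.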
        if "storage" in config:
            LOG.debug("Removing 'storage' directives from the configuration " "template.")
            del config["storage"]
            self.configuration_manager.save_configuration(guestagent_utils.flatten_dict(config))

        # Apply 'mongos' configuration.
        self._configure_network(MONGODB_PORT)
        self.configuration_manager.apply_system_override({"sharding.configDB": ""}, CNF_CLUSTER)

    def _configure_as_config_server(self):
        LOG.info(_("Configuring instance as a cluster config server."))
        self._configure_network(CONFIGSVR_PORT)
        self.configuration_manager.apply_system_override({"sharding.clusterRole": "configsvr"}, CNF_CLUSTER)

    def _configure_as_cluster_member(self, replica_set_name):
        LOG.info(_("Configuring instance as a cluster member."))
        self.is_cluster_member = True
        self._configure_network(MONGODB_PORT)
        # We don't want these instances to think they are in a replica set
        # yet, as that would prevent us from creating the admin user,
        # so start mongo before updating the config.
        # Mongo will be started by the cluster taskmanager.
        self.start_db()
        self.configuration_manager.apply_system_override({"replication.replSetName": replica_set_name}, CNF_CLUSTER)

    def _configure_cluster_security(self, key_value):
        """Force cluster key-file-based authentication.
        """
        # Store the cluster member authentication key.
        self.store_key(key_value)

        # TODO(mvandijk): enable cluster security once Trove features are in
        # self.configuration_manager.apply_system_override(
        #     {'security.clusterAuthMode': 'keyFile',
        #      'security.keyFile': self.get_key_file()}, CNF_CLUSTER)

    def _configure_network(self, port=None):
        """Make the service accessible at a given (or default if not) port.
        """
        instance_ip = netutils.get_my_ipv4()
        bind_interfaces_string = ",".join([instance_ip, "127.0.0.1"])
        options = {"net.bindIp": bind_interfaces_string}
        if port is not None:
            guestagent_utils.update_dict({"net.port": port}, options)

        self.configuration_manager.apply_system_override(options)
        self.status.set_host(instance_ip, port=port)

    def clear_storage(self):
        mount_point = "/var/lib/mongodb/*"
        LOG.debug("Clearing storage at %s." % mount_point)
        try:
            operating_system.remove(mount_point, force=True, as_root=True)
        except exception.ProcessExecutionError:
            LOG.exception(_("Error clearing storage."))

    def _has_config_db(self):
        value_string = self.configuration_manager.get_value("sharding", {}).get("configDB")

        return value_string is not None

    # FIXME(pmalik): This method should really be called 'set_config_servers'.
    # The current name suggests it adds more config servers, but it
    # rather replaces the existing ones.
    def add_config_servers(self, config_server_hosts):
        """Set config servers on a query router (mongos) instance.
        """
        config_servers_string = ",".join(["%s:27019" % host for host in config_server_hosts])
        LOG.info(_("Setting config servers: %s") % config_servers_string)
        self.configuration_manager.apply_system_override({"sharding.configDB": config_servers_string}, CNF_CLUSTER)
        self.start_db(True)

    def add_shard(self, replica_set_name, replica_set_member):
        """
        This method is used by query router (mongos) instances.
        """
        url = "%(rs)s/%(host)s:%(port)s" % {"rs": replica_set_name, "host": replica_set_member, "port": MONGODB_PORT}
        MongoDBAdmin().add_shard(url)

    def add_members(self, members):
        """
        This method is used by a replica-set member instance.
        """

        def check_initiate_status():
            """
            Verify that the replica set has been initiated and this
            member has become the PRIMARY.
            """
            status = MongoDBAdmin().get_repl_status()

            if (status["ok"] == 1) and (status["members"][0]["stateStr"] == "PRIMARY") and (status["myState"] == 1):
                return True
            else:
                return False

        def check_rs_status():
            """
            Verify that all members have joined the replica set, are
            healthy, and that exactly one PRIMARY is present.
            """
            status = MongoDBAdmin().get_repl_status()
            primary_count = 0

            if status["ok"] != 1:
                return False
            if len(status["members"]) != (len(members) + 1):
                return False
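            # Acceptable member states (per MongoDB replSetGetStatus):
            # 1 = PRIMARY, 2 = SECONDARY, 7 = ARBITER.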
            for rs_member in status["members"]:
                if rs_member["state"] not in [1, 2, 7]:
                    return False
                if rs_member["health"] != 1:
                    return False
                if rs_member["state"] == 1:
                    primary_count += 1

            return primary_count == 1

        MongoDBAdmin().rs_initiate()
        # TODO(ramashri) see if hardcoded values can be removed
        utils.poll_until(check_initiate_status, sleep_time=30, time_out=100)

        # add replica-set members
        MongoDBAdmin().rs_add_members(members)
        # TODO(ramashri) see if hardcoded values can be removed
        utils.poll_until(check_rs_status, sleep_time=10, time_out=100)

    def _set_localhost_auth_bypass(self, enabled):
        """When active, the localhost exception allows connections from the
        localhost interface to create the first user on the admin database.
        The exception applies only when there are no users created in the
        MongoDB instance.
        """
        self.configuration_manager.apply_system_override({"setParameter": {"enableLocalhostAuthBypass": enabled}})

    def list_all_dbs(self):
        return MongoDBAdmin().list_database_names()

    def db_data_size(self, db_name):
        schema = models.MongoDBSchema(db_name)
        return MongoDBAdmin().db_stats(schema.serialize())["dataSize"]

    def admin_cmd_auth_params(self):
        return MongoDBAdmin().cmd_admin_auth_params

    def get_key_file(self):
        return system.MONGO_KEY_FILE

    def get_key(self):
        return operating_system.read_file(system.MONGO_KEY_FILE, as_root=True).rstrip()

    def store_key(self, key):
        """Store the cluster key."""
        LOG.debug("Storing key for MongoDB cluster.")
        operating_system.write_file(system.MONGO_KEY_FILE, key, as_root=True)
        operating_system.chmod(system.MONGO_KEY_FILE, operating_system.FileMode.SET_USR_RO, as_root=True)
        operating_system.chown(system.MONGO_KEY_FILE, system.MONGO_USER, system.MONGO_USER, as_root=True)

    def store_admin_password(self, password):
        LOG.debug("Storing admin password.")
        creds = MongoDBCredentials(username=system.MONGO_ADMIN_NAME, password=password)
        creds.write(system.MONGO_ADMIN_CREDS_FILE)
        return creds

    def create_admin_user(self, password):
        """Create the admin user while the localhost exception is active."""
        LOG.debug("Creating the admin user.")
        creds = self.store_admin_password(password)
        user = models.MongoDBUser(name="admin.%s" % creds.username, password=creds.password)
        user.roles = system.MONGO_ADMIN_ROLES
        # The driver engine is already cached, but we need to change it.
        with MongoDBClient(None, host="localhost", port=MONGODB_PORT) as client:
            MongoDBAdmin().create_user(user, client=client)
        # now revert to the normal engine
        self.status.set_host(host=netutils.get_my_ipv4(), port=MONGODB_PORT)
        LOG.debug("Created admin user.")

    def secure(self):
        """Create the Trove admin user.

        The service should not be running at this point.
        """
        if self.status.is_running:
            raise RuntimeError(_("Cannot secure the instance. " "The service is still running."))

        try:
            self._set_localhost_auth_bypass(True)
            self.start_db(update_db=False)
            password = utils.generate_random_password()
            self.create_admin_user(password)
            LOG.debug("MongoDB secure complete.")
        finally:
            self._set_localhost_auth_bypass(False)
            self.stop_db()

    def get_configuration_property(self, name, default=None):
        """Return the value of a MongoDB configuration property.
        """
        return self.configuration_manager.get_value(name, default)

    def prep_primary(self):
        # Prepare the primary member of a replica set.
        password = utils.generate_random_password()
        self.create_admin_user(password)
        self.restart()

    @property
    def replica_set_name(self):
        return MongoDBAdmin().get_repl_status()["set"]

    @property
    def admin_password(self):
        creds = MongoDBCredentials()
        creds.read(system.MONGO_ADMIN_CREDS_FILE)
        return creds.password

    def is_shard_active(self, replica_set_name):
        shards = MongoDBAdmin().list_active_shards()
        if replica_set_name in [shard["_id"] for shard in shards]:
            LOG.debug("Replica set %s is active." % replica_set_name)
            return True
        else:
            LOG.debug("Replica set %s is not active." % replica_set_name)
            return False
Exemple #9
0
class MongoDBApp(object):
    """Prepares DBaaS on a Guest container."""
    @classmethod
    def _init_overrides_dir(cls):
        """Initialize a directory for configuration overrides.
        """
        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(system.MONGO_USER),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)

        if not os.path.exists(revision_dir):
            operating_system.create_directory(revision_dir,
                                              user=system.MONGO_USER,
                                              group=system.MONGO_USER,
                                              force=True,
                                              as_root=True)

        return revision_dir

    def __init__(self):
        self.state_change_wait_time = CONF.state_change_wait_time

        revision_dir = self._init_overrides_dir()
        self.configuration_manager = ConfigurationManager(
            CONFIG_FILE,
            system.MONGO_USER,
            system.MONGO_USER,
            SafeYamlCodec(default_flow_style=False),
            requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))

        self.is_query_router = False
        self.status = MongoDBAppStatus()

    def install_if_needed(self, packages):
        """Prepare the guest machine with a MongoDB installation."""
        LOG.info(_("Preparing Guest as MongoDB."))
        if not system.PACKAGER.pkg_is_installed(packages):
            LOG.debug("Installing packages: %s." % str(packages))
            system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT)
        LOG.info(_("Finished installing MongoDB server."))

    def _get_service(self):
        if self.is_query_router:
            return (operating_system.service_discovery(
                system.MONGOS_SERVICE_CANDIDATES))
        else:
            return (operating_system.service_discovery(
                system.MONGOD_SERVICE_CANDIDATES))

    def _enable_db_on_boot(self):
        LOG.info(_("Enabling MongoDB on boot."))
        try:
            mongo_service = self._get_service()
            utils.execute_with_timeout(mongo_service['cmd_enable'], shell=True)
        except KeyError:
            raise RuntimeError(_("MongoDB service is not discovered."))

    def _disable_db_on_boot(self):
        LOG.info(_("Disabling MongoDB on boot."))
        try:
            mongo_service = self._get_service()
            utils.execute_with_timeout(mongo_service['cmd_disable'],
                                       shell=True)
        except KeyError:
            raise RuntimeError("MongoDB service is not discovered.")

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        LOG.info(_("Stopping MongoDB."))
        if do_not_start_on_reboot:
            self._disable_db_on_boot()

        try:
            mongo_service = self._get_service()
            # TODO(ramashri) see if hardcoded values can be removed
            utils.execute_with_timeout(mongo_service['cmd_stop'],
                                       shell=True,
                                       timeout=100)
        except KeyError:
            raise RuntimeError(_("MongoDB service is not discovered."))

        if not self.status.wait_for_real_status_to_change_to(
                ds_instance.ServiceStatuses.SHUTDOWN,
                self.state_change_wait_time, update_db):
            LOG.error(_("Could not stop MongoDB."))
            self.status.end_install_or_restart()
            raise RuntimeError(_("Could not stop MongoDB"))

    def restart(self):
        LOG.info(_("Restarting MongoDB."))
        try:
            self.status.begin_restart()
            self.stop_db()
            self.start_db()
        finally:
            self.status.end_install_or_restart()

    def start_db(self, update_db=False):
        LOG.info(_("Starting MongoDB."))

        self._enable_db_on_boot()

        try:
            mongo_service = self._get_service()
            utils.execute_with_timeout(mongo_service['cmd_start'], shell=True)
        except ProcessExecutionError:
            pass
        except KeyError:
            raise RuntimeError("MongoDB service is not discovered.")
        self.wait_for_start(update_db=update_db)

    def wait_for_start(self, update_db=False):
        LOG.debug('Waiting for MongoDB to start.')
        if not self.status.wait_for_real_status_to_change_to(
                ds_instance.ServiceStatuses.RUNNING,
                self.state_change_wait_time, update_db):
            LOG.error(_("Start up of MongoDB failed."))
            # If it won't start, but won't die either, kill it by hand so we
            # don't let a rogue process wander around.
            try:
                out, err = utils.execute_with_timeout(system.FIND_PID,
                                                      shell=True)
                pid = "".join(out.split(" ")[1:2])
                utils.execute_with_timeout(system.MONGODB_KILL % pid,
                                           shell=True)
            except exception.ProcessExecutionError:
                LOG.exception(_("Error killing MongoDB start command."))
                # There's nothing more we can do...
            self.status.end_install_or_restart()
            raise RuntimeError("Could not start MongoDB.")
        LOG.debug('MongoDB started successfully.')

    def update_overrides(self, context, overrides, remove=False):
        if overrides:
            self.configuration_manager.apply_user_override(overrides)

    def remove_overrides(self):
        self.configuration_manager.remove_user_override()

    def start_db_with_conf_changes(self, config_contents):
        LOG.info(_('Starting MongoDB with configuration changes.'))
        if self.status.is_running:
            msg = 'Cannot start_db_with_conf_changes because status is %s.'
            LOG.debug(msg, self.status)
            raise RuntimeError(msg % self.status)
        LOG.info(_("Initiating config."))
        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration(
            None, mount_point=system.MONGODB_MOUNT_POINT)
        self.start_db(True)

    def reset_configuration(self, configuration):
        LOG.info(_("Resetting configuration."))
        config_contents = configuration['config_contents']
        self.configuration_manager.save_configuration(config_contents)

    def apply_initial_guestagent_configuration(self,
                                               cluster_config,
                                               mount_point=None):
        LOG.debug("Applying initial configuration.")

        # TODO(mvandijk): enable authorization.
        # 'security.authorization': True
        self.configuration_manager.apply_system_override({
            'processManagement.fork':
            False,
            'processManagement.pidFilePath':
            system.MONGO_PID_FILE,
            'systemLog.destination':
            'file',
            'systemLog.path':
            system.MONGO_LOG_FILE,
            'systemLog.logAppend':
            True
        })

        if mount_point:
            self.configuration_manager.apply_system_override(
                {'storage.dbPath': mount_point})

        if cluster_config is not None:
            self._configure_as_cluster_instance(cluster_config)
        else:
            self._configure_network(MONGODB_PORT)

    def _configure_as_cluster_instance(self, cluster_config):
        """Configure this guest as a cluster instance and return its
        new status.
        """
        if cluster_config['instance_type'] == "query_router":
            self._configure_as_query_router()
        elif cluster_config["instance_type"] == "config_server":
            self._configure_as_config_server()
        elif cluster_config["instance_type"] == "member":
            self._configure_as_cluster_member(
                cluster_config['replica_set_name'])
        else:
            LOG.error(
                _("Bad cluster configuration; instance type "
                  "given as %s.") % cluster_config['instance_type'])
            return ds_instance.ServiceStatuses.FAILED

        if 'key' in cluster_config:
            self._configure_cluster_security(cluster_config['key'])

    def _configure_as_query_router(self):
        LOG.info(_("Configuring instance as a cluster query router."))
        self.is_query_router = True

        # Write the 'mongos' upstart script.
        # FIXME(pmalik): The control script should really be written in the
        # elements.
        # The guestagent will choose the right daemon ('mongod' or 'mongos')
        # based on the 'cluster_config' values.
        upstart_contents = (system.MONGOS_UPSTART_CONTENTS.format(
            config_file_placeholder=CONFIG_FILE))
        operating_system.write_file(system.MONGOS_UPSTART,
                                    upstart_contents,
                                    as_root=True)

        # FIXME(pmalik): We should really have a separate configuration
        # template for the 'mongos' process.
        # Remove all storage configurations from the template.
        # They apply only to 'mongod' processes.
        # Already applied overrides will be integrated into the base file and
        # their current groups removed.
        config = guestagent_utils.expand_dict(
            self.configuration_manager.parse_configuration())
        if 'storage' in config:
            LOG.debug("Removing 'storage' directives from the configuration "
                      "template.")
            del config['storage']
            self.configuration_manager.save_configuration(
                guestagent_utils.flatten_dict(config))

        # Apply 'mongos' configuration.
        self._configure_network(MONGODB_PORT)
        self.configuration_manager.apply_system_override(
            {'sharding.configDB': ''}, CNF_CLUSTER)

    def _configure_as_config_server(self):
        LOG.info(_("Configuring instance as a cluster config server."))
        self._configure_network(CONFIGSVR_PORT)
        self.configuration_manager.apply_system_override(
            {'sharding.clusterRole': 'configsvr'}, CNF_CLUSTER)

    def _configure_as_cluster_member(self, replica_set_name):
        LOG.info(_("Configuring instance as a cluster member."))
        self._configure_network(MONGODB_PORT)
        self.configuration_manager.apply_system_override(
            {'replication.replSetName': replica_set_name}, CNF_CLUSTER)

    def _configure_cluster_security(self, key_value):
        """Force cluster key-file-based authentication.
        """
        # Store the cluster member authentication key.
        self.store_key(key_value)

        self.configuration_manager.apply_system_override(
            {
                'security.clusterAuthMode': 'keyFile',
                'security.keyFile': self.get_key_file()
            }, CNF_CLUSTER)

    def _configure_network(self, port=None):
        """Make the service accessible at a given (or default if not) port.
        """
        instance_ip = netutils.get_my_ipv4()
        bind_interfaces_string = ','.join([instance_ip, '127.0.0.1'])
        options = {'net.bindIp': bind_interfaces_string}
        if port is not None:
            guestagent_utils.update_dict({'net.port': port}, options)

        self.configuration_manager.apply_system_override(options)
        self.status.set_host(instance_ip, port=port)

    def clear_storage(self):
        mount_point = "/var/lib/mongodb/*"
        LOG.debug("Clearing storage at %s." % mount_point)
        try:
            operating_system.remove(mount_point, force=True, as_root=True)
        except exception.ProcessExecutionError:
            LOG.exception(_("Error clearing storage."))

    def _has_config_db(self):
        value_string = self.configuration_manager.get_value('sharding',
                                                            {}).get('configDB')

        return value_string is not None

    # FIXME(pmalik): This method should really be called 'set_config_servers'.
    # The current name suggests it adds more config servers, but it
    # rather replaces the existing ones.
    def add_config_servers(self, config_server_hosts):
        """Set config servers on a query router (mongos) instance.
        """
        config_servers_string = ','.join(
            ['%s:27019' % host for host in config_server_hosts])
        LOG.info(_("Setting config servers: %s") % config_servers_string)
        self.configuration_manager.apply_system_override(
            {'sharding.configDB': config_servers_string}, CNF_CLUSTER)
        self.start_db(True)

    def add_shard(self, replica_set_name, replica_set_member):
        """
        This method is used by query router (mongos) instances.
        """
        url = "%(rs)s/%(host)s:%(port)s"\
              % {'rs': replica_set_name,
                 'host': replica_set_member,
                 'port': MONGODB_PORT}
        MongoDBAdmin().add_shard(url)

    def add_members(self, members):
        """
        This method is used by a replica-set member instance.
        """
        def check_initiate_status():
            """
            This method is used to verify replica-set status.
            """
            status = MongoDBAdmin().get_repl_status()

            if ((status["ok"] == 1)
                    and (status["members"][0]["stateStr"] == "PRIMARY")
                    and (status["myState"] == 1)):
                return True
            else:
                return False

        def check_rs_status():
            """
            This method is used to verify replica-set status.
            """
            status = MongoDBAdmin().get_repl_status()
            primary_count = 0

            if status["ok"] != 1:
                return False
            if len(status["members"]) != (len(members) + 1):
                return False
            for rs_member in status["members"]:
                if rs_member["state"] not in [1, 2, 7]:
                    return False
                if rs_member["health"] != 1:
                    return False
                if rs_member["state"] == 1:
                    primary_count += 1

            return primary_count == 1

        # Create the admin user on this member.
        # This is only necessary for setting up the replica set.
        # The query router will handle requests once this set
        # is added as a shard.
        password = utils.generate_random_password()
        self.create_admin_user(password)

        # initiate replica-set
        MongoDBAdmin().rs_initiate()
        # TODO(ramashri) see if hardcoded values can be removed
        utils.poll_until(check_initiate_status, sleep_time=60, time_out=100)

        # add replica-set members
        MongoDBAdmin().rs_add_members(members)
        # TODO(ramashri) see if hardcoded values can be removed
        utils.poll_until(check_rs_status, sleep_time=60, time_out=100)

    def _set_localhost_auth_bypass(self, enabled):
        """When active, the localhost exception allows connections from the
        localhost interface to create the first user on the admin database.
        The exception applies only when there are no users created in the
        MongoDB instance.
        """
        self.configuration_manager.apply_system_override(
            {'setParameter': {
                'enableLocalhostAuthBypass': enabled
            }})

    def list_all_dbs(self):
        return MongoDBAdmin().list_database_names()

    def db_data_size(self, db_name):
        schema = models.MongoDBSchema(db_name)
        return MongoDBAdmin().db_stats(schema.serialize())['dataSize']

    def admin_cmd_auth_params(self):
        return MongoDBAdmin().cmd_admin_auth_params

    def get_key_file(self):
        return system.MONGO_KEY_FILE

    def get_key(self):
        with open(system.MONGO_KEY_FILE) as key_file:
            return key_file.read().rstrip()

    def store_key(self, key):
        """Store the cluster key."""
        LOG.debug('Storing key for MongoDB cluster.')
        operating_system.write_file(system.MONGO_KEY_FILE, key, as_root=True)
        operating_system.chmod(system.MONGO_KEY_FILE,
                               operating_system.FileMode.SET_USR_RO,
                               as_root=True)
        operating_system.chown(system.MONGO_KEY_FILE,
                               system.MONGO_USER,
                               system.MONGO_USER,
                               as_root=True)

    def store_admin_password(self, password):
        LOG.debug('Storing admin password.')
        creds = MongoDBCredentials(username=system.MONGO_ADMIN_NAME,
                                   password=password)
        creds.write(system.MONGO_ADMIN_CREDS_FILE)
        return creds

    def create_admin_user(self, password):
        """Create the admin user while the localhost exception is active."""
        LOG.debug('Creating the admin user.')
        creds = self.store_admin_password(password)
        user = models.MongoDBUser(name='admin.%s' % creds.username,
                                  password=creds.password)
        user.roles = system.MONGO_ADMIN_ROLES
        with MongoDBClient(None) as client:
            MongoDBAdmin().create_user(user, client=client)
        LOG.debug('Created admin user.')

    def secure(self):
        """Create the Trove admin user.

        The service should not be running at this point.
        """
        if self.status.is_running:
            raise RuntimeError(
                _("Cannot secure the instance. "
                  "The service is still running."))

        try:
            self._set_localhost_auth_bypass(True)
            self.start_db(update_db=False)
            password = utils.generate_random_password()
            self.create_admin_user(password)
            LOG.debug("MongoDB secure complete.")
        finally:
            self._set_localhost_auth_bypass(False)
            self.stop_db()

    def get_configuration_property(self, name, default=None):
        """Return the value of a MongoDB configuration property.
        """
        return self.configuration_manager.get_value(name, default)
Exemple #10
0
class RedisApp(object):
    """
    Handles installation and configuration of redis
    on a trove instance.
    """
    def __init__(self, state_change_wait_time=None):
        """
        Sets default status and state_change_wait_time
        """
        if state_change_wait_time:
            self.state_change_wait_time = state_change_wait_time
        else:
            self.state_change_wait_time = CONF.state_change_wait_time

        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(system.REDIS_CONFIG),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        config_value_mappings = {'yes': True, 'no': False, "''": None}
        self._value_converter = StringConverter(config_value_mappings)
        self.configuration_manager = ConfigurationManager(
            system.REDIS_CONFIG,
            system.REDIS_OWNER,
            system.REDIS_OWNER,
            PropertiesCodec(unpack_singletons=False,
                            string_mappings=config_value_mappings),
            requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))

        self.admin = self._build_admin_client()
        self.status = RedisAppStatus(self.admin)

    def _build_admin_client(self):
        password = self.get_configuration_property('requirepass')
        socket = self.get_configuration_property('unixsocket')
        cmd = self.get_config_command_name()

        return RedisAdmin(password=password,
                          unix_socket_path=socket,
                          config_cmd=cmd)

    def _refresh_admin_client(self):
        self.admin = self._build_admin_client()
        self.status.set_client(self.admin)
        return self.admin

    def install_if_needed(self, packages):
        """
        Install Redis if needed; do nothing if it is already installed.
        """
        LOG.info('Preparing Guest as Redis Server.')
        if not packager.pkg_is_installed(packages):
            LOG.info('Installing Redis.')
            self._install_redis(packages)
        LOG.info('Redis installation completed.')

    def _install_redis(self, packages):
        """
        Install the redis server.
        """
        LOG.debug('Installing redis server.')
        LOG.debug("Creating %s.", system.REDIS_CONF_DIR)
        operating_system.create_directory(system.REDIS_CONF_DIR, as_root=True)
        pkg_opts = {}
        packager.pkg_install(packages, pkg_opts, TIME_OUT)
        self.start_db()
        LOG.debug('Finished installing redis server.')

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        self.status.stop_db_service(system.SERVICE_CANDIDATES,
                                    self.state_change_wait_time,
                                    disable_on_boot=do_not_start_on_reboot,
                                    update_db=update_db)

    def restart(self):
        self.status.restart_db_service(system.SERVICE_CANDIDATES,
                                       self.state_change_wait_time)

    def update_overrides(self, context, overrides, remove=False):
        if overrides:
            self.configuration_manager.apply_user_override(overrides)
            # apply requirepass at runtime
            # TODO(zhaochao): updating 'requirepass' here will be removed
            # in future releases; Redis will only use enable_root/disable_root
            # to set this parameter.
            if 'requirepass' in overrides:
                self.admin.config_set('requirepass', overrides['requirepass'])
                self._refresh_admin_client()

    def apply_overrides(self, client, overrides):
        """Use the 'CONFIG SET' command to apply configuration at runtime.

        Commands that appear multiple times have values separated by a
        white space. For instance, the following two 'save' directives from the
        configuration file...

            save 900 1
            save 300 10

        ... would be applied in a single command as:

            CONFIG SET save "900 1 300 10"

        Note that the 'CONFIG' command has been renamed to prevent
        users from using it to bypass configuration groups.
        """
        for prop_name, prop_args in overrides.items():
            args_string = self._join_lists(
                self._value_converter.to_strings(prop_args), ' ')
            client.config_set(prop_name, args_string)
            # NOTE(zhaochao): applying requirepass in update_overrides is
            # only kept for backward compatibility. Now that requirepass is
            # set via enable_root/disable_root, the Redis admin client should
            # be refreshed here.
            if prop_name == "requirepass":
                client = self._refresh_admin_client()
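        # Illustration with a hypothetical override: {'save': [['900', '1'],
        # ['300', '10']]} results in client.config_set('save', '900 1 300 10'),
        # as described in the docstring above.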

    def _join_lists(self, items, sep):
        """Join list items (including items from sub-lists) into a string.
        Non-list inputs are returned unchanged.

        _join_lists('1234', ' ') = "1234"
        _join_lists(['1','2','3','4'], ' ') = "1 2 3 4"
        _join_lists([['1','2'], ['3','4']], ' ') = "1 2 3 4"
        """
        if isinstance(items, list):
            return sep.join(
                [sep.join(e) if isinstance(e, list) else e for e in items])
        return items

    def remove_overrides(self):
        self.configuration_manager.remove_user_override()

    def make_read_only(self, read_only):
        # Redis has no mechanism to make an instance read-only at present
        pass

    def start_db_with_conf_changes(self, config_contents):
        LOG.info('Starting redis with conf changes.')
        if self.status.is_running:
            msg = 'Cannot start_db_with_conf_changes because status is %s.'
            LOG.debug(msg, self.status)
            raise RuntimeError(msg % self.status)
        LOG.info("Initiating config.")
        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration()
        self.start_db(True)

    def start_db(self, update_db=False):
        self.status.start_db_service(system.SERVICE_CANDIDATES,
                                     self.state_change_wait_time,
                                     enable_on_boot=True,
                                     update_db=update_db)

    def apply_initial_guestagent_configuration(self):
        """Update guestagent-controlled configuration properties.
        """

        # Hide the 'CONFIG' command from end users by mangling its name.
        self.admin.set_config_command_name(self._mangle_config_command_name())

        self.configuration_manager.apply_system_override({
            'daemonize':
            'yes',
            'protected-mode':
            'no',
            'supervised':
            'systemd',
            'pidfile':
            system.REDIS_PID_FILE,
            'logfile':
            system.REDIS_LOG_FILE,
            'dir':
            system.REDIS_DATA_DIR
        })

    def get_config_command_name(self):
        """Get current name of the 'CONFIG' command.
        """
        renamed_cmds = self.configuration_manager.get_value('rename-command')
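        # 'rename-command' entries are [old_name, new_name] pairs, e.g.
        # [['CONFIG', '<random string>']] as written by _rename_command below
        # (hypothetical mangled value).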
        if renamed_cmds:
            for name_pair in renamed_cmds:
                if name_pair[0] == 'CONFIG':
                    return name_pair[1]

        return None

    def _mangle_config_command_name(self):
        """Hide the 'CONFIG' command from the clients by renaming it to a
        random string known only to the guestagent.
        Return the mangled name.
        """
        mangled = utils.generate_random_password()
        self._rename_command('CONFIG', mangled)
        return mangled

    def _rename_command(self, old_name, new_name):
        """It is possible to completely disable a command by renaming it
        to an empty string.
        """
        self.configuration_manager.apply_system_override(
            {'rename-command': [old_name, new_name]})

    def get_logfile(self):
        """Specify the log file name. Also the empty string can be used to
        force Redis to log on the standard output.
        Note that if you use standard output for logging but daemonize,
        logs will be sent to /dev/null
        """
        return self.get_configuration_property('logfile')

    def get_db_filename(self):
        """The filename where to dump the DB.
        """
        return self.get_configuration_property('dbfilename')

    def get_working_dir(self):
        """The DB will be written inside this directory,
        with the filename specified by the 'dbfilename' configuration
        directive.
        The Append Only File will also be created inside this directory.
        """
        return self.get_configuration_property('dir')

    def get_persistence_filepath(self):
        """Returns the full path to the persistence file."""
        return guestagent_utils.build_file_path(self.get_working_dir(),
                                                self.get_db_filename())

    def get_port(self):
        """Port for this instance or default if not set."""
        return self.get_configuration_property('port', system.REDIS_PORT)

    def get_auth_password(self):
        """Client authentication password for this instance or None if not set.
        """
        return self.get_configuration_property('requirepass')

    def is_appendonly_enabled(self):
        """True if the Append Only File (AOF) persistence mode is enabled.
        """
        return self.get_configuration_property('appendonly', False)

    def get_append_file_name(self):
        """The name of the append only file (AOF).
        """
        return self.get_configuration_property('appendfilename')

    def is_cluster_enabled(self):
        """Only nodes that are started as cluster nodes can be part of a
        Redis Cluster.
        """
        return self.get_configuration_property('cluster-enabled', False)

    def enable_cluster(self):
        """In order to start a Redis instance as a cluster node enable the
        cluster support
        """
        self.configuration_manager.apply_system_override(
            {'cluster-enabled': 'yes'}, CLUSTER_CFG)

    def get_cluster_config_filename(self):
        """Cluster node configuration file.
        """
        return self.get_configuration_property('cluster-config-file')

    def set_cluster_config_filename(self, name):
        """Make sure that instances running in the same system do not have
        overlapping cluster configuration file names.
        """
        self.configuration_manager.apply_system_override(
            {'cluster-config-file': name}, CLUSTER_CFG)

    def get_cluster_node_timeout(self):
        """Cluster node timeout is the amount of milliseconds a node must be
        unreachable for it to be considered in failure state.
        """
        return self.get_configuration_property('cluster-node-timeout')

    def get_configuration_property(self, name, default=None):
        """Return the value of a Redis configuration property.
        Returns a single value for single-argument properties or
        a list otherwise.
        """
        return utils.unpack_singleton(
            self.configuration_manager.get_value(name, default))

    def cluster_meet(self, ip, port):
        try:
            utils.execute_with_timeout('redis-cli', 'cluster', 'meet', ip,
                                       port)
        except exception.ProcessExecutionError:
            LOG.exception('Error joining node to cluster at %s.', ip)
            raise

    def cluster_addslots(self, first_slot, last_slot):
        try:
            group_size = 200
            # Create list of slots represented in strings
            # eg. ['10', '11', '12', '13']
            slots = list(map(str, range(first_slot, last_slot + 1)))
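            # e.g. hypothetical first_slot=0, last_slot=5460 produces
            # ['0', '1', ..., '5460'], sent to 'cluster addslots' in
            # chunks of 200 slots per redis-cli invocation.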
            while slots:
                cmd = (['redis-cli', 'cluster', 'addslots'] +
                       slots[0:group_size])
                out, err = utils.execute_with_timeout(*cmd,
                                                      run_as_root=True,
                                                      root_helper='sudo')
                if 'OK' not in out:
                    raise RuntimeError(_('Error executing addslots: %s') % out)
                del slots[0:group_size]
        except exception.ProcessExecutionError:
            LOG.exception(
                'Error adding slots %(first_slot)s-%(last_slot)s'
                ' to cluster.', {
                    'first_slot': first_slot,
                    'last_slot': last_slot
                })
            raise

    def _get_node_info(self):
        try:
            out, _ = utils.execute_with_timeout('redis-cli', '--csv',
                                                'cluster', 'nodes')
            return [line.split(' ') for line in out.splitlines()]
        except exception.ProcessExecutionError:
            LOG.exception('Error getting node info.')
            raise

    def _get_node_details(self):
        for node_details in self._get_node_info():
            if 'myself' in node_details[2]:
                return node_details
        raise exception.TroveError(_("Unable to determine node details"))

    def get_node_ip(self):
        """Returns [ip, port] where both values are strings"""
        return self._get_node_details()[1].split(':')

    def get_node_id_for_removal(self):
        node_details = self._get_node_details()
        node_id = node_details[0]
        my_ip = node_details[1].split(':')[0]
        try:
            slots, _ = utils.execute_with_timeout('redis-cli', '--csv',
                                                  'cluster', 'slots')
            return node_id if my_ip not in slots else None
        except exception.ProcessExecutionError:
            LOG.exception('Error validating node for removal.')
            raise

    def remove_nodes(self, node_ids):
        try:
            for node_id in node_ids:
                utils.execute_with_timeout('redis-cli', 'cluster', 'forget',
                                           node_id)
        except exception.ProcessExecutionError:
            LOG.exception('Error removing node from cluster.')
            raise

    def enable_root(self, password=None):
        if not password:
            password = utils.generate_random_password()
        redis_password = RedisRootUser(password=password)
        try:
            self.configuration_manager.apply_system_override(
                {
                    'requirepass': password,
                    'masterauth': password
                },
                change_id=SYS_OVERRIDES_AUTH)
            self.apply_overrides(self.admin, {
                'requirepass': password,
                'masterauth': password
            })
        except exception.TroveError:
            LOG.exception('Error enabling authentication for instance.')
            raise
        return redis_password.serialize()

    def disable_root(self):
        try:
            self.configuration_manager.remove_system_override(
                change_id=SYS_OVERRIDES_AUTH)
            self.apply_overrides(self.admin, {
                'requirepass': '',
                'masterauth': ''
            })
        except exception.TroveError:
            LOG.exception('Error disabling authentication for instance.')
            raise
Exemple #11
0
class PgSqlApp(object):

    OS = operating_system.get_os()
    LISTEN_ADDRESSES = ['*']  # Listen on all available IP (v4/v6) interfaces.
    ADMIN_USER = '******'  # Trove's administrative user.

    def __init__(self):
        super(PgSqlApp, self).__init__()

        self._current_admin_user = None
        self.status = PgSqlAppStatus(self.pgsql_extra_bin_dir)

        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(self.pgsql_config),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        self.configuration_manager = ConfigurationManager(
            self.pgsql_config,
            self.pgsql_owner,
            self.pgsql_owner,
            PropertiesCodec(delimiter='=',
                            string_mappings={
                                'on': True,
                                'off': False,
                                "''": None
                            }),
            requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))

    @property
    def service_candidates(self):
        return ['postgresql']

    @property
    def pgsql_owner(self):
        return 'postgres'

    @property
    def default_superuser_name(self):
        return "postgres"

    @property
    def pgsql_base_data_dir(self):
        return '/var/lib/postgresql/'

    @property
    def pgsql_pid_file(self):
        return guestagent_utils.build_file_path(self.pgsql_run_dir,
                                                'postgresql.pid')

    @property
    def pgsql_run_dir(self):
        return '/var/run/postgresql/'

    @property
    def pgsql_extra_bin_dir(self):
        """Redhat and Ubuntu packages for PgSql do not place 'extra' important
        binaries in /usr/bin, but rather in a directory like /usr/pgsql-9.4/bin
        in the case of PostgreSQL 9.4 for RHEL/CentOS
        """
        return {
            operating_system.DEBIAN: '/usr/lib/postgresql/%s/bin/',
            operating_system.REDHAT: '/usr/pgsql-%s/bin/',
            operating_system.SUSE: '/usr/bin/'
        }[self.OS] % self.pg_version[1]

    @property
    def pgsql_config(self):
        return self._find_config_file('postgresql.conf')

    @property
    def pgsql_hba_config(self):
        return self._find_config_file('pg_hba.conf')

    @property
    def pgsql_ident_config(self):
        return self._find_config_file('pg_ident.conf')

    def _find_config_file(self, name_pattern):
        version_base = guestagent_utils.build_file_path(
            self.pgsql_config_dir, self.pg_version[1])
        return sorted(operating_system.list_files_in_directory(
            version_base, recursive=True, pattern=name_pattern, as_root=True),
                      key=len)[0]

    @property
    def pgsql_config_dir(self):
        return {
            operating_system.DEBIAN: '/etc/postgresql/',
            operating_system.REDHAT: '/var/lib/postgresql/',
            operating_system.SUSE: '/var/lib/pgsql/'
        }[self.OS]

    @property
    def pgsql_log_dir(self):
        return "/var/log/postgresql/"

    def build_admin(self):
        return PgSqlAdmin(self.get_current_admin_user())

    def update_overrides(self, context, overrides, remove=False):
        if remove:
            self.configuration_manager.remove_user_override()
        elif overrides:
            self.configuration_manager.apply_user_override(overrides)

    def set_current_admin_user(self, user):
        self._current_admin_user = user

    def get_current_admin_user(self):
        if self._current_admin_user is not None:
            return self._current_admin_user

        if self.status.is_installed:
            return models.PostgreSQLUser(self.ADMIN_USER)

        return models.PostgreSQLUser(self.default_superuser_name)

    def apply_overrides(self, context, overrides):
        self.reload_configuration()

    def reload_configuration(self):
        """Send a signal to the server, causing configuration files to be
        reloaded by all server processes.
        Active queries or connections to the database will not be
        interrupted.

        NOTE: Do not use the 'SET' command as it only affects the current
        session.
        """
        self.build_admin().psql("SELECT pg_reload_conf()")

    def reset_configuration(self, context, configuration):
        """Reset the PgSql configuration to the one given.
        """
        config_contents = configuration['config_contents']
        self.configuration_manager.save_configuration(config_contents)

    def start_db_with_conf_changes(self, context, config_contents):
        """Starts the PgSql instance with a new configuration."""
        if self.status.is_running:
            raise RuntimeError(_("The service is still running."))

        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration()
        self.start_db()

    def apply_initial_guestagent_configuration(self):
        """Update guestagent-controlled configuration properties.
        """
        LOG.debug("Applying initial guestagent configuration.")
        file_locations = {
            'data_directory': self._quote(self.pgsql_data_dir),
            'hba_file': self._quote(self.pgsql_hba_config),
            'ident_file': self._quote(self.pgsql_ident_config),
            'external_pid_file': self._quote(self.pgsql_pid_file),
            'unix_socket_directories': self._quote(self.pgsql_run_dir),
            'listen_addresses': self._quote(','.join(self.LISTEN_ADDRESSES)),
            'port': cfg.get_configuration_property('postgresql_port')
        }
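        # _quote wraps values in single quotes as postgresql.conf expects for
        # strings, e.g. data_directory = '/var/lib/postgresql/10/main'
        # (hypothetical path).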
        self.configuration_manager.apply_system_override(file_locations)
        self._apply_access_rules()

    @staticmethod
    def _quote(value):
        return "'%s'" % value

    def _apply_access_rules(self):
        LOG.debug("Applying database access rules.")

        # Connections to all resources are granted.
        #
        # Local access from administrative users is implicitly trusted.
        #
        # Remote access from the Trove account is always rejected as
        # it is not needed and could be used by malicious users to hijack the
        # instance.
        #
        # Connections from other accounts always require a double-MD5-hashed
        # password.
        #
        # Make the rules readable only by the Postgres service.
        #
        # NOTE: The order of entries is important.
        # The first failure to authenticate stops the lookup.
        # That is why the 'local' connections validate first.
        # The OrderedDict is necessary to guarantee the iteration order.
        local_admins = ','.join([self.default_superuser_name, self.ADMIN_USER])
        remote_admins = self.ADMIN_USER
        access_rules = OrderedDict([
            ('local', [['all', local_admins, None, 'trust'],
                       ['replication', local_admins, None, 'trust'],
                       ['all', 'all', None, 'md5']]),
            ('host', [['all', local_admins, '127.0.0.1/32', 'trust'],
                      ['all', local_admins, '::1/128', 'trust'],
                      ['all', local_admins, 'localhost', 'trust'],
                      ['all', remote_admins, '0.0.0.0/0', 'reject'],
                      ['all', remote_admins, '::/0', 'reject'],
                      ['all', 'all', '0.0.0.0/0', 'md5'],
                      ['all', 'all', '::/0', 'md5']])
        ])
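        # Illustration (hypothetical rendering): the first 'host' rule above
        # corresponds to a pg_hba.conf entry along the lines of
        #     host  all  postgres,<ADMIN_USER>  127.0.0.1/32  trust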
        operating_system.write_file(
            self.pgsql_hba_config,
            access_rules,
            PropertiesCodec(string_mappings={'\t': None}),
            as_root=True)
        operating_system.chown(self.pgsql_hba_config,
                               self.pgsql_owner,
                               self.pgsql_owner,
                               as_root=True)
        operating_system.chmod(self.pgsql_hba_config,
                               FileMode.SET_USR_RO,
                               as_root=True)

    def disable_backups(self):
        """Reverse overrides applied by PgBaseBackup strategy"""
        if not self.configuration_manager.has_system_override(
                BACKUP_CFG_OVERRIDE):
            return
        LOG.info("Removing configuration changes for backups")
        self.configuration_manager.remove_system_override(BACKUP_CFG_OVERRIDE)
        self.remove_wal_archive_dir()
        self.restart()

    def enable_backups(self):
        """Apply necessary changes to config to enable WAL-based backups
        if we are using the PgBaseBackup strategy
        """
        LOG.info("Checking if we need to apply changes to WAL config")
        if 'PgBaseBackup' not in self.backup_strategy:
            return
        if self.configuration_manager.has_system_override(BACKUP_CFG_OVERRIDE):
            return

        LOG.info("Applying changes to WAL config for use by base backups")
        wal_arch_loc = self.wal_archive_location
        if not os.path.isdir(wal_arch_loc):
            raise RuntimeError(
                _("Cannot enable backup as WAL dir '%s' does "
                  "not exist.") % wal_arch_loc)
        arch_cmd = "'test ! -f {wal_arch}/%f && cp %p {wal_arch}/%f'".format(
            wal_arch=wal_arch_loc)
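        # e.g. a hypothetical wal_arch_loc of '/mnt/wal-archive' yields:
        # 'test ! -f /mnt/wal-archive/%f && cp %p /mnt/wal-archive/%f'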
        # Only PostgreSQL versions > 9.6 are supported: 'wal_level' is set to
        # 'replica' and the "checkpoint_segments" parameter has been removed.
        opts = {
            'wal_level': 'replica',
            'archive_mode': 'on',
            'max_wal_senders': 8,
            'wal_log_hints': 'on',
            'wal_keep_segments': 8,
            'archive_command': arch_cmd
        }

        self.configuration_manager.apply_system_override(
            opts, BACKUP_CFG_OVERRIDE)
        self.restart()

    def disable_debugging(self, level=1):
        """Disable debug-level logging in postgres"""
        self.configuration_manager.remove_system_override(DEBUG_MODE_OVERRIDE)

    def enable_debugging(self, level=1):
        """Enable debug-level logging in postgres"""
        opt = {'log_min_messages': 'DEBUG%s' % level}
        self.configuration_manager.apply_system_override(
            opt, DEBUG_MODE_OVERRIDE)

    def install(self, context, packages):
        """Install one or more packages that postgresql needs to run.

        The packages parameter is a string representing the package names that
        should be given to the system's package manager.
        """

        LOG.debug("{guest_id}: Beginning PgSql package installation.".format(
            guest_id=CONF.guest_id))
        self.recreate_wal_archive_dir()

        packager = pkg.Package()
        if not packager.pkg_is_installed(packages):
            try:
                LOG.info("{guest_id}: Installing ({packages}).".format(
                    guest_id=CONF.guest_id,
                    packages=packages,
                ))
                packager.pkg_install(packages, {}, 1000)
            except (pkg.PkgAdminLockError, pkg.PkgPermissionError,
                    pkg.PkgPackageStateError, pkg.PkgNotFoundError,
                    pkg.PkgTimeout, pkg.PkgScriptletError,
                    pkg.PkgDownloadError, pkg.PkgSignError,
                    pkg.PkgBrokenError):
                LOG.exception(
                    "{guest_id}: There was a package manager error while "
                    "trying to install ({packages}).".format(
                        guest_id=CONF.guest_id,
                        packages=packages,
                    ))
                raise
            except Exception:
                LOG.exception(
                    "{guest_id}: The package manager encountered an unknown "
                    "error while trying to install ({packages}).".format(
                        guest_id=CONF.guest_id,
                        packages=packages,
                    ))
                raise
            else:
                self.start_db()
                LOG.debug("{guest_id}: Completed package installation.".format(
                    guest_id=CONF.guest_id, ))

    @property
    def pgsql_recovery_config(self):
        return os.path.join(self.pgsql_data_dir, "recovery.conf")

    @property
    def pgsql_data_dir(self):
        return os.path.dirname(self.pg_version[0])

    @property
    def pg_version(self):
        """Find the database version file stored in the data directory.

        :returns: A tuple with the path to the version file
                  (in the root of the data directory) and the version string.
        """
        version_files = operating_system.list_files_in_directory(
            self.pgsql_base_data_dir,
            recursive=True,
            pattern='PG_VERSION',
            as_root=True)
        version_file = sorted(version_files, key=len)[0]
        version = operating_system.read_file(version_file, as_root=True)
        return version_file, version.strip()
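
    # Example (paths are hypothetical): with a version file such as
    # /var/lib/postgresql/9.6/main/PG_VERSION containing "9.6", this property
    # returns ('/var/lib/postgresql/9.6/main/PG_VERSION', '9.6'); the
    # pgsql_data_dir property above then resolves to
    # /var/lib/postgresql/9.6/main.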

    def restart(self):
        self.status.restart_db_service(self.service_candidates,
                                       CONF.state_change_wait_time)

    def start_db(self, enable_on_boot=True, update_db=False):
        self.status.start_db_service(self.service_candidates,
                                     CONF.state_change_wait_time,
                                     enable_on_boot=enable_on_boot,
                                     update_db=update_db)

    def stop_db(self, do_not_start_on_reboot=False, update_db=False):
        self.status.stop_db_service(self.service_candidates,
                                    CONF.state_change_wait_time,
                                    disable_on_boot=do_not_start_on_reboot,
                                    update_db=update_db)

    def secure(self, context):
        """Create an administrative user for Trove.
        Force password encryption.
        Also disable the built-in superuser
        """
        password = utils.generate_random_password()

        os_admin_db = models.PostgreSQLSchema(self.ADMIN_USER)
        os_admin = models.PostgreSQLUser(self.ADMIN_USER, password)
        os_admin.databases.append(os_admin_db.serialize())

        postgres = models.PostgreSQLUser(self.default_superuser_name)
        admin = PgSqlAdmin(postgres)
        admin._create_database(context, os_admin_db)
        admin._create_admin_user(context, os_admin, encrypt_password=True)

        PgSqlAdmin(os_admin).alter_user(context, postgres, None, 'NOSUPERUSER',
                                        'NOLOGIN')

        self.set_current_admin_user(os_admin)

    def pg_current_xlog_location(self):
        """Wrapper for pg_current_xlog_location()
        Cannot be used against a running slave
        """
        version = int(self.pg_version[1].split('.')[0])
        if version < 10:
            query = "SELECT pg_current_xlog_location()"
        else:
            query = "SELECT pg_current_wal_lsn()"
        r = self.build_admin().query(query)
        return r[0][0]

    def pg_last_xlog_replay_location(self):
        """Wrapper for pg_last_xlog_replay_location()
         For use on standby servers
         """
        version = int(self.pg_version[1].split('.')[0])
        if version < 10:
            query = "SELECT pg_last_xlog_replay_location()"
        else:
            query = "SELECT pg_last_wal_replay_lsn()"
        r = self.build_admin().query(query)
        return r[0][0]

    def pg_is_in_recovery(self):
        """Wrapper for pg_is_in_recovery() for detecting a server in
        standby mode
        """
        r = self.build_admin().query("SELECT pg_is_in_recovery()")
        return r[0][0]

    def pg_primary_host(self):
        """There seems to be no way to programmatically determine this
        on a hot standby, so grab what we have written to the recovery
        file
        """
        r = operating_system.read_file(self.pgsql_recovery_config,
                                       as_root=True)
        regexp = re.compile(r"host=(\d+\.\d+\.\d+\.\d+) ")
        m = regexp.search(r)
        return m.group(1)
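
    # Illustration (assumed recovery.conf contents): a standby configured by
    # Trove would contain a line such as
    #   primary_conninfo = 'host=192.168.0.5 port=5432 user=replicator ...'
    # from which the regular expression above extracts '192.168.0.5'.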

    def recreate_wal_archive_dir(self):
        wal_archive_dir = self.wal_archive_location
        operating_system.remove(wal_archive_dir,
                                force=True,
                                recursive=True,
                                as_root=True)
        operating_system.create_directory(wal_archive_dir,
                                          user=self.pgsql_owner,
                                          group=self.pgsql_owner,
                                          force=True,
                                          as_root=True)

    def remove_wal_archive_dir(self):
        wal_archive_dir = self.wal_archive_location
        operating_system.remove(wal_archive_dir,
                                force=True,
                                recursive=True,
                                as_root=True)

    def is_root_enabled(self, context):
        """Return True if there is a superuser account enabled.
        """
        results = self.build_admin().query(
            pgsql_query.UserQuery.list_root(),
            timeout=30,
        )

        # There should be only one superuser (Trove's administrative account).
        return len(results) > 1 or (results[0][0] != self.ADMIN_USER)

    def enable_root(self, context, root_password=None):
        """Create a superuser user or reset the superuser password.

        The default PostgreSQL administration account is 'postgres'.
        This account always exists and cannot be removed.
        Its attributes and access can however be altered.

        Clients can connect from the localhost or remotely via TCP/IP:

        Local clients (e.g. psql) can connect from a preset *system* account
        called 'postgres'.
        This system account has no password and is *locked* by default,
        so that it can be used by *local* users only.
        It should *never* be enabled (or its password set)!!!
        That would just open up a new attack vector on the system account.

        Remote clients should use a built-in *database* account of the same
        name. Its password can be changed using the "ALTER USER" statement.

        Access to this account is disabled by Trove and exposed only once
        superuser access is requested.
        Trove itself creates its own administrative account.

            {"_name": "postgres", "_password": "******"}
        """
        user = self.build_root_user(root_password)
        self.build_admin().alter_user(context, user, None,
                                      *PgSqlAdmin.ADMIN_OPTIONS)
        return user.serialize()

    def build_root_user(self, password=None):
        return models.PostgreSQLUser.root(password=password)

    def pg_start_backup(self, backup_label):
        r = self.build_admin().query("SELECT pg_start_backup('%s', true)" %
                                     backup_label)
        return r[0][0]

    def pg_xlogfile_name(self, start_segment):
        version = int(self.pg_version[1].split('.')[0])
        if version < 10:
            query = "SELECT pg_xlogfile_name('%s')"
        else:
            query = "SELECT pg_walfile_name('%s')"
        r = self.build_admin().query(query % start_segment)
        return r[0][0]

    def pg_stop_backup(self):
        r = self.build_admin().query("SELECT pg_stop_backup()")
        return r[0][0]

    def disable_root(self, context):
        """Generate a new random password for the public superuser account.
        Do not disable its access rights. Once enabled the account should
        stay that way.
        """
        self.enable_root(context)

    def enable_root_with_password(self, context, root_password=None):
        return self.enable_root(context, root_password)

    @property
    def wal_archive_location(self):
        return cfg.get_configuration_property('wal_archive_location')

    @property
    def backup_strategy(self):
        return cfg.get_configuration_property('backup_strategy')

    def save_files_pre_upgrade(self, mount_point):
        LOG.debug('Saving files pre-upgrade.')
        mnt_etc_dir = os.path.join(mount_point, 'save_etc')
        if self.OS not in [operating_system.REDHAT]:
            # No need to store the config files away for Redhat because
            # they are already stored in the data volume.
            operating_system.remove(mnt_etc_dir, force=True, as_root=True)
            operating_system.copy(self.pgsql_config_dir,
                                  mnt_etc_dir,
                                  preserve=True,
                                  recursive=True,
                                  as_root=True)
        return {'save_etc': mnt_etc_dir}

    def restore_files_post_upgrade(self, upgrade_info):
        LOG.debug('Restoring files post-upgrade.')
        if self.OS not in [operating_system.REDHAT]:
            # No need to restore the config files for Redhat because
            # they are already in the data volume.
            operating_system.copy('%s/.' % upgrade_info['save_etc'],
                                  self.pgsql_config_dir,
                                  preserve=True,
                                  recursive=True,
                                  force=True,
                                  as_root=True)
            operating_system.remove(upgrade_info['save_etc'],
                                    force=True,
                                    as_root=True)
        self.configuration_manager.refresh_cache()
        self.status.set_ready()
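
The xlog wrappers above (pg_current_xlog_location, pg_last_xlog_replay_location
and pg_xlogfile_name) all branch on the major version because PostgreSQL 10
renamed the xlog administration functions. A minimal standalone sketch of that
mapping, using the query strings from the methods above (the pick_query helper
itself is illustrative and not part of the guestagent):

_XLOG_QUERIES = {
    'current_location': ("SELECT pg_current_xlog_location()",
                         "SELECT pg_current_wal_lsn()"),
    'last_replay_location': ("SELECT pg_last_xlog_replay_location()",
                             "SELECT pg_last_wal_replay_lsn()"),
    'xlogfile_name': ("SELECT pg_xlogfile_name('%s')",
                      "SELECT pg_walfile_name('%s')"),
}


def pick_query(name, pg_major_version):
    """Return the pre-10 or the 10+ form of a WAL admin query."""
    old, new = _XLOG_QUERIES[name]
    return old if int(pg_major_version) < 10 else new


assert pick_query('current_location', 9) == "SELECT pg_current_xlog_location()"
assert pick_query('xlogfile_name', 11) == "SELECT pg_walfile_name('%s')"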
Exemple #12
0
class VerticaApp(object):
    """Prepares DBaaS on a Guest container."""

    def __init__(self, status):
        self.state_change_wait_time = CONF.state_change_wait_time
        self.status = status
        revision_dir = \
            guestagent_utils.build_file_path(
                os.path.join(MOUNT_POINT,
                             os.path.dirname(system.VERTICA_ADMIN)),
                ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)

        if not operating_system.exists(FAKE_CFG):
            operating_system.write_file(FAKE_CFG, '', as_root=True)
            operating_system.chown(FAKE_CFG, system.VERTICA_ADMIN,
                                   system.VERTICA_ADMIN_GRP, as_root=True)
            operating_system.chmod(FAKE_CFG, FileMode.ADD_GRP_RX_OTH_RX(),
                                   as_root=True)
        self.configuration_manager = \
            ConfigurationManager(FAKE_CFG, system.VERTICA_ADMIN,
                                 system.VERTICA_ADMIN_GRP,
                                 PropertiesCodec(delimiter='='),
                                 requires_root=True,
                                 override_strategy=ImportOverrideStrategy(
                                     revision_dir, "cnf"))

    def update_overrides(self, context, overrides, remove=False):
        if overrides:
            self.apply_overrides(overrides)

    def remove_overrides(self):
        config = self.configuration_manager.get_user_override()
        self._reset_config(config)
        self.configuration_manager.remove_user_override()

    def apply_overrides(self, overrides):
        self.configuration_manager.apply_user_override(overrides)
        self._apply_config(overrides)

    def _reset_config(self, config):
        try:
            db_password = self._get_database_password()
            for k, v in config.items():
                alter_db_cmd = system.ALTER_DB_RESET_CFG % (DB_NAME, str(k))
                out, err = system.exec_vsql_command(db_password, alter_db_cmd)
                if err:
                    if err.is_warning():
                        LOG.warning(err)
                    else:
                        LOG.error(err)
                        raise RuntimeError(_("Failed to remove config %s") % k)

        except Exception:
            LOG.exception(_("Vertica configuration remove failed."))
            raise RuntimeError(_("Vertica configuration remove failed."))
        LOG.info(_("Vertica configuration reset completed."))

    def _apply_config(self, config):
        try:
            db_password = self._get_database_password()
            for k, v in config.items():
                alter_db_cmd = system.ALTER_DB_CFG % (DB_NAME, str(k), str(v))
                out, err = system.exec_vsql_command(db_password, alter_db_cmd)
                if err:
                    if err.is_warning():
                        LOG.warning(err)
                    else:
                        LOG.error(err)
                        raise RuntimeError(_("Failed to apply config %s") % k)

        except Exception:
            LOG.exception(_("Vertica configuration apply failed"))
            raise RuntimeError(_("Vertica configuration apply failed"))
        LOG.info(_("Vertica config apply completed."))

    def _enable_db_on_boot(self):
        try:
            command = ["sudo", "su", "-", system.VERTICA_ADMIN, "-c",
                       (system.SET_RESTART_POLICY % (DB_NAME, "always"))]
            subprocess.Popen(command)
            command = ["sudo", "su", "-", "root", "-c",
                       (system.VERTICA_AGENT_SERVICE_COMMAND % "enable")]
            subprocess.Popen(command)
        except Exception:
            LOG.exception(_("Failed to enable db on boot."))
            raise RuntimeError("Could not enable db on boot.")

    def _disable_db_on_boot(self):
        try:
            command = (system.SET_RESTART_POLICY % (DB_NAME, "never"))
            system.shell_execute(command, system.VERTICA_ADMIN)
            command = (system.VERTICA_AGENT_SERVICE_COMMAND % "disable")
            system.shell_execute(command)
        except exception.ProcessExecutionError:
            LOG.exception(_("Failed to disable db on boot."))
            raise RuntimeError("Could not disable db on boot.")

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        """Stop the database."""
        LOG.info(_("Stopping Vertica."))
        if do_not_start_on_reboot:
            self._disable_db_on_boot()

        try:
            # Stop vertica-agent service
            command = (system.VERTICA_AGENT_SERVICE_COMMAND % "stop")
            system.shell_execute(command)
            # Using Vertica adminTools to stop db.
            db_password = self._get_database_password()
            stop_db_command = (system.STOP_DB % (DB_NAME, db_password))
            out, err = system.shell_execute(system.STATUS_ACTIVE_DB,
                                            system.VERTICA_ADMIN)
            if out.strip() == DB_NAME:
                system.shell_execute(stop_db_command, system.VERTICA_ADMIN)
                if not self.status._is_restarting:
                    if not self.status.wait_for_real_status_to_change_to(
                            rd_instance.ServiceStatuses.SHUTDOWN,
                            self.state_change_wait_time, update_db):
                        LOG.error(_("Could not stop Vertica."))
                        self.status.end_restart()
                        raise RuntimeError("Could not stop Vertica!")
                LOG.debug("Database stopped.")
            else:
                LOG.debug("Database is not running.")
        except exception.ProcessExecutionError:
            LOG.exception(_("Failed to stop database."))
            raise RuntimeError("Could not stop database.")

    def start_db(self, update_db=False):
        """Start the database."""
        LOG.info(_("Starting Vertica."))
        try:
            self._enable_db_on_boot()
            # Start vertica-agent service
            command = ["sudo", "su", "-", "root", "-c",
                       (system.VERTICA_AGENT_SERVICE_COMMAND % "start")]
            subprocess.Popen(command)
            # Using Vertica adminTools to start db.
            db_password = self._get_database_password()
            start_db_command = ["sudo", "su", "-", system.VERTICA_ADMIN, "-c",
                                (system.START_DB % (DB_NAME, db_password))]
            subprocess.Popen(start_db_command)
            if not self.status._is_restarting:
                self.status.end_restart()
            LOG.debug("Database started.")
        except Exception as e:
            raise RuntimeError(_("Could not start Vertica due to %s") % e)

    def start_db_with_conf_changes(self, config_contents):
        """
         Currently all that this method does is to start Vertica. This method
         needs to be implemented to enable volume resize on guestagent side.
        """
        LOG.info(_("Starting Vertica with configuration changes."))
        if self.status.is_running:
            msg = 'Cannot start_db_with_conf_changes because status is %s.'
            LOG.debug(msg, self.status)
            raise RuntimeError(msg % self.status)
        LOG.info(_("Initiating config."))
        self.configuration_manager.save_configuration(config_contents)
        self.start_db(True)

    def restart(self):
        """Restart the database."""
        try:
            self.status.begin_restart()
            self.stop_db()
            self.start_db()
        finally:
            self.status.end_restart()

    def add_db_to_node(self, members=netutils.get_my_ipv4()):
        """Add db to host with admintools"""
        LOG.info(_("Calling admintools to add DB to host"))
        try:
            # Add the database to this host after install
            db_password = self._get_database_password()
            create_db_command = (system.ADD_DB_TO_NODE % (members,
                                                          DB_NAME,
                                                          db_password))
            system.shell_execute(create_db_command, "dbadmin")
        except exception.ProcessExecutionError:
            # Give Vertica some time to bring the node up; it won't be
            # available by the time adminTools -t db_add_node completes.
            LOG.info(_("adminTools failed as expected - wait for node"))
        self.wait_for_node_status()
        LOG.info(_("Vertica add db to host completed."))

    def remove_db_from_node(self, members=netutils.get_my_ipv4()):
        """Remove db from node with admintools"""
        LOG.info(_("Removing db from node"))
        try:
            # Remove the database from this node.
            db_password = self._get_database_password()
            remove_db_command = (system.REMOVE_DB_FROM_NODE % (members,
                                                               DB_NAME,
                                                               db_password))
            system.shell_execute(remove_db_command, "dbadmin")
        except exception.ProcessExecutionError:
            # Give Vertica some time to take the node down; it won't be done
            # by the time the adminTools call completes.
            LOG.info(_("adminTools failed as expected - wait for node"))

        # Give Vertica some time to take the node down - it won't be done
        # by the time the adminTools call completes.
        self.wait_for_node_status()
        LOG.info(_("Vertica remove host from db completed."))

    def create_db(self, members=netutils.get_my_ipv4()):
        """Prepare the guest machine with a Vertica db creation."""
        LOG.info(_("Creating database on Vertica host."))
        try:
            # Create db after install
            db_password = self._get_database_password()
            create_db_command = (system.CREATE_DB % (members, DB_NAME,
                                                     MOUNT_POINT, MOUNT_POINT,
                                                     db_password))
            system.shell_execute(create_db_command, system.VERTICA_ADMIN)
        except Exception:
            LOG.exception(_("Vertica database create failed."))
            raise RuntimeError(_("Vertica database create failed."))
        LOG.info(_("Vertica database create completed."))

    def install_vertica(self, members=netutils.get_my_ipv4()):
        """Prepare the guest machine with a Vertica db creation."""
        LOG.info(_("Installing Vertica Server."))
        try:
            # Install Vertica across the given members.
            install_vertica_cmd = (system.INSTALL_VERTICA % (members,
                                                             MOUNT_POINT))
            system.shell_execute(install_vertica_cmd)
        except exception.ProcessExecutionError:
            LOG.exception(_("install_vertica failed."))
            raise RuntimeError(_("install_vertica failed."))
        self._generate_database_password()
        LOG.info(_("install_vertica completed."))

    def update_vertica(self, command, members=netutils.get_my_ipv4()):
        LOG.info(_("Calling update_vertica with command %s") % command)
        try:
            update_vertica_cmd = (system.UPDATE_VERTICA % (command, members,
                                                           MOUNT_POINT))
            system.shell_execute(update_vertica_cmd)
        except exception.ProcessExecutionError:
            LOG.exception(_("update_vertica failed."))
            raise RuntimeError(_("update_vertica failed."))
        # self._generate_database_password()
        LOG.info(_("update_vertica completed."))

    def add_udls(self):
        """Load the user defined load libraries into the database."""
        LOG.info(_("Adding configured user defined load libraries."))
        password = self._get_database_password()
        loaded_udls = []
        for lib in system.UDL_LIBS:
            func_name = lib['func_name']
            lib_name = lib['lib_name']
            language = lib['language']
            factory = lib['factory']
            path = lib['path']
            if os.path.isfile(path):
                LOG.debug("Adding the %s library as %s." %
                          (func_name, lib_name))
                out, err = system.exec_vsql_command(
                    password,
                    system.CREATE_LIBRARY % (lib_name, path)
                )
                if err:
                    if err.is_warning():
                        LOG.warning(err)
                    else:
                        LOG.error(err)
                        raise RuntimeError(_("Failed to create library %s.")
                                           % lib_name)
                out, err = system.exec_vsql_command(
                    password,
                    system.CREATE_SOURCE % (func_name, language,
                                            factory, lib_name)
                )
                if err:
                    if err.is_warning():
                        LOG.warning(err)
                    else:
                        LOG.error(err)
                        raise RuntimeError(_("Failed to create source %s.")
                                           % func_name)
                loaded_udls.append(func_name)
            else:
                LOG.warning("Skipping %s as path %s not found." %
                            (func_name, path))
        LOG.info(_("The following UDL functions are available for use: %s")
                 % loaded_udls)

    def _generate_database_password(self):
        """Generate and write the password to vertica.cnf file."""
        config = configparser.ConfigParser()
        config.add_section('credentials')
        config.set('credentials', 'dbadmin_password',
                   utils.generate_random_password())
        self.write_config(config)

    def write_config(self, config,
                     unlink_function=os.unlink,
                     temp_function=tempfile.NamedTemporaryFile):
        """Write the configuration contents to vertica.cnf file."""
        LOG.debug('Defining config holder at %s.' % system.VERTICA_CONF)
        tempfile = temp_function(delete=False)
        try:
            config.write(tempfile)
            tempfile.close()
            command = (("install -o root -g root -m 644 %(source)s %(target)s"
                        ) % {'source': tempfile.name,
                             'target': system.VERTICA_CONF})
            system.shell_execute(command)
            unlink_function(tempfile.name)
        except Exception:
            unlink_function(tempfile.name)
            raise

    def read_config(self):
        """Reads and returns the Vertica config."""
        try:
            config = configparser.ConfigParser()
            config.read(system.VERTICA_CONF)
            return config
        except Exception:
            LOG.exception(_("Failed to read config %s.") % system.VERTICA_CONF)
            raise RuntimeError

    def _get_database_password(self):
        """Read the password from vertica.cnf file and return it."""
        return self.read_config().get('credentials', 'dbadmin_password')

    def install_if_needed(self, packages):
        """Install Vertica package if needed."""
        LOG.info(_("Preparing Guest as Vertica Server."))
        if not packager.pkg_is_installed(packages):
            LOG.debug("Installing Vertica Package.")
            packager.pkg_install(packages, None, system.INSTALL_TIMEOUT)

    def _set_readahead_for_disks(self):
        """This method sets readhead size for disks as needed by Vertica."""
        device = volume.VolumeDevice(CONF.device_path)
        device.set_readahead_size(CONF.vertica.readahead_size)
        LOG.debug("Set readhead size as required by Vertica.")

    def prepare_for_install_vertica(self):
        """This method executes preparatory methods before
        executing install_vertica.
        """
        command = ("VERT_DBA_USR=%s VERT_DBA_HOME=/home/dbadmin "
                   "VERT_DBA_GRP=%s /opt/vertica/oss/python/bin/python"
                   " -m vertica.local_coerce" %
                   (system.VERTICA_ADMIN, system.VERTICA_ADMIN_GRP))
        try:
            self._set_readahead_for_disks()
            system.shell_execute(command)
        except exception.ProcessExecutionError:
            LOG.exception(_("Failed to prepare for install_vertica."))
            raise

    def mark_design_ksafe(self, k):
        """Wrapper for mark_design_ksafe function for setting k-safety """
        LOG.info(_("Setting Vertica k-safety to %s") % str(k))
        out, err = system.exec_vsql_command(self._get_database_password(),
                                            system.MARK_DESIGN_KSAFE % k)
        # Only fail if we get an ERROR as opposed to a warning complaining
        # about setting k = 0
        if "ERROR" in err:
            LOG.error(err)
            raise RuntimeError(_("Failed to set k-safety level %s.") % k)

    def _create_user(self, username, password, role=None):
        """Creates a user, granting and enabling the given role for it."""
        LOG.info(_("Creating user in Vertica database."))
        out, err = system.exec_vsql_command(self._get_database_password(),
                                            system.CREATE_USER %
                                            (username, password))
        if err:
            if err.is_warning():
                LOG.warning(err)
            else:
                LOG.error(err)
                raise RuntimeError(_("Failed to create user %s.") % username)
        if role:
            self._grant_role(username, role)

    def _grant_role(self, username, role):
        """Grants a role to the user on the schema."""
        out, err = system.exec_vsql_command(self._get_database_password(),
                                            system.GRANT_TO_USER
                                            % (role, username))
        if err:
            if err.is_warning():
                LOG.warning(err)
            else:
                LOG.error(err)
                raise RuntimeError(_("Failed to grant role %(r)s to user "
                                     "%(u)s.")
                                   % {'r': role, 'u': username})
        out, err = system.exec_vsql_command(self._get_database_password(),
                                            system.ENABLE_FOR_USER
                                            % (username, role))
        if err:
            LOG.warning(err)

    def enable_root(self, root_password=None):
        """Resets the root password."""
        LOG.info(_LI("Enabling root."))
        user = models.RootUser()
        user.name = "root"
        user.host = "%"
        user.password = root_password or utils.generate_random_password()
        if not self.is_root_enabled():
            self._create_user(user.name, user.password, 'pseudosuperuser')
        else:
            LOG.debug("Updating %s password." % user.name)
            try:
                out, err = system.exec_vsql_command(
                    self._get_database_password(),
                    system.ALTER_USER_PASSWORD % (user.name, user.password))
                if err:
                    if err.is_warning():
                        LOG.warning(err)
                    else:
                        LOG.error(err)
                        raise RuntimeError(_("Failed to update %s "
                                             "password.") % user.name)
            except exception.ProcessExecutionError:
                LOG.error(_("Failed to update %s password.") % user.name)
                raise RuntimeError(_("Failed to update %s password.")
                                   % user.name)
        return user.serialize()

    def is_root_enabled(self):
        """Return True if root access is enabled else False."""
        LOG.debug("Checking is root enabled.")
        try:
            out, err = system.shell_execute(system.USER_EXISTS %
                                            (self._get_database_password(),
                                             'root'), system.VERTICA_ADMIN)
            if err:
                LOG.error(err)
                raise RuntimeError(_("Failed to query for root user."))
        except exception.ProcessExecutionError:
            raise RuntimeError(_("Failed to query for root user."))
        return out.rstrip() == "1"

    def get_public_keys(self, user):
        """Generates key (if not found), and sends public key for user."""
        LOG.debug("Public keys requested for user: %s." % user)
        user_home_directory = os.path.expanduser('~' + user)
        public_key_file_name = user_home_directory + '/.ssh/id_rsa.pub'

        try:
            key_generate_command = (system.SSH_KEY_GEN % user_home_directory)
            system.shell_execute(key_generate_command, user)
        except exception.ProcessExecutionError:
            LOG.debug("Cannot generate key.")

        try:
            read_key_cmd = ("cat %(file)s" % {'file': public_key_file_name})
            out, err = system.shell_execute(read_key_cmd)
        except exception.ProcessExecutionError:
            LOG.exception(_("Cannot read public key."))
            raise
        return out.strip()

    def authorize_public_keys(self, user, public_keys):
        """Adds public key to authorized_keys for user."""
        LOG.debug("public keys to be added for user: %s." % (user))
        user_home_directory = os.path.expanduser('~' + user)
        authorized_file_name = user_home_directory + '/.ssh/authorized_keys'

        try:
            read_key_cmd = ("cat %(file)s" % {'file': authorized_file_name})
            out, err = system.shell_execute(read_key_cmd)
            public_keys.append(out.strip())
        except exception.ProcessExecutionError:
            LOG.debug("Cannot read authorized_keys.")
        all_keys = '\n'.join(public_keys) + "\n"

        try:
            with tempfile.NamedTemporaryFile(mode='w', delete=False) as tempkeyfile:
                tempkeyfile.write(all_keys)
            copy_key_cmd = (("install -o %(user)s -m 600 %(source)s %(target)s"
                             ) % {'user': user, 'source': tempkeyfile.name,
                                  'target': authorized_file_name})
            system.shell_execute(copy_key_cmd)
            os.remove(tempkeyfile.name)
        except exception.ProcessExecutionError:
            LOG.exception(_("Cannot install public keys."))
            os.remove(tempkeyfile.name)
            raise

    def _export_conf_to_members(self, members):
        """This method exports conf files to other members."""
        try:
            for member in members:
                COPY_CMD = (system.SEND_CONF_TO_SERVER % (system.VERTICA_CONF,
                                                          member,
                                                          system.VERTICA_CONF))
                system.shell_execute(COPY_CMD)
        except exception.ProcessExecutionError:
            LOG.exception(_("Cannot export configuration."))
            raise

    def install_cluster(self, members):
        """Installs & configures cluster."""
        cluster_members = ','.join(members)
        LOG.debug("Installing cluster with members: %s." % cluster_members)
        self.install_vertica(cluster_members)
        self._export_conf_to_members(members)
        LOG.debug("Creating database with members: %s." % cluster_members)
        self.create_db(cluster_members)
        LOG.debug("Cluster configured on members: %s." % cluster_members)

    def grow_cluster(self, members):
        """Adds nodes to cluster."""
        cluster_members = ','.join(members)
        LOG.debug("Growing cluster with members: %s." % cluster_members)
        self.update_vertica("--add-hosts", cluster_members)
        self._export_conf_to_members(members)
        LOG.debug("Creating database with members: %s." % cluster_members)
        self.add_db_to_node(cluster_members)
        LOG.debug("Cluster configured on members: %s." % cluster_members)

    def shrink_cluster(self, members):
        """Removes nodes from cluster."""
        cluster_members = ','.join(members)
        LOG.debug("Shrinking cluster with members: %s." % cluster_members)
        self.remove_db_from_node(cluster_members)
        self.update_vertica("--remove-hosts", cluster_members)

    def wait_for_node_status(self, status='UP'):
        """Wait until all nodes are the same status"""
        # select node_state from nodes where node_state <> 'UP'
        def _wait_for_node_status():
            out, err = system.exec_vsql_command(self._get_database_password(),
                                                system.NODE_STATUS % status)
            LOG.debug("Polled vertica node states: %s" % out)

            if err:
                LOG.error(err)
                raise RuntimeError(_("Failed to query for root user."))

            return "0 rows" in out

        try:
            utils.poll_until(_wait_for_node_status, time_out=600,
                             sleep_time=15)
        except exception.PollTimeOut:
            raise RuntimeError(_("Timed out waiting for cluster to"
                                 "change to status %s") % status)
Exemple #13
0
class RabbitmqApp(object):
    """
    Handles installation and configuration of the rabbitmq
    in trove.
    """
    def __init__(self, state_change_wait_time=None):
        """
        Set default status and state_change_wait_time.
        """
        if state_change_wait_time:
            self.state_change_wait_time = state_change_wait_time
        else:
            self.state_change_wait_time = CONF.state_change_wait_time

        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(system.RABBITMQ_CONFIG),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        config_value_mappings = {'yes': True, 'no': False, "''": None}
        self._value_converter = StringConverter(config_value_mappings)
        self.configuration_manager = ConfigurationManager(
            system.RABBITMQ_CONFIG,
            system.RABBITMQ_OWNER,
            system.RABBITMQ_OWNER,
            PropertiesCodec(unpack_singletons=False,
                            string_mappings=config_value_mappings),
            requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))

        self.admin = self._build_admin_client()
        self.status = RabbitmqStatus(self.admin)

    def _build_admin_client(self):
        api_url = self.get_configuration_property('api_url')
        username = self.get_configuration_property('username')
        password = self.get_configuration_property('requirepass')

        return RabbitmqAdmin(api_url=api_url,
                             username=username,
                             password=password)

    def install_if_needed(self, packages):
        """
        Install rabbitmq if needed; do nothing if it is already installed.
        """
        pass

    def start_db_with_conf_changes(self, config_contents):
        LOG.info(_('Starting rabbitmq with conf changes.'))
        if self.status.is_running:
            msg = 'Cannot start_db_with_conf_changes because status is %s.'
            LOG.debug(msg, self.status)
            raise RuntimeError(msg % self.status)
        LOG.info(_("Initiating config."))
        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration()
        self.start_db(True)

    def start_db(self, enable_on_boot=True, update_db=False):
        self.status.start_db_service(system.SERVICE_CANDIDATES,
                                     CONF.state_change_wait_time,
                                     enable_on_boot=enable_on_boot,
                                     update_db=update_db)

    def apply_initial_guestagent_configuration(self):
        """Update guestagent-controlled configuration properties.
        """

        # Hide the 'CONFIG' command from end users by mangling its name.
        self.admin.set_config_command_name(self._mangle_config_command_name())

        self.configuration_manager.apply_system_override({
            'daemonize': 'yes',
            'protected-mode': 'no',
            'supervised': 'systemd',
            'pidfile': system.RABBITMQ_PID_FILE,
            'logfile': system.RABBITMQ_LOG_FILE,
            'dir': system.RABBITMQ_DATA_DIR
        })

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        self.status.stop_db_service(system.SERVICE_CANDIDATES,
                                    self.state_change_wait_time,
                                    disable_on_boot=do_not_start_on_reboot,
                                    update_db=update_db)

    def restart(self):
        self.status.restart_db_service(system.SERVICE_CANDIDATES,
                                       self.state_change_wait_time)

    def get_config_command_name(self):
        """Get current name of the 'CONFIG' command.
        """
        renamed_cmds = self.configuration_manager.get_value('rename-command')
        for name_pair in renamed_cmds:
            if name_pair[0] == 'CONFIG':
                return name_pair[1]

        return None

    def _mangle_config_command_name(self):
        """Hide the 'CONFIG' command from the clients by renaming it to a
        random string known only to the guestagent.
        Return the mangled name.
        """
        mangled = utils.generate_random_password()
        self._rename_command('CONFIG', mangled)
        return mangled

    def _rename_command(self, old_name, new_name):
        """It is possible to completely disable a command by renaming it
        to an empty string.
        """
        self.configuration_manager.apply_system_override(
            {'rename-command': [old_name, new_name]})

    def update_overrides(self, overrides):
        if overrides:
            self.configuration_manager.apply_user_override(overrides)

    def get_configuration_property(self, name, default=None):
        """Return the value of a Rabbitmq configuration property.
        Returns a single value for single-argument properties or
        a list otherwise.
        """
        return utils.unpack_singleton(
            self.configuration_manager.get_value(name, default))

    def is_cluster_enabled(self):
        pass

    def enable_cluster(self):
        pass

    def get_cluster_config_filename(self):
        pass

    def cluster_addslots(self):
        pass

    def get_node_ip(self):
        pass

    def get_node_id_for_removal(self):
        pass

    def remove_node(self, node_ids):
        pass
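
Both this class and the Redis examples below look up the mangled 'CONFIG'
command through the configuration manager. A small standalone sketch of that
lookup, operating on the nested-list shape a PropertiesCodec with
unpack_singletons=False would return for repeated 'rename-command' directives
(the sample data is purely illustrative):

def find_renamed_command(renamed_cmds, original='CONFIG'):
    """Return the mangled name for *original*, or None if it was not renamed."""
    for name_pair in renamed_cmds or []:
        if name_pair[0] == original:
            return name_pair[1]
    return None


sample = [['CONFIG', 's3cr3t-name'], ['FLUSHALL', '']]
assert find_renamed_command(sample) == 's3cr3t-name'
assert find_renamed_command(sample, 'SHUTDOWN') is None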
Exemple #14
0
class RedisApp(object):
    """
    Handles installation and configuration of redis
    on a trove instance.
    """

    @classmethod
    def _init_overrides_dir(cls):
        """Initialize a directory for configuration overrides.
        """
        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(system.REDIS_CONFIG),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)

        if not os.path.exists(revision_dir):
            operating_system.create_directory(
                revision_dir,
                user=system.REDIS_OWNER, group=system.REDIS_OWNER,
                force=True, as_root=True)

        return revision_dir

    def __init__(self, state_change_wait_time=None):
        """
        Sets default status and state_change_wait_time
        """
        if state_change_wait_time:
            self.state_change_wait_time = state_change_wait_time
        else:
            self.state_change_wait_time = CONF.state_change_wait_time

        revision_dir = self._init_overrides_dir()
        config_value_mappings = {'yes': True, 'no': False, "''": None}
        self._value_converter = StringConverter(config_value_mappings)
        self.configuration_manager = ConfigurationManager(
            system.REDIS_CONFIG,
            system.REDIS_OWNER, system.REDIS_OWNER,
            PropertiesCodec(
                unpack_singletons=False,
                string_mappings=config_value_mappings
            ), requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))

        self.admin = self._build_admin_client()
        self.status = RedisAppStatus(self.admin)

    def _build_admin_client(self):
        password = self.get_configuration_property('requirepass')
        socket = self.get_configuration_property('unixsocket')

        return RedisAdmin(password=password, unix_socket_path=socket)

    def install_if_needed(self, packages):
        """
        Install redis if needed; do nothing if it is already installed.
        """
        LOG.info(_('Preparing Guest as Redis Server.'))
        if not packager.pkg_is_installed(packages):
            LOG.info(_('Installing Redis.'))
            self._install_redis(packages)
        LOG.info(_('Redis installed completely.'))

    def complete_install_or_restart(self):
        """
        finalize status updates for install or restart.
        """
        LOG.debug("Complete install or restart called.")
        self.status.end_install_or_restart()

    def _install_redis(self, packages):
        """
        Install the redis server.
        """
        LOG.debug('Installing redis server.')
        msg = "Creating %s." % system.REDIS_CONF_DIR
        LOG.debug(msg)
        operating_system.create_directory(system.REDIS_CONF_DIR, as_root=True)
        pkg_opts = {}
        packager.pkg_install(packages, pkg_opts, TIME_OUT)
        self.start_redis()
        LOG.debug('Finished installing redis server.')

    def _enable_redis_on_boot(self):
        """
        Enables redis on boot.
        """
        LOG.info(_('Enabling Redis on boot.'))
        operating_system.enable_service_on_boot(system.SERVICE_CANDIDATES)

    def _disable_redis_on_boot(self):
        """
        Disables redis on boot.
        """
        LOG.info(_("Disabling Redis on boot."))
        operating_system.disable_service_on_boot(system.SERVICE_CANDIDATES)

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        """
        Stops the redis application on the trove instance.
        """
        LOG.info(_('Stopping redis.'))
        if do_not_start_on_reboot:
            self._disable_redis_on_boot()

        operating_system.stop_service(system.SERVICE_CANDIDATES)
        if not self.status.wait_for_real_status_to_change_to(
                rd_instance.ServiceStatuses.SHUTDOWN,
                self.state_change_wait_time, update_db):
            LOG.error(_('Could not stop Redis.'))
            self.status.end_install_or_restart()

    def restart(self):
        """
        Restarts the redis daemon.
        """
        LOG.debug("Restarting Redis daemon.")
        try:
            self.status.begin_restart()
            self.stop_db()
            self.start_redis()
        finally:
            self.status.end_install_or_restart()

    def update_overrides(self, context, overrides, remove=False):
        if overrides:
            self.configuration_manager.apply_user_override(overrides)

    def apply_overrides(self, client, overrides):
        """Use the 'CONFIG SET' command to apply configuration at runtime.

        Commands that appear multiple times have values separated by a
        white space. For instance, the following two 'save' directives from the
        configuration file...

            save 900 1
            save 300 10

        ... would be applied in a single command as:

            CONFIG SET save "900 1 300 10"

        Note that the 'CONFIG' command has been renamed to prevent
        users from using it to bypass configuration groups.
        """
        for prop_name, prop_args in overrides.items():
            args_string = self._join_lists(
                self._value_converter.to_strings(prop_args), ' ')
            client.config_set(prop_name, args_string)

    def _join_lists(self, items, sep):
        """Join list items (including items from sub-lists) into a string.
        Non-list inputs are returned unchanged.

        _join_lists('1234', ' ') = "1234"
        _join_lists(['1','2','3','4'], ' ') = "1 2 3 4"
        _join_lists([['1','2'], ['3','4']], ' ') = "1 2 3 4"
        """
        if isinstance(items, list):
            return sep.join([sep.join(e) if isinstance(e, list) else e
                             for e in items])
        return items

    def remove_overrides(self):
        self.configuration_manager.remove_user_override()

    def start_db_with_conf_changes(self, config_contents):
        LOG.info(_('Starting redis with conf changes.'))
        if self.status.is_running:
            msg = 'Cannot start_db_with_conf_changes because status is %s.'
            LOG.debug(msg, self.status)
            raise RuntimeError(msg % self.status)
        LOG.info(_("Initiating config."))
        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration()
        self.start_redis(True)

    def reset_configuration(self, configuration):
        LOG.info(_("Resetting configuration."))
        config_contents = configuration['config_contents']
        self.configuration_manager.save_configuration(config_contents)

    def start_redis(self, update_db=False):
        """
        Start the redis daemon.
        """
        LOG.info(_("Starting redis."))
        self._enable_redis_on_boot()
        operating_system.start_service(system.SERVICE_CANDIDATES)
        if not self.status.wait_for_real_status_to_change_to(
                rd_instance.ServiceStatuses.RUNNING,
                self.state_change_wait_time, update_db):
            LOG.error(_("Start up of redis failed."))
            try:
                utils.execute_with_timeout('pkill', '-9',
                                           'redis-server',
                                           run_as_root=True,
                                           root_helper='sudo')
            except exception.ProcessExecutionError:
                LOG.exception(_('Error killing stalled redis start command.'))
            self.status.end_install_or_restart()

    def apply_initial_guestagent_configuration(self):
        """Update guestagent-controlled configuration properties.
        """

        # Hide the 'CONFIG' command from end users by mangling its name.
        self.admin.set_config_command_name(self._mangle_config_command_name())

        self.configuration_manager.apply_system_override(
            {'daemonize': 'yes',
             'pidfile': system.REDIS_PID_FILE,
             'logfile': system.REDIS_LOG_FILE,
             'dir': system.REDIS_DATA_DIR})

    def get_config_command_name(self):
        """Get current name of the 'CONFIG' command.
        """
        renamed_cmds = self.configuration_manager.get_value('rename-command')
        for name_pair in renamed_cmds:
            if name_pair[0] == 'CONFIG':
                return name_pair[1]

        return None

    def _mangle_config_command_name(self):
        """Hide the 'CONFIG' command from the clients by renaming it to a
        random string known only to the guestagent.
        Return the mangled name.
        """
        mangled = utils.generate_random_password()
        self._rename_command('CONFIG', mangled)
        return mangled

    def _rename_command(self, old_name, new_name):
        """It is possible to completely disable a command by renaming it
        to an empty string.
        """
        self.configuration_manager.apply_system_override(
            {'rename-command': [old_name, new_name]})

    def get_logfile(self):
        """Specify the log file name. Also the empty string can be used to
        force Redis to log on the standard output.
        Note that if you use standard output for logging but daemonize,
        logs will be sent to /dev/null
        """
        return self.get_configuration_property('logfile')

    def get_db_filename(self):
        """The filename where to dump the DB.
        """
        return self.get_configuration_property('dbfilename')

    def get_working_dir(self):
        """The DB will be written inside this directory,
        with the filename specified by the 'dbfilename' configuration directive.
        The Append Only File will also be created inside this directory.
        """
        return self.get_configuration_property('dir')

    def get_auth_password(self):
        """Client authentication password for this instance or None if not set.
        """
        return self.get_configuration_property('requirepass')

    def is_appendonly_enabled(self):
        """True if the Append Only File (AOF) persistence mode is enabled.
        """
        return self.get_configuration_property('appendonly', False)

    def get_append_file_name(self):
        """The name of the append only file (AOF).
        """
        return self.get_configuration_property('appendfilename')

    def is_cluster_enabled(self):
        """Only nodes that are started as cluster nodes can be part of a
        Redis Cluster.
        """
        return self.get_configuration_property('cluster-enabled', False)

    def enable_cluster(self):
        """In order to start a Redis instance as a cluster node enable the
        cluster support
        """
        self.configuration_manager.apply_system_override(
            {'cluster-enabled': 'yes'}, CLUSTER_CFG)

    def get_cluster_config_filename(self):
        """Cluster node configuration file.
        """
        return self.get_configuration_property('cluster-config-file')

    def set_cluster_config_filename(self, name):
        """Make sure that instances running in the same system do not have
        overlapping cluster configuration file names.
        """
        self.configuration_manager.apply_system_override(
            {'cluster-config-file': name}, CLUSTER_CFG)

    def get_cluster_node_timeout(self):
        """Cluster node timeout is the amount of milliseconds a node must be
        unreachable for it to be considered in failure state.
        """
        return self.get_configuration_property('cluster-node-timeout')

    def get_configuration_property(self, name, default=None):
        """Return the value of a Redis configuration property.
        Returns a single value for single-argument properties or
        a list otherwise.
        """
        return utils.unpack_singleton(
            self.configuration_manager.get_value(name, default))
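
As a runnable illustration of how apply_overrides() above turns a parsed
override into a single 'CONFIG SET' value, the following sketch reuses the
_join_lists logic with the 'save' example from its docstring (the sample
override and the final client call are illustrative; the real code also runs
the values through its StringConverter first):

def _join_lists(items, sep):
    """Join list items (including items from sub-lists) into a string."""
    if isinstance(items, list):
        return sep.join([sep.join(e) if isinstance(e, list) else e
                         for e in items])
    return items


overrides = {'save': [['900', '1'], ['300', '10']]}
for prop_name, prop_args in overrides.items():
    args_string = _join_lists(prop_args, ' ')
    # A real client would now issue: client.config_set(prop_name, args_string)
    assert (prop_name, args_string) == ('save', '900 1 300 10')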
Exemple #15
0
class RedisApp(object):
    """
    Handles installation and configuration of redis
    on a trove instance.
    """

    def __init__(self, state_change_wait_time=None):
        """
        Sets default status and state_change_wait_time
        """
        if state_change_wait_time:
            self.state_change_wait_time = state_change_wait_time
        else:
            self.state_change_wait_time = CONF.state_change_wait_time

        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(system.REDIS_CONFIG),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        config_value_mappings = {'yes': True, 'no': False, "''": None}
        self._value_converter = StringConverter(config_value_mappings)
        self.configuration_manager = ConfigurationManager(
            system.REDIS_CONFIG,
            system.REDIS_OWNER, system.REDIS_OWNER,
            PropertiesCodec(
                unpack_singletons=False,
                string_mappings=config_value_mappings
            ), requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))

        self.admin = self._build_admin_client()
        self.status = RedisAppStatus(self.admin)

    def _build_admin_client(self):
        password = self.get_configuration_property('requirepass')
        socket = self.get_configuration_property('unixsocket')

        return RedisAdmin(password=password, unix_socket_path=socket)

    def install_if_needed(self, packages):
        """
        Install redis if needed; do nothing if it is already installed.
        """
        LOG.info(_('Preparing Guest as Redis Server.'))
        if not packager.pkg_is_installed(packages):
            LOG.info(_('Installing Redis.'))
            self._install_redis(packages)
        LOG.info(_('Redis installed completely.'))

    def _install_redis(self, packages):
        """
        Install the redis server.
        """
        LOG.debug('Installing redis server.')
        msg = "Creating %s." % system.REDIS_CONF_DIR
        LOG.debug(msg)
        operating_system.create_directory(system.REDIS_CONF_DIR, as_root=True)
        pkg_opts = {}
        packager.pkg_install(packages, pkg_opts, TIME_OUT)
        self.start_db()
        LOG.debug('Finished installing redis server.')

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        self.status.stop_db_service(
            system.SERVICE_CANDIDATES, self.state_change_wait_time,
            disable_on_boot=do_not_start_on_reboot, update_db=update_db)

    def restart(self):
        self.status.restart_db_service(
            system.SERVICE_CANDIDATES, self.state_change_wait_time)

    def update_overrides(self, context, overrides, remove=False):
        if overrides:
            self.configuration_manager.apply_user_override(overrides)

    def apply_overrides(self, client, overrides):
        """Use the 'CONFIG SET' command to apply configuration at runtime.

        Commands that appear multiple times have values separated by a
        white space. For instance, the following two 'save' directives from the
        configuration file...

            save 900 1
            save 300 10

        ... would be applied in a single command as:

            CONFIG SET save "900 1 300 10"

        Note that the 'CONFIG' command has been renamed to prevent
        users from using it to bypass configuration groups.
        """
        for prop_name, prop_args in overrides.items():
            args_string = self._join_lists(
                self._value_converter.to_strings(prop_args), ' ')
            client.config_set(prop_name, args_string)

    def _join_lists(self, items, sep):
        """Join list items (including items from sub-lists) into a string.
        Non-list inputs are returned unchanged.

        _join_lists('1234', ' ') = "1234"
        _join_lists(['1','2','3','4'], ' ') = "1 2 3 4"
        _join_lists([['1','2'], ['3','4']], ' ') = "1 2 3 4"
        """
        if isinstance(items, list):
            return sep.join([sep.join(e) if isinstance(e, list) else e
                             for e in items])
        return items

    def remove_overrides(self):
        self.configuration_manager.remove_user_override()

    def make_read_only(self, read_only):
        # Redis has no mechanism to make an instance read-only at present
        pass

    def start_db_with_conf_changes(self, config_contents):
        LOG.info(_('Starting redis with conf changes.'))
        if self.status.is_running:
            msg = 'Cannot start_db_with_conf_changes because status is %s.'
            LOG.debug(msg, self.status)
            raise RuntimeError(msg % self.status)
        LOG.info(_("Initiating config."))
        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration()
        self.start_db(True)

    def start_db(self, update_db=False):
        self.status.start_db_service(
            system.SERVICE_CANDIDATES, self.state_change_wait_time,
            enable_on_boot=True, update_db=update_db)

    def apply_initial_guestagent_configuration(self):
        """Update guestagent-controlled configuration properties.
        """

        # Hide the 'CONFIG' command from end users by mangling its name.
        self.admin.set_config_command_name(self._mangle_config_command_name())

        self.configuration_manager.apply_system_override(
            {'daemonize': 'yes',
             'pidfile': system.REDIS_PID_FILE,
             'logfile': system.REDIS_LOG_FILE,
             'dir': system.REDIS_DATA_DIR})

    def get_config_command_name(self):
        """Get current name of the 'CONFIG' command.
        """
        renamed_cmds = self.configuration_manager.get_value('rename-command')
        for name_pair in renamed_cmds or []:
            if name_pair[0] == 'CONFIG':
                return name_pair[1]

        return None

    def _mangle_config_command_name(self):
        """Hide the 'CONFIG' command from the clients by renaming it to a
        random string known only to the guestagent.
        Return the mangled name.
        """
        mangled = utils.generate_random_password()
        self._rename_command('CONFIG', mangled)
        return mangled

    def _rename_command(self, old_name, new_name):
        """It is possible to completely disable a command by renaming it
        to an empty string.
        """
        self.configuration_manager.apply_system_override(
            {'rename-command': [old_name, new_name]})

    def get_logfile(self):
        """Specify the log file name. Also the empty string can be used to
        force Redis to log on the standard output.
        Note that if you use standard output for logging but daemonize,
        logs will be sent to /dev/null
        """
        return self.get_configuration_property('logfile')

    def get_db_filename(self):
        """The filename where to dump the DB.
        """
        return self.get_configuration_property('dbfilename')

    def get_working_dir(self):
        """The DB will be written inside this directory,
        with the filename specified by the 'dbfilename' configuration directive.
        The Append Only File will also be created inside this directory.
        """
        return self.get_configuration_property('dir')

    def get_persistence_filepath(self):
        """Returns the full path to the persistence file."""
        return guestagent_utils.build_file_path(
            self.get_working_dir(), self.get_db_filename())

    def get_port(self):
        """Port for this instance or default if not set."""
        return self.get_configuration_property('port', system.REDIS_PORT)

    def get_auth_password(self):
        """Client authentication password for this instance or None if not set.
        """
        return self.get_configuration_property('requirepass')

    def is_appendonly_enabled(self):
        """True if the Append Only File (AOF) persistence mode is enabled.
        """
        return self.get_configuration_property('appendonly', False)

    def get_append_file_name(self):
        """The name of the append only file (AOF).
        """
        return self.get_configuration_property('appendfilename')

    def is_cluster_enabled(self):
        """Only nodes that are started as cluster nodes can be part of a
        Redis Cluster.
        """
        return self.get_configuration_property('cluster-enabled', False)

    def enable_cluster(self):
        """In order to start a Redis instance as a cluster node enable the
        cluster support
        """
        self.configuration_manager.apply_system_override(
            {'cluster-enabled': 'yes'}, CLUSTER_CFG)

    def get_cluster_config_filename(self):
        """Cluster node configuration file.
        """
        return self.get_configuration_property('cluster-config-file')

    def set_cluster_config_filename(self, name):
        """Make sure that instances running in the same system do not have
        overlapping cluster configuration file names.
        """
        self.configuration_manager.apply_system_override(
            {'cluster-config-file': name}, CLUSTER_CFG)

    def get_cluster_node_timeout(self):
        """Cluster node timeout is the amount of milliseconds a node must be
        unreachable for it to be considered in failure state.
        """
        return self.get_configuration_property('cluster-node-timeout')

    def get_configuration_property(self, name, default=None):
        """Return the value of a Redis configuration property.
        Returns a single value for single-argument properties or
        a list otherwise.
        """
        return utils.unpack_singleton(
            self.configuration_manager.get_value(name, default))

    def cluster_meet(self, ip, port):
        try:
            utils.execute_with_timeout('redis-cli', 'cluster', 'meet',
                                       ip, port)
        except exception.ProcessExecutionError:
            LOG.exception(_('Error joining node to cluster at %s.'), ip)
            raise

    def cluster_addslots(self, first_slot, last_slot):
        try:
            # Use a list (not an iterator) so the chunked slicing below works.
            slots = [str(slot) for slot in range(first_slot, last_slot + 1)]
            group_size = 200
            while slots:
                cmd = ([system.REDIS_CLI, 'cluster', 'addslots']
                       + slots[0:group_size])
                out, err = utils.execute_with_timeout(*cmd, run_as_root=True,
                                                      root_helper='sudo')
                if 'OK' not in out:
                    raise RuntimeError(_('Error executing addslots: %s')
                                       % out)
                del slots[0:group_size]
        except exception.ProcessExecutionError:
            LOG.exception(_('Error adding slots %(first_slot)s-%(last_slot)s'
                            ' to cluster.'),
                          {'first_slot': first_slot, 'last_slot': last_slot})
            raise

    def _get_node_info(self):
        try:
            out, _ = utils.execute_with_timeout('redis-cli', '--csv',
                                                'cluster', 'nodes')
            return [line.split(' ') for line in out.splitlines()]
        except exception.ProcessExecutionError:
            LOG.exception(_('Error getting node info.'))
            raise

    def _get_node_details(self):
        for node_details in self._get_node_info():
            if 'myself' in node_details[2]:
                return node_details
        raise exception.TroveError(_("Unable to determine node details"))

    def get_node_ip(self):
        """Returns [ip, port] where both values are strings"""
        return self._get_node_details()[1].split(':')

    def get_node_id_for_removal(self):
        node_details = self._get_node_details()
        node_id = node_details[0]
        my_ip = node_details[1].split(':')[0]
        try:
            slots, _ = utils.execute_with_timeout('redis-cli', '--csv',
                                                  'cluster', 'slots')
            return node_id if my_ip not in slots else None
        except exception.ProcessExecutionError:
            LOG.exception(_('Error validating node for removal.'))
            raise

    def remove_nodes(self, node_ids):
        try:
            for node_id in node_ids:
                utils.execute_with_timeout('redis-cli', 'cluster',
                                           'forget', node_id)
        except exception.ProcessExecutionError:
            LOG.exception(_('Error removing node from cluster.'))
            raise
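The value flattening described in RedisApp.apply_overrides above is easiest to see with concrete values. The snippet below is a minimal, standalone sketch (the names and the sample overrides are illustrative, not part of the Trove guestagent API) showing how two 'save' directives collapse into the single string handed to CONFIG SET.

def join_lists(items, sep):
    """Join list items (including items from sub-lists) into a string."""
    if isinstance(items, list):
        return sep.join(sep.join(e) if isinstance(e, list) else e
                        for e in items)
    return items

# Two 'save' directives from the configuration file become one value.
overrides = {'save': [['900', '1'], ['300', '10']],
             'maxmemory-policy': 'allkeys-lru'}
for prop_name, prop_args in overrides.items():
    print('CONFIG SET %s "%s"' % (prop_name, join_lists(prop_args, ' ')))
# CONFIG SET save "900 1 300 10"
# CONFIG SET maxmemory-policy "allkeys-lru"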
Exemple #16
0
class VerticaApp(object):
    """Prepares DBaaS on a Guest container."""
    def __init__(self, status):
        self.state_change_wait_time = CONF.state_change_wait_time
        self.status = status
        revision_dir = \
            guestagent_utils.build_file_path(
                os.path.join(MOUNT_POINT,
                             os.path.dirname(system.VERTICA_ADMIN)),
                ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)

        if not operating_system.exists(FAKE_CFG):
            operating_system.write_file(FAKE_CFG, '', as_root=True)
            operating_system.chown(FAKE_CFG,
                                   system.VERTICA_ADMIN,
                                   system.VERTICA_ADMIN_GRP,
                                   as_root=True)
            operating_system.chmod(FAKE_CFG,
                                   FileMode.ADD_GRP_RX_OTH_RX(),
                                   as_root=True)
        self.configuration_manager = \
            ConfigurationManager(FAKE_CFG, system.VERTICA_ADMIN,
                                 system.VERTICA_ADMIN_GRP,
                                 PropertiesCodec(delimiter='='),
                                 requires_root=True,
                                 override_strategy=ImportOverrideStrategy(
                                     revision_dir, "cnf"))

    def update_overrides(self, context, overrides, remove=False):
        if overrides:
            self.apply_overrides(overrides)

    def remove_overrides(self):
        config = self.configuration_manager.get_user_override()
        self._reset_config(config)
        self.configuration_manager.remove_user_override()

    def apply_overrides(self, overrides):
        self.configuration_manager.apply_user_override(overrides)
        self._apply_config(overrides)

    def _reset_config(self, config):
        try:
            db_password = self._get_database_password()
            for k, v in config.items():
                alter_db_cmd = system.ALTER_DB_RESET_CFG % (DB_NAME, str(k))
                out, err = system.exec_vsql_command(db_password, alter_db_cmd)
                if err:
                    if err.is_warning():
                        LOG.warning(err)
                    else:
                        LOG.error(err)
                        raise RuntimeError(_("Failed to remove config %s") % k)

        except Exception:
            LOG.exception("Vertica configuration remove failed.")
            raise RuntimeError(_("Vertica configuration remove failed."))
        LOG.info("Vertica configuration reset completed.")

    def _apply_config(self, config):
        try:
            db_password = self._get_database_password()
            for k, v in config.items():
                alter_db_cmd = system.ALTER_DB_CFG % (DB_NAME, str(k), str(v))
                out, err = system.exec_vsql_command(db_password, alter_db_cmd)
                if err:
                    if err.is_warning():
                        LOG.warning(err)
                    else:
                        LOG.error(err)
                        raise RuntimeError(_("Failed to apply config %s") % k)

        except Exception:
            LOG.exception("Vertica configuration apply failed")
            raise RuntimeError(_("Vertica configuration apply failed"))
        LOG.info("Vertica config apply completed.")

    def _enable_db_on_boot(self):
        try:
            command = [
                "sudo", "su", "-", system.VERTICA_ADMIN, "-c",
                (system.SET_RESTART_POLICY % (DB_NAME, "always"))
            ]
            subprocess.Popen(command)
            command = [
                "sudo", "su", "-", "root", "-c",
                (system.VERTICA_AGENT_SERVICE_COMMAND % "enable")
            ]
            subprocess.Popen(command)
        except Exception:
            LOG.exception("Failed to enable database on boot.")
            raise RuntimeError(_("Could not enable database on boot."))

    def _disable_db_on_boot(self):
        try:
            command = (system.SET_RESTART_POLICY % (DB_NAME, "never"))
            system.shell_execute(command, system.VERTICA_ADMIN)
            command = (system.VERTICA_AGENT_SERVICE_COMMAND % "disable")
            system.shell_execute(command)
        except exception.ProcessExecutionError:
            LOG.exception("Failed to disable database on boot.")
            raise RuntimeError(_("Could not disable database on boot."))

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        """Stop the database."""
        LOG.info("Stopping Vertica.")
        if do_not_start_on_reboot:
            self._disable_db_on_boot()

        try:
            # Stop vertica-agent service
            command = (system.VERTICA_AGENT_SERVICE_COMMAND % "stop")
            system.shell_execute(command)
            # Using Vertica adminTools to stop db.
            db_password = self._get_database_password()
            stop_db_command = (system.STOP_DB % (DB_NAME, db_password))
            out, err = system.shell_execute(system.STATUS_ACTIVE_DB,
                                            system.VERTICA_ADMIN)
            if out.strip() == DB_NAME:
                system.shell_execute(stop_db_command, system.VERTICA_ADMIN)
                if not self.status._is_restarting:
                    if not self.status.wait_for_real_status_to_change_to(
                            rd_instance.ServiceStatuses.SHUTDOWN,
                            self.state_change_wait_time, update_db):
                        LOG.error("Could not stop Vertica.")
                        self.status.end_restart()
                        raise RuntimeError(_("Could not stop Vertica!"))
                LOG.debug("Database stopped.")
            else:
                LOG.debug("Database is not running.")
        except exception.ProcessExecutionError:
            LOG.exception("Failed to stop database.")
            raise RuntimeError(_("Could not stop database."))

    def start_db(self, update_db=False):
        """Start the database."""
        LOG.info("Starting Vertica.")
        try:
            self._enable_db_on_boot()
            # Start vertica-agent service
            command = [
                "sudo", "su", "-", "root", "-c",
                (system.VERTICA_AGENT_SERVICE_COMMAND % "start")
            ]
            subprocess.Popen(command)
            # Using Vertica adminTools to start db.
            db_password = self._get_database_password()
            start_db_command = [
                "sudo", "su", "-", system.VERTICA_ADMIN, "-c",
                (system.START_DB % (DB_NAME, db_password))
            ]
            subprocess.Popen(start_db_command)
            if not self.status._is_restarting:
                self.status.end_restart()
            LOG.debug("Database started.")
        except Exception as e:
            raise RuntimeError(_("Could not start Vertica due to %s") % e)

    def start_db_with_conf_changes(self, config_contents):
        """
         Currently all that this method does is to start Vertica. This method
         needs to be implemented to enable volume resize on guestagent side.
        """
        LOG.info("Starting Vertica with configuration changes.")
        if self.status.is_running:
            msg = 'Cannot start_db_with_conf_changes because status is %s.'
            LOG.debug(msg, self.status)
            raise RuntimeError(msg % self.status)
        LOG.info("Initiating config.")
        self.configuration_manager.save_configuration(config_contents)
        self.start_db(True)

    def restart(self):
        """Restart the database."""
        try:
            self.status.begin_restart()
            self.stop_db()
            self.start_db()
        finally:
            self.status.end_restart()

    def add_db_to_node(self, members=netutils.get_my_ipv4()):
        """Add db to host with admintools"""
        LOG.info("Calling admintools to add DB to host")
        try:
            # Add the db to this node after install.
            db_password = self._get_database_password()
            add_db_command = (system.ADD_DB_TO_NODE %
                              (members, DB_NAME, db_password))
            system.shell_execute(add_db_command, "dbadmin")
        except exception.ProcessExecutionError:
            # Give vertica some time to get the node up, won't be available
            # by the time adminTools -t db_add_node completes
            LOG.info("adminTools failed as expected - wait for node")
        self.wait_for_node_status()
        LOG.info("Vertica add db to host completed.")

    def remove_db_from_node(self, members=netutils.get_my_ipv4()):
        """Remove db from node with admintools"""
        LOG.info("Removing db from node")
        try:
            # Remove the db from this node.
            db_password = self._get_database_password()
            remove_db_command = (system.REMOVE_DB_FROM_NODE %
                                 (members, DB_NAME, db_password))
            system.shell_execute(remove_db_command, "dbadmin")
        except exception.ProcessExecutionError:
            # The node won't be available by the time the adminTools
            # command completes.
            LOG.info("adminTools failed as expected - wait for node")

        # Give vertica some time to take the node down - it won't be available
        # by the time the adminTools command completes.
        self.wait_for_node_status()
        LOG.info("Vertica remove host from db completed.")

    def create_db(self, members=netutils.get_my_ipv4()):
        """Prepare the guest machine with a Vertica db creation."""
        LOG.info("Creating database on Vertica host.")
        try:
            # Create db after install
            db_password = self._get_database_password()
            create_db_command = (
                system.CREATE_DB %
                (members, DB_NAME, MOUNT_POINT, MOUNT_POINT, db_password))
            system.shell_execute(create_db_command, system.VERTICA_ADMIN)
        except Exception:
            LOG.exception("Vertica database create failed.")
            raise RuntimeError(_("Vertica database create failed."))
        LOG.info("Vertica database create completed.")

    def install_vertica(self, members=netutils.get_my_ipv4()):
        """Prepare the guest machine with a Vertica db creation."""
        LOG.info("Installing Vertica Server.")
        try:
            # Run the Vertica installer across the given members.
            install_vertica_cmd = (system.INSTALL_VERTICA %
                                   (members, MOUNT_POINT))
            system.shell_execute(install_vertica_cmd)
        except exception.ProcessExecutionError:
            LOG.exception("install_vertica failed.")
            raise RuntimeError(_("install_vertica failed."))
        self._generate_database_password()
        LOG.info("install_vertica completed.")

    def update_vertica(self, command, members=netutils.get_my_ipv4()):
        LOG.info("Calling update_vertica with command %s", command)
        try:
            update_vertica_cmd = (system.UPDATE_VERTICA %
                                  (command, members, MOUNT_POINT))
            system.shell_execute(update_vertica_cmd)
        except exception.ProcessExecutionError:
            LOG.exception("update_vertica failed.")
            raise RuntimeError(_("update_vertica failed."))
        # self._generate_database_password()
        LOG.info("update_vertica completed.")

    def add_udls(self):
        """Load the user defined load libraries into the database."""
        LOG.info("Adding configured user defined load libraries.")
        password = self._get_database_password()
        loaded_udls = []
        for lib in system.UDL_LIBS:
            func_name = lib['func_name']
            lib_name = lib['lib_name']
            language = lib['language']
            factory = lib['factory']
            path = lib['path']
            if os.path.isfile(path):
                LOG.debug("Adding the %(func)s library as %(lib)s.", {
                    'func': func_name,
                    'lib': lib_name
                })
                out, err = system.exec_vsql_command(
                    password, system.CREATE_LIBRARY % (lib_name, path))
                if err:
                    if err.is_warning():
                        LOG.warning(err)
                    else:
                        LOG.error(err)
                        raise RuntimeError(
                            _("Failed to create library %s.") % lib_name)
                out, err = system.exec_vsql_command(
                    password, system.CREATE_SOURCE %
                    (func_name, language, factory, lib_name))
                if err:
                    if err.is_warning():
                        LOG.warning(err)
                    else:
                        LOG.error(err)
                        raise RuntimeError(
                            _("Failed to create source %s.") % func_name)
                loaded_udls.append(func_name)
            else:
                LOG.warning("Skipping %(func)s as path %(path)s not "
                            "found.", {
                                "func": func_name,
                                "path": path
                            })
        LOG.info("The following UDL functions are available for use: %s",
                 loaded_udls)

    def _generate_database_password(self):
        """Generate and write the password to vertica.cnf file."""
        config = configparser.ConfigParser()
        config.add_section('credentials')
        config.set('credentials', 'dbadmin_password',
                   utils.generate_random_password())
        self.write_config(config)

    def write_config(self,
                     config,
                     unlink_function=os.unlink,
                     temp_function=tempfile.NamedTemporaryFile):
        """Write the configuration contents to vertica.cnf file."""
        LOG.debug('Defining config holder at %s.', system.VERTICA_CONF)
        temp_file = temp_function('w', delete=False)
        try:
            config.write(temp_file)
            temp_file.close()
            command = (
                ("install -o root -g root -m 644 %(source)s %(target)s") % {
                    'source': temp_file.name,
                    'target': system.VERTICA_CONF
                })
            system.shell_execute(command)
            unlink_function(temp_file.name)
        except Exception:
            unlink_function(temp_file.name)
            raise
            raise

    def read_config(self):
        """Reads and returns the Vertica config."""
        try:
            config = configparser.ConfigParser()
            config.read(system.VERTICA_CONF)
            return config
        except Exception:
            LOG.exception("Failed to read config %s.", system.VERTICA_CONF)
            raise RuntimeError

    def _get_database_password(self):
        """Read the password from vertica.cnf file and return it."""
        return self.read_config().get('credentials', 'dbadmin_password')

    def install_if_needed(self, packages):
        """Install Vertica package if needed."""
        LOG.info("Preparing Guest as Vertica Server.")
        if not packager.pkg_is_installed(packages):
            LOG.debug("Installing Vertica Package.")
            packager.pkg_install(packages, None, system.INSTALL_TIMEOUT)

    def _set_readahead_for_disks(self):
        """This method sets readhead size for disks as needed by Vertica."""
        device = volume.VolumeDevice(CONF.device_path)
        device.set_readahead_size(CONF.vertica.readahead_size)
        LOG.debug("Set readhead size as required by Vertica.")

    def prepare_for_install_vertica(self):
        """This method executes preparatory methods before
        executing install_vertica.
        """
        command = ("VERT_DBA_USR=%s VERT_DBA_HOME=/home/dbadmin "
                   "VERT_DBA_GRP=%s /opt/vertica/oss/python/bin/python"
                   " -m vertica.local_coerce" %
                   (system.VERTICA_ADMIN, system.VERTICA_ADMIN_GRP))
        try:
            self._set_readahead_for_disks()
            system.shell_execute(command)
        except exception.ProcessExecutionError:
            LOG.exception("Failed to prepare for install_vertica.")
            raise

    def mark_design_ksafe(self, k):
        """Wrapper for mark_design_ksafe function for setting k-safety """
        LOG.info("Setting Vertica k-safety to %s", str(k))
        out, err = system.exec_vsql_command(self._get_database_password(),
                                            system.MARK_DESIGN_KSAFE % k)
        # Only fail if we get an ERROR as opposed to a warning complaining
        # about setting k = 0
        if "ERROR" in err:
            LOG.error(err)
            raise RuntimeError(_("Failed to set k-safety level %s.") % k)

    def _create_user(self, username, password, role=None):
        """Creates a user, granting and enabling the given role for it."""
        LOG.info("Creating user in Vertica database.")
        out, err = system.exec_vsql_command(
            self._get_database_password(),
            system.CREATE_USER % (username, password))
        if err:
            if err.is_warning():
                LOG.warning(err)
            else:
                LOG.error(err)
                raise RuntimeError(_("Failed to create user %s.") % username)
        if role:
            self._grant_role(username, role)

    def _grant_role(self, username, role):
        """Grants a role to the user on the schema."""
        out, err = system.exec_vsql_command(
            self._get_database_password(),
            system.GRANT_TO_USER % (role, username))
        if err:
            if err.is_warning():
                LOG.warning(err)
            else:
                LOG.error(err)
                raise RuntimeError(
                    _("Failed to grant role %(r)s to user "
                      "%(u)s.") % {
                          'r': role,
                          'u': username
                      })
        out, err = system.exec_vsql_command(
            self._get_database_password(),
            system.ENABLE_FOR_USER % (username, role))
        if err:
            LOG.warning(err)

    def enable_root(self, root_password=None):
        """Resets the root password."""
        LOG.info("Enabling root.")
        user = models.DatastoreUser.root(password=root_password)
        if not self.is_root_enabled():
            self._create_user(user.name, user.password, 'pseudosuperuser')
        else:
            LOG.debug("Updating %s password.", user.name)
            try:
                out, err = system.exec_vsql_command(
                    self._get_database_password(),
                    system.ALTER_USER_PASSWORD % (user.name, user.password))
                if err:
                    if err.is_warning():
                        LOG.warning(err)
                    else:
                        LOG.error(err)
                        raise RuntimeError(
                            _("Failed to update %s "
                              "password.") % user.name)
            except exception.ProcessExecutionError:
                LOG.error("Failed to update %s password.", user.name)
                raise RuntimeError(
                    _("Failed to update %s password.") % user.name)
        return user.serialize()

    def is_root_enabled(self):
        """Return True if root access is enabled else False."""
        LOG.debug("Checking is root enabled.")
        try:
            out, err = system.shell_execute(
                system.USER_EXISTS % (self._get_database_password(), 'root'),
                system.VERTICA_ADMIN)
            if err:
                LOG.error(err)
                raise RuntimeError(_("Failed to query for root user."))
        except exception.ProcessExecutionError:
            raise RuntimeError(_("Failed to query for root user."))
        return out.rstrip() == "1"

    def get_public_keys(self, user):
        """Generates key (if not found), and sends public key for user."""
        LOG.debug("Public keys requested for user: %s.", user)
        user_home_directory = os.path.expanduser('~' + user)
        public_key_file_name = user_home_directory + '/.ssh/id_rsa.pub'

        try:
            key_generate_command = (system.SSH_KEY_GEN % user_home_directory)
            system.shell_execute(key_generate_command, user)
        except exception.ProcessExecutionError:
            LOG.debug("Cannot generate key.")

        try:
            read_key_cmd = ("cat %(file)s" % {'file': public_key_file_name})
            out, err = system.shell_execute(read_key_cmd)
        except exception.ProcessExecutionError:
            LOG.exception("Cannot read public key.")
            raise
        return out.strip()

    def authorize_public_keys(self, user, public_keys):
        """Adds public key to authorized_keys for user."""
        LOG.debug("public keys to be added for user: %s.", user)
        user_home_directory = os.path.expanduser('~' + user)
        authorized_file_name = user_home_directory + '/.ssh/authorized_keys'

        try:
            read_key_cmd = ("cat %(file)s" % {'file': authorized_file_name})
            out, err = system.shell_execute(read_key_cmd)
            public_keys.append(out.strip())
        except exception.ProcessExecutionError:
            LOG.debug("Cannot read authorized_keys.")
        all_keys = '\n'.join(public_keys) + "\n"

        try:
            with tempfile.NamedTemporaryFile("w", delete=False) as tempkeyfile:
                tempkeyfile.write(all_keys)
            copy_key_cmd = (
                ("install -o %(user)s -m 600 %(source)s %(target)s") % {
                    'user': user,
                    'source': tempkeyfile.name,
                    'target': authorized_file_name
                })
            system.shell_execute(copy_key_cmd)
            os.remove(tempkeyfile.name)
        except exception.ProcessExecutionError:
            LOG.exception("Cannot install public keys.")
            os.remove(tempkeyfile.name)
            raise

    def _export_conf_to_members(self, members):
        """This method exports conf files to other members."""
        try:
            for member in members:
                COPY_CMD = (system.SEND_CONF_TO_SERVER %
                            (system.VERTICA_CONF, member, system.VERTICA_CONF))
                system.shell_execute(COPY_CMD)
        except exception.ProcessExecutionError:
            LOG.exception("Cannot export configuration.")
            raise

    def install_cluster(self, members):
        """Installs & configures cluster."""
        cluster_members = ','.join(members)
        LOG.debug("Installing cluster with members: %s.", cluster_members)
        self.install_vertica(cluster_members)
        self._export_conf_to_members(members)
        LOG.debug("Creating database with members: %s.", cluster_members)
        self.create_db(cluster_members)
        LOG.debug("Cluster configured on members: %s.", cluster_members)

    def grow_cluster(self, members):
        """Adds nodes to cluster."""
        cluster_members = ','.join(members)
        LOG.debug("Growing cluster with members: %s.", cluster_members)
        self.update_vertica("--add-hosts", cluster_members)
        self._export_conf_to_members(members)
        LOG.debug("Creating database with members: %s.", cluster_members)
        self.add_db_to_node(cluster_members)
        LOG.debug("Cluster configured on members: %s.", cluster_members)

    def shrink_cluster(self, members):
        """Removes nodes from cluster."""
        cluster_members = ','.join(members)
        LOG.debug("Shrinking cluster with members: %s.", cluster_members)
        self.remove_db_from_node(cluster_members)
        self.update_vertica("--remove-hosts", cluster_members)

    def wait_for_node_status(self, status='UP'):
        """Wait until all nodes are the same status"""

        # select node_state from nodes where node_state <> 'UP'
        def _wait_for_node_status():
            out, err = system.exec_vsql_command(self._get_database_password(),
                                                system.NODE_STATUS % status)
            LOG.debug("Polled vertica node states: %s", out)

            if err:
                LOG.error(err)
                raise RuntimeError(_("Failed to query for root user."))

            return "0 rows" in out

        try:
            utils.poll_until(_wait_for_node_status,
                             time_out=600,
                             sleep_time=15)
        except exception.PollTimeOut:
            raise RuntimeError(
                _("Timed out waiting for cluster to "
                  "change to status %s") % status)
Exemple #17
0
class CassandraApp(object):
    """Prepares DBaaS on a Guest container."""

    _ADMIN_USER = '******'

    _CONF_AUTH_SEC = 'authentication'
    _CONF_USR_KEY = 'username'
    _CONF_PWD_KEY = 'password'
    _CONF_DIR_MODS = stat.S_IRWXU
    _CONF_FILE_MODS = stat.S_IRUSR

    CASSANDRA_KILL_CMD = "sudo killall java  || true"

    def __init__(self):
        """By default login with root no password for initial setup."""
        self.state_change_wait_time = CONF.state_change_wait_time
        self.status = CassandraAppStatus(self.get_current_superuser())

        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(self.cassandra_conf),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        self.configuration_manager = ConfigurationManager(
            self.cassandra_conf,
            self.cassandra_owner,
            self.cassandra_owner,
            SafeYamlCodec(default_flow_style=False),
            requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))

    @property
    def service_candidates(self):
        return ['cassandra']

    @property
    def cassandra_conf(self):
        return {
            operating_system.REDHAT:
            "/etc/cassandra/default.conf/cassandra.yaml",
            operating_system.DEBIAN: "/etc/cassandra/cassandra.yaml",
            operating_system.SUSE: "/etc/cassandra/default.conf/cassandra.yaml"
        }[operating_system.get_os()]

    @property
    def cassandra_owner(self):
        return 'cassandra'

    @property
    def cassandra_data_dir(self):
        return guestagent_utils.build_file_path(self.cassandra_working_dir,
                                                'data')

    @property
    def cassandra_working_dir(self):
        return "/var/lib/cassandra"

    @property
    def default_superuser_name(self):
        return "cassandra"

    @property
    def default_superuser_password(self):
        return "cassandra"

    @property
    def default_superuser_pwd_hash(self):
        # Default 'salted_hash' value for 'cassandra' user on Cassandra 2.1.
        return "$2a$10$wPEVuXBU7WE2Uwzqq3t19ObRJyoKztzC/Doyfr0VtDmVXC4GDAV3e"

    @property
    def cqlsh_conf_path(self):
        return "~/.cassandra/cqlshrc"

    def install_if_needed(self, packages):
        """Prepare the guest machine with a Cassandra server installation."""
        LOG.info(_("Preparing Guest as a Cassandra Server"))
        if not packager.pkg_is_installed(packages):
            self._install_db(packages)
        LOG.debug("Cassandra install_if_needed complete")

    def init_storage_structure(self, mount_point):
        try:
            operating_system.create_directory(mount_point, as_root=True)
        except exception.ProcessExecutionError:
            LOG.exception(_("Error while initiating storage structure."))

    def start_db(self, update_db=False):
        self.status.start_db_service(self.service_candidates,
                                     self.state_change_wait_time,
                                     enable_on_boot=True,
                                     update_db=update_db)

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        self.status.stop_db_service(self.service_candidates,
                                    self.state_change_wait_time,
                                    disable_on_boot=do_not_start_on_reboot,
                                    update_db=update_db)

    def restart(self):
        self.status.restart_db_service(self.service_candidates,
                                       self.state_change_wait_time)

    def _install_db(self, packages):
        """Install Cassandra server"""
        LOG.debug("Installing Cassandra server.")
        packager.pkg_install(packages, None, 10000)
        LOG.debug("Finished installing Cassandra server")

    def secure(self, update_user=None):
        """Configure the Trove administrative user.
        Update an existing user if given.
        Create a new one using the default database credentials
        otherwise and drop the built-in user when finished.
        """
        LOG.info(_('Configuring Trove superuser.'))

        current_superuser = update_user or models.CassandraUser(
            self.default_superuser_name, self.default_superuser_password)

        if update_user:
            os_admin = models.CassandraUser(update_user.name,
                                            utils.generate_random_password())
            CassandraAdmin(current_superuser).alter_user_password(os_admin)
        else:
            os_admin = models.CassandraUser(self._ADMIN_USER,
                                            utils.generate_random_password())
            CassandraAdmin(current_superuser)._create_superuser(os_admin)
            CassandraAdmin(os_admin).drop_user(current_superuser)

        self.__create_cqlsh_config({
            self._CONF_AUTH_SEC: {
                self._CONF_USR_KEY: os_admin.name,
                self._CONF_PWD_KEY: os_admin.password
            }
        })

        # Update the internal status with the new user.
        self.status = CassandraAppStatus(os_admin)

        return os_admin

    def __create_cqlsh_config(self, sections):
        config_path = self._get_cqlsh_conf_path()
        config_dir = os.path.dirname(config_path)
        if not os.path.exists(config_dir):
            os.mkdir(config_dir, self._CONF_DIR_MODS)
        else:
            os.chmod(config_dir, self._CONF_DIR_MODS)
        operating_system.write_file(config_path, sections, codec=IniCodec())
        os.chmod(config_path, self._CONF_FILE_MODS)

    def get_current_superuser(self):
        """
        Build the Trove superuser.
        Use the stored credentials.
        If not available fall back to the defaults.
        """
        if self.has_user_config():
            return self._load_current_superuser()

        LOG.warn(
            _("Trove administrative user has not been configured yet. "
              "Using the built-in default: %s") % self.default_superuser_name)
        return models.CassandraUser(self.default_superuser_name,
                                    self.default_superuser_password)

    def has_user_config(self):
        """
        Return TRUE if there is a client configuration file available
        on the guest.
        """
        return os.path.exists(self._get_cqlsh_conf_path())

    def _load_current_superuser(self):
        config = operating_system.read_file(self._get_cqlsh_conf_path(),
                                            codec=IniCodec())
        return models.CassandraUser(
            config[self._CONF_AUTH_SEC][self._CONF_USR_KEY],
            config[self._CONF_AUTH_SEC][self._CONF_PWD_KEY])

    def apply_initial_guestagent_configuration(self):
        """
        Some of these settings may be overridden by user-defined
        configuration groups.

        cluster_name
            - Use the unique guest id by default.
            - Prevents nodes from one logical cluster from talking
              to another. All nodes in a cluster must have the same value.
        authenticator and authorizer
            - Necessary to enable users and permissions.
        rpc_address - Enable remote connections on all interfaces.
        broadcast_rpc_address - RPC address to broadcast to drivers and
                                other clients. Must be set if
                                rpc_address = 0.0.0.0 and can never be
                                0.0.0.0 itself.
        listen_address - The address on which the node communicates with
                         other nodes. Can never be 0.0.0.0.
        seed_provider - A list of discovery contact points.
        """
        updates = {
            'cluster_name': CONF.guest_id,
            'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator',
            'authorizer': 'org.apache.cassandra.auth.CassandraAuthorizer',
            'rpc_address': "0.0.0.0",
            'broadcast_rpc_address': netutils.get_my_ipv4(),
            'listen_address': netutils.get_my_ipv4(),
            'seed_provider': {
                'parameters': [{
                    'seeds': netutils.get_my_ipv4()
                }]
            }
        }

        self.configuration_manager.apply_system_override(updates)

    def update_overrides(self, context, overrides, remove=False):
        if overrides:
            self.configuration_manager.apply_user_override(overrides)

    def remove_overrides(self):
        self.configuration_manager.remove_user_override()

    def start_db_with_conf_changes(self, config_contents):
        LOG.debug("Starting database with configuration changes.")
        if self.status.is_running:
            raise RuntimeError(_("The service is still running."))

        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration()
        self.start_db(True)

    def reset_configuration(self, configuration):
        LOG.debug("Resetting configuration.")
        config_contents = configuration['config_contents']
        self.configuration_manager.save_configuration(config_contents)

    def _get_cqlsh_conf_path(self):
        return os.path.expanduser(self.cqlsh_conf_path)
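The cqlshrc file written by __create_cqlsh_config above is an ini file with a single [authentication] section, created with owner-only permissions. Below is a stdlib-only sketch of that layout; configparser is used here as an assumed stand-in for Trove's IniCodec, and the directory and credentials are placeholders.

import configparser
import os
import stat
import tempfile

config_dir = tempfile.mkdtemp()            # stands in for ~/.cassandra
config_path = os.path.join(config_dir, 'cqlshrc')

parser = configparser.ConfigParser()
parser['authentication'] = {'username': 'trove_admin',      # placeholder
                            'password': 'example-password'}  # placeholder

os.chmod(config_dir, stat.S_IRWXU)         # _CONF_DIR_MODS: owner rwx only
with open(config_path, 'w') as conf_file:
    parser.write(conf_file)
os.chmod(config_path, stat.S_IRUSR)        # _CONF_FILE_MODS: owner read only

with open(config_path) as conf_file:
    print(conf_file.read())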
Exemple #18
0
class CassandraApp(object):
    """Prepares DBaaS on a Guest container."""

    _ADMIN_USER = '******'

    _CONF_AUTH_SEC = 'authentication'
    _CONF_USR_KEY = 'username'
    _CONF_PWD_KEY = 'password'
    _CONF_DIR_MODS = stat.S_IRWXU
    _CONF_FILE_MODS = stat.S_IRUSR

    CASSANDRA_CONF_FILE = "cassandra.yaml"
    CASSANDRA_TOPOLOGY_FILE = 'cassandra-rackdc.properties'

    _TOPOLOGY_CODEC = PropertiesCodec(
        delimiter='=', unpack_singletons=True, string_mappings={
            'true': True, 'false': False})

    CASSANDRA_KILL_CMD = "sudo killall java  || true"

    def __init__(self):
        self.state_change_wait_time = CONF.state_change_wait_time
        self.status = CassandraAppStatus(self.get_current_superuser())

        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(self.cassandra_conf),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        self.configuration_manager = ConfigurationManager(
            self.cassandra_conf,
            self.cassandra_owner, self.cassandra_owner,
            SafeYamlCodec(default_flow_style=False), requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))

    @property
    def service_candidates(self):
        return ['cassandra']

    @property
    def cassandra_conf_dir(self):
        return {
            operating_system.REDHAT: "/etc/cassandra/default.conf/",
            operating_system.DEBIAN: "/etc/cassandra/",
            operating_system.SUSE: "/etc/cassandra/default.conf/"
        }[operating_system.get_os()]

    @property
    def cassandra_conf(self):
        return guestagent_utils.build_file_path(self.cassandra_conf_dir,
                                                self.CASSANDRA_CONF_FILE)

    @property
    def cassandra_topology(self):
        return guestagent_utils.build_file_path(self.cassandra_conf_dir,
                                                self.CASSANDRA_TOPOLOGY_FILE)

    @property
    def cassandra_owner(self):
        return 'cassandra'

    @property
    def cassandra_data_dir(self):
        return guestagent_utils.build_file_path(
            self.cassandra_working_dir, 'data')

    @property
    def cassandra_working_dir(self):
        return "/var/lib/cassandra"

    @property
    def default_superuser_name(self):
        return "cassandra"

    @property
    def default_superuser_password(self):
        return "cassandra"

    @property
    def default_superuser_pwd_hash(self):
        # Default 'salted_hash' value for 'cassandra' user on Cassandra 2.1.
        return "$2a$10$wPEVuXBU7WE2Uwzqq3t19ObRJyoKztzC/Doyfr0VtDmVXC4GDAV3e"

    @property
    def cqlsh_conf_path(self):
        return "~/.cassandra/cqlshrc"

    def build_admin(self):
        return CassandraAdmin(self.get_current_superuser())

    def install_if_needed(self, packages):
        """Prepare the guest machine with a Cassandra server installation."""
        LOG.info(_("Preparing Guest as a Cassandra Server"))
        if not packager.pkg_is_installed(packages):
            self._install_db(packages)
        LOG.debug("Cassandra install_if_needed complete")

    def init_storage_structure(self, mount_point):
        try:
            operating_system.create_directory(mount_point, as_root=True)
        except exception.ProcessExecutionError:
            LOG.exception(_("Error while initiating storage structure."))

    def start_db(self, update_db=False, enable_on_boot=True):
        self.status.start_db_service(
            self.service_candidates, self.state_change_wait_time,
            enable_on_boot=enable_on_boot, update_db=update_db)

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        self.status.stop_db_service(
            self.service_candidates, self.state_change_wait_time,
            disable_on_boot=do_not_start_on_reboot, update_db=update_db)

    def restart(self):
        self.status.restart_db_service(
            self.service_candidates, self.state_change_wait_time)

    def _install_db(self, packages):
        """Install Cassandra server"""
        LOG.debug("Installing Cassandra server.")
        packager.pkg_install(packages, None, 10000)
        LOG.debug("Finished installing Cassandra server")

    def _remove_system_tables(self):
        """
        Clean up the system keyspace.

        System tables are initialized on the first boot.
        They store certain properties, such as 'cluster_name',
        that cannot be easily changed afterwards.
        The system keyspace needs to be cleaned up first. The
        tables will be regenerated on the next startup.
        Make sure to also cleanup the commitlog and caches to avoid
        startup errors due to inconsistencies.

        The service should not be running at this point.
        """
        if self.status.is_running:
            raise RuntimeError(_("Cannot remove system tables. "
                                 "The service is still running."))

        LOG.info(_('Removing existing system tables.'))
        system_keyspace_dir = guestagent_utils.build_file_path(
            self.cassandra_data_dir, 'system')
        commitlog_file = guestagent_utils.build_file_path(
            self.cassandra_working_dir, 'commitlog')
        caches_dir = guestagent_utils.build_file_path(
            self.cassandra_working_dir, 'saved_caches')

        operating_system.remove(system_keyspace_dir,
                                force=True, recursive=True, as_root=True)
        operating_system.remove(commitlog_file,
                                force=True, recursive=True, as_root=True)
        operating_system.remove(caches_dir,
                                force=True, recursive=True, as_root=True)

        operating_system.create_directory(
            system_keyspace_dir,
            user=self.cassandra_owner, group=self.cassandra_owner,
            force=True, as_root=True)
        operating_system.create_directory(
            commitlog_file,
            user=self.cassandra_owner, group=self.cassandra_owner,
            force=True, as_root=True)
        operating_system.create_directory(
            caches_dir,
            user=self.cassandra_owner, group=self.cassandra_owner,
            force=True, as_root=True)

    def _apply_post_restore_updates(self, backup_info):
        """The service should not be running at this point.

        The restored database files carry some properties over from the
        original instance that need to be updated with appropriate
        values for the new instance.
        These include:

            - Reset the 'cluster_name' property to match the new unique
              ID of this instance.
              This is to ensure that the restored instance is a part of a new
              single-node cluster rather than forming one with the
              original node.
            - Reset the administrator's password.
              The original password from the parent instance may be
              compromised or long lost.

        A general procedure is:
            - update the configuration property with the current value
              so that the service can start up
            - reset the superuser password
            - restart the service
            - change the cluster name
            - restart the service

        :seealso: _reset_admin_password
        :seealso: change_cluster_name
        """

        if self.status.is_running:
            raise RuntimeError(_("Cannot reset the cluster name. "
                                 "The service is still running."))

        LOG.debug("Applying post-restore updates to the database.")

        try:
            # Change the 'cluster_name' property to the current in-database
            # value so that the database can start up.
            self._update_cluster_name_property(backup_info['instance_id'])

            # Reset the superuser password so that we can log-in.
            self._reset_admin_password()

            # Start the database and update the 'cluster_name' to the
            # new value.
            self.start_db(update_db=False)
            self.change_cluster_name(CONF.guest_id)
        finally:
            self.stop_db()  # Always restore the initial state of the service.

    def cluster_secure(self, password):
        return self.secure(password=password).serialize()

    def secure(self, update_user=None, password=None):
        """Configure the Trove administrative user.
        Update an existing user if given.
        Create a new one using the default database credentials
        otherwise and drop the built-in user when finished.
        """
        LOG.info(_('Configuring Trove superuser.'))

        if password is None:
            password = utils.generate_random_password()

        admin_username = update_user.name if update_user else self._ADMIN_USER
        os_admin = models.CassandraUser(admin_username, password)

        if update_user:
            CassandraAdmin(update_user).alter_user_password(os_admin)
        else:
            cassandra = models.CassandraUser(
                self.default_superuser_name, self.default_superuser_password)
            CassandraAdmin(cassandra)._create_superuser(os_admin)
            CassandraAdmin(os_admin).drop_user(cassandra)

        self._update_admin_credentials(os_admin)

        return os_admin

    def _update_admin_credentials(self, user):
        self.__create_cqlsh_config({self._CONF_AUTH_SEC:
                                    {self._CONF_USR_KEY: user.name,
                                     self._CONF_PWD_KEY: user.password}})

        # Update the internal status with the new user.
        self.status = CassandraAppStatus(user)

    def store_admin_credentials(self, admin_credentials):
        user = models.CassandraUser.deserialize_user(admin_credentials)
        self._update_admin_credentials(user)

    def get_admin_credentials(self):
        return self.get_current_superuser().serialize()

    def _reset_admin_password(self):
        """
        Reset the password of the Trove's administrative superuser.

        The service should not be running at this point.

        A general password reset procedure is:
            - disable user authentication and remote access
            - restart the service
            - update the password in the 'system_auth.credentials' table
            - re-enable authentication and make the host reachable
            - restart the service
        """
        if self.status.is_running:
            raise RuntimeError(_("Cannot reset the administrative password. "
                                 "The service is still running."))

        try:
            # Disable automatic startup in case the node goes down before
            # we have the superuser secured.
            operating_system.disable_service_on_boot(self.service_candidates)

            self.__disable_remote_access()
            self.__disable_authentication()

            # We now start up the service and immediately re-enable
            # authentication in the configuration file (takes effect after
            # restart).
            # Then we reset the superuser password to its default value
            # and restart the service to get user functions back.
            self.start_db(update_db=False, enable_on_boot=False)
            self.__enable_authentication()
            os_admin = self.__reset_user_password_to_default(self._ADMIN_USER)
            self.status = CassandraAppStatus(os_admin)
            self.restart()

            # Now change the administrative password to a new secret value.
            self.secure(update_user=os_admin)
        finally:
            self.stop_db()  # Always restore the initial state of the service.

        # At this point, we should have a secured database with new Trove-only
        # superuser password.
        # Proceed to re-enable remote access and automatic startup.
        self.__enable_remote_access()
        operating_system.enable_service_on_boot(self.service_candidates)

    def __reset_user_password_to_default(self, username):
        LOG.debug("Resetting the password of user '%s' to '%s'."
                  % (username, self.default_superuser_password))

        user = models.CassandraUser(username, self.default_superuser_password)
        with CassandraLocalhostConnection(user) as client:
            client.execute(
                "UPDATE system_auth.credentials SET salted_hash=%s "
                "WHERE username='******';", (user.name,),
                (self.default_superuser_pwd_hash,))

            return user

    def change_cluster_name(self, cluster_name):
        """Change the 'cluster_name' property of an exesting running instance.
        Cluster name is stored in the database and is required to match the
        configuration value. Cassandra fails to start otherwise.
        """

        if not self.status.is_running:
            raise RuntimeError(_("Cannot change the cluster name. "
                                 "The service is not running."))

        LOG.debug("Changing the cluster name to '%s'." % cluster_name)

        # Update the in-database value.
        self.__reset_cluster_name(cluster_name)

        # Update the configuration property.
        self._update_cluster_name_property(cluster_name)

        self.restart()

    def __reset_cluster_name(self, cluster_name):
        # Reset the in-database value stored locally on this node.
        current_superuser = self.get_current_superuser()
        with CassandraLocalhostConnection(current_superuser) as client:
            client.execute(
                "UPDATE system.local SET cluster_name = '{}' "
                "WHERE key='local';", (cluster_name,))

        # Newer versions of Cassandra require a flush to ensure the changes
        # to the local system keyspace persist.
        self.flush_tables('system', 'local')

    def __create_cqlsh_config(self, sections):
        config_path = self._get_cqlsh_conf_path()
        config_dir = os.path.dirname(config_path)
        if not os.path.exists(config_dir):
            os.mkdir(config_dir, self._CONF_DIR_MODS)
        else:
            os.chmod(config_dir, self._CONF_DIR_MODS)
        operating_system.write_file(config_path, sections, codec=IniCodec())
        os.chmod(config_path, self._CONF_FILE_MODS)

    def get_current_superuser(self):
        """
        Build the Trove superuser from the stored credentials,
        falling back to the built-in defaults if they are not available.
        """
        if self.has_user_config():
            return self._load_current_superuser()

        LOG.warn(_("Trove administrative user has not been configured yet. "
                   "Using the built-in default: %s")
                 % self.default_superuser_name)
        return models.CassandraUser(self.default_superuser_name,
                                    self.default_superuser_password)

    def has_user_config(self):
        """
        Return True if a client configuration file is available
        on the guest.
        """
        return os.path.exists(self._get_cqlsh_conf_path())

    def _load_current_superuser(self):
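        # A minimal sketch of the expected INI layout of the cqlsh config
        # written by __create_cqlsh_config (section and key names come from
        # the class constants; values are illustrative):
        #
        #     [authentication]
        #     username = os_admin
        #     password = <stored secret>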
        config = operating_system.read_file(self._get_cqlsh_conf_path(),
                                            codec=IniCodec())
        return models.CassandraUser(
            config[self._CONF_AUTH_SEC][self._CONF_USR_KEY],
            config[self._CONF_AUTH_SEC][self._CONF_PWD_KEY]
        )

    def apply_initial_guestagent_configuration(self, cluster_name=None):
        """Update guestagent-controlled configuration properties.
        These changes to the default template are necessary in order to make
        the database service bootable and accessible in the guestagent context.

        :param cluster_name:  The 'cluster_name' configuration property.
                              Use the unique guest id by default.
        :type cluster_name:   string
        """
        self.configuration_manager.apply_system_override(
            {'data_file_directories': [self.cassandra_data_dir]})
        self._make_host_reachable()
        self._update_cluster_name_property(cluster_name or CONF.guest_id)
        # A single-node instance may use the SimpleSnitch
        # (keyspaces use SimpleStrategy).
        # A network-aware snitch has to be used otherwise.
        if cluster_name is None:
            updates = {'endpoint_snitch': 'SimpleSnitch'}
        else:
            updates = {'endpoint_snitch': 'GossipingPropertyFileSnitch'}
        self.configuration_manager.apply_system_override(updates)
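        # Taken together, the overrides applied above roughly correspond to a
        # cassandra.yaml fragment like the following (values illustrative):
        #
        #     data_file_directories: [<cassandra data dir>]
        #     cluster_name: <guest id or the given name>
        #     endpoint_snitch: GossipingPropertyFileSnitch
        #
        # plus the authentication and remote-access settings applied by
        # _make_host_reachable().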

    def _make_host_reachable(self):
        """
        Some of these settings may be overridden by user-defined
        configuration groups.

        authenticator and authorizer
            - Necessary to enable users and permissions.
        rpc_address - Enable remote connections on all interfaces.
        broadcast_rpc_address - RPC address to broadcast to drivers and
                                other clients. Must be set if
                                rpc_address = 0.0.0.0 and can never be
                                0.0.0.0 itself.
        listen_address - The address on which the node communicates with
                         other nodes. Can never be 0.0.0.0.
        seed_provider - A list of discovery contact points.
        """
        self.__enable_authentication()
        self.__enable_remote_access()

    def __enable_remote_access(self):
        updates = {
            'rpc_address': "0.0.0.0",
            'broadcast_rpc_address': netutils.get_my_ipv4(),
            'listen_address': netutils.get_my_ipv4(),
            'seed_provider': {'parameters':
                              [{'seeds': netutils.get_my_ipv4()}]
                              }
        }

        self.configuration_manager.apply_system_override(updates)

    def __disable_remote_access(self):
        updates = {
            'rpc_address': "127.0.0.1",
            'listen_address': '127.0.0.1',
            'seed_provider': {'parameters':
                              [{'seeds': '127.0.0.1'}]
                              }
        }

        self.configuration_manager.apply_system_override(updates)

    def __enable_authentication(self):
        updates = {
            'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator',
            'authorizer': 'org.apache.cassandra.auth.CassandraAuthorizer'
        }

        self.configuration_manager.apply_system_override(updates)

    def __disable_authentication(self):
        updates = {
            'authenticator': 'org.apache.cassandra.auth.AllowAllAuthenticator',
            'authorizer': 'org.apache.cassandra.auth.AllowAllAuthorizer'
        }

        self.configuration_manager.apply_system_override(updates)

    def _update_cluster_name_property(self, name):
        """This 'cluster_name' property prevents nodes from one
        logical cluster from talking to another.
        All nodes in a cluster must have the same value.
        """
        self.configuration_manager.apply_system_override({'cluster_name':
                                                          name})

    def update_overrides(self, context, overrides, remove=False):
        if overrides:
            self.configuration_manager.apply_user_override(overrides)

    def remove_overrides(self):
        self.configuration_manager.remove_user_override()

    def write_cluster_topology(self, data_center, rack, prefer_local=True):
        LOG.info(_('Saving Cassandra cluster topology configuration.'))

        config = {'dc': data_center,
                  'rack': rack,
                  'prefer_local': prefer_local}

        operating_system.write_file(self.cassandra_topology, config,
                                    codec=self._TOPOLOGY_CODEC, as_root=True)
        operating_system.chown(
            self.cassandra_topology,
            self.cassandra_owner, self.cassandra_owner, as_root=True)
        operating_system.chmod(
            self.cassandra_topology, FileMode.ADD_READ_ALL, as_root=True)
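        # A sketch of the resulting topology file, assuming the usual
        # GossipingPropertyFileSnitch properties format (values illustrative):
        #
        #     dc=<data_center>
        #     rack=<rack>
        #     prefer_local=true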

    def start_db_with_conf_changes(self, config_contents):
        LOG.debug("Starting database with configuration changes.")
        if self.status.is_running:
            raise RuntimeError(_("The service is still running."))

        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration()
        self.start_db(True)

    def reset_configuration(self, configuration):
        LOG.debug("Resetting configuration.")
        config_contents = configuration['config_contents']
        self.configuration_manager.save_configuration(config_contents)

    def _get_cqlsh_conf_path(self):
        return os.path.expanduser(self.cqlsh_conf_path)

    def get_data_center(self):
        config = operating_system.read_file(self.cassandra_topology,
                                            codec=self._TOPOLOGY_CODEC)
        return config['dc']

    def get_rack(self):
        config = operating_system.read_file(self.cassandra_topology,
                                            codec=self._TOPOLOGY_CODEC)
        return config['rack']

    def set_seeds(self, seeds):
        LOG.debug("Setting seed nodes: %s" % seeds)
        updates = {
            'seed_provider': {'parameters':
                              [{'seeds': ','.join(seeds)}]
                              }
        }

        self.configuration_manager.apply_system_override(updates)

    def get_seeds(self):
        """Return a list of seed node IPs if any.

        The seed IPs are stored as a comma-separated string in the
        seed-provider parameters:
        [{'class_name': '<name>', 'parameters': [{'seeds': '<ip>,<ip>'}, ...]}]
        """

        def find_first(key, dict_list):
            for item in dict_list:
                if key in item:
                    return item[key]
            return []

        sp_property = self.configuration_manager.get_value('seed_provider', [])
        seeds_str = find_first('seeds', find_first('parameters', sp_property))
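        # Illustrative walk-through: for
        #   sp_property = [{'class_name': 'SimpleSeedProvider',
        #                   'parameters': [{'seeds': '10.0.0.1,10.0.0.2'}]}]
        # find_first('parameters', sp_property) returns the inner list,
        # find_first('seeds', ...) returns '10.0.0.1,10.0.0.2', and the
        # method yields ['10.0.0.1', '10.0.0.2'].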
        return seeds_str.split(',') if seeds_str else []

    def set_auto_bootstrap(self, enabled):
        """Auto-bootstrap makes new (non-seed) nodes automatically migrate the
        right data to themselves.
        The feature has to be turned OFF when initializing a fresh cluster
        without data.
        It must be turned back ON once the cluster is initialized.
        """
        LOG.debug("Setting auto-bootstrapping: %s" % enabled)
        updates = {'auto_bootstrap': enabled}
        self.configuration_manager.apply_system_override(updates)

    def node_cleanup_begin(self):
        """Suspend periodic status updates and mark the instance busy
        throughout the operation.
        """
        self.status.begin_restart()
        self.status.set_status(rd_instance.ServiceStatuses.BLOCKED)

    def node_cleanup(self):
        """Cassandra does not automatically remove data from nodes that
        lose part of their partition range to a newly added node.
        Cleans up keyspaces and partition keys no longer belonging to the node.

        Do not treat cleanup failures as fatal. Resume the heartbeat after
        finishing and let it signal the true state of the instance to the
        caller.
        """
        LOG.debug("Running node cleanup.")
        # nodetool -h <HOST> -p <PORT> -u <USER> -pw <PASSWORD> cleanup
        try:
            self._run_nodetool_command('cleanup')
            self.status.set_status(rd_instance.ServiceStatuses.RUNNING)
        except Exception:
            LOG.exception(_("The node failed to complete its cleanup."))
        finally:
            self.status.end_restart()

    def node_decommission(self):
        """Causes a live node to decommission itself,
        streaming its data to the next node on the ring.

        Shutdown the database after successfully finishing the operation,
        or leave the node in a failed state otherwise.

        Suspend periodic status updates, so that the caller can poll for the
        database shutdown.
        """
        LOG.debug("Decommissioning the node.")
        # nodetool -h <HOST> -p <PORT> -u <USER> -pw <PASSWORD> decommission
        self.status.begin_restart()
        try:
            self._run_nodetool_command('decommission')
        except Exception:
            LOG.exception(_("The node failed to decommission itself."))
            self.status.set_status(rd_instance.ServiceStatuses.FAILED)
            return
        finally:
            # Cassandra connections have the ability to automatically discover
            # and fall back to other cluster nodes whenever a node goes down.
            # Reset the status after decommissioning to ensure the heartbeat
            # connection talks to this node only.
            self.status = CassandraAppStatus(self.get_current_superuser())

        try:
            self.stop_db(update_db=True, do_not_start_on_reboot=True)
        finally:
            self.status.end_restart()

    def flush_tables(self, keyspace, *tables):
        """Flushes one or more tables from the memtable.
        """
        LOG.debug("Flushing tables.")
        # nodetool -h <HOST> -p <PORT> -u <USER> -pw <PASSWORD> flush --
        # <keyspace> ( <table> ... )
        self._run_nodetool_command('flush', keyspace, *tables)

    def _run_nodetool_command(self, cmd, *args, **kwargs):
        """Execute a nodetool command on this node.
        """
        return utils.execute('nodetool', '-h', 'localhost',
                             cmd, *args, **kwargs)
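        # For example, flush_tables('system', 'local') above expands to the
        # shell command: nodetool -h localhost flush system local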

    def enable_root(self, root_password=None):
        """Cassandra's 'root' user is called 'cassandra'.
        Create a new superuser if it does not exist and grant it full
        superuser-level access to all keyspaces.
        """
        cassandra = models.CassandraRootUser(password=root_password)
        admin = self.build_admin()
        if self.is_root_enabled():
            admin.alter_user_password(cassandra)
        else:
            admin._create_superuser(cassandra)

        return cassandra.serialize()

    def is_root_enabled(self):
        """The Trove administrative user ('os_admin') should normally be the
        only superuser in the system.
        """
        found = self.build_admin().list_superusers()
        return len([user for user in found
                    if user.name != self._ADMIN_USER]) > 0
Example #19
0
class MongoDBApp(object):
    """Prepares DBaaS on a Guest container."""

    def __init__(self):
        self.state_change_wait_time = CONF.state_change_wait_time

        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(CONFIG_FILE),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        self.configuration_manager = ConfigurationManager(
            CONFIG_FILE, system.MONGO_USER, system.MONGO_USER,
            SafeYamlCodec(default_flow_style=False),
            requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))

        self.is_query_router = False
        self.is_cluster_member = False
        self.status = MongoDBAppStatus()

    def install_if_needed(self, packages):
        """Prepare the guest machine with a MongoDB installation."""
        LOG.info(_("Preparing Guest as MongoDB."))
        if not system.PACKAGER.pkg_is_installed(packages):
            LOG.debug("Installing packages: %s." % str(packages))
            system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT)
        LOG.info(_("Finished installing MongoDB server."))

    def _get_service_candidates(self):
        if self.is_query_router:
            return system.MONGOS_SERVICE_CANDIDATES
        return system.MONGOD_SERVICE_CANDIDATES

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        self.status.stop_db_service(
            self._get_service_candidates(), self.state_change_wait_time,
            disable_on_boot=do_not_start_on_reboot, update_db=update_db)

    def restart(self):
        self.status.restart_db_service(
            self._get_service_candidates(), self.state_change_wait_time)

    def start_db(self, update_db=False):
        self.status.start_db_service(
            self._get_service_candidates(), self.state_change_wait_time,
            enable_on_boot=True, update_db=update_db)

    def update_overrides(self, context, overrides, remove=False):
        if overrides:
            self.configuration_manager.apply_user_override(overrides)

    def remove_overrides(self):
        self.configuration_manager.remove_user_override()

    def start_db_with_conf_changes(self, config_contents):
        LOG.info(_('Starting MongoDB with configuration changes.'))
        if self.status.is_running:
            msg = 'Cannot start_db_with_conf_changes because status is %s.'
            LOG.debug(msg, self.status)
            raise RuntimeError(msg % self.status)
        LOG.info(_("Initiating config."))
        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration(
            None, mount_point=system.MONGODB_MOUNT_POINT)
        self.start_db(True)

    def apply_initial_guestagent_configuration(
            self, cluster_config, mount_point=None):
        LOG.debug("Applying initial configuration.")

        # Mongodb init scripts assume the PID-file path is writable by the
        # database service.
        # See: https://jira.mongodb.org/browse/SERVER-20075
        self._initialize_writable_run_dir()

        self.configuration_manager.apply_system_override(
            {'processManagement.fork': False,
             'processManagement.pidFilePath': system.MONGO_PID_FILE,
             'systemLog.destination': 'file',
             'systemLog.path': system.MONGO_LOG_FILE,
             'systemLog.logAppend': True
             })

        if mount_point:
            self.configuration_manager.apply_system_override(
                {'storage.dbPath': mount_point})

        if cluster_config is not None:
            self._configure_as_cluster_instance(cluster_config)
        else:
            self._configure_network(MONGODB_PORT)
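        # Expressed as a mongod.conf YAML fragment, the overrides above look
        # roughly like this (paths and values are illustrative):
        #
        #     processManagement:
        #       fork: false
        #       pidFilePath: /var/run/mongodb/mongodb.pid
        #     systemLog:
        #       destination: file
        #       path: /var/log/mongodb/mongod.log
        #       logAppend: true
        #     storage:
        #       dbPath: <mount_point>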

    def _initialize_writable_run_dir(self):
        """Create a writable directory for Mongodb's runtime data
        (e.g. PID-file).
        """
        mongodb_run_dir = os.path.dirname(system.MONGO_PID_FILE)
        LOG.debug("Initializing a runtime directory: %s" % mongodb_run_dir)
        operating_system.create_directory(
            mongodb_run_dir, user=system.MONGO_USER, group=system.MONGO_USER,
            force=True, as_root=True)

    def _configure_as_cluster_instance(self, cluster_config):
        """Configure this guest as a cluster instance and return its
        new status.
        """
        if cluster_config['instance_type'] == "query_router":
            self._configure_as_query_router()
        elif cluster_config["instance_type"] == "config_server":
            self._configure_as_config_server()
        elif cluster_config["instance_type"] == "member":
            self._configure_as_cluster_member(
                cluster_config['replica_set_name'])
        else:
            LOG.error(_("Bad cluster configuration; instance type "
                        "given as %s.") % cluster_config['instance_type'])
            return ds_instance.ServiceStatuses.FAILED

        if 'key' in cluster_config:
            self._configure_cluster_security(cluster_config['key'])

    def _configure_as_query_router(self):
        LOG.info(_("Configuring instance as a cluster query router."))
        self.is_query_router = True

        # FIXME(pmalik): We should really have a separate configuration
        # template for the 'mongos' process.
        # Remove all storage configurations from the template.
        # They apply only to 'mongod' processes.
        # Already applied overrides will be integrated into the base file and
        # their current groups removed.
        config = guestagent_utils.expand_dict(
            self.configuration_manager.parse_configuration())
        if 'storage' in config:
            LOG.debug("Removing 'storage' directives from the configuration "
                      "template.")
            del config['storage']
            self.configuration_manager.save_configuration(
                guestagent_utils.flatten_dict(config))

        # Apply 'mongos' configuration.
        self._configure_network(MONGODB_PORT)
        self.configuration_manager.apply_system_override(
            {'sharding.configDB': ''}, CNF_CLUSTER)
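        # 'sharding.configDB' is only an empty placeholder at this point;
        # it is populated later by add_config_servers() once the config
        # server addresses are known.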

    def _configure_as_config_server(self):
        LOG.info(_("Configuring instance as a cluster config server."))
        self._configure_network(CONFIGSVR_PORT)
        self.configuration_manager.apply_system_override(
            {'sharding.clusterRole': 'configsvr'}, CNF_CLUSTER)

    def _configure_as_cluster_member(self, replica_set_name):
        LOG.info(_("Configuring instance as a cluster member."))
        self.is_cluster_member = True
        self._configure_network(MONGODB_PORT)
        # We don't want these nodes thinking they are in a replica set yet,
        # as that would prevent us from creating the admin user, so start
        # mongo before updating the config. Mongo will be started by the
        # cluster taskmanager.
        self.start_db()
        self.configuration_manager.apply_system_override(
            {'replication.replSetName': replica_set_name}, CNF_CLUSTER)

    def _configure_cluster_security(self, key_value):
        """Force cluster key-file-based authentication.

        This will enable RBAC.
        """
        # Store the cluster member authentication key.
        self.store_key(key_value)

        self.configuration_manager.apply_system_override(
            {'security.clusterAuthMode': 'keyFile',
             'security.keyFile': self.get_key_file()}, CNF_CLUSTER)

    def _configure_network(self, port=None):
        """Make the service accessible at a given (or default if not) port.
        """
        instance_ip = netutils.get_my_ipv4()
        bind_interfaces_string = ','.join([instance_ip, '127.0.0.1'])
        options = {'net.bindIp': bind_interfaces_string}
        if port is not None:
            guestagent_utils.update_dict({'net.port': port}, options)

        self.configuration_manager.apply_system_override(options)
        self.status.set_host(instance_ip, port=port)
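        # As a mongod.conf YAML fragment this is roughly (values
        # illustrative):
        #
        #     net:
        #       bindIp: <instance ip>,127.0.0.1
        #       port: 27017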

    def clear_storage(self):
        mount_point = "/var/lib/mongodb/*"
        LOG.debug("Clearing storage at %s." % mount_point)
        try:
            operating_system.remove(mount_point, force=True, as_root=True)
        except exception.ProcessExecutionError:
            LOG.exception(_("Error clearing storage."))

    def _has_config_db(self):
        value_string = self.configuration_manager.get_value(
            'sharding', {}).get('configDB')

        return value_string is not None

    # FIXME(pmalik): This method should really be called 'set_config_servers'.
    # The current name suggests it adds more config servers, but it
    # actually replaces the existing ones.
    def add_config_servers(self, config_server_hosts):
        """Set config servers on a query router (mongos) instance.
        """
        config_servers_string = ','.join(['%s:%s' % (host, CONFIGSVR_PORT)
                                          for host in config_server_hosts])
        LOG.info(_("Setting config servers: %s") % config_servers_string)
        self.configuration_manager.apply_system_override(
            {'sharding.configDB': config_servers_string}, CNF_CLUSTER)
        self.start_db(True)

    def add_shard(self, replica_set_name, replica_set_member):
        """
        This method is used by query router (mongos) instances.
        """
        url = "%(rs)s/%(host)s:%(port)s"\
              % {'rs': replica_set_name,
                 'host': replica_set_member,
                 'port': MONGODB_PORT}
        MongoDBAdmin().add_shard(url)

    def add_members(self, members):
        """
        This method is used by a replica-set member instance.
        """
        def check_initiate_status():
            """
            This method is used to verify replica-set status.
            """
            status = MongoDBAdmin().get_repl_status()

            return (status["ok"] == 1 and
                    status["members"][0]["stateStr"] == "PRIMARY" and
                    status["myState"] == 1)

        def check_rs_status():
            """
            This method is used to verify replica-set status.
            """
            status = MongoDBAdmin().get_repl_status()
            primary_count = 0

            if status["ok"] != 1:
                return False
            if len(status["members"]) != (len(members) + 1):
                return False
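            # Every member must be in a healthy replica-set state:
            # 1 = PRIMARY, 2 = SECONDARY, 7 = ARBITER. Exactly one PRIMARY
            # is expected across the set.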
            for rs_member in status["members"]:
                if rs_member["state"] not in [1, 2, 7]:
                    return False
                if rs_member["health"] != 1:
                    return False
                if rs_member["state"] == 1:
                    primary_count += 1

            return primary_count == 1

        MongoDBAdmin().rs_initiate()
        # TODO(ramashri) see if hardcoded values can be removed
        utils.poll_until(check_initiate_status, sleep_time=30, time_out=100)

        # add replica-set members
        MongoDBAdmin().rs_add_members(members)
        # TODO(ramashri) see if hardcoded values can be removed
        utils.poll_until(check_rs_status, sleep_time=10, time_out=100)

    def _set_localhost_auth_bypass(self, enabled):
        """When active, the localhost exception allows connections from the
        localhost interface to create the first user on the admin database.
        The exception applies only when there are no users created in the
        MongoDB instance.
        """
        self.configuration_manager.apply_system_override(
            {'setParameter': {'enableLocalhostAuthBypass': enabled}})
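        # Equivalent mongod.conf fragment (illustrative):
        #
        #     setParameter:
        #       enableLocalhostAuthBypass: false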

    def list_all_dbs(self):
        return MongoDBAdmin().list_database_names()

    def db_data_size(self, db_name):
        schema = models.MongoDBSchema(db_name)
        return MongoDBAdmin().db_stats(schema.serialize())['dataSize']

    def admin_cmd_auth_params(self):
        return MongoDBAdmin().cmd_admin_auth_params

    def get_key_file(self):
        return system.MONGO_KEY_FILE

    def get_key(self):
        return operating_system.read_file(
            system.MONGO_KEY_FILE, as_root=True).rstrip()

    def store_key(self, key):
        """Store the cluster key."""
        LOG.debug('Storing key for MongoDB cluster.')
        operating_system.write_file(system.MONGO_KEY_FILE, key, as_root=True)
        operating_system.chmod(system.MONGO_KEY_FILE,
                               operating_system.FileMode.SET_USR_RO,
                               as_root=True)
        operating_system.chown(system.MONGO_KEY_FILE,
                               system.MONGO_USER, system.MONGO_USER,
                               as_root=True)

    def store_admin_password(self, password):
        LOG.debug('Storing admin password.')
        creds = MongoDBCredentials(username=system.MONGO_ADMIN_NAME,
                                   password=password)
        creds.write(system.MONGO_ADMIN_CREDS_FILE)
        return creds

    def create_admin_user(self, password):
        """Create the admin user while the localhost exception is active."""
        LOG.debug('Creating the admin user.')
        creds = self.store_admin_password(password)
        user = models.MongoDBUser(name='admin.%s' % creds.username,
                                  password=creds.password)
        user.roles = system.MONGO_ADMIN_ROLES
        # the driver engine is already cached, but we need to change it
        with MongoDBClient(None, host='localhost',
                           port=MONGODB_PORT) as client:
            MongoDBAdmin().create_validated_user(user, client=client)
        # now revert to the normal engine
        self.status.set_host(host=netutils.get_my_ipv4(),
                             port=MONGODB_PORT)
        LOG.debug('Created admin user.')

    def secure(self):
        """Create the Trove admin user.

        The service should not be running at this point.
        This will enable role-based access control (RBAC) by default.
        """
        if self.status.is_running:
            raise RuntimeError(_("Cannot secure the instance. "
                                 "The service is still running."))

        try:
            self.configuration_manager.apply_system_override(
                {'security.authorization': 'enabled'})
            self._set_localhost_auth_bypass(True)
            self.start_db(update_db=False)
            password = utils.generate_random_password()
            self.create_admin_user(password)
            LOG.debug("MongoDB secure complete.")
        finally:
            self._set_localhost_auth_bypass(False)
            self.stop_db()

    def get_configuration_property(self, name, default=None):
        """Return the value of a MongoDB configuration property.
        """
        return self.configuration_manager.get_value(name, default)

    def prep_primary(self):
        # Prepare the primary member of a replica set.
        password = utils.generate_random_password()
        self.create_admin_user(password)
        self.restart()

    @property
    def replica_set_name(self):
        return MongoDBAdmin().get_repl_status()['set']

    @property
    def admin_password(self):
        creds = MongoDBCredentials()
        creds.read(system.MONGO_ADMIN_CREDS_FILE)
        return creds.password

    def is_shard_active(self, replica_set_name):
        shards = MongoDBAdmin().list_active_shards()
        if replica_set_name in [shard['_id'] for shard in shards]:
            LOG.debug('Replica set %s is active.' % replica_set_name)
            return True
        else:
            LOG.debug('Replica set %s is not active.' % replica_set_name)
            return False
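
# A minimal usage sketch of MongoDBApp during instance preparation. This is
# illustrative only: the real call sequence lives in the MongoDB guestagent
# manager (not part of this example), and the argument values below are
# assumptions.
#
#     app = MongoDBApp()
#     app.install_if_needed(packages)
#     app.stop_db()
#     app.apply_initial_guestagent_configuration(
#         None, mount_point=system.MONGODB_MOUNT_POINT)
#     app.secure()
#     app.start_db(update_db=True)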
Example #20
0
class TiDbApp(object):
    """Prepares DBaaS on a Guest container."""
    def __init__(self):
        self.state_change_wait_time = CONF.state_change_wait_time

        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(CONFIG_FILE),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        self.configuration_manager = ConfigurationManager(
            CONFIG_FILE,
            system.MONGO_USER,
            system.MONGO_USER,
            SafeYamlCodec(default_flow_style=False),
            requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))

        self.is_query_router = False
        self.is_cluster_member = False
        self.status = TiDbAppStatus()

    def install_if_needed(self, packages):
        """Prepare the guest machine with a TiDb installation."""
        LOG.info("Preparing Guest as TiDb.")
        if not system.PACKAGER.pkg_is_installed(packages):
            LOG.debug("Installing packages: %s.", str(packages))
            system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT)
        LOG.info("Finished installing TiDb server.")

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        self.status.stop_db_service(self._get_service_candidates(),
                                    self.state_change_wait_time,
                                    disable_on_boot=do_not_start_on_reboot,
                                    update_db=update_db)

    def restart(self):
        self.status.restart_db_service(self._get_service_candidates(),
                                       self.state_change_wait_time)

    def start_db(self, update_db=False):
        self.status.start_db_service(self._get_service_candidates(),
                                     self.state_change_wait_time,
                                     enable_on_boot=True,
                                     update_db=update_db)

    def start_db_with_conf_changes(self, config_contents):
        LOG.info('Starting TiDb with configuration changes.')
        if self.status.is_running:
            msg = 'Cannot start_db_with_conf_changes because status is %s.'
            LOG.debug(msg, self.status)
            raise RuntimeError(msg % self.status)
        LOG.info("Initiating config.")
        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration(
            None, mount_point=system.MONGODB_MOUNT_POINT)
        self.start_db(True)

    def apply_initial_guestagent_configuration(self,
                                               cluster_config,
                                               mount_point=None):
        LOG.debug("Applying initial configuration.")

        # TiDb init scripts assume the PID-file path is writable by the
        # database service.
        self._initialize_writable_run_dir()

        self.configuration_manager.apply_system_override({
            'processManagement.fork': False,
            'systemLog.destination': 'file',
            'systemLog.logAppend': True
        })

        if mount_point:
            self.configuration_manager.apply_system_override(
                {'storage.dbPath': mount_point})

        if cluster_config is not None:
            self._configure_as_cluster_instance(cluster_config)
        else:
            self._configure_network(TIDB_PORT)

    def _configure_as_cluster_instance(self, cluster_config):
        """Configure this guest as a cluster instance and return its
        new status.
        """
        if cluster_config['instance_type'] == "tidb_server":
            self._configure_as_tidb_server()
        elif cluster_config["instance_type"] == "pd_server":
            self._configure_as_pd_server()
        elif cluster_config["instance_type"] == "tikv":
            self._configure_as_tikv_server(cluster_config['replica_set_name'])
        else:
            LOG.error(
                "Bad cluster configuration; instance type "
                "given as %s.", cluster_config['instance_type'])
            return ds_instance.ServiceStatuses.FAILED

    def _configure_as_tidb_server(self):
        LOG.info("Configuring instance as a cluster query router.")
        self.is_query_router = True

        # FIXME(pmalik): We should really have a separate configuration
        # template for the 'mongos' process.
        # Remove all storage configurations from the template.
        # They apply only to 'mongod' processes.
        # Already applied overrides will be integrated into the base file and
        # their current groups removed.
        config = guestagent_utils.expand_dict(
            self.configuration_manager.parse_configuration())
        if 'storage' in config:
            LOG.debug("Removing 'storage' directives from the configuration "
                      "template.")
            del config['storage']
            self.configuration_manager.save_configuration(
                guestagent_utils.flatten_dict(config))

        # Apply 'mongos' configuration.
        self._configure_network(MONGODB_PORT)
        self.configuration_manager.apply_system_override(
            {'sharding.configDB': ''}, CNF_CLUSTER)

    def _configure_as_pd_server(self):
        LOG.info("Configuring instance as a cluster config server.")
        self._configure_network(CONFIGSVR_PORT)
        self.configuration_manager.apply_system_override(
            {'sharding.clusterRole': 'configsvr'}, CNF_CLUSTER)

    def _configure_as_tikv_server(self, replica_set_name):
        LOG.info("Configuring instance as a cluster member.")
        self.is_cluster_member = True
        self._configure_network(MONGODB_PORT)
        # We don't want these nodes thinking they are in a replica set yet,
        # as that would prevent us from creating the admin user, so start
        # mongo before updating the config. Mongo will be started by the
        # cluster taskmanager.
        self.start_db()
        self.configuration_manager.apply_system_override(
            {'replication.replSetName': replica_set_name}, CNF_CLUSTER)

    def _configure_network(self, port=None):
        """Make the service accessible at a given (or default if not) port.
        """
        instance_ip = netutils.get_my_ipv4()
        bind_interfaces_string = ','.join([instance_ip, '127.0.0.1'])
        options = {'net.bindIp': bind_interfaces_string}
        if port is not None:
            guestagent_utils.update_dict({'net.port': port}, options)

        self.configuration_manager.apply_system_override(options)
        self.status.set_host(instance_ip, port=port)