Example #1
    def test_import_override_strategy(self):
        base_config_contents = {'Section_1': {'name': 'pi',
                                              'is_number': 'True',
                                              'value': '3.1415'}
                                }

        config_overrides_v1 = {'Section_1': {'name': 'sqrt(2)',
                                             'value': '1.4142'}
                               }

        config_overrides_v2 = {'Section_1': {'is_number': 'False'}}

        config_overrides_seq = [config_overrides_v1, config_overrides_v2]
        expected_contents_seq = [base_config_contents, base_config_contents,
                                 base_config_contents]

        codec = IniCodec()
        current_user = getpass.getuser()
        revision_dir = self._create_temp_dir()

        with tempfile.NamedTemporaryFile() as base_config:

            # Write initial config contents.
            operating_system.write_file(
                base_config.name, base_config_contents, codec)

            strategy = ImportOverrideStrategy(revision_dir, 'ext')
            strategy.configure(
                base_config.name, current_user, current_user, codec, False)

            self._assert_import_override_strategy(
                strategy, config_overrides_seq, expected_contents_seq)
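The assertion helper `_assert_import_override_strategy` is not shown in these snippets. As a rough illustration of what the override sequence above does, here is a minimal standard-library sketch (hypothetical `deep_merge` helper, illustrative values) of how an override revision layers on top of the base contents; ImportOverrideStrategy presumably keeps each revision in a separate file under `revision_dir` rather than rewriting the base file, which is consistent with the expected contents staying equal to `base_config_contents`.

import copy

def deep_merge(base, override):
    # Return a copy of `base` with `override` recursively applied (illustration only).
    merged = copy.deepcopy(base)
    for section, values in override.items():
        merged.setdefault(section, {}).update(values)
    return merged

base = {'Section_1': {'name': 'pi', 'is_number': 'True', 'value': '3.1415'}}
override_v1 = {'Section_1': {'name': 'sqrt(2)', 'value': '1.4142'}}

applied = deep_merge(base, override_v1)
assert applied['Section_1']['name'] == 'sqrt(2)'
assert applied['Section_1']['is_number'] == 'True'  # untouched keys survive
assert base['Section_1']['name'] == 'pi'            # the base contents are not modified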
Example #2
    def _test_import_override_strategy(self, system_overrides, user_overrides,
                                       test_multi_rev):
        base_config_contents = {
            'Section_1': {
                'name': 'pi',
                'is_number': 'True',
                'value': '3.1415'
            }
        }

        codec = IniCodec()
        current_user = getpass.getuser()
        revision_dir = self._create_temp_dir()

        with tempfile.NamedTemporaryFile() as base_config:

            # Write initial config contents.
            operating_system.write_file(base_config.name, base_config_contents,
                                        codec)

            strategy = ImportOverrideStrategy(revision_dir, 'ext')
            strategy.configure(base_config.name, current_user, current_user,
                               codec, False)

            self._assert_import_override_strategy(strategy, system_overrides,
                                                  user_overrides,
                                                  test_multi_rev)
Example #3
    def _test_import_override_strategy(self, system_overrides, user_overrides, test_multi_rev):
        base_config_contents = {"Section_1": {"name": "pi", "is_number": "True", "value": "3.1415"}}

        codec = IniCodec()
        current_user = getpass.getuser()
        revision_dir = self._create_temp_dir()

        with tempfile.NamedTemporaryFile() as base_config:

            # Write initial config contents.
            operating_system.write_file(base_config.name, base_config_contents, codec)

            strategy = ImportOverrideStrategy(revision_dir, "ext")
            strategy.configure(base_config.name, current_user, current_user, codec, False)

            self._assert_import_override_strategy(strategy, system_overrides, user_overrides, test_multi_rev)
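Examples #1 to #3 all feed the same nested dict through IniCodec. For readers unfamiliar with the codec, here is a hedged stand-in using the standard-library configparser that shows roughly the INI text such a write produces (IniCodec's exact spacing and key ordering may differ).

import configparser
import io

contents = {"Section_1": {"name": "pi", "is_number": "True", "value": "3.1415"}}

parser = configparser.ConfigParser()
parser.read_dict(contents)

buf = io.StringIO()
parser.write(buf)
print(buf.getvalue())
# Roughly:
# [Section_1]
# name = pi
# is_number = True
# value = 3.1415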
Example #4
    def __init__(self, status):
        self.state_change_wait_time = CONF.state_change_wait_time
        self.status = status
        revision_dir = \
            guestagent_utils.build_file_path(
                os.path.join(MOUNT_POINT,
                             os.path.dirname(system.VERTICA_ADMIN)),
                ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)

        if not operating_system.exists(FAKE_CFG):
            operating_system.write_file(FAKE_CFG, '', as_root=True)
            operating_system.chown(FAKE_CFG,
                                   system.VERTICA_ADMIN,
                                   system.VERTICA_ADMIN_GRP,
                                   as_root=True)
            operating_system.chmod(FAKE_CFG,
                                   FileMode.ADD_GRP_RX_OTH_RX(),
                                   as_root=True)
        self.configuration_manager = \
            ConfigurationManager(FAKE_CFG, system.VERTICA_ADMIN,
                                 system.VERTICA_ADMIN_GRP,
                                 PropertiesCodec(delimiter='='),
                                 requires_root=True,
                                 override_strategy=ImportOverrideStrategy(
                                     revision_dir, "cnf"))
Example #5
    def init_config(self):
        if not operating_system.exists(MOUNT_POINT, True):
            operating_system.create_directory(MOUNT_POINT,
                                              system.DB2_INSTANCE_OWNER,
                                              system.DB2_INSTANCE_OWNER,
                                              as_root=True)
        """
        The database manager configuration file, db2systm, is stored under the
        /home/db2inst1/sqllib directory. To update the configuration
        parameters, DB2 recommends using the UPDATE DBM CONFIGURATION
        command instead of directly updating the config file.

        The existing PropertiesCodec implementation has been reused to handle
        text-file operations. Configuration overrides are implemented using
        the ImportOverrideStrategy of the guestagent configuration manager.
        """
        LOG.debug("Initialize DB2 configuration")
        revision_dir = (
            guestagent_utils.build_file_path(
                os.path.join(MOUNT_POINT,
                             os.path.dirname(system.DB2_INSTANCE_OWNER)),
                ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        )
        if not operating_system.exists(FAKE_CFG):
            operating_system.write_file(FAKE_CFG, '', as_root=True)
            operating_system.chown(FAKE_CFG, system.DB2_INSTANCE_OWNER,
                                   system.DB2_INSTANCE_OWNER, as_root=True)
        self.configuration_manager = (
            ConfigurationManager(FAKE_CFG, system.DB2_INSTANCE_OWNER,
                                 system.DB2_INSTANCE_OWNER,
                                 PropertiesCodec(delimiter='='),
                                 requires_root=True,
                                 override_strategy=ImportOverrideStrategy(
                                     revision_dir, "cnf"))
        )
        '''
        Below we are getting the database manager default configuration and
        saving it to the DB2_DEFAULT_CFG file. This is done to help with
        correctly resetting the configuration to its original values when a
        user wants to detach a user-defined configuration group from an
        instance. DB2 provides a command to reset the database manager
        configuration parameters (RESET DBM CONFIGURATION) but this command
        resets all the configuration parameters to the system defaults. When
        we build a DB2 guest image there are certain configuration
        parameters, such as SVCENAME, which we set so that the instance can
        start correctly. Hence, resetting these values to the system defaults
        would leave the instance in an unstable state. Instead, the
        recommended way to reset a subset of configuration parameters is to
        save the output of GET DBM CONFIGURATION for the original
        configuration and then call UPDATE DBM CONFIGURATION to reset the
        values.
          http://www.ibm.com/support/knowledgecenter/SSEPGG_10.5.0/
        com.ibm.db2.luw.admin.cmd.doc/doc/r0001970.html
        '''
        if not operating_system.exists(DB2_DEFAULT_CFG):
            run_command(system.GET_DBM_CONFIGURATION % {
                "dbm_config": DB2_DEFAULT_CFG})
        self.process_default_dbm_config()
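The DB2 overrides in Example #5 are written through `PropertiesCodec(delimiter='=')`. As a hedged, standard-library-only stand-in, the snippet below round-trips the kind of `key = value` text such a codec presumably handles; the parameter value is illustrative.

def serialize(props, delimiter='='):
    # One "KEY = value" line per entry, mirroring a properties-style config.
    return '\n'.join('%s %s %s' % (k, delimiter, v) for k, v in props.items())

def deserialize(text, delimiter='='):
    result = {}
    for line in text.splitlines():
        if delimiter in line:
            key, _, value = line.partition(delimiter)
            result[key.strip()] = value.strip()
    return result

props = {'SVCENAME': 'db2c_db2inst1'}   # illustrative value
assert deserialize(serialize(props)) == props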
Example #6
    def configuration_manager(self):
        if self._configuration_manager:
            return self._configuration_manager

        self._configuration_manager = ConfigurationManager(
            MYSQL_CONFIG,
            CONF.database_service_uid,
            CONF.database_service_uid,
            service.BaseDbApp.CFG_CODEC,
            requires_root=True,
            override_strategy=ImportOverrideStrategy(CNF_INCLUDE_DIR, CNF_EXT))
        return self._configuration_manager
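Example #6 builds the manager lazily instead of at class-definition time as in Examples #9, #11 and #13. Below is a minimal sketch of that cached-property pattern, assuming the backing attribute is initialized to None elsewhere in the class (not shown above); the names are illustrative.

class LazyManagerHolder:
    def __init__(self):
        self._configuration_manager = None  # assumed to be initialized like this in the real class

    @property
    def configuration_manager(self):
        if self._configuration_manager:
            return self._configuration_manager
        # Stand-in for the ConfigurationManager(...) construction in Example #6.
        self._configuration_manager = object()
        return self._configuration_manager

holder = LazyManagerHolder()
assert holder.configuration_manager is holder.configuration_manager  # built once, then reused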
Example #7
    def test_import_override_strategy(self):
        base_config_contents = {
            'Section_1': {
                'name': 'pi',
                'is_number': 'True',
                'value': '3.1415'
            }
        }

        config_overrides_v1 = {
            'Section_1': {
                'name': 'sqrt(2)',
                'value': '1.4142'
            }
        }

        config_overrides_v2 = {'Section_1': {'is_number': 'False'}}

        config_overrides_seq = [config_overrides_v1, config_overrides_v2]
        expected_contents_seq = [
            base_config_contents, base_config_contents, base_config_contents
        ]

        codec = IniCodec()
        current_user = getpass.getuser()
        revision_dir = self._create_temp_dir()

        with tempfile.NamedTemporaryFile() as base_config:

            # Write initial config contents.
            operating_system.write_file(base_config.name, base_config_contents,
                                        codec)

            strategy = ImportOverrideStrategy(revision_dir, 'ext')
            strategy.configure(base_config.name, current_user, current_user,
                               codec, False)

            self._assert_import_override_strategy(strategy,
                                                  config_overrides_seq,
                                                  expected_contents_seq)
Example #8
    def _test_import_override_strategy(
            self, system_overrides, user_overrides, test_multi_rev):
        base_config_contents = {'Section_1': {'name': 'pi',
                                              'is_number': 'True',
                                              'value': '3.1415'}
                                }

        codec = IniCodec()
        current_user = getpass.getuser()
        revision_dir = self._create_temp_dir()

        with tempfile.NamedTemporaryFile() as base_config:

            # Write initial config contents.
            operating_system.write_file(
                base_config.name, base_config_contents, codec)

            strategy = ImportOverrideStrategy(revision_dir, 'ext')
            strategy.configure(
                base_config.name, current_user, current_user, codec, False)

            self._assert_import_override_strategy(
                strategy, system_overrides, user_overrides, test_multi_rev)
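Example #8's helper distinguishes system overrides from user overrides. The sketch below is a hedged illustration using only the ConfigurationManager calls that appear elsewhere in these examples (the values are made up); it shows how the two layers are applied and how the user layer can be detached on its own.

def apply_both_layers(manager):
    # System overrides may optionally be grouped under a name (e.g. CNF_MASTER in Example #9).
    manager.apply_system_override({'Section_1': {'value': '2.7182'}})
    # User overrides carry the values from a user-defined configuration group.
    manager.apply_user_override({'Section_1': {'name': 'e'}})
    # The user layer can be removed again without touching the system layer.
    manager.remove_user_override()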
Example #9
class BaseMySqlApp(object):
    """Prepares DBaaS on a Guest container."""

    TIME_OUT = 1000
    CFG_CODEC = IniCodec()

    @property
    def local_sql_client(self):
        return self._local_sql_client

    @property
    def keep_alive_connection_cls(self):
        return self._keep_alive_connection_cls

    @property
    def service_candidates(self):
        return ["mysql", "mysqld", "mysql-server"]

    @property
    def mysql_service(self):
        service_candidates = self.service_candidates
        return operating_system.service_discovery(service_candidates)

    configuration_manager = ConfigurationManager(
        MYSQL_CONFIG,
        MYSQL_OWNER,
        MYSQL_OWNER,
        CFG_CODEC,
        requires_root=True,
        override_strategy=ImportOverrideStrategy(CNF_INCLUDE_DIR, CNF_EXT))

    def get_engine(self):
        """Create the default engine with the updated admin user."""
        # TODO(rnirmal): Based on permission issues being resolved we may revert
        # url = URL(drivername='mysql', host='localhost',
        #          query={'read_default_file': '/etc/mysql/my.cnf'})
        global ENGINE
        if ENGINE:
            return ENGINE

        pwd = self.get_auth_password()
        ENGINE = sqlalchemy.create_engine(
            CONNECTION_STR_FORMAT %
            (ADMIN_USER_NAME, urllib.parse.quote(pwd.strip())),
            pool_recycle=120,
            echo=CONF.sql_query_logging,
            listeners=[self.keep_alive_connection_cls()])
        return ENGINE

    @classmethod
    def get_auth_password(cls):
        auth_config = operating_system.read_file(cls.get_client_auth_file(),
                                                 codec=cls.CFG_CODEC)
        return auth_config['client']['password']

    @classmethod
    def get_data_dir(cls):
        return cls.configuration_manager.get_value(
            MySQLConfParser.SERVER_CONF_SECTION).get('datadir')

    @classmethod
    def set_data_dir(cls, value):
        cls.configuration_manager.apply_system_override(
            {MySQLConfParser.SERVER_CONF_SECTION: {
                'datadir': value
            }})

    @classmethod
    def get_client_auth_file(cls):
        return guestagent_utils.build_file_path("~", ".my.cnf")

    def __init__(self, status, local_sql_client, keep_alive_connection_cls):
        """By default login with root no password for initial setup."""
        self.state_change_wait_time = CONF.state_change_wait_time
        self.status = status
        self._local_sql_client = local_sql_client
        self._keep_alive_connection_cls = keep_alive_connection_cls

    def _create_admin_user(self, client, password):
        """
        Create an os_admin user with a random password
        and all privileges similar to the root user.
        """
        LOG.debug("Creating Trove admin user '%s'.", ADMIN_USER_NAME)
        host = "127.0.0.1"
        g = sql_query.Grant(permissions='ALL',
                            user=ADMIN_USER_NAME,
                            host=host,
                            grant_option=True,
                            clear=password)
        t = text(str(g))
        client.execute(t)
        LOG.debug("Trove admin user '%s' created.", ADMIN_USER_NAME)

    @staticmethod
    def _generate_root_password(client):
        """Generate and set a random root password and forget about it."""
        localhost = "localhost"
        uu = sql_query.SetPassword(
            models.MySQLUser.root_username,
            host=localhost,
            new_password=utils.generate_random_password())
        t = text(str(uu))
        client.execute(t)

    def install_if_needed(self, packages):
        """Prepare the guest machine with a secure
           mysql server installation.
        """
        LOG.info(_("Preparing Guest as MySQL Server."))
        if not packager.pkg_is_installed(packages):
            LOG.debug("Installing MySQL server.")
            self._clear_mysql_config()
            # set blank password on pkg configuration stage
            pkg_opts = {'root_password': '', 'root_password_again': ''}
            packager.pkg_install(packages, pkg_opts, self.TIME_OUT)
            self._create_mysql_confd_dir()
            LOG.info(_("Finished installing MySQL server."))
        self.start_mysql()

    def secure(self, config_contents):
        LOG.debug("Securing MySQL now.")
        clear_expired_password()
        LOG.debug("Generating admin password.")
        admin_password = utils.generate_random_password()
        engine = sqlalchemy.create_engine(CONNECTION_STR_FORMAT % ('root', ''),
                                          echo=True)
        with self.local_sql_client(engine, use_flush=False) as client:
            self._create_admin_user(client, admin_password)

        LOG.debug("Switching to the '%s' user now.", ADMIN_USER_NAME)
        engine = sqlalchemy.create_engine(
            CONNECTION_STR_FORMAT %
            (ADMIN_USER_NAME, urllib.parse.quote(admin_password)),
            echo=True)
        with self.local_sql_client(engine) as client:
            self._remove_anonymous_user(client)

        self.stop_db()
        self._reset_configuration(config_contents, admin_password)
        self.start_mysql()
        LOG.debug("MySQL secure complete.")

    def _reset_configuration(self, configuration, admin_password=None):
        if not admin_password:
            # Take the current admin password from the base configuration file
            # if not given.
            admin_password = self.get_auth_password()

        self.configuration_manager.save_configuration(configuration)
        self._save_authentication_properties(admin_password)
        self.wipe_ib_logfiles()

    def _save_authentication_properties(self, admin_password):
        client_sect = {
            'client': {
                'user': ADMIN_USER_NAME,
                'password': admin_password,
                'host': '127.0.0.1'
            }
        }
        operating_system.write_file(self.get_client_auth_file(),
                                    client_sect,
                                    codec=self.CFG_CODEC)

    def secure_root(self, secure_remote_root=True):
        with self.local_sql_client(self.get_engine()) as client:
            LOG.info(_("Preserving root access from restore."))
            self._generate_root_password(client)
            if secure_remote_root:
                self._remove_remote_root_access(client)

    def _clear_mysql_config(self):
        """Clear old configs, which can be incompatible with new version."""
        LOG.debug("Clearing old MySQL config.")
        random_uuid = str(uuid.uuid4())
        configs = ["/etc/my.cnf", "/etc/mysql/conf.d", "/etc/mysql/my.cnf"]
        for config in configs:
            try:
                old_conf_backup = "%s_%s" % (config, random_uuid)
                operating_system.move(config, old_conf_backup, as_root=True)
                LOG.debug("%(cfg)s saved to %(saved_cfg)s_%(uuid)s.", {
                    'cfg': config,
                    'saved_cfg': config,
                    'uuid': random_uuid
                })
            except exception.ProcessExecutionError:
                pass

    def _create_mysql_confd_dir(self):
        conf_dir = "/etc/mysql/conf.d"
        LOG.debug("Creating %s.", conf_dir)
        operating_system.create_directory(conf_dir, as_root=True)

    def _enable_mysql_on_boot(self):
        LOG.debug("Enabling MySQL on boot.")
        try:
            utils.execute_with_timeout(self.mysql_service['cmd_enable'],
                                       shell=True)
        except KeyError:
            LOG.exception(_("Error enabling MySQL start on boot."))
            raise RuntimeError(_("Service is not discovered."))

    def _disable_mysql_on_boot(self):
        try:
            utils.execute_with_timeout(self.mysql_service['cmd_disable'],
                                       shell=True)
        except KeyError:
            LOG.exception(_("Error disabling MySQL start on boot."))
            raise RuntimeError(_("Service is not discovered."))

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        LOG.info(_("Stopping MySQL."))
        if do_not_start_on_reboot:
            self._disable_mysql_on_boot()
        try:
            utils.execute_with_timeout(self.mysql_service['cmd_stop'],
                                       shell=True)
        except KeyError:
            LOG.exception(_("Error stopping MySQL."))
            raise RuntimeError(_("Service is not discovered."))
        if not self.status.wait_for_real_status_to_change_to(
                rd_instance.ServiceStatuses.SHUTDOWN,
                self.state_change_wait_time, update_db):
            LOG.error(_("Could not stop MySQL."))
            self.status.end_restart()
            raise RuntimeError(_("Could not stop MySQL!"))

    def _remove_anonymous_user(self, client):
        LOG.debug("Removing anonymous user.")
        t = text(sql_query.REMOVE_ANON)
        client.execute(t)
        LOG.debug("Anonymous user removed.")

    def _remove_remote_root_access(self, client):
        LOG.debug("Removing root access.")
        t = text(sql_query.REMOVE_ROOT)
        client.execute(t)
        LOG.debug("Root access removed.")

    def restart(self):
        try:
            self.status.begin_restart()
            self.stop_db()
            self.start_mysql()
        finally:
            self.status.end_restart()

    def update_overrides(self, overrides):
        self._apply_user_overrides(overrides)

    def _apply_user_overrides(self, overrides):
        # All user-defined values go to the server section of the configuration
        # file.
        if overrides:
            self.configuration_manager.apply_user_override(
                {MySQLConfParser.SERVER_CONF_SECTION: overrides})

    def apply_overrides(self, overrides):
        LOG.debug("Applying overrides to MySQL.")
        with self.local_sql_client(self.get_engine()) as client:
            LOG.debug("Updating override values in running MySQL.")
            for k, v in overrides.items():
                byte_value = guestagent_utils.to_bytes(v)
                q = sql_query.SetServerVariable(key=k, value=byte_value)
                t = text(str(q))
                try:
                    client.execute(t)
                except exc.OperationalError:
                    output = {'key': k, 'value': byte_value}
                    LOG.exception(
                        _("Unable to set %(key)s with value "
                          "%(value)s."), output)

    def make_read_only(self, read_only):
        with self.local_sql_client(self.get_engine()) as client:
            q = "set global read_only = %s" % read_only
            client.execute(text(str(q)))

    def wipe_ib_logfiles(self):
        """Destroys the iblogfiles.

        If for some reason the selected log size in the conf changes from the
        current size of the files, MySQL will fail to start, so we delete the
        files to be safe.
        """
        LOG.info(_("Wiping ib_logfiles."))
        for index in range(2):
            try:
                # On restarts, sometimes these are wiped. So it can be a race
                # to have MySQL start up before it's restarted and these have
                # to be deleted. That's why it's ok if they aren't found and
                # that is why we use the "force" option to "remove".
                operating_system.remove("%s/ib_logfile%d" %
                                        (self.get_data_dir(), index),
                                        force=True,
                                        as_root=True)
            except exception.ProcessExecutionError:
                LOG.exception(_("Could not delete logfile."))
                raise

    def remove_overrides(self):
        self.configuration_manager.remove_user_override()

    def _remove_replication_overrides(self, cnf_file):
        LOG.info(_("Removing replication configuration file."))
        if os.path.exists(cnf_file):
            operating_system.remove(cnf_file, as_root=True)

    def exists_replication_source_overrides(self):
        return self.configuration_manager.has_system_override(CNF_MASTER)

    def write_replication_source_overrides(self, overrideValues):
        self.configuration_manager.apply_system_override(
            overrideValues, CNF_MASTER)

    def write_replication_replica_overrides(self, overrideValues):
        self.configuration_manager.apply_system_override(
            overrideValues, CNF_SLAVE)

    def remove_replication_source_overrides(self):
        self.configuration_manager.remove_system_override(CNF_MASTER)

    def remove_replication_replica_overrides(self):
        self.configuration_manager.remove_system_override(CNF_SLAVE)

    def grant_replication_privilege(self, replication_user):
        LOG.info(_("Granting Replication Slave privilege."))

        LOG.debug("grant_replication_privilege: %s", replication_user)

        with self.local_sql_client(self.get_engine()) as client:
            g = sql_query.Grant(permissions=['REPLICATION SLAVE'],
                                user=replication_user['name'],
                                clear=replication_user['password'])

            t = text(str(g))
            client.execute(t)

    def get_port(self):
        with self.local_sql_client(self.get_engine()) as client:
            result = client.execute('SELECT @@port').first()
            return result[0]

    def get_binlog_position(self):
        with self.local_sql_client(self.get_engine()) as client:
            result = client.execute('SHOW MASTER STATUS').first()
            binlog_position = {
                'log_file': result['File'],
                'position': result['Position']
            }
            return binlog_position

    def execute_on_client(self, sql_statement):
        LOG.debug("Executing SQL: %s", sql_statement)
        with self.local_sql_client(self.get_engine()) as client:
            return client.execute(sql_statement)

    def start_slave(self):
        LOG.info(_("Starting slave replication."))
        with self.local_sql_client(self.get_engine()) as client:
            client.execute('START SLAVE')
            self._wait_for_slave_status("ON", client, 60)

    def stop_slave(self, for_failover):
        replication_user = None
        LOG.info(_("Stopping slave replication."))
        with self.local_sql_client(self.get_engine()) as client:
            result = client.execute('SHOW SLAVE STATUS')
            replication_user = result.first()['Master_User']
            client.execute('STOP SLAVE')
            client.execute('RESET SLAVE ALL')
            self._wait_for_slave_status("OFF", client, 30)
            if not for_failover:
                client.execute('DROP USER ' + replication_user)
        return {'replication_user': replication_user}

    def stop_master(self):
        LOG.info(_("Stopping replication master."))
        with self.local_sql_client(self.get_engine()) as client:
            client.execute('RESET MASTER')

    def _wait_for_slave_status(self, status, client, max_time):
        def verify_slave_status():
            actual_status = client.execute(
                "SHOW GLOBAL STATUS like 'slave_running'").first()[1]
            return actual_status.upper() == status.upper()

        LOG.debug("Waiting for SLAVE_RUNNING to change to %s.", status)
        try:
            utils.poll_until(verify_slave_status,
                             sleep_time=3,
                             time_out=max_time)
            LOG.info(_("Replication is now %s."), status.lower())
        except PollTimeOut:
            raise RuntimeError(
                _("Replication is not %(status)s after %(max)d seconds.") % {
                    'status': status.lower(),
                    'max': max_time
                })

    def start_mysql(self, update_db=False, disable_on_boot=False, timeout=120):
        LOG.info(_("Starting MySQL."))
        # This is the site of all the trouble in the restart tests.
        # Essentially what happens is that mysql start fails, but does not
        # die. It is then impossible to kill the original, so

        if disable_on_boot:
            self._disable_mysql_on_boot()
        else:
            self._enable_mysql_on_boot()

        try:
            utils.execute_with_timeout(self.mysql_service['cmd_start'],
                                       shell=True,
                                       timeout=timeout)
        except KeyError:
            raise RuntimeError(_("Service is not discovered."))
        except exception.ProcessExecutionError:
            # it seems mysql (percona, at least) might come back with [Fail]
            # but actually come up ok. we're looking into the timing issue on
            # parallel, but for now, we'd like to give it one more chance to
            # come up. so regardless of the execute_with_timeout() response,
            # we'll assume mysql comes up and check its status for a while.
            pass
        if not self.status.wait_for_real_status_to_change_to(
                rd_instance.ServiceStatuses.RUNNING,
                self.state_change_wait_time, update_db):
            LOG.error(_("Start up of MySQL failed."))
            # If it won't start, but won't die either, kill it by hand so we
            # don't let a rogue process wander around.
            try:
                utils.execute_with_timeout("sudo", "pkill", "-9", "mysql")
            except exception.ProcessExecutionError:
                LOG.exception(_("Error killing stalled MySQL start command."))
                # There's nothing more we can do...
            self.status.end_restart()
            raise RuntimeError(_("Could not start MySQL!"))

    def start_db_with_conf_changes(self, config_contents):
        LOG.info(_("Starting MySQL with conf changes."))
        LOG.debug("Inside the guest - Status is_running = (%s).",
                  self.status.is_running)
        if self.status.is_running:
            LOG.error(
                _("Cannot execute start_db_with_conf_changes because "
                  "MySQL state == %s."), self.status)
            raise RuntimeError(_("MySQL not stopped."))
        LOG.info(_("Resetting configuration."))
        self._reset_configuration(config_contents)
        self.start_mysql(True)

    def reset_configuration(self, configuration):
        config_contents = configuration['config_contents']
        LOG.info(_("Resetting configuration."))
        self._reset_configuration(config_contents)

    def reset_admin_password(self, admin_password):
        """Replace the password in the my.cnf file."""
        # grant the new  admin password
        with self.local_sql_client(self.get_engine()) as client:
            self._create_admin_user(client, admin_password)
            # reset the ENGINE because the password could have changed
            global ENGINE
            ENGINE = None
        self._save_authentication_properties(admin_password)
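In Example #9 a configuration change travels two separate paths: `update_overrides` persists the values through the configuration manager so they survive restarts, while `apply_overrides` pushes the same values into the running server. A short hedged usage sketch; the override key and the already-constructed `app` are assumptions.

def persist_and_apply(app, overrides):
    # Persist: lands in the server section of the config file via apply_user_override.
    app.update_overrides(overrides)
    # Live update: issues a SET GLOBAL per key through sql_query.SetServerVariable,
    # logging failures instead of raising.
    app.apply_overrides(overrides)

# e.g. persist_and_apply(app, {'max_connections': '512'})   # illustrative key/value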
Example #10
 def test_get_value(self):
     revision_dir = self._create_temp_dir()
     self._assert_get_value(ImportOverrideStrategy(revision_dir, 'ext'))
     self._assert_get_value(OneFileOverrideStrategy(revision_dir))
Example #11
class BaseMySqlApp(service.BaseDbApp):
    configuration_manager = ConfigurationManager(
        MYSQL_CONFIG,
        CONF.database_service_uid,
        CONF.database_service_uid,
        service.BaseDbApp.CFG_CODEC,
        requires_root=True,
        override_strategy=ImportOverrideStrategy(CNF_INCLUDE_DIR, CNF_EXT))

    def get_engine(self):
        """Create the default engine with the updated admin user.

        If admin user not created yet, use root instead.
        """
        global ENGINE
        if ENGINE:
            return ENGINE

        user = ADMIN_USER_NAME
        password = ""
        try:
            password = self.get_auth_password()
        except exception.UnprocessableEntity:
            # os_admin user not created yet
            user = 'root'

        ENGINE = sqlalchemy.create_engine(
            CONNECTION_STR_FORMAT %
            (user, urllib.parse.quote(password.strip())),
            pool_recycle=120,
            echo=CONF.sql_query_logging,
            listeners=[mysql_util.BaseKeepAliveConnection()])

        return ENGINE

    def execute_sql(self, sql_statement):
        LOG.debug("Executing SQL: %s", sql_statement)
        with mysql_util.SqlClient(self.get_engine()) as client:
            return client.execute(sql_statement)

    @classmethod
    def get_data_dir(cls):
        return cls.configuration_manager.get_value(
            MySQLConfParser.SERVER_CONF_SECTION).get('datadir')

    @classmethod
    def set_data_dir(cls, value):
        cls.configuration_manager.apply_system_override(
            {MySQLConfParser.SERVER_CONF_SECTION: {
                'datadir': value
            }})

    def _create_admin_user(self, client, password):
        """
        Create an os_admin user with a random password
        and all privileges similar to the root user.
        """
        LOG.info("Creating Trove admin user '%s'.", ADMIN_USER_NAME)
        host = "localhost"
        try:
            cu = sql_query.CreateUser(ADMIN_USER_NAME,
                                      host=host,
                                      clear=password)
            t = text(str(cu))
            client.execute(t, **cu.keyArgs)
        except (exc.OperationalError, exc.InternalError) as err:
            # Ignore, user is already created, just reset the password
            # (user will already exist in a restore from backup)
            LOG.debug(err)
            uu = sql_query.SetPassword(ADMIN_USER_NAME,
                                       host=host,
                                       new_password=password,
                                       ds=CONF.datastore_manager,
                                       ds_version=CONF.datastore_version)
            t = text(str(uu))
            client.execute(t)

        g = sql_query.Grant(permissions='ALL',
                            user=ADMIN_USER_NAME,
                            host=host,
                            grant_option=True)
        t = text(str(g))
        client.execute(t)
        LOG.info("Trove admin user '%s' created.", ADMIN_USER_NAME)

    def secure(self):
        LOG.info("Securing MySQL now.")

        root_pass = self.get_auth_password(file="root.cnf")
        admin_password = utils.generate_random_password()

        engine = sqlalchemy.create_engine(CONNECTION_STR_FORMAT %
                                          ('root', root_pass),
                                          echo=True)
        with mysql_util.SqlClient(engine, use_flush=False) as client:
            self._create_admin_user(client, admin_password)

        engine = sqlalchemy.create_engine(
            CONNECTION_STR_FORMAT %
            (ADMIN_USER_NAME, urllib.parse.quote(admin_password)),
            echo=True)
        with mysql_util.SqlClient(engine) as client:
            self._remove_anonymous_user(client)

        self.save_password(ADMIN_USER_NAME, admin_password)
        LOG.info("MySQL secure complete.")

    def secure_root(self):
        with mysql_util.SqlClient(self.get_engine()) as client:
            self._remove_remote_root_access(client)

    def _remove_anonymous_user(self, client):
        LOG.debug("Removing anonymous user.")
        t = text(sql_query.REMOVE_ANON)
        client.execute(t)
        LOG.debug("Anonymous user removed.")

    def _remove_remote_root_access(self, client):
        LOG.debug("Removing remote root access.")
        t = text(sql_query.REMOVE_ROOT)
        client.execute(t)
        LOG.debug("Root remote access removed.")

    def update_overrides(self, overrides):
        if overrides:
            self.configuration_manager.apply_user_override(
                {MySQLConfParser.SERVER_CONF_SECTION: overrides})

    def apply_overrides(self, overrides):
        with mysql_util.SqlClient(self.get_engine()) as client:
            for k, v in overrides.items():
                byte_value = guestagent_utils.to_bytes(v)
                q = sql_query.SetServerVariable(key=k, value=byte_value)
                t = text(str(q))
                try:
                    client.execute(t)
                except exc.OperationalError:
                    output = {'key': k, 'value': byte_value}
                    LOG.error("Unable to set %(key)s with value %(value)s.",
                              output)

    def start_db(self,
                 update_db=False,
                 ds_version=None,
                 command=None,
                 extra_volumes=None):
        """Start and wait for database service."""
        docker_image = CONF.get(CONF.datastore_manager).docker_image
        image = (f'{docker_image}:latest'
                 if not ds_version else f'{docker_image}:{ds_version}')
        command = command if command else ''

        try:
            root_pass = self.get_auth_password(file="root.cnf")
        except exception.UnprocessableEntity:
            root_pass = utils.generate_random_password()

        # Get uid and gid
        user = "******" % (CONF.database_service_uid, CONF.database_service_uid)

        # Create folders for mysql on localhost
        for folder in ['/etc/mysql', '/var/run/mysqld']:
            operating_system.ensure_directory(folder,
                                              user=CONF.database_service_uid,
                                              group=CONF.database_service_uid,
                                              force=True,
                                              as_root=True)

        volumes = {
            "/etc/mysql": {
                "bind": "/etc/mysql",
                "mode": "rw"
            },
            "/var/run/mysqld": {
                "bind": "/var/run/mysqld",
                "mode": "rw"
            },
            "/var/lib/mysql": {
                "bind": "/var/lib/mysql",
                "mode": "rw"
            },
        }
        if extra_volumes:
            volumes.update(extra_volumes)

        try:
            LOG.info("Starting docker container, image: %s", image)
            docker_util.start_container(self.docker_client,
                                        image,
                                        volumes=volumes,
                                        network_mode="host",
                                        user=user,
                                        environment={
                                            "MYSQL_ROOT_PASSWORD": root_pass,
                                            "MYSQL_INITDB_SKIP_TZINFO": 1,
                                        },
                                        command=command)

            # Save root password
            LOG.debug("Saving root credentials to local host.")
            self.save_password('root', root_pass)
        except Exception:
            LOG.exception("Failed to start mysql")
            raise exception.TroveError(_("Failed to start mysql"))

        if not self.status.wait_for_status(
                service_status.ServiceStatuses.HEALTHY,
                CONF.state_change_wait_time, update_db):
            raise exception.TroveError(_("Failed to start mysql"))

    def wipe_ib_logfiles(self):
        """Destroys the iblogfiles.

        If for some reason the selected log size in the conf changes from the
        current size of the files, MySQL will fail to start, so we delete the
        files to be safe.
        """
        for index in range(2):
            try:
                # On restarts, sometimes these are wiped. So it can be a race
                # to have MySQL start up before it's restarted and these have
                # to be deleted. That's why it's ok if they aren't found and
                # that is why we use the "force" option to "remove".
                operating_system.remove("%s/ib_logfile%d" %
                                        (self.get_data_dir(), index),
                                        force=True,
                                        as_root=True)
            except exception.ProcessExecutionError:
                LOG.exception("Could not delete logfile.")
                raise

    def reset_configuration(self, configuration):
        LOG.info("Resetting configuration.")
        self.configuration_manager.save_configuration(configuration)
        self.wipe_ib_logfiles()

    def restart(self):
        LOG.info("Restarting mysql")

        # Ensure folders permission for database.
        for folder in ['/etc/mysql', '/var/run/mysqld']:
            operating_system.ensure_directory(folder,
                                              user=CONF.database_service_uid,
                                              group=CONF.database_service_uid,
                                              force=True,
                                              as_root=True)

        try:
            docker_util.restart_container(self.docker_client)
        except Exception:
            LOG.exception("Failed to restart mysql")
            raise exception.TroveError("Failed to restart mysql")

        if not self.status.wait_for_status(
                service_status.ServiceStatuses.HEALTHY,
                CONF.state_change_wait_time,
                update_db=True):
            raise exception.TroveError("Failed to start mysql")

        LOG.info("Finished restarting mysql")

    def restore_backup(self, context, backup_info, restore_location):
        backup_id = backup_info['id']
        storage_driver = CONF.storage_strategy
        backup_driver = self.get_backup_strategy()
        user_token = context.auth_token
        auth_url = CONF.service_credentials.auth_url
        user_tenant = context.project_id
        image = self.get_backup_image()
        name = 'db_restore'
        volumes = {'/var/lib/mysql': {'bind': '/var/lib/mysql', 'mode': 'rw'}}

        command = (
            f'/usr/bin/python3 main.py --nobackup '
            f'--storage-driver={storage_driver} --driver={backup_driver} '
            f'--os-token={user_token} --os-auth-url={auth_url} '
            f'--os-tenant-id={user_tenant} '
            f'--restore-from={backup_info["location"]} '
            f'--restore-checksum={backup_info["checksum"]}')
        if CONF.backup_aes_cbc_key:
            command = (f"{command} "
                       f"--backup-encryption-key={CONF.backup_aes_cbc_key}")

        LOG.debug(
            'Stop the database and clean up the data before restore '
            'from %s', backup_id)
        self.stop_db()
        operating_system.chmod(restore_location,
                               operating_system.FileMode.SET_FULL,
                               as_root=True)
        utils.clean_out(restore_location)

        # Start to run restore inside a separate docker container
        LOG.info('Starting to restore backup %s, command: %s', backup_id,
                 command)
        output, ret = docker_util.run_container(self.docker_client,
                                                image,
                                                name,
                                                volumes=volumes,
                                                command=command)
        result = output[-1]
        if not ret:
            msg = f'Failed to run restore container, error: {result}'
            LOG.error(msg)
            raise Exception(msg)

        LOG.debug('Deleting ib_logfile files after restore from backup %s',
                  backup_id)
        operating_system.chown(restore_location,
                               CONF.database_service_uid,
                               CONF.database_service_uid,
                               force=True,
                               as_root=True)
        self.wipe_ib_logfiles()

    def exists_replication_source_overrides(self):
        return self.configuration_manager.has_system_override(CNF_MASTER)

    def write_replication_source_overrides(self, overrideValues):
        self.configuration_manager.apply_system_override(
            overrideValues, CNF_MASTER)

    def write_replication_replica_overrides(self, overrideValues):
        self.configuration_manager.apply_system_override(
            overrideValues, CNF_SLAVE)

    def remove_replication_source_overrides(self):
        self.configuration_manager.remove_system_override(CNF_MASTER)

    def remove_replication_replica_overrides(self):
        self.configuration_manager.remove_system_override(CNF_SLAVE)

    def grant_replication_privilege(self, replication_user):
        LOG.info("Granting replication slave privilege for %s",
                 replication_user['name'])

        with mysql_util.SqlClient(self.get_engine()) as client:
            g = sql_query.Grant(permissions=['REPLICATION SLAVE'],
                                user=replication_user['name'],
                                clear=replication_user['password'])

            t = text(str(g))
            client.execute(t)

    def get_port(self):
        with mysql_util.SqlClient(self.get_engine()) as client:
            result = client.execute('SELECT @@port').first()
            return result[0]

    def wait_for_slave_status(self, status, client, max_time):
        def verify_slave_status():
            ret = client.execute(
                "SELECT SERVICE_STATE FROM "
                "performance_schema.replication_connection_status").first()
            if not ret:
                actual_status = 'OFF'
            else:
                actual_status = ret[0]
            return actual_status.upper() == status.upper()

        LOG.debug("Waiting for slave status %s with timeout %s", status,
                  max_time)
        try:
            utils.poll_until(verify_slave_status,
                             sleep_time=3,
                             time_out=max_time)
            LOG.info("Replication status: %s.", status)
        except exception.PollTimeOut:
            raise RuntimeError(
                _("Replication is not %(status)s after %(max)d seconds.") % {
                    'status': status.lower(),
                    'max': max_time
                })

    def start_slave(self):
        LOG.info("Starting slave replication.")
        with mysql_util.SqlClient(self.get_engine()) as client:
            client.execute('START SLAVE')
            self.wait_for_slave_status("ON", client, 180)

    def stop_slave(self, for_failover):
        LOG.info("Stopping slave replication.")

        replication_user = None
        with mysql_util.SqlClient(self.get_engine()) as client:
            result = client.execute('SHOW SLAVE STATUS')
            replication_user = result.first()['Master_User']
            client.execute('STOP SLAVE')
            client.execute('RESET SLAVE ALL')
            self.wait_for_slave_status('OFF', client, 180)
            if not for_failover:
                client.execute('DROP USER IF EXISTS ' + replication_user)

        return {'replication_user': replication_user}

    def stop_master(self):
        LOG.info("Stopping replication master.")
        with mysql_util.SqlClient(self.get_engine()) as client:
            client.execute('RESET MASTER')

    def make_read_only(self, read_only):
        with mysql_util.SqlClient(self.get_engine()) as client:
            q = "set global read_only = %s" % read_only
            client.execute(text(str(q)))

    def upgrade(self, upgrade_info):
        """Upgrade the database."""
        new_version = upgrade_info.get('datastore_version')
        if new_version == CONF.datastore_version:
            return

        LOG.info('Stopping db container for upgrade')
        self.stop_db()

        LOG.info('Deleting db container for upgrade')
        docker_util.remove_container(self.docker_client)

        LOG.info('Remove unused images before starting new db container')
        docker_util.prune_images(self.docker_client)

        LOG.info('Starting new db container with version %s for upgrade',
                 new_version)
        self.start_db(update_db=True, ds_version=new_version)
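Example #11 keeps replication settings as named system override groups (`CNF_MASTER`, `CNF_SLAVE`). The exact dict shape handed to `write_replication_source_overrides` is not shown here, so the sketch below is an assumption that mirrors the nested section dicts used with `apply_system_override` elsewhere in these examples; the MySQL option names and values are purely illustrative.

def enable_source_overrides(app, server_id):
    # Hypothetical source-side settings, persisted under the CNF_MASTER group.
    overrides = {
        'mysqld': {
            'log_bin': 'mysql-bin',        # illustrative
            'server_id': str(server_id),   # illustrative
        }
    }
    app.write_replication_source_overrides(overrides)

def disable_source_overrides(app):
    app.remove_replication_source_overrides()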
Example #12
 def test_update_configuration(self):
     revision_dir = self._create_temp_dir()
     self._assert_update_configuration(
         RollingOverrideStrategy(revision_dir))
     self._assert_update_configuration(
         ImportOverrideStrategy(revision_dir, 'ext'))
Example #13
class MySqlApp(object):
    """Prepares DBaaS on a Guest container."""

    TIME_OUT = 1000

    configuration_manager = ConfigurationManager(
        MYSQL_CONFIG,
        MYSQL_OWNER,
        MYSQL_OWNER,
        IniCodec(),
        requires_root=True,
        override_strategy=ImportOverrideStrategy(CNF_INCLUDE_DIR, CNF_EXT))

    @classmethod
    def get_auth_password(cls):
        return cls.configuration_manager.get_value('client').get('password')

    @classmethod
    def get_data_dir(cls):
        return cls.configuration_manager.get_value(
            MySQLConfParser.SERVER_CONF_SECTION).get('datadir')

    @classmethod
    def set_data_dir(cls, value):
        cls.configuration_manager.apply_system_override(
            {MySQLConfParser.SERVER_CONF_SECTION: {
                'datadir': value
            }})

    def __init__(self, status):
        """By default login with root no password for initial setup."""
        self.state_change_wait_time = CONF.state_change_wait_time
        self.status = status

    def _create_admin_user(self, client, password):
        """
        Create an os_admin user with a random password
        and all privileges similar to the root user.
        """
        localhost = "localhost"
        g = sql_query.Grant(permissions='ALL',
                            user=ADMIN_USER_NAME,
                            host=localhost,
                            grant_option=True,
                            clear=password)
        t = text(str(g))
        client.execute(t)

    @staticmethod
    def _generate_root_password(client):
        """Generate and set a random root password and forget about it."""
        localhost = "localhost"
        uu = sql_query.UpdateUser("root",
                                  host=localhost,
                                  clear=utils.generate_random_password())
        t = text(str(uu))
        client.execute(t)

    def install_if_needed(self, packages):
        """Prepare the guest machine with a secure
           mysql server installation.
        """
        LOG.info(_("Preparing Guest as MySQL Server."))
        if not packager.pkg_is_installed(packages):
            LOG.debug("Installing MySQL server.")
            self._clear_mysql_config()
            # set blank password on pkg configuration stage
            pkg_opts = {'root_password': '', 'root_password_again': ''}
            packager.pkg_install(packages, pkg_opts, self.TIME_OUT)
            self._create_mysql_confd_dir()
            LOG.info(_("Finished installing MySQL server."))
        self.start_mysql()

    def complete_install_or_restart(self):
        self.status.end_install_or_restart()

    def secure(self, config_contents, overrides):
        LOG.info(_("Generating admin password."))
        admin_password = utils.generate_random_password()
        clear_expired_password()
        engine = sqlalchemy.create_engine("mysql://root:@localhost:3306",
                                          echo=True)
        with LocalSqlClient(engine) as client:
            self._remove_anonymous_user(client)
            self._create_admin_user(client, admin_password)

        self.stop_db()

        self._reset_configuration(config_contents, admin_password)
        self._apply_user_overrides(overrides)
        self.start_mysql()

        LOG.debug("MySQL secure complete.")

    def _reset_configuration(self, configuration, admin_password=None):
        if not admin_password:
            # Take the current admin password from the base configuration file
            # if not given.
            admin_password = MySqlApp.get_auth_password()

        self.configuration_manager.save_configuration(configuration)
        self._save_authentication_properties(admin_password)
        self.wipe_ib_logfiles()

    def _save_authentication_properties(self, admin_password):
        self.configuration_manager.apply_system_override(
            {'client': {
                'user': ADMIN_USER_NAME,
                'password': admin_password
            }})

    def secure_root(self, secure_remote_root=True):
        with LocalSqlClient(get_engine()) as client:
            LOG.info(_("Preserving root access from restore."))
            self._generate_root_password(client)
            if secure_remote_root:
                self._remove_remote_root_access(client)

    def _clear_mysql_config(self):
        """Clear old configs, which can be incompatible with new version."""
        LOG.debug("Clearing old MySQL config.")
        random_uuid = str(uuid.uuid4())
        configs = ["/etc/my.cnf", "/etc/mysql/conf.d", "/etc/mysql/my.cnf"]
        for config in configs:
            try:
                old_conf_backup = "%s_%s" % (config, random_uuid)
                operating_system.move(config, old_conf_backup, as_root=True)
                LOG.debug("%s saved to %s_%s." % (config, config, random_uuid))
            except exception.ProcessExecutionError:
                pass

    def _create_mysql_confd_dir(self):
        conf_dir = "/etc/mysql/conf.d"
        LOG.debug("Creating %s." % conf_dir)
        operating_system.create_directory(conf_dir, as_root=True)

    def _enable_mysql_on_boot(self):
        LOG.debug("Enabling MySQL on boot.")
        try:
            mysql_service = operating_system.service_discovery(
                MYSQL_SERVICE_CANDIDATES)
            utils.execute_with_timeout(mysql_service['cmd_enable'], shell=True)
        except KeyError:
            LOG.exception(_("Error enabling MySQL start on boot."))
            raise RuntimeError("Service is not discovered.")

    def _disable_mysql_on_boot(self):
        try:
            mysql_service = operating_system.service_discovery(
                MYSQL_SERVICE_CANDIDATES)
            utils.execute_with_timeout(mysql_service['cmd_disable'],
                                       shell=True)
        except KeyError:
            LOG.exception(_("Error disabling MySQL start on boot."))
            raise RuntimeError("Service is not discovered.")

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        LOG.info(_("Stopping MySQL."))
        if do_not_start_on_reboot:
            self._disable_mysql_on_boot()
        try:
            mysql_service = operating_system.service_discovery(
                MYSQL_SERVICE_CANDIDATES)
            utils.execute_with_timeout(mysql_service['cmd_stop'], shell=True)
        except KeyError:
            LOG.exception(_("Error stopping MySQL."))
            raise RuntimeError("Service is not discovered.")
        if not self.status.wait_for_real_status_to_change_to(
                rd_instance.ServiceStatuses.SHUTDOWN,
                self.state_change_wait_time, update_db):
            LOG.error(_("Could not stop MySQL."))
            self.status.end_install_or_restart()
            raise RuntimeError("Could not stop MySQL!")

    def _remove_anonymous_user(self, client):
        t = text(sql_query.REMOVE_ANON)
        client.execute(t)

    def _remove_remote_root_access(self, client):
        t = text(sql_query.REMOVE_ROOT)
        client.execute(t)

    def restart(self):
        try:
            self.status.begin_restart()
            self.stop_db()
            self.start_mysql()
        finally:
            self.status.end_install_or_restart()

    def update_overrides(self, overrides):
        self._apply_user_overrides(overrides)

    def _apply_user_overrides(self, overrides):
        # All user-defined values go to the server section of the configuration
        # file.
        if overrides:
            self.configuration_manager.apply_user_override(
                {MySQLConfParser.SERVER_CONF_SECTION: overrides})

    def apply_overrides(self, overrides):
        LOG.debug("Applying overrides to MySQL.")
        with LocalSqlClient(get_engine()) as client:
            LOG.debug("Updating override values in running MySQL.")
            for k, v in overrides.iteritems():
                byte_value = guestagent_utils.to_bytes(v)
                q = sql_query.SetServerVariable(key=k, value=byte_value)
                t = text(str(q))
                try:
                    client.execute(t)
                except exc.OperationalError:
                    output = {'key': k, 'value': byte_value}
                    LOG.exception(
                        _("Unable to set %(key)s with value "
                          "%(value)s.") % output)

    def make_read_only(self, read_only):
        with LocalSqlClient(get_engine()) as client:
            q = "set global read_only = %s" % read_only
            client.execute(text(str(q)))

    def wipe_ib_logfiles(self):
        """Destroys the iblogfiles.

        If for some reason the selected log size in the conf changes from the
        current size of the files, MySQL will fail to start, so we delete the
        files to be safe.
        """
        LOG.info(_("Wiping ib_logfiles."))
        for index in range(2):
            try:
                # On restarts, sometimes these are wiped. So it can be a race
                # to have MySQL start up before it's restarted and these have
                # to be deleted. That's why it's ok if they aren't found and
                # that is why we use the "force" option to "remove".
                operating_system.remove("%s/ib_logfile%d" %
                                        (self.get_data_dir(), index),
                                        force=True,
                                        as_root=True)
            except exception.ProcessExecutionError:
                LOG.exception("Could not delete logfile.")
                raise

    def remove_overrides(self):
        self.configuration_manager.remove_user_override()

    def _remove_replication_overrides(self, cnf_file):
        LOG.info(_("Removing replication configuration file."))
        if os.path.exists(cnf_file):
            operating_system.remove(cnf_file, as_root=True)

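    # Replication settings live in dedicated system-override groups
    # (CNF_MASTER for masters, CNF_SLAVE for replicas) so they can be added
    # and removed independently of user-supplied overrides.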
    def exists_replication_source_overrides(self):
        return self.configuration_manager.has_system_override(CNF_MASTER)

    def write_replication_source_overrides(self, overrideValues):
        self.configuration_manager.apply_system_override(
            overrideValues, CNF_MASTER)

    def write_replication_replica_overrides(self, overrideValues):
        self.configuration_manager.apply_system_override(
            overrideValues, CNF_SLAVE)

    def remove_replication_source_overrides(self):
        self.configuration_manager.remove_system_override(CNF_MASTER)

    def remove_replication_replica_overrides(self):
        self.configuration_manager.remove_system_override(CNF_SLAVE)

    def grant_replication_privilege(self, replication_user):
        LOG.info(_("Granting Replication Slave privilege."))

        LOG.debug("grant_replication_privilege: %s" % replication_user)

        with LocalSqlClient(get_engine()) as client:
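            # sql_query.Grant renders a statement along the lines of
            #     GRANT REPLICATION SLAVE ON *.* TO '<user>' IDENTIFIED BY '<password>'
            # (illustrative; the exact text and host qualifier come from the
            # Grant helper).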
            g = sql_query.Grant(permissions=['REPLICATION SLAVE'],
                                user=replication_user['name'],
                                clear=replication_user['password'])

            t = text(str(g))
            client.execute(t)

    def get_port(self):
        with LocalSqlClient(get_engine()) as client:
            result = client.execute('SELECT @@port').first()
            return result[0]

    def get_binlog_position(self):
        with LocalSqlClient(get_engine()) as client:
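            # SHOW MASTER STATUS reports the current binary log file name and
            # position; a replica uses these coordinates in CHANGE MASTER TO
            # when it attaches via binlog replication.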
            result = client.execute('SHOW MASTER STATUS').first()
            binlog_position = {
                'log_file': result['File'],
                'position': result['Position']
            }
            return binlog_position

    def execute_on_client(self, sql_statement):
        LOG.debug("Executing SQL: %s" % sql_statement)
        with LocalSqlClient(get_engine()) as client:
            return client.execute(sql_statement)

    def start_slave(self):
        LOG.info(_("Starting slave replication."))
        with LocalSqlClient(get_engine()) as client:
            client.execute('START SLAVE')
            self._wait_for_slave_status("ON", client, 60)

    def stop_slave(self, for_failover):
        replication_user = None
        LOG.info(_("Stopping slave replication."))
        with LocalSqlClient(get_engine()) as client:
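            # STOP SLAVE halts the replication threads and RESET SLAVE ALL
            # discards the stored master coordinates. The replication user is
            # dropped only on a permanent detach; on failover the credentials
            # are kept and simply returned to the caller.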
            result = client.execute('SHOW SLAVE STATUS')
            replication_user = result.first()['Master_User']
            client.execute('STOP SLAVE')
            client.execute('RESET SLAVE ALL')
            self._wait_for_slave_status("OFF", client, 30)
            if not for_failover:
                client.execute('DROP USER ' + replication_user)
        return {'replication_user': replication_user}

    def stop_master(self):
        LOG.info(_("Stopping replication master."))
        with LocalSqlClient(get_engine()) as client:
            client.execute('RESET MASTER')

    def _wait_for_slave_status(self, status, client, max_time):
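        # SHOW GLOBAL STATUS LIKE 'slave_running' returns a (name, value)
        # row; index [1] is the ON/OFF value that is polled until it matches
        # the requested state.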
        def verify_slave_status():
            actual_status = client.execute(
                "SHOW GLOBAL STATUS like 'slave_running'").first()[1]
            return actual_status.upper() == status.upper()

        LOG.debug("Waiting for SLAVE_RUNNING to change to %s.", status)
        try:
            utils.poll_until(verify_slave_status,
                             sleep_time=3,
                             time_out=max_time)
            LOG.info(_("Replication is now %s.") % status.lower())
        except PollTimeOut:
            raise RuntimeError(
                _("Replication is not %(status)s after %(max)d seconds.") % {
                    'status': status.lower(),
                    'max': max_time
                })

    def start_mysql(self, update_db=False):
        LOG.info(_("Starting MySQL."))
        # This is the site of all the trouble in the restart tests.
        # Essentially what happens is that the mysql start command fails but
        # the process does not die, and it is then impossible to kill the
        # original; if the status never reaches RUNNING we kill it by hand
        # below.

        self._enable_mysql_on_boot()

        try:
            mysql_service = operating_system.service_discovery(
                MYSQL_SERVICE_CANDIDATES)
            utils.execute_with_timeout(mysql_service['cmd_start'], shell=True)
        except KeyError:
            raise RuntimeError("Service is not discovered.")
        except exception.ProcessExecutionError:
            # It seems MySQL (Percona, at least) might report [Fail] but
            # actually come up OK. We're still looking into the timing issue
            # with parallel runs, but for now we give it one more chance to
            # come up: regardless of the execute_with_timeout() response, we
            # assume MySQL is coming up and poll its status for a while.
            pass
        if not self.status.wait_for_real_status_to_change_to(
                rd_instance.ServiceStatuses.RUNNING,
                self.state_change_wait_time, update_db):
            LOG.error(_("Start up of MySQL failed."))
            # If it won't start, but won't die either, kill it by hand so we
            # don't let a rogue process wander around.
            try:
                utils.execute_with_timeout("sudo", "pkill", "-9", "mysql")
            except exception.ProcessExecutionError:
                LOG.exception(_("Error killing stalled MySQL start command."))
                # There's nothing more we can do...
            self.status.end_install_or_restart()
            raise RuntimeError("Could not start MySQL!")

    def start_db_with_conf_changes(self, config_contents):
        LOG.info(_("Starting MySQL with conf changes."))
        LOG.debug("Inside the guest - Status is_running = (%s)." %
                  self.status.is_running)
        if self.status.is_running:
            LOG.error(
                _("Cannot execute start_db_with_conf_changes because "
                  "MySQL state == %s.") % self.status)
            raise RuntimeError("MySQL not stopped.")
        LOG.info(_("Resetting configuration."))
        self._reset_configuration(config_contents)
        self.start_mysql(True)

    def reset_configuration(self, configuration):
        config_contents = configuration['config_contents']
        LOG.info(_("Resetting configuration."))
        self._reset_configuration(config_contents)

    # DEPRECATED: Maintained for API compatibility.
    def get_txn_count(self):
        LOG.info(_("Retrieving latest txn id."))
        txn_count = 0
        with LocalSqlClient(get_engine()) as client:
            result = client.execute('SELECT @@global.gtid_executed').first()
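            # gtid_executed is a comma-separated list of GTID sets, e.g.
            #     3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5:11
            # Each set is split on ':' and its numeric intervals are summed
            # (approximated here as end - start) to estimate the count.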
            for uuid_set in result[0].split(','):
                for interval in uuid_set.split(':')[1:]:
                    if '-' in interval:
                        iparts = interval.split('-')
                        txn_count += int(iparts[1]) - int(iparts[0])
                    else:
                        txn_count += 1
        return txn_count

    def _get_slave_status(self):
        with LocalSqlClient(get_engine()) as client:
            return client.execute('SHOW SLAVE STATUS').first()

    def _get_master_UUID(self):
        slave_status = self._get_slave_status()
        return slave_status and slave_status['Master_UUID'] or None

    def _get_gtid_executed(self):
        with LocalSqlClient(get_engine()) as client:
            return client.execute('SELECT @@global.gtid_executed').first()[0]

    def get_last_txn(self):
        master_UUID = self._get_master_UUID()
        last_txn_id = '0'
        gtid_executed = self._get_gtid_executed()
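        # Each GTID set has the form "source_uuid:interval[:interval]"; the
        # set whose UUID matches the master is located and the upper bound of
        # its last interval is taken as the last applied transaction id.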
        for gtid_set in gtid_executed.split(','):
            uuid_set = gtid_set.split(':')
            if uuid_set[0] == master_UUID:
                last_txn_id = uuid_set[-1].split('-')[-1]
                break
        return master_UUID, int(last_txn_id)

    def get_latest_txn_id(self):
        LOG.info(_("Retrieving latest txn id."))
        return self._get_gtid_executed()

    def wait_for_txn(self, txn):
        LOG.info(_("Waiting on txn '%s'.") % txn)
        with LocalSqlClient(get_engine()) as client:
            client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')" %
                           txn)