Example #1
def reset_slave(slave):
    """Stop slave and reset it.

    :param slave: slave.
    """
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)
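A possible call site for the helper above, following the fetch-and-connect pattern used elsewhere in this listing; the group id and the Group/MySQLServer aliases are assumptions for illustration only.

group = Group.fetch("group_shard_1")        # hypothetical group id
server = MySQLServer.fetch(group.master)
if server is not None:
    server.connect()
    reset_slave(server)                     # stop the slave thread, then discard its settings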
Example #2
def _setup_replication(shard_id, source_group_id, destn_group_id, split_value,
                                        prune_limit, cmd):
    """Setup replication between the source and the destination groups and
    ensure that they are in sync.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation
    """
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    destination_group = Group.fetch(destn_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    master.connect()

    slave = MySQLServer.fetch(destination_group.master)
    if slave is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    slave.connect()

    #Stop and reset any slave that might be running on the slave server.
    _utils.set_offline_mode(slave, True) ### TODO: if forced offline_mode
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)

    #Change the master to the shard group master.
    _replication.switch_master(slave, master, master.repl_user, master.repl_pass)

    #Start the slave so that syncing of the data begins
    _replication.start_slave(slave, wait=True)
    _utils.set_offline_mode(slave, False) ### TODO: if forced offline_mode

    #Setup sync between the source and the destination groups.
    _events.trigger_within_procedure(
                                     SETUP_SYNC,
                                     shard_id,
                                     source_group_id,
                                     destn_group_id,
                                     split_value,
                                     prune_limit,
                                     cmd
                                     )
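The core of the function above is the redirect sequence: stop, reset, switch master, start. A minimal sketch of that sequence on its own, assuming the same _replication API and the repl_user/repl_pass attributes used in this example:

def _redirect_replication(slave, master):
    """Point `slave` at `master` and start replicating (sketch only)."""
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)       # forget any previous master
    _replication.switch_master(slave, master, master.repl_user, master.repl_pass)
    _replication.start_slave(slave, wait=True)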
Example #3
def reset_slave(slave):
    """Stop slave and reset it.

    :param slave: slave.
    """
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)
Example #4
def _setup_sync(shard_id, source_group_id, destn_group_id, split_value,
                                        prune_limit, cmd):

    """sync the source and the destination groups.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation
    """
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    destination_group = Group.fetch(destn_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    master.connect()

    slave = MySQLServer.fetch(destination_group.master)
    if slave is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    slave.connect()

    #Synchronize until the slave catches up with the master.
    _replication.synchronize_with_read_only(slave, master)

    #Reset replication once the syncing is done.
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)

    #Trigger changing the mappings for the shard that was copied
    _events.trigger_within_procedure(
                                     SETUP_RESHARDING_SWITCH,
                                     shard_id,
                                     source_group_id,
                                     destn_group_id,
                                     split_value,
                                     prune_limit,
                                     cmd
                                     )
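The function above performs a catch-up followed by a detach. A condensed sketch of just that step, under the same _replication API assumptions:

def _sync_and_detach(slave, master):
    """Wait until `slave` has applied everything from `master`, then detach it (sketch)."""
    _replication.synchronize_with_read_only(slave, master)   # block until caught up
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)               # drop the master reference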
Example #5
def _setup_sync(shard_id, source_group_id, destn_group_id, split_value,
                                        prune_limit, cmd):

    """sync the source and the destination groups.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation
    """
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    destination_group = Group.fetch(destn_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    master.connect()

    slave = MySQLServer.fetch(destination_group.master)
    if slave is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    slave.connect()

    #Synchronize until the slave catches up with the master.
    _replication.synchronize_with_read_only(slave, master)

    #Reset replication once the syncing is done.
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)

    #Trigger changing the mappings for the shard that was copied
    _events.trigger_within_procedure(
                                     SETUP_RESHARDING_SWITCH,
                                     shard_id,
                                     source_group_id,
                                     destn_group_id,
                                     split_value,
                                     prune_limit,
                                     cmd
                                     )
Example #6
def cleanup_environment():
    """Clean up the existing environment
    """
    #Clean up information on instances.
    MySQLInstances().__instances = {}

    #Clean up information in the state store.
    uuid_server = _server.MySQLServer.discover_uuid(
        MySQLInstances().state_store_address, MySQLInstances().root_user,
        MySQLInstances().root_passwd
    )
    server = _server.MySQLServer(_uuid.UUID(uuid_server),
        MySQLInstances().state_store_address, MySQLInstances().root_user,
        MySQLInstances().root_passwd
    )
    server.connect()

    server.set_foreign_key_checks(False)
    tables = server.exec_stmt(
        "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE "
        "TABLE_SCHEMA = 'fabric' and TABLE_TYPE = 'BASE TABLE'"
    )
    for table in tables:
        server.exec_stmt("TRUNCATE fabric.%s" % (table[0], ))
    server.set_foreign_key_checks(True)

    #Remove all the databases from the running MySQL instances
    #other than the standard ones
    server_count = MySQLInstances().get_number_addresses()

    for i in range(0, server_count):
        uuid_server = _server.MySQLServer.discover_uuid(
            MySQLInstances().get_address(i)
        )
        server = _server.MySQLServer(
            _uuid.UUID(uuid_server), MySQLInstances().get_address(i)
        )
        server.connect()
        _replication.stop_slave(server, wait=True)

        server.set_foreign_key_checks(False)
        databases = server.exec_stmt("SHOW DATABASES", {"fetch" : True})
        for database in databases:
            if database[0] not in _server.MySQLServer.NO_USER_DATABASES:
                server.exec_stmt(
                    "DROP DATABASE IF EXISTS %s" % (database[0], )
                )
        server.set_foreign_key_checks(True)

        _replication.reset_master(server)
        _replication.reset_slave(server, clean=True)

    for __file in glob.glob(os.path.join(os.getcwd(), "*.sql")):
        os.remove(__file)
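The per-server part of the cleanup above can be read as a small helper. This is a hedged sketch that uses only the calls shown in this example; the try/finally guard is an addition, not taken from the original.

def _drop_user_databases(server):
    """Drop every database not listed in MySQLServer.NO_USER_DATABASES (sketch)."""
    server.set_foreign_key_checks(False)
    try:
        for database in server.exec_stmt("SHOW DATABASES", {"fetch": True}):
            if database[0] not in _server.MySQLServer.NO_USER_DATABASES:
                server.exec_stmt("DROP DATABASE IF EXISTS %s" % (database[0], ))
    finally:
        server.set_foreign_key_checks(True)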
Example #7
def stop_group_slave(group_master_id, group_slave_id, clear_ref):
    """Stop the slave on the slave group. This utility method is the
    complement of the setup_group_replication method and is
    used to stop the replication on the slave group. Given a master group ID
    and the slave group ID the method stops the slave on the slave
    group and updates the references on both the master and the
    slave group.

    :param group_master_id: The id of the master group.
    :param group_slave_id: The id of the slave group.
    :param clear_ref: Indicates whether stop_group_slave needs to clear the
                      references to the group's slaves. For example, when a
                      shard is disabled, the shard group still retains the
                      references to its slaves, since replication needs to be
                      re-enabled when the shard is enabled again.
    """
    master_group = Group.fetch(group_master_id)
    slave_group = Group.fetch(group_slave_id)

    if master_group is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_master_id, ))

    if slave_group is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_slave_id, ))

    slave_group_master = MySQLServer.fetch(slave_group.master)
    if slave_group_master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR %
          (slave_group.master, ))

    if not server_running(slave_group_master):
        #The server is already down. We cannot connect to it to stop
        #replication.
        return
    try:
        slave_group_master.connect()
    except _errors.DatabaseError:
        #Server is not accessible, unable to connect to the server.
        return

    #Stop replication on the master of the group and clear the references,
    #if clear_ref has been set.
    _replication.stop_slave(slave_group_master, wait=True)
    _replication.reset_slave(slave_group_master, clean=True)
    if clear_ref:
        slave_group.remove_master_group_id()
        master_group.remove_slave_group_id(group_slave_id)
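A hypothetical invocation for the disable-shard case described in the docstring above: the references are kept (clear_ref=False) so replication can be re-enabled later. The group ids are made up and _errors is the same alias used in this example.

try:
    stop_group_slave("group_global", "group_shard_1", clear_ref=False)
except _errors.GroupError as error:
    print("could not stop group replication: %s" % (error, ))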
Example #8
def stop_group_slave(group_master_id, group_slave_id, clear_ref):
    """Stop the slave on the slave group. This utility method is the
    complement of the setup_group_replication method and is
    used to stop the replication on the slave group. Given a master group ID
    and the slave group ID the method stops the slave on the slave
    group and updates the references on both the master and the
    slave group.

    :param group_master_id: The id of the master group.
    :param group_slave_id: The id of the slave group.
    :param clear_ref: Indicates whether stop_group_slave needs to clear the
                      references to the group's slaves. For example, when a
                      shard is disabled, the shard group still retains the
                      references to its slaves, since replication needs to be
                      re-enabled when the shard is enabled again.
    """
    master_group = Group.fetch(group_master_id)
    slave_group = Group.fetch(group_slave_id)

    if master_group is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_master_id, ))

    if slave_group is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_slave_id, ))

    slave_group_master = MySQLServer.fetch(slave_group.master)
    if slave_group_master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR %
          (slave_group.master, ))

    if not server_running(slave_group_master):
        #The server is already down. We cannot connect to it to stop
        #replication.
        return
    try:
        slave_group_master.connect()
    except _errors.DatabaseError:
        #Server is not accessible, unable to connect to the server.
        return

    #Stop replication on the master of the group and clear the references,
    #if clear_ref has been set.
    _replication.stop_slave(slave_group_master, wait=True)
    _replication.reset_slave(slave_group_master, clean=True)
    if clear_ref:
        slave_group.remove_master_group_id()
        master_group.remove_slave_group_id(group_slave_id)
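The counterpart case, again with made-up group ids: when the slave group is being detached permanently, the cross references are cleared as well.

stop_group_slave("group_global", "group_shard_1", clear_ref=True)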
Example #9
    def test_demote_promote(self):
        """Check the sequence demote and promote when some candidates have no
        information on GTIDs.
        """
        # Configure replication.
        instances = tests.utils.MySQLInstances()
        user = instances.user
        passwd = instances.passwd
        instances.configure_instances({0 : [{1 : []}, {2 : []}]}, user, passwd)
        master = instances.get_instance(0)
        slave_1 = instances.get_instance(1)
        slave_2 = instances.get_instance(2)

        self.proxy.group.create("group_id", "")
        self.proxy.group.add("group_id", master.address)
        self.proxy.group.add("group_id", slave_1.address)
        self.proxy.group.add("group_id", slave_2.address)
        self.proxy.group.promote("group_id", str(master.uuid))

        # Create some data.
        master.exec_stmt("CREATE DATABASE IF NOT EXISTS test")
        master.exec_stmt("USE test")
        master.exec_stmt("CREATE TABLE IF NOT EXISTS t_1(id INTEGER)")

        for server in [slave_2, slave_1, master]:
            # Demote the current master.
            status = self.proxy.group.demote("group_id")
            self.assertStatus(status, _executor.Job.SUCCESS)
            self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
            self.assertEqual(status[1][-1]["description"],
                             "Executed action (_wait_slaves_demote).")

            # Reset any information on GTIDs on a server.
            _repl.reset_slave(server, clean=True)
            _repl.reset_master(server)

            # Promote a new master.
            status = self.proxy.group.promote("group_id")
            self.assertStatus(status, _executor.Job.SUCCESS)
            self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
            self.assertEqual(status[1][-1]["description"],
                             "Executed action (_change_to_candidate).")

            # Create some data.
            server.exec_stmt("CREATE DATABASE IF NOT EXISTS test")
            server.exec_stmt("USE test")
            server.exec_stmt("CREATE TABLE IF NOT EXISTS t_1(id INTEGER)")
Example #10
def stop_group_slaves(master_group_id):
    """Stop the group slaves for the given master group. This will be used
    for use cases that require all the slaves replicating from this group to
    be stopped. An example use case would be disabling a shard.

    :param master_group_id: The master group ID.
    """
    master_group = Group.fetch(master_group_id)
    if master_group is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % \
        (master_group_id, ))

    # Stop the replication on all of the registered slaves for the group.
    for slave_group_id in master_group.slave_group_ids:

        slave_group = Group.fetch(slave_group_id)
        # Fetch the Slave Group and the master of the Slave Group
        slave_group_master = MySQLServer.fetch(slave_group.master)
        if slave_group_master is None:
            _LOGGER.warning(
                GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % \
                (slave_group.master, )
            )
            continue

        if not server_running(slave_group_master):
            # The server is already down. We cannot connect to it to stop
            # replication.
            continue

        try:
            slave_group_master.connect()
            _replication.stop_slave(slave_group_master, wait=True)
            # Reset the slave to remove the reference to the master so
            # that when the server is next used as a slave it does not
            # complain about having a different master.
            _replication.reset_slave(slave_group_master, clean=True)
        except _errors.DatabaseError as error:
            # Server is not accessible, unable to connect to the server.
            _LOGGER.warning(
                "Error while unconfiguring group replication between "
                "(%s) and (%s): (%s).", master_group_id, slave_group.group_id,
                error
            )
            continue
Example #11
def stop_group_slaves(master_group_id):
    """Stop the group slaves for the given master group. This will be used
    for use cases that require all the slaves replicating from this group to
    be stopped. An example use case would be disabling a shard.

    :param master_group_id: The master group ID.
    """
    master_group = Group.fetch(master_group_id)
    if master_group is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % \
        (master_group_id, ))

    # Stop the replication on all of the registered slaves for the group.
    for slave_group_id in master_group.slave_group_ids:

        slave_group = Group.fetch(slave_group_id)
        # Fetch the Slave Group and the master of the Slave Group
        slave_group_master = MySQLServer.fetch(slave_group.master)
        if slave_group_master is None:
            _LOGGER.warning(GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR,
                            slave_group.master)
            continue

        if not server_running(slave_group_master):
            # The server is already down. We cannot connect to it to stop
            # replication.
            continue

        try:
            slave_group_master.connect()
            _replication.stop_slave(slave_group_master, wait=True)
            # Reset the slave to remove the reference to the master so
            # that when the server is next used as a slave it does not
            # complain about having a different master.
            _replication.reset_slave(slave_group_master, clean=True)
        except _errors.DatabaseError as error:
            # Server is not accessible, unable to connect to the server.
            _LOGGER.warning(
                "Error while unconfiguring group replication between "
                "(%s) and (%s): (%s).", master_group_id, slave_group.group_id,
                error)
            continue
Example #12
    def test_demote_promote(self):
        """Check the sequence demote and promote when some candidates have no
        information on GTIDs.
        """
        # Configure replication.
        instances = tests.utils.MySQLInstances()
        user = instances.user
        passwd = instances.passwd
        instances.configure_instances({0 : [{1 : []}, {2 : []}]}, user, passwd)
        master = instances.get_instance(0)
        slave_1 = instances.get_instance(1)
        slave_2 = instances.get_instance(2)

        self.proxy.group.create("group_id", "")
        self.proxy.group.add("group_id", master.address)
        self.proxy.group.add("group_id", slave_1.address)
        self.proxy.group.add("group_id", slave_2.address)
        self.proxy.group.promote("group_id", str(master.uuid))

        # Create some data.
        master.exec_stmt("CREATE DATABASE IF NOT EXISTS test")
        master.exec_stmt("USE test")
        master.exec_stmt("CREATE TABLE IF NOT EXISTS t_1(id INTEGER)")

        for server in [slave_2, slave_1, master]:
            # Demote the current master.
            status = self.proxy.group.demote("group_id")
            self.check_xmlrpc_command_result(status)

            # Reset any information on GTIDs on a server.
            _repl.reset_slave(server, clean=True)
            _repl.reset_master(server)

            # Promote a new master.
            status = self.proxy.group.promote("group_id")
            self.check_xmlrpc_command_result(status)

            # Create some data.
            server.exec_stmt("CREATE DATABASE IF NOT EXISTS test")
            server.exec_stmt("USE test")
            server.exec_stmt("CREATE TABLE IF NOT EXISTS t_1(id INTEGER)")
Example #13
    def test_check_no_healthy_slave(self):
        """Test promoting when there is no healthy slave.
        """
        # Configure replication.
        instances = tests.utils.MySQLInstances()
        user = instances.user
        passwd = instances.passwd
        instances.configure_instances({0 : [{1 : []}, {2 : []}]}, user, passwd)
        master = instances.get_instance(0)
        slave_1 = instances.get_instance(1)
        slave_2 = instances.get_instance(2)

        self.proxy.group.create("group_id", "")
        self.proxy.group.add("group_id", master.address)
        self.proxy.group.add("group_id", slave_1.address)
        self.proxy.group.add("group_id", slave_2.address)

        # Promote a master.
        status = self.proxy.group.promote("group_id", str(master.uuid))
        self.check_xmlrpc_command_result(status)

        # Check replication.
        status = self.proxy.group.health("group_id")
        self.check_xmlrpc_simple(status, {
            'status':  _server.MySQLServer.SECONDARY
        }, index=2, rowcount=3)
        self.check_xmlrpc_simple(status, {
            'status':  _server.MySQLServer.SECONDARY
        }, index=1, rowcount=3)
        self.check_xmlrpc_simple(status, {
            'status':  _server.MySQLServer.PRIMARY
        }, index=0, rowcount=3)

        # Inject some events that make slaves break.
        slave_1.set_session_binlog(False)
        slave_1.exec_stmt("CREATE DATABASE IF NOT EXISTS test")
        slave_1.exec_stmt("USE test")
        slave_1.exec_stmt("DROP TABLE IF EXISTS test")
        slave_1.exec_stmt("CREATE TABLE test (id INTEGER)")
        slave_1.set_session_binlog(True)

        slave_2.set_session_binlog(False)
        slave_2.exec_stmt("CREATE DATABASE IF NOT EXISTS test")
        slave_2.exec_stmt("USE test")
        slave_2.exec_stmt("DROP TABLE IF EXISTS test")
        slave_2.exec_stmt("CREATE TABLE test (id INTEGER)")
        slave_2.set_session_binlog(True)

        master.exec_stmt("CREATE DATABASE IF NOT EXISTS test")
        master.exec_stmt("USE test")
        master.exec_stmt("SET sql_log_bin=0")
        master.exec_stmt("DROP TABLE IF EXISTS test")
        master.exec_stmt("SET sql_log_bin=1")
        master.exec_stmt("CREATE TABLE test (id INTEGER)")

        # Synchronize replicas.
        self.assertRaises(_errors.DatabaseError, _repl.sync_slave_with_master,
                          slave_1, master, timeout=0)
        self.assertRaises(_errors.DatabaseError, _repl.sync_slave_with_master,
                          slave_2, master, timeout=0)

        # Check replication.
        status = self.proxy.group.health("group_id")
        for info in self.check_xmlrpc_iter(status):
            if info['uuid'] in (str(slave_2.uuid), str(slave_1.uuid)):
                self.assertEqual(
                    info['status'], 
                    _server.MySQLServer.SECONDARY
                )
                self.assertEqual(info['sql_not_running'], True)
            elif info['uuid'] == str(master.uuid):
                self.assertEqual(
                    info['status'],
                    _server.MySQLServer.PRIMARY
                )

        # Try to choose a new master through switch over.
        status = self.proxy.group.promote("group_id")
        self.check_xmlrpc_command_result(status, has_error=True)

        # Try to reset the slave and restart slave.
        _repl.stop_slave(slave_1, wait=True)
        _repl.reset_slave(slave_1, clean=False)

        try:
            _repl.start_slave(slave_1, wait=True)
        except _errors.DatabaseError as error:
            self.assertEqual(
                str(error), "Error 'Table 'test' already exists' "
                "on query. Default database: 'test'. Query: 'CREATE "
                "TABLE test (id INTEGER)'"
                )

        # Synchronize replica.
        self.assertRaises(_errors.DatabaseError, _repl.sync_slave_with_master,
                          slave_1, master, timeout=0)

        # Check replication.
        status = self.proxy.group.health("group_id")
        self.check_xmlrpc_simple(status, {
            'status':  _server.MySQLServer.SECONDARY,
            "sql_not_running": True,
        }, index=2, rowcount=3)
        self.check_xmlrpc_simple(status, {
            'status':  _server.MySQLServer.SECONDARY,
            "sql_not_running": True,
        }, index=1, rowcount=3)
        self.check_xmlrpc_simple(status, {
            'status':  _server.MySQLServer.PRIMARY,
        }, index=0, rowcount=3)

        # Try to drop the table on the slave.
        _repl.stop_slave(slave_1, wait=True)
        _repl.reset_slave(slave_1, clean=False)
        slave_1.set_session_binlog(False)
        slave_1.exec_stmt("DROP TABLE IF EXISTS test")
        slave_1.set_session_binlog(True)
        _repl.start_slave(slave_1, wait=True)
        _repl.stop_slave(slave_2, wait=True)
        _repl.reset_slave(slave_2, clean=False)
        slave_2.set_session_binlog(False)
        slave_2.exec_stmt("DROP TABLE IF EXISTS test")
        slave_2.set_session_binlog(True)
        _repl.start_slave(slave_2, wait=True)

        # Synchronize replicas.
        _repl.sync_slave_with_master(slave_1, master, timeout=0)
        _repl.sync_slave_with_master(slave_2, master, timeout=0)

        # Check replication.
        status = self.proxy.group.health("group_id")
        for info in self.check_xmlrpc_iter(status, rowcount=3):
            if info['uuid'] in (str(slave_2.uuid), str(slave_1.uuid)):
                self.assertEqual(
                    info['status'], 
                    _server.MySQLServer.SECONDARY
                )
                self.assertEqual(info['sql_not_running'], False)
            elif info['uuid'] == str(master.uuid):
                self.assertEqual(
                    info['status'],
                    _server.MySQLServer.PRIMARY
                )
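The last part of this test recovers slaves whose SQL thread stopped on a conflicting local table. A hedged sketch of that recovery path, with the table name as a parameter (the helper itself is not part of the original sources):

def _recover_conflicting_table(slave, master, table):
    """Drop the conflicting local copy of `table` and resume replication (sketch)."""
    _repl.stop_slave(slave, wait=True)
    _repl.reset_slave(slave, clean=False)        # keep the master settings
    slave.set_session_binlog(False)              # do not log the local fix
    slave.exec_stmt("DROP TABLE IF EXISTS %s" % (table, ))
    slave.set_session_binlog(True)
    _repl.start_slave(slave, wait=True)
    _repl.sync_slave_with_master(slave, master, timeout=0)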
Example #14
    def test_promote_to(self):
        # Create topology: M1 ---> S2, M1 ---> S3
        instances = tests.utils.MySQLInstances()
        user = instances.user
        passwd = instances.passwd
        instances.configure_instances({0: [{1: []}, {2: []}]}, user, passwd)
        master = instances.get_instance(0)
        slave_1 = instances.get_instance(1)
        slave_2 = instances.get_instance(2)

        # Try to use a group that does not exist.
        status = self.proxy.group.promote("group_id", str(slave_1.uuid))
        self.check_xmlrpc_command_result(status, has_error=True)

        # Try to use a slave that does not exist with the group.
        self.proxy.group.create("group_id", "")
        status = self.proxy.group.promote("group_id", str(slave_1.uuid))
        self.check_xmlrpc_command_result(status, has_error=True)

        # Try to use a server that is already a master.
        self.proxy.group.add("group_id", master.address)
        self.proxy.group.add("group_id", slave_1.address)
        self.proxy.group.add("group_id", slave_2.address)
        group = _server.Group.fetch("group_id")
        tests.utils.configure_decoupled_master(group, slave_1)

        status = self.proxy.group.promote("group_id", str(slave_1.uuid))
        self.check_xmlrpc_command_result(status, has_error=True)

        # Try to use a slave whose replication is not properly configured.
        tests.utils.configure_decoupled_master(group, master)
        _repl.stop_slave(slave_1, wait=True)
        _repl.reset_slave(slave_1, clean=True)
        status = self.proxy.group.promote("group_id", str(slave_1.uuid))
        self.check_xmlrpc_command_result(status, has_error=True)

        # Try to use a slave whose replication is not properly running.
        _repl.switch_master(slave_1, master, user, passwd)
        status = self.proxy.group.promote("group_id", str(slave_1.uuid))
        self.check_xmlrpc_command_result(status, has_error=True)

        # Start the slave.
        _repl.start_slave(slave_1, wait=True)

        # Look up servers.
        expected = tests.utils.make_servers_lookup_result([
            [
                str(master.uuid), master.address, _server.MySQLServer.PRIMARY,
                _server.MySQLServer.READ_WRITE,
                _server.MySQLServer.DEFAULT_WEIGHT
            ],
            [
                str(slave_1.uuid), slave_1.address,
                _server.MySQLServer.SECONDARY, _server.MySQLServer.READ_ONLY,
                _server.MySQLServer.DEFAULT_WEIGHT
            ],
            [
                str(slave_2.uuid), slave_2.address,
                _server.MySQLServer.SECONDARY, _server.MySQLServer.READ_ONLY,
                _server.MySQLServer.DEFAULT_WEIGHT
            ],
        ])
        servers = self.proxy.group.lookup_servers("group_id")
        self.check_xmlrpc_result(servers, expected)

        # Do the promote.
        status = self.proxy.group.promote("group_id", str(slave_1.uuid))
        self.check_xmlrpc_command_result(status)

        # Look up servers.
        expected = tests.utils.make_servers_lookup_result([
            [
                str(master.uuid), master.address,
                _server.MySQLServer.SECONDARY, _server.MySQLServer.READ_ONLY,
                _server.MySQLServer.DEFAULT_WEIGHT
            ],
            [
                str(slave_1.uuid), slave_1.address,
                _server.MySQLServer.PRIMARY, _server.MySQLServer.READ_WRITE,
                _server.MySQLServer.DEFAULT_WEIGHT
            ],
            [
                str(slave_2.uuid), slave_2.address,
                _server.MySQLServer.SECONDARY, _server.MySQLServer.READ_ONLY,
                _server.MySQLServer.DEFAULT_WEIGHT
            ],
        ])
        servers = self.proxy.group.lookup_servers("group_id")
        self.check_xmlrpc_result(servers, expected)

        # Do the promote.
        # Note that it is using HOST:PORT instead of UUID.
        status = self.proxy.group.promote("group_id", master.address)
        self.check_xmlrpc_command_result(status)

        # Look up servers.
        servers = self.proxy.group.lookup_servers("group_id")
        expected = tests.utils.make_servers_lookup_result([
            [
                str(master.uuid), master.address, _server.MySQLServer.PRIMARY,
                _server.MySQLServer.READ_WRITE,
                _server.MySQLServer.DEFAULT_WEIGHT
            ],
            [
                str(slave_1.uuid), slave_1.address,
                _server.MySQLServer.SECONDARY, _server.MySQLServer.READ_ONLY,
                _server.MySQLServer.DEFAULT_WEIGHT
            ],
            [
                str(slave_2.uuid), slave_2.address,
                _server.MySQLServer.SECONDARY, _server.MySQLServer.READ_ONLY,
                _server.MySQLServer.DEFAULT_WEIGHT
            ],
        ])
        self.check_xmlrpc_result(servers, expected)
Example #15
def setup_group_replication(group_master_id, group_slave_id):
    """Sets up replication between the masters of the two groups and
    updates the references to the groups in each other.

    :param group_master_id: The group whose master will act as the master
                            in the replication setup.
    :param group_slave_id: The group whose master will act as the slave in the
                           replication setup.
    """
    group_master = Group.fetch(group_master_id)
    group_slave = Group.fetch(group_slave_id)

    if group_master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_master_id, ))

    if group_slave is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_slave_id, ))

    if group_master.master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % "")

    if group_slave.master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % "")

    #Master is the master of the Global Group. We replicate from here to
    #the masters of all the shard Groups.
    master = MySQLServer.fetch(group_master.master)
    if master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % \
        (group_master.master, ))

    #Get the master of the shard Group.
    slave = MySQLServer.fetch(group_slave.master)
    if slave is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % \
        (group_slave.master, ))

    if not server_running(master):
        #The server is already down. We cannot connect to it to setup
        #replication.
        raise _errors.GroupError \
        (GROUP_MASTER_NOT_RUNNING % (group_master.group_id, ))

    try:
        master.connect()
    except _errors.DatabaseError as error: 
        #Server is not accessible, unable to connect to the server.
        raise _errors.GroupError(
            GROUP_REPLICATION_SERVER_ERROR %  (group_slave.master, error)
        )

    if not server_running(slave):
        #The server is already down. We cannot connect to it to setup
        #replication.
        raise _errors.GroupError \
            (GROUP_MASTER_NOT_RUNNING % (group_slave.group_id, ))

    try:
        slave.connect()
    except _errors.DatabaseError as error:
        raise _errors.GroupError(
            GROUP_REPLICATION_SERVER_ERROR %  (group_master.master, error)
        )

    _replication.stop_slave(slave, wait=True)

    #clear references to old masters in the slave
    _replication.reset_slave(slave, clean=True)

    _replication.switch_master(slave, master, master.user, master.passwd)

    _replication.start_slave(slave, wait=True)

    try:
        group_master.add_slave_group_id(group_slave_id)
        group_slave.add_master_group_id(group_master_id)
    except _errors.DatabaseError:
        #If there is an error while adding a reference to
        #the slave group or a master group, it means that
        #the slave group was already added and the error
        #is happening because the group was already registered.
        #Ignore this error.
        pass
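A hypothetical call that wires a global group to a shard group, with the shard group's master becoming the replication slave; the group ids are invented and _errors is the alias used above.

try:
    setup_group_replication("group_global", "group_shard_1")
except _errors.GroupError as error:
    # Raised when a group or its master is missing, or a server is unreachable.
    print("group replication not configured: %s" % (error, ))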
Example #16
def setup_group_replication(group_master_id, group_slave_id):
    """Sets up replication between the masters of the two groups and
    updates the references to the groups in each other.

    :param group_master_id: The group whose master will act as the master
                            in the replication setup.
    :param group_slave_id: The group whose master will act as the slave in the
                           replication setup.
    """
    group_master = Group.fetch(group_master_id)
    group_slave = Group.fetch(group_slave_id)

    if group_master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_master_id, ))

    if group_slave is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_slave_id, ))

    if group_master.master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % "")

    if group_slave.master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % "")

    #Master is the master of the Global Group. We replicate from here to
    #the masters of all the shard Groups.
    master = MySQLServer.fetch(group_master.master)
    if master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % \
        (group_master.master, ))

    #Get the master of the shard Group.
    slave = MySQLServer.fetch(group_slave.master)
    if slave is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % \
        (group_slave.master, ))

    if not server_running(master):
        #The server is already down. We cannot connect to it to setup
        #replication.
        raise _errors.GroupError \
        (GROUP_MASTER_NOT_RUNNING % (group_master.group_id, ))

    try:
        master.connect()
    except _errors.DatabaseError as error:
        #Server is not accessible, unable to connect to the server.
        raise _errors.GroupError(GROUP_REPLICATION_SERVER_ERROR %
                                 (group_slave.master, error))

    if not server_running(slave):
        #The server is already down. We cannot connect to it to setup
        #replication.
        raise _errors.GroupError \
            (GROUP_MASTER_NOT_RUNNING % (group_slave.group_id, ))

    try:
        slave.connect()
    except _errors.DatabaseError as error:
        raise _errors.GroupError(GROUP_REPLICATION_SERVER_ERROR %
                                 (group_master.master, error))

    _replication.stop_slave(slave, wait=True)

    #clear references to old masters in the slave
    _replication.reset_slave(slave, clean=True)

    _replication.switch_master(slave, master, master.user, master.passwd)

    _replication.start_slave(slave, wait=True)

    try:
        group_master.add_slave_group_id(group_slave_id)
        group_slave.add_master_group_id(group_master_id)
    except _errors.DatabaseError:
        #If there is an error while adding a reference to
        #the slave group or a master group, it means that
        #the slave group was already added and the error
        #is happening because the group was already registered.
        #Ignore this error.
        pass
Example #17
def _setup_move_sync(shard_id, source_group_id, destn_group_id, split_value,
                                        cmd):
    """Setup replication between the source and the destination groups and
    ensure that they are in sync.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param cmd: Indicates the type of re-sharding operation
    """
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    destination_group = Group.fetch(destn_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    master.connect()

    slave = MySQLServer.fetch(destination_group.master)
    if slave is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    slave.connect()

    #Stop and reset any slave that might be running on the slave server.
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)

    #Change the master to the shard group master.
    _replication.switch_master(slave, master, master.user, master.passwd)

    #Start the slave so that syncing of the data begins
    _replication.start_slave(slave, wait=True)

    #Synchronize until the slave catches up with the master.
    _replication.synchronize_with_read_only(slave, master)

    #Reset replication once the syncing is done.
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)

    #Trigger changing the mappings for the shard that was copied
    _events.trigger_within_procedure(
                                     SETUP_RESHARDING_SWITCH,
                                     shard_id,
                                     source_group_id,
                                     destn_group_id,
                                     split_value,
                                     cmd
                                     )
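Taken together, the steps above amount to redirecting the destination master, letting it catch up, and detaching it once the copy is complete. A condensed sketch under the same _replication API assumptions:

def _copy_shard_data(slave, master):
    """Replicate everything from `master` to `slave` once, then detach (sketch)."""
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)
    _replication.switch_master(slave, master, master.user, master.passwd)
    _replication.start_slave(slave, wait=True)
    _replication.synchronize_with_read_only(slave, master)   # wait for catch-up
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)               # detach after the copy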
Example #18
    def test_promote_to(self):
        # Create topology: M1 ---> S2, M1 ---> S3
        instances = tests.utils.MySQLInstances()
        user = instances.user
        passwd = instances.passwd
        instances.configure_instances({0 : [{1 : []}, {2 : []}]}, user, passwd)
        master = instances.get_instance(0)
        slave_1 = instances.get_instance(1)
        slave_2 = instances.get_instance(2)

        # Try to use a group that does not exist.
        status = self.proxy.group.promote(
            "group_id", str(slave_1.uuid)
            )
        self.assertStatus(status, _executor.Job.ERROR)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Tried to execute action (_define_ha_operation).")

        # Try to use a slave that does not exist with the group.
        self.proxy.group.create("group_id", "")
        status = self.proxy.group.promote(
            "group_id", str(slave_1.uuid)
            )
        self.assertStatus(status, _executor.Job.ERROR)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Tried to execute action (_check_candidate_fail).")

        # Try to use a server that is already a master.
        self.proxy.group.add("group_id", master.address)
        self.proxy.group.add("group_id", slave_1.address)
        self.proxy.group.add("group_id", slave_2.address)
        group = _server.Group.fetch("group_id")
        tests.utils.configure_decoupled_master(group, slave_1)

        status = self.proxy.group.promote(
            "group_id", str(slave_1.uuid)
            )
        self.assertStatus(status, _executor.Job.ERROR)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Tried to execute action (_check_candidate_switch).")

        # Try to use a slave whose replication is not properly configured.
        tests.utils.configure_decoupled_master(group, master)
        _repl.stop_slave(slave_1, wait=True)
        _repl.reset_slave(slave_1, clean=True)
        status = self.proxy.group.promote(
            "group_id", str(slave_1.uuid)
            )
        self.assertStatus(status, _executor.Job.ERROR)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Tried to execute action (_check_candidate_switch).")

        # Try to use a slave whose replication is not properly running.
        _repl.switch_master(slave_1, master, user, passwd)
        status = self.proxy.group.promote(
            "group_id", str(slave_1.uuid)
            )
        self.assertStatus(status, _executor.Job.ERROR)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Tried to execute action (_check_candidate_switch).")

        # Start the slave.
        _repl.start_slave(slave_1, wait=True)

        # Look up servers.
        servers = self.proxy.group.lookup_servers("group_id")
        self.assertEqual(servers[0], True)
        self.assertEqual(servers[1], "")
        retrieved = servers[2]
        expected = \
            [{"server_uuid" : str(master.uuid), "address" : master.address,
             "status" :_server.MySQLServer.PRIMARY,
             "mode" : _server.MySQLServer.READ_WRITE,
             "weight" : _server.MySQLServer.DEFAULT_WEIGHT},
             {"server_uuid" : str(slave_1.uuid), "address" : slave_1.address,
             "status" : _server.MySQLServer.SECONDARY,
             "mode" : _server.MySQLServer.READ_ONLY,
             "weight" : _server.MySQLServer.DEFAULT_WEIGHT},
             {"server_uuid" : str(slave_2.uuid), "address" : slave_2.address,
             "status" : _server.MySQLServer.SECONDARY,
             "mode" : _server.MySQLServer.READ_ONLY,
             "weight" : _server.MySQLServer.DEFAULT_WEIGHT}]
        retrieved.sort()
        expected.sort()
        self.assertEqual(retrieved, expected)

        # Do the promote.
        status = self.proxy.group.promote(
            "group_id", str(slave_1.uuid)
            )
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_change_to_candidate).")

        # Look up servers.
        servers = self.proxy.group.lookup_servers("group_id")
        self.assertEqual(servers[0], True)
        self.assertEqual(servers[1], "")
        retrieved = servers[2]
        expected = \
            [{"server_uuid" : str(master.uuid), "address" : master.address,
             "status" : _server.MySQLServer.SECONDARY,
             "mode" : _server.MySQLServer.READ_ONLY,
             "weight" : _server.MySQLServer.DEFAULT_WEIGHT},
             {"server_uuid" : str(slave_1.uuid), "address" : slave_1.address,
             "status" : _server.MySQLServer.PRIMARY,
             "mode" : _server.MySQLServer.READ_WRITE,
             "weight" : _server.MySQLServer.DEFAULT_WEIGHT},
             {"server_uuid" : str(slave_2.uuid), "address" : slave_2.address,
             "status" : _server.MySQLServer.SECONDARY,
             "mode" : _server.MySQLServer.READ_ONLY,
             "weight" : _server.MySQLServer.DEFAULT_WEIGHT}]
        retrieved.sort()
        expected.sort()
        self.assertEqual(retrieved, expected)

        # Do the promote.
        # Note that it is using HOST:PORT instead of UUID.
        status = self.proxy.group.promote(
            "group_id", master.address
            )
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_change_to_candidate).")

        # Look up servers.
        servers = self.proxy.group.lookup_servers("group_id")
        self.assertEqual(servers[0], True)
        self.assertEqual(servers[1], "")
        retrieved = servers[2]
        expected = \
            [{"server_uuid" : str(master.uuid), "address" : master.address,
             "status" : _server.MySQLServer.PRIMARY,
             "mode" : _server.MySQLServer.READ_WRITE,
             "weight" : _server.MySQLServer.DEFAULT_WEIGHT},
             {"server_uuid" : str(slave_1.uuid), "address" : slave_1.address,
             "status" : _server.MySQLServer.SECONDARY,
             "mode" : _server.MySQLServer.READ_ONLY,
             "weight" : _server.MySQLServer.DEFAULT_WEIGHT},
             {"server_uuid" : str(slave_2.uuid), "address" : slave_2.address,
             "status" : _server.MySQLServer.SECONDARY,
             "mode" : _server.MySQLServer.READ_ONLY,
             "weight" : _server.MySQLServer.DEFAULT_WEIGHT}]
        retrieved.sort()
        expected.sort()
        self.assertEqual(retrieved, expected)
Example #19
def cleanup_environment():
    """Clean up the existing environment
    """
    #Clean up information on instances.
    MySQLInstances().__instances = {}

    #Clean up information in the state store.
    uuid_server = _server.MySQLServer.discover_uuid(
        MySQLInstances().state_store_address,
        MySQLInstances().user,
        MySQLInstances().passwd)
    server = _server.MySQLServer(uuid.UUID(uuid_server),
                                 MySQLInstances().state_store_address,
                                 MySQLInstances().user,
                                 MySQLInstances().passwd)
    server.connect()

    server.set_foreign_key_checks(False)
    tables = server.exec_stmt(
        "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE "
        "TABLE_SCHEMA = '%s' and TABLE_TYPE = 'BASE TABLE'" %
        (MySQLInstances().store_db, ))
    for table in tables:
        server.exec_stmt("TRUNCATE %s.%s" % (
            MySQLInstances().store_db,
            table[0],
        ))
    server.set_foreign_key_checks(True)

    #Remove all the databases from the running MySQL instances
    #other than the standard ones
    server_count = MySQLInstances().get_number_addresses()

    for i in range(0, server_count):
        uuid_server = _server.MySQLServer.discover_uuid(
            MySQLInstances().get_address(i),
            MySQLInstances().user,
            MySQLInstances().passwd)
        server = _server.MySQLServer(uuid.UUID(uuid_server),
                                     MySQLInstances().get_address(i),
                                     MySQLInstances().user,
                                     MySQLInstances().passwd)
        server.connect()
        server.read_only = False
        _replication.stop_slave(server, wait=True)

        server.set_foreign_key_checks(False)
        databases = server.exec_stmt("SHOW DATABASES")
        for database in databases:
            if database[0] not in _server.MySQLServer.NO_USER_DATABASES:
                server.exec_stmt("DROP DATABASE IF EXISTS %s" %
                                 (database[0], ))
        server.set_foreign_key_checks(True)

        _replication.reset_master(server)
        _replication.reset_slave(server, clean=True)

        server.disconnect()

    for __file in glob.glob(os.path.join(os.getcwd(), "*.sql")):
        os.remove(__file)
Example #20
    def test_check_no_healthy_slave(self):
        """Test promoting when there is no healthy slave.
        """
        # Configure replication.
        instances = tests.utils.MySQLInstances()
        user = instances.user
        passwd = instances.passwd
        instances.configure_instances({0 : [{1 : []}, {2 : []}]}, user, passwd)
        master = instances.get_instance(0)
        slave_1 = instances.get_instance(1)
        slave_2 = instances.get_instance(2)

        self.proxy.group.create("group_id", "")
        self.proxy.group.add("group_id", master.address)
        self.proxy.group.add("group_id", slave_1.address)
        self.proxy.group.add("group_id", slave_2.address)

        # Promote a master.
        status = self.proxy.group.promote("group_id", str(master.uuid))
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_change_to_candidate).")

        # Check replication.
        status = self.proxy.group.health("group_id")
        self.assertEqual(status[2][str(slave_1.uuid)]["threads"], {})
        self.assertEqual(status[2][str(slave_1.uuid)]["status"],
                         _server.MySQLServer.SECONDARY)
        self.assertEqual(status[2][str(slave_2.uuid)]["threads"], {})
        self.assertEqual(status[2][str(slave_2.uuid)]["status"],
                         _server.MySQLServer.SECONDARY)
        self.assertEqual(status[2][str(master.uuid)]["status"],
                         _server.MySQLServer.PRIMARY)

        # Inject some events that make slaves break.
        slave_1.set_session_binlog(False)
        slave_1.exec_stmt("CREATE DATABASE IF NOT EXISTS test")
        slave_1.exec_stmt("USE test")
        slave_1.exec_stmt("DROP TABLE IF EXISTS test")
        slave_1.exec_stmt("CREATE TABLE test (id INTEGER)")
        slave_1.set_session_binlog(True)

        slave_2.set_session_binlog(False)
        slave_2.exec_stmt("CREATE DATABASE IF NOT EXISTS test")
        slave_2.exec_stmt("USE test")
        slave_2.exec_stmt("DROP TABLE IF EXISTS test")
        slave_2.exec_stmt("CREATE TABLE test (id INTEGER)")
        slave_2.set_session_binlog(True)

        master.exec_stmt("CREATE DATABASE IF NOT EXISTS test")
        master.exec_stmt("USE test")
        master.exec_stmt("SET sql_log_bin=0")
        master.exec_stmt("DROP TABLE IF EXISTS test")
        master.exec_stmt("SET sql_log_bin=1")
        master.exec_stmt("CREATE TABLE test (id INTEGER)")

        # Synchronize replicas.
        self.assertRaises(_errors.DatabaseError, _repl.sync_slave_with_master,
                          slave_1, master, timeout=0)
        self.assertRaises(_errors.DatabaseError, _repl.sync_slave_with_master,
                          slave_2, master, timeout=0)

        # Check replication.
        status = self.proxy.group.health("group_id")
        self.assertEqual(status[2][str(slave_1.uuid)]["threads"],
            {"sql_running": False, "sql_error": "Error 'Table 'test' "
            "already exists' on query. Default database: 'test'. Query: "
            "'CREATE TABLE test (id INTEGER)'"}
            )
        self.assertEqual(status[2][str(slave_1.uuid)]["status"],
                         _server.MySQLServer.SECONDARY)
        self.assertEqual(status[2][str(slave_2.uuid)]["threads"],
            {"sql_running": False, "sql_error": "Error 'Table 'test' "
            "already exists' on query. Default database: 'test'. Query: "
            "'CREATE TABLE test (id INTEGER)'"}
            )
        self.assertEqual(status[2][str(slave_2.uuid)]["status"],
                         _server.MySQLServer.SECONDARY)
        self.assertEqual(status[2][str(master.uuid)]["status"],
                         _server.MySQLServer.PRIMARY)

        # Try to choose a new master through switch over.
        status = self.proxy.group.promote("group_id")
        self.assertStatus(status, _executor.Job.ERROR)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Tried to execute action (_find_candidate_switch).")

        # Try to reset the slave and restart slave.
        _repl.stop_slave(slave_1, wait=True)
        _repl.reset_slave(slave_1, clean=False)

        try:
            _repl.start_slave(slave_1, wait=True)
        except _errors.DatabaseError as error:
            self.assertEqual(
                str(error), "Error 'Table 'test' already exists' "
                "on query. Default database: 'test'. Query: 'CREATE "
                "TABLE test (id INTEGER)'"
                )

        # Synchronize replica.
        self.assertRaises(_errors.DatabaseError, _repl.sync_slave_with_master,
                          slave_1, master, timeout=0)

        # Check replication.
        status = self.proxy.group.health("group_id")
        self.assertEqual(status[2][str(slave_1.uuid)]["threads"],
            {"sql_running": False, "sql_error": "Error 'Table 'test' "
            "already exists' on query. Default database: 'test'. Query: "
            "'CREATE TABLE test (id INTEGER)'"}
            )
        self.assertEqual(status[2][str(slave_1.uuid)]["status"],
                         _server.MySQLServer.SECONDARY)
        self.assertEqual(status[2][str(slave_2.uuid)]["threads"],
            {"sql_running": False, "sql_error": "Error 'Table 'test' "
            "already exists' on query. Default database: 'test'. Query: "
            "'CREATE TABLE test (id INTEGER)'"}
            )
        self.assertEqual(status[2][str(slave_2.uuid)]["status"],
                         _server.MySQLServer.SECONDARY)
        self.assertEqual(status[2][str(master.uuid)]["status"],
                         _server.MySQLServer.PRIMARY)

        # Try to drop the table on the slave.
        _repl.stop_slave(slave_1, wait=True)
        _repl.reset_slave(slave_1, clean=False)
        slave_1.set_session_binlog(False)
        slave_1.exec_stmt("DROP TABLE IF EXISTS test")
        slave_1.set_session_binlog(True)
        _repl.start_slave(slave_1, wait=True)
        _repl.stop_slave(slave_2, wait=True)
        _repl.reset_slave(slave_2, clean=False)
        slave_2.set_session_binlog(False)
        slave_2.exec_stmt("DROP TABLE IF EXISTS test")
        slave_2.set_session_binlog(True)
        _repl.start_slave(slave_2, wait=True)

        # Synchronize replicas.
        _repl.sync_slave_with_master(slave_1, master, timeout=0)
        _repl.sync_slave_with_master(slave_2, master, timeout=0)

        # Check replication.
        status = self.proxy.group.health("group_id")
        self.assertEqual(status[2][str(slave_1.uuid)]["threads"], {})
        self.assertEqual(status[2][str(slave_1.uuid)]["status"],
                         _server.MySQLServer.SECONDARY)
        self.assertEqual(status[2][str(slave_2.uuid)]["threads"], {})
        self.assertEqual(status[2][str(slave_2.uuid)]["status"],
                         _server.MySQLServer.SECONDARY)
        self.assertEqual(status[2][str(master.uuid)]["status"],
                         _server.MySQLServer.PRIMARY)