Example #1
def _setup_replication(shard_id, source_group_id, destn_group_id, split_value,
                                        prune_limit, cmd):
    """Setup replication between the source and the destination groups and
    ensure that they are in sync.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation
    """
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    destination_group = Group.fetch(destn_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    master.connect()

    slave = MySQLServer.fetch(destination_group.master)
    if slave is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    slave.connect()

    #Stop and reset any slave that might be running on the slave server.
    _utils.set_offline_mode(slave, True) ### TODO: if forced offline_mode
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)

    #Change the master to the shard group master.
    _replication.switch_master(slave, master, master.repl_user, master.repl_pass)

    #Start the slave so that syncing of the data begins
    _replication.start_slave(slave, wait=True)
    _utils.set_offline_mode(slave, False) ### TODO: if forced offline_mode

    #Setup sync between the source and the destination groups.
    _events.trigger_within_procedure(
                                     SETUP_SYNC,
                                     shard_id,
                                     source_group_id,
                                     destn_group_id,
                                     split_value,
                                     prune_limit,
                                     cmd
                                     )
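Each step of this re-sharding chain ends by calling _events.trigger_within_procedure with the name of the next event, so the whole operation is a sequence of handlers strung together through the event system. Below is a minimal sketch of how one such handler might be declared; it assumes the _events module exposes an Event class and an on_event decorator (the usual way Fabric service handlers are registered), so treat the exact names as an assumption rather than the original wiring.

# Sketch only: SETUP_SYNC is assumed to be declared next to its handler,
# mirroring the trigger_within_procedure(SETUP_SYNC, ...) call above.
from mysql.fabric import events as _events

SETUP_SYNC = _events.Event("SETUP_SYNC")

@_events.on_event(SETUP_SYNC)
def _setup_sync(shard_id, source_group_id, destn_group_id, split_value,
                prune_limit, cmd):
    # Synchronize the groups, then trigger the next event in the chain
    # (Example #6 shows a full implementation of this step).
    pass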
Example #2
def start_group_slaves(master_group_id):
    """Start the slave groups for the given master group. The
    method will be used in the events that requires, a group, that
    has registered slaves to start them. An example would be
    enable shard, enable shard requires that a group start all
    the slaves that are registered with it.

    :param master_group_id: The master group ID. The ID belongs to the master
                            whose slaves need to be started.
    """
    # Fetch the master group corresponding to the master group
    # ID.
    master_group = Group.fetch(master_group_id)
    if master_group is None:
        raise _errors.GroupError(GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % \
                                                (master_group_id, ))

    # Setup replication with masters of the groups registered as master
    # groups. master_group.slave_group_ids contains the list of the group
    # IDs that are slaves to this master. Iterate through this list and start
    # replication with the registered slaves.
    for slave_group_id in master_group.slave_group_ids:
        slave_group = Group.fetch(slave_group_id)
        # Setup replication with the slave group.
        try:
            setup_group_replication(master_group_id, slave_group.group_id)
        except (_errors.GroupError, _errors.DatabaseError) as error:
            _LOGGER.warning(
                "Error while configuring group replication between "
                "(%s) and (%s): (%s).", master_group_id, slave_group.group_id,
                error)
Example #3
def start_group_slaves(master_group_id):
    """Start the slave groups for the given master group. The
    method will be used in the events that requires, a group, that
    has registered slaves to start them. An example would be
    enable shard, enable shard requires that a group start all
    the slaves that are registered with it.

    :param master_group_id: The master group ID. The ID belongs to the master
                            whose slaves need to be started.
    """
    # Fetch the master group corresponding to the master group
    # ID.
    master_group = Group.fetch(master_group_id)
    if master_group is None:
        raise _errors.GroupError(GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % \
                                                (master_group_id, ))

    # Setup replication with masters of the groups registered as master
    # groups. master_group.slave_group_ids contains the list of the group
    # IDs that are slaves to this master. Iterate through this list and start
    # replication with the registered slaves.
    for slave_group_id in master_group.slave_group_ids:
        slave_group = Group.fetch(slave_group_id)
        # Setup replication with the slave group.
        try:
            setup_group_replication(master_group_id, slave_group.group_id)
        except (_errors.GroupError, _errors.DatabaseError) as error:
            _LOGGER.warning(
                "Error while configuring group replication between "
                "(%s) and (%s): (%s).", master_group_id, slave_group.group_id,
                error
            )
Example #4
    def test_managment(self):
        """Test adding server to a group.
        """
        options_1 = {
            "uuid": _uuid.UUID("bb75b12b-98d1-414c-96af-9e9d4b179678"),
            "address": "server_1.mysql.com:3060",
        }
        server_1 = MySQLServer(**options_1)
        MySQLServer.add(server_1)
        options_2 = {
            "uuid": _uuid.UUID("aa75a12a-98d1-414c-96af-9e9d4b179678"),
            "address": "server_2.mysql.com:3060",
        }
        server_2 = MySQLServer(**options_2)
        MySQLServer.add(server_2)
        group_1 = Group("oracle.com", "First description.")
        Group.add(group_1)

        # Add servers to a group
        group_1.add_server(server_1)
        group_1.add_server(server_2)
        self.assertRaises(AssertionError, group_1.add_server, server_1)
        self.assertEqual(len(group_1.servers()), 2)

        # Remove servers from the group
        group_1.remove_server(server_1)
        group_1.remove_server(server_2)
        self.assertRaises(AssertionError, group_1.remove_server, server_1)
        self.assertEqual(len(group_1.servers()), 0)
Example #5
def _lookup(lookup_arg, key, hint):
    """Given a table name and a key, return the servers of the Group where
    the shard of this table can be found.

    :param lookup_arg: The table name for "LOCAL" lookups or the shard
                       mapping ID for "GLOBAL" lookups.
    :param key: The key value that needs to be looked up.
    :param hint: A hint indicating whether the query is LOCAL or GLOBAL.

    :return: The servers of the Group that contains the range in which the
            key belongs.
    """
    VALID_HINTS = ('LOCAL', 'GLOBAL')
    hint = hint.upper()
    if hint not in VALID_HINTS:
        raise _errors.ShardingError(INVALID_SHARDING_HINT)

    group = None

    #Perform the lookup for the group containing the lookup data.
    if hint == "GLOBAL":
        #Fetch the shard mapping object. In the case of GLOBAL lookups
        #the shard mapping ID is passed directly. In the case of "LOCAL"
        #lookups it is the table name that is passed.
        shard_mapping = ShardMapping.fetch_by_id(lookup_arg)
        if shard_mapping is None:
            raise _errors.ShardingError(
                SHARD_MAPPING_NOT_FOUND % (lookup_arg,  )
                )
        #GLOBAL lookups. There can be only one global group, hence using
        #shard_mapping[0] is safe.
        group = Group.fetch(shard_mapping[0].global_group)
    else:
        shard_mapping = ShardMapping.fetch(lookup_arg)
        if shard_mapping is None:
            raise _errors.ShardingError(TABLE_NAME_NOT_FOUND % (lookup_arg,  ))
        sharding_specification =\
            SHARDING_SPECIFICATION_HANDLER[shard_mapping.type_name].\
            lookup(key, shard_mapping.shard_mapping_id, shard_mapping.type_name)
        if sharding_specification is None:
            raise _errors.ShardingError(INVALID_SHARDING_KEY % (key,  ))
        shard = Shards.fetch(str(sharding_specification.shard_id))
        if shard.state == "DISABLED":
            raise _errors.ShardingError(SHARD_NOT_ENABLED)
        #group cannot be None since there is a foreign key on the group_id.
        #An exception will be thrown nevertheless.
        group = Group.fetch(shard.group_id)
        if group is None:
            raise _errors.ShardingError(SHARD_LOCATION_NOT_FOUND)

    ret = []
    #An empty list will be returned if the registered group has no
    #servers.
    for server in group.servers():
        ret.append([str(server.uuid), server.address,
                   group.master == server.uuid])
    return ret
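To make the return shape concrete, here is a hypothetical usage of _lookup. The table name, shard mapping ID and key value are invented for illustration; only the call signature and the [uuid, address, is_master] row layout come from the function above.

# LOCAL lookup: a table name plus key resolves to the group hosting the shard.
servers = _lookup("db1.t1", 4711, "LOCAL")
for uuid, address, is_master in servers:
    # uuid is a string, address is host:port, is_master is True only for
    # the group's master server.
    pass

# GLOBAL lookup: the first argument is the shard mapping ID and the key is
# not used when resolving the global group.
global_servers = _lookup(1, None, "GLOBAL")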
Example #6
def _setup_sync(shard_id, source_group_id, destn_group_id, split_value,
                                        prune_limit, cmd):
    """Sync the source and the destination groups.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation
    """
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    destination_group = Group.fetch(destn_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    master.connect()

    slave = MySQLServer.fetch(destination_group.master)
    if slave is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    slave.connect()

    #Synchronize until the slave catches up with the master.
    _replication.synchronize_with_read_only(slave, master)

    #Reset replication once the syncing is done.
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)

    #Trigger changing the mappings for the shard that was copied
    _events.trigger_within_procedure(
                                     SETUP_RESHARDING_SWITCH,
                                     shard_id,
                                     source_group_id,
                                     destn_group_id,
                                     split_value,
                                     prune_limit,
                                     cmd
                                     )
Example #7
    def test_managment(self):
        """Test adding server to a group.
        """
        options_1 = {
            "uuid" :  _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : "server_1.mysql.com:3060",
        }
        server_1 = MySQLServer(**options_1)
        MySQLServer.add(server_1)
        options_2 = {
            "uuid" :  _uuid.UUID("{aa75a12a-98d1-414c-96af-9e9d4b179678}"),
            "address"  : "server_2.mysql.com:3060",
        }
        server_2 = MySQLServer(**options_2)
        MySQLServer.add(server_2)
        group_1 = Group("oracle.com", "First description.")
        Group.add(group_1)

        # Add servers to a group
        group_1.add_server(server_1)
        group_1.add_server(server_2)
        self.assertRaises(AssertionError, group_1.add_server, server_1)
        self.assertEqual(len(group_1.servers()), 2)

        # Remove servers from the group
        group_1.remove_server(server_1)
        group_1.remove_server(server_2)
        self.assertRaises(AssertionError, group_1.remove_server, server_1)
        self.assertEqual(len(group_1.servers()), 0)
Example #8
def _setup_sync(shard_id, source_group_id, destn_group_id, split_value,
                                        prune_limit, cmd):
    """Sync the source and the destination groups.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation
    """
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    destination_group = Group.fetch(destn_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    master.connect()

    slave = MySQLServer.fetch(destination_group.master)
    if slave is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    slave.connect()

    #Synchronize until the slave catches up with the master.
    _replication.synchronize_with_read_only(slave, master)

    #Reset replication once the syncing is done.
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)

    #Trigger changing the mappings for the shard that was copied
    _events.trigger_within_procedure(
                                     SETUP_RESHARDING_SWITCH,
                                     shard_id,
                                     source_group_id,
                                     destn_group_id,
                                     split_value,
                                     prune_limit,
                                     cmd
                                     )
Example #9
def stop_group_slave(group_master_id, group_slave_id, clear_ref):
    """Stop the slave on the slave group. This utility method is the
    complement of the setup_group_replication method and is
    used to stop the replication on the slave group. Given a master group ID
    and the slave group ID the method stops the slave on the slave
    group and updates the references on both the master and the
    slave group.

    :param group_master_id: The id of the master group.
    :param group_slave_id: The id of the slave group.
    :param clear_ref: Indicates whether stop_group_slave needs to clear the
                      references to the group's slaves. For example, when a
                      shard is disabled the shard group still retains the
                      references to its slaves, since it needs to re-enable
                      replication when the shard is enabled again.
    """
    master_group = Group.fetch(group_master_id)
    slave_group = Group.fetch(group_slave_id)

    if master_group is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_master_id, ))

    if slave_group is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_slave_id, ))

    slave_group_master = MySQLServer.fetch(slave_group.master)
    if slave_group_master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR %
          (slave_group.master, ))

    if not server_running(slave_group_master):
        #The server is already down. We cannot connect to it to stop
        #replication.
        return
    try:
        slave_group_master.connect()
    except _errors.DatabaseError:
        #Server is not accessible, unable to connect to the server.
        return

    #Stop replication on the master of the group and clear the references,
    #if clear_ref has been set.
    _replication.stop_slave(slave_group_master, wait=True)
    _replication.reset_slave(slave_group_master, clean=True)
    if clear_ref:
        slave_group.remove_master_group_id()
        master_group.remove_slave_group_id(group_slave_id)
Example #10
def stop_group_slave(group_master_id, group_slave_id, clear_ref):
    """Stop the slave on the slave group. This utility method is the
    complement of the setup_group_replication method and is
    used to stop the replication on the slave group. Given a master group ID
    and the slave group ID the method stops the slave on the slave
    group and updates the references on both the master and the
    slave group.

    :param group_master_id: The id of the master group.
    :param group_slave_id: The id of the slave group.
    :param clear_ref: Indicates whether stop_group_slave needs to clear the
                      references to the group's slaves. For example, when a
                      shard is disabled the shard group still retains the
                      references to its slaves, since it needs to re-enable
                      replication when the shard is enabled again.
    """
    master_group = Group.fetch(group_master_id)
    slave_group = Group.fetch(group_slave_id)

    if master_group is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_master_id, ))

    if slave_group is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_slave_id, ))

    slave_group_master = MySQLServer.fetch(slave_group.master)
    if slave_group_master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR %
          (slave_group.master, ))

    if not server_running(slave_group_master):
        #The server is already down. We cannot connect to it to stop
        #replication.
        return
    try:
        slave_group_master.connect()
    except _errors.DatabaseError:
        #Server is not accessible, unable to connect to the server.
        return

    #Stop replication on the master of the group and clear the references,
    #if clear_ref has been set.
    _replication.stop_slave(slave_group_master, wait=True)
    _replication.reset_slave(slave_group_master, clean=True)
    if clear_ref:
        slave_group.remove_master_group_id()
        master_group.remove_slave_group_id(group_slave_id)
Example #11
def _restore_shard_backup(shard_id, source_group_id, destn_group_id,
                          backup_image, split_value, prune_limit, cmd):
    """Restore the backup on the destination Group.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param backup_image: The destination file that contains the backup
                         of the source shard.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation
    """
    restore_user = _services_utils.read_config_value(
                            _config.global_config,
                            'servers',
                            'restore_user'
                        )
    restore_passwd = _services_utils.read_config_value(
                            _config.global_config,
                            'servers',
                            'restore_password'
                        )
    mysqlclient_binary = _services_utils.read_config_value(
                            _config.global_config,
                            'sharding',
                            'mysqlclient_program'
                        )

    destn_group = Group.fetch(destn_group_id)
    if destn_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    #Build a backup image that will be used for restoring
    bk_img = _backup.BackupImage(backup_image)

    for destn_group_server in destn_group.servers():
        destn_group_server.connect()
        _backup.MySQLDump.restore_fabric_server(
            destn_group_server,
            restore_user, restore_passwd,
            bk_img,
            mysqlclient_binary
        )

    #Setup sync between the source and the destination groups.
    _events.trigger_within_procedure(
                                     SETUP_REPLICATION,
                                     shard_id,
                                     source_group_id,
                                     destn_group_id,
                                     split_value,
                                     prune_limit,
                                     cmd
                                     )
Example #12
def _backup_source_shard(shard_id, source_group_id, destn_group_id,
                         mysqldump_binary, mysqlclient_binary, split_value,
                         config_file, prune_limit, cmd, update_only):
    """Backup the source shard.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param mysqldump_binary: The fully qualified mysqldump binary.
    :param mysqlclient_binary: The fully qualified mysql client binary.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param config_file: The complete path to the fabric configuration file.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation (move, split)
    :param update_only: Only update the state store and skip provisioning.
    """
    source_group = Group.fetch(source_group_id)
    move_source_server = _services_utils.fetch_backup_server(source_group)

    #Do the backup of the group hosting the source shard.
    backup_image = _backup.MySQLDump.backup(move_source_server, config_file,
                                            mysqldump_binary)

    #Change the master for the server that is master of the group which hosts
    #the destination shard.
    _events.trigger_within_procedure(RESTORE_SHARD_BACKUP, shard_id,
                                     source_group_id, destn_group_id,
                                     mysqlclient_binary, backup_image.path,
                                     split_value, config_file, prune_limit,
                                     cmd)
Example #13
    def drop_shard_range_trigger(group_id, sharding_type, table_name,
                                    column_name):
        """Drop a trigger on the shard table.

        :param group_id: The ID of the group on which the trigger definition
                         is applied. The trigger is created on the master of
                         this group.
        :param sharding_type: The datatype supported by the shards. Used to
                              name the trigger.
        :param table_name: The name of the table. This is used to name the
                           trigger being created.
        :param column_name: The name of the column in the table being sharded.
                            This is used to create the name of the trigger.
        """
        global_group = Group.fetch(group_id)
        master_server = MySQLServer.fetch(global_group.master)
        master_server.connect()

        db, table = table_name.split(".")

        #Drop the INSERT trigger on the sharded table.
        trigger_name = db + "." + _TRIGGER_PREFIX_INSERT+table
        drop_insert_trigger = _DROP_TRIGGER_DEFN.format(
            trigger_name=trigger_name
        )
        master_server.exec_stmt(drop_insert_trigger)

        #Drop the UPDATE trigger on the sharded table.
        trigger_name = db + "." + _TRIGGER_PREFIX_UPDATE + table
        drop_update_trigger = _DROP_TRIGGER_DEFN.format(
            trigger_name=trigger_name
        )
        master_server.exec_stmt(drop_update_trigger)
Example #14
def register_groups():
    """Upon startup initializes a failure detector for each group.
    """
    from mysql.fabric.server import Group
    _LOGGER.info("Starting failure detector.")
    for row in Group.groups_by_status(Group.ACTIVE):
        FailureDetector.register_group(row[0])
Example #15
    def _run(self):
        """Function that verifies servers' availabilities.
        """
        ignored_status = [MySQLServer.FAULTY]
        quarantine = {}
        interval = FailureDetector._DETECTION_INTERVAL
        detections = FailureDetector._DETECTIONS
        detection_timeout = FailureDetector._DETECTION_TIMEOUT

        _persistence.init_thread()

        while self.__check:
            try:
                unreachable = set()
                group = Group.fetch(self.__group_id)
                if group is not None:
                    for server in group.servers():
                        if server.status in ignored_status or \
                            MySQLServer.is_alive(server, detection_timeout):
                            if server.status == MySQLServer.FAULTY:
                                self.__connection_manager.purge_connections(
                                    server
                                )
                            continue

                        unreachable.add(server.uuid)

                        _LOGGER.warning(
                            "Server (%s) in group (%s) is unreachable.",
                            server.uuid, self.__group_id
                        )

                        unstable = False
                        failed_attempts = 0
                        if server.uuid not in quarantine:
                            quarantine[server.uuid] = failed_attempts = 1
                        else:
                            failed_attempts = quarantine[server.uuid] + 1
                            quarantine[server.uuid] = failed_attempts
                        if failed_attempts >= detections:
                            unstable = True

                        can_set_faulty = group.can_set_server_faulty(
                            server, get_time()
                        )
                        if unstable and can_set_faulty:
                            self._spawn_report_failure(server)
                            
                for uuid in quarantine.keys():
                    if uuid not in unreachable:
                        del quarantine[uuid]

            except (_errors.ExecutorError, _errors.DatabaseError):
                pass
            except Exception as error:
                _LOGGER.exception(error)

            time.sleep(interval / detections)

        _persistence.deinit_thread()
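The quarantine bookkeeping in _run is easy to miss inside the polling loop: a server only counts as unstable after it has been unreachable in _DETECTIONS consecutive rounds, and it drops out of quarantine as soon as it answers again. A standalone sketch of just that logic, with illustrative names, is shown below.

DETECTIONS = 3   # stand-in for FailureDetector._DETECTIONS

def update_quarantine(quarantine, unreachable):
    """Count consecutive failures and return the UUIDs considered unstable."""
    unstable = set()
    for uuid in unreachable:
        quarantine[uuid] = quarantine.get(uuid, 0) + 1
        if quarantine[uuid] >= DETECTIONS:
            unstable.add(uuid)
    # Servers that became reachable again are removed from quarantine.
    for uuid in list(quarantine):
        if uuid not in unreachable:
            del quarantine[uuid]
    return unstable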
Example #16
def _restore_shard_backup(shard_id, source_group_id, destn_group_id,
                          backup_image, split_value, prune_limit, cmd):
    """Restore the backup on the destination Group.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param backup_image: The destination file that contains the backup
                         of the source shard.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation
    """
    restore_user = _services_utils.read_config_value(
                            _config.global_config,
                            'servers',
                            'restore_user'
                        )
    restore_passwd = _services_utils.read_config_value(
                            _config.global_config,
                            'servers',
                            'restore_password'
                        )
    mysqlclient_binary = _services_utils.read_config_value(
                            _config.global_config,
                            'sharding',
                            'mysqlclient_program'
                        )

    destn_group = Group.fetch(destn_group_id)
    if destn_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    #Build a backup image that will be used for restoring
    bk_img = _backup.BackupImage(backup_image)

    for destn_group_server in destn_group.servers():
        destn_group_server.connect()
        _backup.MySQLDump.restore_fabric_server(
            destn_group_server,
            restore_user, restore_passwd,
            bk_img,
            mysqlclient_binary
        )

    #Setup sync between the source and the destination groups.
    _events.trigger_within_procedure(
                                     SETUP_REPLICATION,
                                     shard_id,
                                     source_group_id,
                                     destn_group_id,
                                     split_value,
                                     prune_limit,
                                     cmd
                                     )
Example #17
def stop_group_slaves(master_group_id):
    """Stop the group slaves for the given master group. This will be used
    for use cases that require all the slaves replicating from this group to
    be stopped. An example use case would be disabling a shard.

    :param master_group_id: The master group ID.
    """
    master_group = Group.fetch(master_group_id)
    if master_group is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % \
        (master_group_id, ))

    # Stop the replication on all of the registered slaves for the group.
    for slave_group_id in master_group.slave_group_ids:

        slave_group = Group.fetch(slave_group_id)
        # Fetch the Slave Group and the master of the Slave Group
        slave_group_master = MySQLServer.fetch(slave_group.master)
        if slave_group_master is None:
            _LOGGER.warning(
                GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % \
                (slave_group.master, )
            )
            continue

        if not server_running(slave_group_master):
            # The server is already down. We cannot connect to it to stop
            # replication.
            continue

        try:
            slave_group_master.connect()
            _replication.stop_slave(slave_group_master, wait=True)
            # Reset the slave to remove the reference to the master so
            # that when the server is next used as a slave it does not
            # complain about having a different master.
            _replication.reset_slave(slave_group_master, clean=True)
        except _errors.DatabaseError as error:
            # Server is not accessible, unable to connect to the server.
            _LOGGER.warning(
                "Error while unconfiguring group replication between "
                "(%s) and (%s): (%s).", master_group_id, slave_group.group_id,
                error
            )
            continue
Example #18
def stop_group_slaves(master_group_id):
    """Stop the group slaves for the given master group. This will be used
    for use cases that require all the slaves replicating from this group to
    be stopped. An example use case would be disabling a shard.

    :param master_group_id: The master group ID.
    """
    master_group = Group.fetch(master_group_id)
    if master_group is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % \
        (master_group_id, ))

    # Stop the replication on all of the registered slaves for the group.
    for slave_group_id in master_group.slave_group_ids:

        slave_group = Group.fetch(slave_group_id)
        # Fetch the Slave Group and the master of the Slave Group
        slave_group_master = MySQLServer.fetch(slave_group.master)
        if slave_group_master is None:
            _LOGGER.warning(GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR,
                            slave_group.master)
            continue

        if not server_running(slave_group_master):
            # The server is already down. We cannot connect to it to stop
            # replication.
            continue

        try:
            slave_group_master.connect()
            _replication.stop_slave(slave_group_master, wait=True)
            # Reset the slave to remove the reference to the master so
            # that when the server is next used as a slave it does not
            # complain about having a different master.
            _replication.reset_slave(slave_group_master, clean=True)
        except _errors.DatabaseError as error:
            # Server is not accessible, unable to connect to the server.
            _LOGGER.warning(
                "Error while unconfiguring group replication between "
                "(%s) and (%s): (%s).", master_group_id, slave_group.group_id,
                error)
            continue
Example #19
    def test_clone_from_group(self):
        """Verify the clone operation from a group.
        """
        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        self.__group_1.add_server(self.__server_2)
        self.__group_1.master = self.__server_2.uuid

        try:
            status = self.proxy.server.clone("GROUPID1", self.__server_2.address,
                                        str(self.__server_1.uuid))
            raise Exception("Cloning to a server inside Fabric should "
                "throw a fault")
        except:
            pass

        try:
            status = self.proxy.server.clone("GROUPID1", self.__server_2.address,
                                uuid_server3)
            raise Exception("Cloning from a server outside the "
                "source group should throw a fault")
        except:
            pass

        status = self.proxy.server.clone("GROUPID1", self.__server_3.address,
                                         None)
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_restore_server).")
        rows = self.__server_3.exec_stmt(
                                    "SELECT NAME FROM db1.t1",
                                    {"fetch" : True})
        self.assertEqual(len(rows), 2)
        self.assertEqual(rows[0][0], 'TEST 1')
        self.assertEqual(rows[1][0], 'TEST 2')
        rows = self.__server_3.exec_stmt(
                                    "SELECT NAME FROM db2.t1",
                                    {"fetch" : True})
        self.assertEqual(len(rows), 2)
        self.assertEqual(rows[0][0], 'TEST 1')
        self.assertEqual(rows[1][0], 'TEST 2')
Example #20
    def _fetch_master_of_group(group_id):
        """Return a reference to the master of the group.

        :param group_id: ID of the group whose master needs to be fetched.

        :return: MySQLServer object referring to the group master.
        """
        global_group = Group.fetch(group_id)
        master_server = MySQLServer.fetch(global_group.master)
        master_server.connect()
        return master_server
Example #21
    def _fetch_master_of_group(group_id):
        """Return a reference to the master of the group.

        :param group_id: ID of the group whose master needs to be fetched.

        :return: MySQLServer object referring to the group master.
        """
        global_group = Group.fetch(group_id)
        master_server = MySQLServer.fetch(global_group.master)
        master_server.connect()
        return master_server
Example #22
def _backup_source_shard(shard_id, source_group_id, destn_group_id,
                         split_value, prune_limit, cmd, update_only):
    """Backup the source shard.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation (move, split)
    :param update_only: Only update the state store and skip provisioning.
    """
    backup_user = _services_utils.read_config_value(
                            _config.global_config,
                            'servers',
                            'backup_user'
                        )
    backup_passwd = _services_utils.read_config_value(
                            _config.global_config,
                            'servers',
                            'backup_password'
                        )
    mysqldump_binary = _services_utils.read_config_value(
                            _config.global_config,
                            'sharding',
                            'mysqldump_program'
                        )

    source_group = Group.fetch(source_group_id)
    move_source_server = _services_utils.fetch_backup_server(source_group)

    #Do the backup of the group hosting the source shard.
    backup_image = _backup.MySQLDump.backup(
                        move_source_server,
                        backup_user, backup_passwd,
                        mysqldump_binary
                    )

    #Change the master for the server that is master of the group which hosts
    #the destination shard.
    _events.trigger_within_procedure(
                                     RESTORE_SHARD_BACKUP,
                                     shard_id,
                                     source_group_id,
                                     destn_group_id,
                                     backup_image.path,
                                     split_value,
                                     prune_limit,
                                     cmd
                                     )
Example #23
def _backup_source_shard(shard_id, source_group_id, destn_group_id,
                         split_value, prune_limit, cmd, update_only):
    """Backup the source shard.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation (move, split)
    :param update_only: Only update the state store and skip provisioning.
    """
    backup_user = _services_utils.read_config_value(
                            _config.global_config,
                            'servers',
                            'backup_user'
                        )
    backup_passwd = _services_utils.read_config_value(
                            _config.global_config,
                            'servers',
                            'backup_password'
                        )
    mysqldump_binary = _services_utils.read_config_value(
                            _config.global_config,
                            'sharding',
                            'mysqldump_program'
                        )

    source_group = Group.fetch(source_group_id)
    move_source_server = _services_utils.fetch_backup_server(source_group)

    #Do the backup of the group hosting the source shard.
    backup_image = _backup.MySQLDump.backup(
                        move_source_server,
                        backup_user, backup_passwd,
                        mysqldump_binary
                    )

    #Change the master for the server that is master of the group which hosts
    #the destination shard.
    _events.trigger_within_procedure(
                                     RESTORE_SHARD_BACKUP,
                                     shard_id,
                                     source_group_id,
                                     destn_group_id,
                                     backup_image.path,
                                     split_value,
                                     prune_limit,
                                     cmd
                                     )
Example #24
    def test_clone_from_group(self):
        """Verify the clone operation from a group.
        """
        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        self.__group_1.add_server(self.__server_2)
        self.__group_1.master = self.__server_2.uuid

        try:
            status = self.proxy.server.clone("GROUPID1",
                                             self.__server_2.address,
                                             str(self.__server_1.uuid))
            raise Exception("Cloning to a server inside Fabric should "
                            "throw a fault")
        except:
            pass

        try:
            status = self.proxy.server.clone("GROUPID1",
                                             self.__server_2.address,
                                             uuid_server3)
            raise Exception("Cloning from a server outside the "
                            "source group should throw a fault")
        except:
            pass

        status = self.proxy.server.clone("GROUPID1", self.__server_3.address,
                                         None)
        self.check_xmlrpc_command_result(status)
        rows = self.__server_3.exec_stmt("SELECT NAME FROM db1.t1",
                                         {"fetch": True})
        self.assertEqual(len(rows), 2)
        self.assertEqual(rows[0][0], 'TEST 1')
        self.assertEqual(rows[1][0], 'TEST 2')
        rows = self.__server_3.exec_stmt("SELECT NAME FROM db2.t1",
                                         {"fetch": True})
        self.assertEqual(len(rows), 2)
        self.assertEqual(rows[0][0], 'TEST 1')
        self.assertEqual(rows[1][0], 'TEST 2')
Example #25
def _restore_shard_backup(shard_id, source_group_id, destn_group_id,
                            mysqlclient_binary, backup_image,
                            split_value, config_file, cmd):
    """Restore the backup on the destination Group.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param mysqlclient_binary: The fully qualified mysqlclient binary.
    :param backup_image: The destination file that contains the backup
                         of the source shard.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param config_file: The complete path to the fabric configuration file.
    :param cmd: Indicates the type of re-sharding operation
    """
    destn_group = Group.fetch(destn_group_id)
    if destn_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    #Build a backup image that will be used for restoring
    bk_img = _backup.BackupImage(backup_image)

    for destn_group_server in destn_group.servers():
        destn_group_server.connect()
        _backup.MySQLDump.restore_fabric_server(
            destn_group_server,
            bk_img,
            config_file,
            mysqlclient_binary
        )

    #Setup sync between the source and the destination groups.
    _events.trigger_within_procedure(
                                     SETUP_MOVE_SYNC,
                                     shard_id,
                                     source_group_id,
                                     destn_group_id,
                                     split_value,
                                     cmd
                                     )
Example #26
def _backup_source_shard(shard_id, source_group_id, destn_group_id,
                         mysqldump_binary, mysqlclient_binary, split_value,
                         config_file, prune_limit, cmd, update_only):
    """Backup the source shard.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param mysqldump_binary: The fully qualified mysqldump binary.
    :param mysqlclient_binary: The fully qualified mysql client binary.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param config_file: The complete path to the fabric configuration file.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation (move, split)
    :param update_only: Only update the state store and skip provisioning.
    """
    source_group = Group.fetch(source_group_id)
    move_source_server = _services_utils.fetch_backup_server(source_group)

    #Do the backup of the group hosting the source shard.
    backup_image = _backup.MySQLDump.backup(
                        move_source_server,
                        config_file,
                        mysqldump_binary
                    )

    #Change the master for the server that is master of the group which hosts
    #the destination shard.
    _events.trigger_within_procedure(
                                     RESTORE_SHARD_BACKUP,
                                     shard_id,
                                     source_group_id,
                                     destn_group_id,
                                     mysqlclient_binary,
                                     backup_image.path,
                                     split_value,
                                     config_file,
                                     prune_limit,
                                     cmd
                                     )
Example #27
    def add_shard_range_trigger(group_id, sharding_type, table_name,
                                column_name):
        """Add a trigger on the shard table to ensure that values
        inserted fall within the valid shard ranges.

        :param group_id: The ID of the group on which the trigger definition
                         is applied. The trigger is created on the master of
                         this group.
        :param sharding_type: The datatype supported by the shards. Used to
                              name the trigger.
        :param table_name: The name of the table. This is used to name the
                           trigger being created.
        :param column_name: The name of the column in the table being sharded.
                            This is used to create the name of the trigger.
        """
        global_group = Group.fetch(group_id)
        master_server = MySQLServer.fetch(global_group.master)
        master_server.connect()

        #Create an INSERT trigger on the sharded table.
        db, table = table_name.split(".")
        trigger_tmpl = _TRIGGER_DEFN[sharding_type]
        trigger_name = db + "." + _TRIGGER_PREFIX_INSERT + table
 
        create_insert_trigger = trigger_tmpl.format(
            trigger_name=trigger_name,
            operation="INSERT",
            table_name=table_name,
            column_name="NEW"+"."+column_name
        )
        master_server.exec_stmt(create_insert_trigger)

        #Create an UPDATE trigger on the sharded table.
        trigger_tmpl = _TRIGGER_DEFN[sharding_type]
        trigger_name = db + "." + _TRIGGER_PREFIX_UPDATE + table
        create_update_trigger = trigger_tmpl.format(
            trigger_name=trigger_name,
            operation="UPDATE",
            table_name=table_name,
            column_name="NEW"+"."+column_name
        )
        master_server.exec_stmt(create_update_trigger)
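add_shard_range_trigger only fills in a per-sharding-type template taken from _TRIGGER_DEFN; the template itself is defined elsewhere in Fabric and is not part of this example. The dictionary below is a guess at what a RANGE entry could look like, kept only to show how the trigger_name, operation, table_name and column_name placeholders line up with the .format() calls above.

# Illustrative guess, not the real Fabric definition.
_TRIGGER_DEFN = {
    "RANGE": (
        "CREATE TRIGGER {trigger_name} BEFORE {operation} ON {table_name} "
        "FOR EACH ROW BEGIN "
        "IF {column_name} < @shard_lower_bound "
        "OR {column_name} >= @shard_upper_bound THEN "
        "SIGNAL SQLSTATE '45000' "
        "SET MESSAGE_TEXT = 'Value outside the range of this shard'; "
        "END IF; "
        "END"
    ),
}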
Example #28
def _setup_shard_switch_move(shard_id, source_group_id, destination_group_id,
                             update_only):
    """Setup the moved shard to map to the new group.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destination_group_id: The ID of the group to which the shard
                                needs to be moved.
    :param update_only: Only update the state store and skip provisioning.
    """
    #Fetch the Range sharding specification. When we start implementing
    #heterogeneous sharding schemes, we need to find out the type of
    #sharding scheme and we should use that to find out the sharding
    #implementation.
    _, source_shard, _, shard_mapping_defn = \
        _services_sharding._verify_and_fetch_shard(shard_id)

    #Setup replication between the shard group and the global group.
    _group_replication.setup_group_replication(
        shard_mapping_defn[2], destination_group_id)
    #Set the shard to point to the new group.
    source_shard.group_id = destination_group_id
    #Stop the replication between the global server and the original
    #group associated with the shard.
    _group_replication.stop_group_slave(
        shard_mapping_defn[2], source_group_id, True)

    #Reset the read only flag on the source server.
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)

    if not update_only:
        master.connect()
        master.read_only = False
Example #29
    def setUp(self):
        """Creates the topology for testing.
        """
        tests.utils.cleanup_environment()
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid" :  _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__server_1.connect()

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid" :  _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID VARCHAR(20) PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES('%s', 'TEST %s')" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES('%s', 'TEST %s')" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES('%s', 'TEST %s')" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES('%s', 'TEST %s')" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES('%s', 'TEST %s')" % ("e"+str(i), i))
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_2.exec_stmt("CREATE DATABASE db2")
        self.__server_2.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID VARCHAR(20), salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES('%s', %s)" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES('%s', %s)" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES('%s', %s)" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES('%s', %s)" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES('%s', %s)" % ("e"+str(i), i))


        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid" :  _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(2),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add(self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID VARCHAR(20)PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                "VALUES('%s', 'TEST %s')" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                "VALUES('%s', 'TEST %s')" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                "VALUES('%s', 'TEST %s')" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                "VALUES('%s', 'TEST %s')" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                "VALUES('%s', 'TEST %s')" % ("e"+str(i), i))
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_3.exec_stmt("CREATE DATABASE db2")
        self.__server_3.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID VARCHAR(20), salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                "VALUES('%s', %s)" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                "VALUES('%s', %s)" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                "VALUES('%s', %s)" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                "VALUES('%s', %s)" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                "VALUES('%s', %s)" % ("e"+str(i), i))

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add(self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid" :  _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(3),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID VARCHAR(20)PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                    "VALUES('%s', 'TEST %s')" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                    "VALUES('%s', 'TEST %s')" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                    "VALUES('%s', 'TEST %s')" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                    "VALUES('%s', 'TEST %s')" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                    "VALUES('%s', 'TEST %s')" % ("e"+str(i), i))
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_4.exec_stmt("CREATE DATABASE db2")
        self.__server_4.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID VARCHAR(20), salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                        "VALUES('%s', %s)" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                        "VALUES('%s', %s)" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                        "VALUES('%s', %s)" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                        "VALUES('%s', %s)" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                        "VALUES('%s', %s)" % ("e"+str(i), i))

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add(self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid" :  _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(4),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID VARCHAR(20)PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                        "VALUES('%s', 'TEST %s')" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                        "VALUES('%s', 'TEST %s')" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                            "VALUES('%s', 'TEST %s')" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                        "VALUES('%s', 'TEST %s')" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                        "VALUES('%s', 'TEST %s')" % ("e"+str(i), i))
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_5.exec_stmt("CREATE DATABASE db2")
        self.__server_5.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID VARCHAR(20), salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                      "VALUES('%s', %s)" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                      "VALUES('%s', %s)" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                      "VALUES('%s', %s)" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                      "VALUES('%s', %s)" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                      "VALUES('%s', %s)" % ("e"+str(i), i))


        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add(self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid" :  _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(5),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)
        self.__server_6.connect()

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add(self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

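        # Set up the sharding scheme: GROUPID1 acts as the global group for a
        # RANGE_STRING definition, db1.t1 and db2.t2 are sharded on userID,
        # and GROUPID2..GROUPID5 receive the shards with lower bounds
        # 'a', 'b', 'c' and 'd'. GROUPID6 is registered but gets no shard here.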
        status = self.proxy.sharding.create_definition("RANGE_STRING", "GROUPID1")
        self.check_xmlrpc_command_result(status, returns=1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_table(1, "db2.t2", "userID")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_shard(
            1,
            "GROUPID2/a,GROUPID3/b,GROUPID4/c,GROUPID5/d",
            "ENABLED"
        )
        self.check_xmlrpc_command_result(status)

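        # Prune the shards so that each one keeps only the rows whose
        # sharding key falls within its own range.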
        status = self.proxy.sharding.prune_shard("db1.t1")
        self.check_xmlrpc_command_result(status)
Example #30
0
    def setUp(self):
        """Configure the existing environment
        """
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid": _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(0),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid": _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(1),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)

        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid": _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(2),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add(self.__server_3)

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add(self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid": _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(3),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add(self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid": _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(4),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add(self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid": _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(5),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add(self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

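        # Set up a HASH sharding scheme with GROUPID1 as the global group and
        # db1.t1 sharded on userID1.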
        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.check_xmlrpc_command_result(status, returns=1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID1")
        self.check_xmlrpc_command_result(status)

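        # For a HASH definition no lower bounds are supplied; only the five
        # shard groups are listed and Fabric computes the hash ranges itself.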
        status = self.proxy.sharding.add_shard(
            1, "GROUPID2,GROUPID3,"
            "GROUPID4,GROUPID5,GROUPID6", "ENABLED")
        self.check_xmlrpc_command_result(status)
Example #31
0
    def setUp(self):
        """Creates the following topology for testing,

        GROUPID1 - localhost:13001, localhost:13002 - Global Group
        GROUPID2 - localhost:13003, localhost:13004 - shard 1
        GROUPID3 - localhost:13005, localhost:13006 - shard 2
        """
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid": _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(0),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid": _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(1),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid": _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(2),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add(self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add(self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid": _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(3),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add(self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid": _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(4),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add(self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid": _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(5),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)
        self.__server_6.connect()
        self.__server_6.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_6.exec_stmt("CREATE DATABASE db1")
        self.__server_6.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add(self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

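        # Set up a RANGE sharding scheme: the lower bounds 1, 101, 1001, 10001
        # and 100001 match the key ranges inserted above, mapping one range of
        # db1.t1 to each of GROUPID2..GROUPID6.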
        status = self.proxy.sharding.create_definition("RANGE", "GROUPID1")
        self.check_xmlrpc_command_result(status, returns=1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_shard(
            1, "GROUPID2/1,GROUPID3/101,GROUPID4/1001,"
            "GROUPID5/10001,GROUPID6/100001", "ENABLED")
        self.check_xmlrpc_command_result(status)
Example #32
0
class TestShardingPrune(tests.utils.TestCase):
    """Contains unit tests for testing the shard split operation and for
    verifying that the global server configuration remains constant after
    the shard split configuration.
    """
    def setUp(self):
        """Creates the topology for testing.
        """
        tests.utils.cleanup_environment()
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid" :  _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__server_1.connect()

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid" :  _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID VARCHAR(20) PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES('%s', 'TEST %s')" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES('%s', 'TEST %s')" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES('%s', 'TEST %s')" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES('%s', 'TEST %s')" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES('%s', 'TEST %s')" % ("e"+str(i), i))
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_2.exec_stmt("CREATE DATABASE db2")
        self.__server_2.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID VARCHAR(20), salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES('%s', %s)" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES('%s', %s)" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES('%s', %s)" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES('%s', %s)" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES('%s', %s)" % ("e"+str(i), i))


        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid" :  _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(2),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add(self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID VARCHAR(20)PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                "VALUES('%s', 'TEST %s')" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                "VALUES('%s', 'TEST %s')" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                "VALUES('%s', 'TEST %s')" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                "VALUES('%s', 'TEST %s')" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                "VALUES('%s', 'TEST %s')" % ("e"+str(i), i))
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_3.exec_stmt("CREATE DATABASE db2")
        self.__server_3.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID VARCHAR(20), salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                "VALUES('%s', %s)" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                "VALUES('%s', %s)" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                "VALUES('%s', %s)" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                "VALUES('%s', %s)" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                "VALUES('%s', %s)" % ("e"+str(i), i))

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add(self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid" :  _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(3),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID VARCHAR(20)PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                    "VALUES('%s', 'TEST %s')" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                    "VALUES('%s', 'TEST %s')" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                    "VALUES('%s', 'TEST %s')" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                    "VALUES('%s', 'TEST %s')" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                    "VALUES('%s', 'TEST %s')" % ("e"+str(i), i))
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_4.exec_stmt("CREATE DATABASE db2")
        self.__server_4.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID VARCHAR(20), salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                        "VALUES('%s', %s)" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                        "VALUES('%s', %s)" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                        "VALUES('%s', %s)" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                        "VALUES('%s', %s)" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                        "VALUES('%s', %s)" % ("e"+str(i), i))

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add(self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid" :  _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(4),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID VARCHAR(20)PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                        "VALUES('%s', 'TEST %s')" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                        "VALUES('%s', 'TEST %s')" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                            "VALUES('%s', 'TEST %s')" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                        "VALUES('%s', 'TEST %s')" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                        "VALUES('%s', 'TEST %s')" % ("e"+str(i), i))
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_5.exec_stmt("CREATE DATABASE db2")
        self.__server_5.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID VARCHAR(20), salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                      "VALUES('%s', %s)" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                      "VALUES('%s', %s)" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                      "VALUES('%s', %s)" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                      "VALUES('%s', %s)" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                      "VALUES('%s', %s)" % ("e"+str(i), i))


        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add(self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid" :  _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(5),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)
        self.__server_6.connect()

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add(self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        status = self.proxy.sharding.create_definition("RANGE_STRING", "GROUPID1")
        self.check_xmlrpc_command_result(status, returns=1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_table(1, "db2.t2", "userID")
        self.check_xmlrpc_command_result(status)

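        # Shards with lower bounds 'a', 'b', 'c' and 'd' go to
        # GROUPID2..GROUPID5; GROUPID6 is registered but left empty so it can
        # receive a shard in the split test below.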
        status = self.proxy.sharding.add_shard(
            1,
            "GROUPID2/a,GROUPID3/b,GROUPID4/c,GROUPID5/d",
            "ENABLED"
        )
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.prune_shard("db1.t1")
        self.check_xmlrpc_command_result(status)

    def test_split_shard_4(self):
        '''Test the split of shard 4 and the behaviour of the global group
        afterwards. The test splits shard 4 between GROUPID5 and GROUPID6.
        Once the split is done, it verifies the row counts on GROUPID5 and
        GROUPID6 to check that the rows were divided between the two groups.
        It then inserts rows through the global group and verifies that all
        the shards receive the inserted tuples.
        '''
        row_cnt_shard_before_split_db1_t1 = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_before_split_db2_t2 = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )

        row_cnt_shard_before_split_db1_t1 = \
            int(row_cnt_shard_before_split_db1_t1[0][0])
        row_cnt_shard_before_split_db2_t2 = \
            int(row_cnt_shard_before_split_db2_t2[0][0])

        self.assertEqual(row_cnt_shard_before_split_db1_t1, 400)
        self.assertEqual(row_cnt_shard_before_split_db2_t2, 400)

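        # Split shard 4 (currently on GROUPID5, lower bound 'd') at key 'e':
        # rows with keys >= 'e' move to GROUPID6, rows below stay on GROUPID5.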
        status = self.proxy.sharding.split_shard("4", "GROUPID6", "e")
        self.check_xmlrpc_command_result(status)

        self.__server_5.connect()
        row_cnt_shard_after_split_db1_t1_a = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_db1_t1_a = \
            int(row_cnt_shard_after_split_db1_t1_a[0][0])
        self.assertEqual(row_cnt_shard_after_split_db1_t1_a, 200)

        row_cnt_shard_after_split_db2_t2_a = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_db2_t2_a = \
            int(row_cnt_shard_after_split_db2_t2_a[0][0])
        self.assertEqual(row_cnt_shard_after_split_db2_t2_a, 200)

        row_cnt_shard_after_split_db1_t1_b = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_db1_t1_b = \
            int(row_cnt_shard_after_split_db1_t1_b[0][0])
        self.assertEqual(row_cnt_shard_after_split_db1_t1_b, 200)

        row_cnt_shard_after_split_db2_t2_b = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_db2_t2_b = \
            int(row_cnt_shard_after_split_db2_t2_b[0][0])
        self.assertEqual(row_cnt_shard_after_split_db2_t2_b, 200)
        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID VARCHAR(20), name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                  "VALUES('%s', 'TEST %s')" % (i, i))
        time.sleep(5)
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertEqual(global_table_count, 10)
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertEqual(global_table_count, 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertEqual(global_table_count, 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertEqual(global_table_count, 10)
        #Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertEqual(global_table_count, 10)

    def tearDown(self):
        """Clean up the existing environment
        """
        tests.utils.cleanup_environment()
Example #33
0
    def _run(self):
        """Function that verifies servers' availabilities.
        """
        from mysql.fabric.server import (
            Group,
            MySQLServer,
            ConnectionManager,
        )

        ignored_status = [MySQLServer.FAULTY]
        quarantine = {}
        interval = FailureDetector._DETECTION_INTERVAL
        detections = FailureDetector._DETECTIONS
        detection_timeout = FailureDetector._DETECTION_TIMEOUT
        connection_manager = ConnectionManager()

        _persistence.init_thread()

        while self.__check:
            try:
                unreachable = set()
                group = Group.fetch(self.__group_id)
                if group is not None:
                    for server in group.servers():
                        if server.status in ignored_status or \
                            MySQLServer.is_alive(server, detection_timeout):
                            if server.status == MySQLServer.FAULTY:
                                connection_manager.kill_connections(server)
                            continue

                        unreachable.add(server.uuid)

                        _LOGGER.warning(
                            "Server (%s) in group (%s) is unreachable.",
                            server.uuid, self.__group_id
                        )

                        unstable = False
                        failed_attempts = 0
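                        # Count consecutive failed checks; the server is only
                        # considered unstable after 'detections' misses in a row.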
                        if server.uuid not in quarantine:
                            quarantine[server.uuid] = failed_attempts = 1
                        else:
                            failed_attempts = quarantine[server.uuid] + 1
                            quarantine[server.uuid] = failed_attempts
                        if failed_attempts >= detections:
                            unstable = True

                        can_set_faulty = group.can_set_server_faulty(
                            server, get_time()
                        )
                        if unstable and can_set_faulty:
                            # We have to make this transactional and make the
                            # failover (i.e. report failure) robust to failures.
                            # Otherwise, a master might be set to faulty and
                            # a new one never promoted.
                            server.status = MySQLServer.FAULTY
                            connection_manager.kill_connections(server)
                            
                            procedures = trigger("REPORT_FAILURE", None,
                                str(server.uuid),
                                threading.current_thread().name,
                                MySQLServer.FAULTY, False
                            )
                            executor = _executor.Executor()
                            for procedure in procedures:
                                executor.wait_for_procedure(procedure)

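                # Servers that became reachable again are removed from
                # quarantine so their failure count starts over.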
                for uuid in list(quarantine.keys()):  # copy: entries are deleted below
                    if uuid not in unreachable:
                        del quarantine[uuid]

            except (_errors.ExecutorError, _errors.DatabaseError):
                pass
            except Exception as error:
                _LOGGER.exception(error)

            time.sleep(interval / detections)

        _persistence.deinit_thread()
Example #34
0
class TestShardingServices(tests.utils.TestCase):
    def setUp(self):
        """Configure the existing environment
        """
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid": _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(0),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid": _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(1),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)

        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid": _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(2),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add(self.__server_3)

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add(self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid": _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(3),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add(self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_4.exec_stmt("CREATE DATABASE db2")
        self.__server_4.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID2 INT, name VARCHAR(30))")
        for i in range(1, 201):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))

        self.__options_5 = {
            "uuid": _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(4),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add(self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_5.exec_stmt("CREATE DATABASE db2")
        self.__server_5.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID2 INT, name VARCHAR(30))")
        for i in range(1, 201):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))

        self.__options_6 = {
            "uuid": _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(5),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add(self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

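        # Create three sharding definitions (HASH, RANGE, HASH); the returned
        # ids 1, 2 and 3 are used below when registering tables and shards.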
        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.check_xmlrpc_command_result(status, returns=1)

        status = self.proxy.sharding.create_definition("RANGE", "GROUPID1")
        self.check_xmlrpc_command_result(status, returns=2)

        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.check_xmlrpc_command_result(status, returns=3)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID1")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_table(2, "db2.t2", "userID2")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_table(3, "db3.t3", "userID3")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_shard(1, "GROUPID2,GROUPID3",
                                               "ENABLED")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_shard(2, "GROUPID4/0,GROUPID5/101",
                                               "ENABLED")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_shard(3, "GROUPID6", "ENABLED")
        self.check_xmlrpc_command_result(status)

    def test_list_shard_defns(self):
        expected_shard_mapping_list1 = tests.utils.make_mapping_result(
            [[1, "HASH", "GROUPID1"], [2, "RANGE", "GROUPID1"],
             [3, "HASH", "GROUPID1"]])

        status = self.proxy.sharding.list_definitions()
        self.check_xmlrpc_result(status, expected_shard_mapping_list1)

    def test_list_shard_mappings(self):
        expected_hash = tests.utils.make_shard_mapping_list_result([
            [1, 'HASH', "db1.t1", "GROUPID1", "userID1"],
            [3, 'HASH', "db3.t3", "GROUPID1", "userID3"],
        ])
        status = self.proxy.sharding.list_tables("HASH")
        self.check_xmlrpc_result(status, expected_hash)

        expected_range = tests.utils.make_shard_mapping_list_result([
            [2, "RANGE", "db2.t2", "GROUPID1", "userID2"],
        ])
        status = self.proxy.sharding.list_tables("RANGE")
        self.check_xmlrpc_result(status, expected_range)

    def test_shard_prune(self):
        status = self.proxy.sharding.prune_shard("db2.t2")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.lookup_servers("db2.t2", 1, "LOCAL")
        info = self.check_xmlrpc_simple(status, {})
        shard_uuid = info['server_uuid']
        shard_server = fetch_test_server(shard_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt("SELECT COUNT(*) FROM db2.t2",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 100)
        rows = shard_server.exec_stmt("SELECT MAX(userID2) FROM db2.t2",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 100)
        rows = shard_server.exec_stmt("SELECT MIN(userID2) FROM db2.t2",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 1)

        status = self.proxy.sharding.lookup_servers("db2.t2", 101, "LOCAL")
        info = self.check_xmlrpc_simple(status, {})
        shard_uuid = info['server_uuid']
        shard_server = fetch_test_server(shard_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt("SELECT COUNT(*) FROM db2.t2",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 100)
        rows = shard_server.exec_stmt("SELECT MAX(userID2) FROM db2.t2",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 200)
        rows = shard_server.exec_stmt("SELECT MIN(userID2) FROM db2.t2",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 101)

    def tearDown(self):
        """Clean up the existing environment
        """
        tests.utils.cleanup_environment()
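# What prune_shard boils down to for a RANGE-sharded table is illustrated by
# the assertions above: each shard keeps only the rows whose sharding key
# falls inside its [lower_bound, upper_bound) range. The helper below is a
# minimal sketch of that idea, not the Fabric implementation; the batched
# DELETE mirrors the prune_limit parameter and the exec_stmt call style is
# assumed from the tests above.
def prune_range_shard_sketch(server, table, column, lower, upper, prune_limit):
    """Delete rows that fall outside the shard's range, in batches."""
    condition = "%s < %s" % (column, lower)
    if upper is not None:
        condition = "(%s) OR %s >= %s" % (condition, column, upper)
    while True:
        rows = server.exec_stmt(
            "SELECT COUNT(*) FROM %s WHERE %s" % (table, condition),
            {"fetch": True}
        )
        if int(rows[0][0]) == 0:
            break
        server.exec_stmt(
            "DELETE FROM %s WHERE %s LIMIT %d" % (table, condition, prune_limit)
        )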
Example #35
0
    def test_properties(self):
        """Test group's properties.
        """
        group_1 = Group("mysql.com")
        Group.add(group_1)
        fetched_group_1 = Group.fetch(group_1.group_id)
        self.assertEqual(group_1.group_id, "mysql.com")
        self.assertEqual(fetched_group_1.group_id, "mysql.com")
        self.assertEqual(fetched_group_1.master_defined, None)

        group_2 = Group("oracle.com", "First description.")
        Group.add(group_2)
        fetched_group_2 = Group.fetch(group_2.group_id)
        self.assertEqual(group_2.group_id, "oracle.com")
        self.assertEqual(fetched_group_2.group_id, "oracle.com")

        group_1.description = "New description."
        fetched_group_1 = Group.fetch(group_1.group_id)
        self.assertEqual(group_1.description, "New description.")
        self.assertEqual(fetched_group_1.description, "New description.")

        group_1.description = None
        fetched_group_1 = Group.fetch(group_1.group_id)
        self.assertEqual(group_1.description, None)
        self.assertEqual(fetched_group_1.description, None)

        group_1.status = Group.INACTIVE
        fetched_group_1 = Group.fetch(group_1.group_id)
        self.assertEqual(group_1.status, Group.INACTIVE)
        self.assertEqual(fetched_group_1.status, Group.INACTIVE)

        self.assertEqual(group_1, fetched_group_1)
        self.assertEqual(group_2, fetched_group_2)
        self.assertNotEqual(group_1, group_2)

        set_of_groups = set()
        set_of_groups.add(group_1)
        set_of_groups.add(group_2)
        set_of_groups.add(fetched_group_1)
        set_of_groups.add(fetched_group_2)
        self.assertEqual(len(set_of_groups), 2)
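# The set above collapses to two entries only because Group equality and
# hashing are keyed on group_id, so a freshly fetched copy compares equal to
# the in-memory object it was loaded from. A minimal sketch of that identity
# contract (an assumption about how Group implements it, shown for
# illustration only):
class GroupIdentitySketch(object):
    def __init__(self, group_id, description=None):
        self.group_id = group_id
        self.description = description

    def __eq__(self, other):
        return isinstance(other, GroupIdentitySketch) and \
            self.group_id == other.group_id

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.group_id)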
Example #36
0
    def _run(self):
        """Function that verifies servers' availabilities.
        """
        from mysql.fabric.server import (
            Group,
            MySQLServer,
            ConnectionManager,
        )

        ignored_status = [MySQLServer.FAULTY]
        quarantine = {}
        interval = FailureDetector._DETECTION_INTERVAL
        detections = FailureDetector._DETECTIONS
        detection_timeout = FailureDetector._DETECTION_TIMEOUT
        connection_manager = ConnectionManager()
        slave_deep_checks = FailureDetector._SLAVE_DEEP_CHECKS

        _persistence.init_thread()

        while self.__check:
            try:
                unreachable = set()
                group = Group.fetch(self.__group_id)
                if group is not None:
                    for server in group.servers():
                        if server.status in ignored_status:

                            ### Server is FAULTY
                            connection_manager.kill_connections(server)
                            continue
                        else:
                            ### Server is Not FAULTY
                            if MySQLServer.is_alive(server, detection_timeout):

                                ### Server is alive
                                ### check depends on `slave_deep_checks` parameter
                                if slave_deep_checks:

                                    ### When server is alive and status != FAULTY
                                    is_master = (group.master == server.uuid)
                                    if not is_master:
                                        ### Check whether the master is dead or alive.
                                        master_server = MySQLServer.fetch(group.master)
    
                                        if MySQLServer.is_alive(master_server, detection_timeout):
    
                                            ### If the master is alive, check whether replication is healthy.
                                            server.connect()
                                            slave_issues, why_slave_issues = \
                                                _replication.check_slave_issues(server)
                                            if slave_issues:
        
                                                if (why_slave_issues['io_error'] and \
                                                    why_slave_issues['io_errno'] == 2003):
        
                                                    ### The slave is still reconnecting to the master; nothing to do but log it.
                                                    _LOGGER.info(why_slave_issues)
        
                                                else:
                                                        
                                                    ### If slave threads are not running, set status to SPARE
                                                    server.status = MySQLServer.SPARE
        
                                            ### Done slave_issues.
                                            server.disconnect()
    
                                        ### Endif MySQLServer.is_alive(master_server, detection_timeout)
                                    ### Endif not is_master
                                ### Endif slave_deep_checks
                                continue
                            ### Else MySQLServer.is_alive(server, detection_timeout)
                            else:

                                unreachable.add(server.uuid)

                                _LOGGER.warning(
                                    "Server (%s) in group (%s) is unreachable.",
                                    server.uuid, self.__group_id
                                )
        
                                unstable = False
                                failed_attempts = 0
                                if server.uuid not in quarantine:
                                    quarantine[server.uuid] = failed_attempts = 1
                                else:
                                    failed_attempts = quarantine[server.uuid] + 1
                                    quarantine[server.uuid] = failed_attempts
                                if failed_attempts >= detections:
                                    unstable = True
        
                                can_set_faulty = group.can_set_server_faulty(
                                    server, get_time()
                                )
                                if unstable and can_set_faulty:
                                    # We have to make this transactional and make the
                                    # failover (i.e. report failure) robust to failures.
                                    # Otherwise, a master might be set to faulty and
                                    # a new one never promoted.
                                    server.status = MySQLServer.FAULTY
                                    connection_manager.kill_connections(server)
                                    
                                    procedures = trigger("REPORT_FAILURE", None,
                                        str(server.uuid),
                                        threading.current_thread().name,
                                        MySQLServer.FAULTY, False
                                    )
                                    executor = _executor.Executor()
                                    for procedure in procedures:
                                        executor.wait_for_procedure(procedure)

                            ### Endif MySQLServer.is_alive(server, detection_timeout)
                        ### Endif server.status in ignored_status
                    ### End for server in group.servers()
                ### Endif group is not None
                # Copy the keys so entries can be removed while iterating.
                for uuid in list(quarantine.keys()):
                    if uuid not in unreachable:
                        del quarantine[uuid]

            except (_errors.ExecutorError, _errors.DatabaseError):
                pass
            except Exception as error:
                _LOGGER.exception(error)

            time.sleep(interval)

        _persistence.deinit_thread()
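# The failure-reporting decision in the loop above reduces to a small piece of
# bookkeeping: every failed probe bumps a per-server counter, the server is
# only considered unstable once the counter reaches the configured number of
# detections, and a successful probe clears it. The helper below is a
# condensed, illustrative restatement of that logic, not part of
# FailureDetector itself.
def update_quarantine_sketch(quarantine, server_uuid, reachable, detections):
    """Return True when the server has failed `detections` probes in a row."""
    if reachable:
        # A reachable server leaves quarantine immediately.
        quarantine.pop(server_uuid, None)
        return False
    failed_attempts = quarantine.get(server_uuid, 0) + 1
    quarantine[server_uuid] = failed_attempts
    return failed_attempts >= detections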
Example #37
0
class TestHashMoveGlobal(tests.utils.TestCase):
    """Contains unit tests for testing the shard move operation and for
    verifying that the global server configuration remains constant after
    the shard move configuration.
    """
    def setUp(self):
        """Configure the existing environment
        """
        tests.utils.cleanup_environment()
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid" :  _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__server_1.connect()

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid" :  _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_2.exec_stmt("CREATE DATABASE db2")
        self.__server_2.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid" :  _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(2),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add(self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_3.exec_stmt("CREATE DATABASE db2")
        self.__server_3.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add(self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid" :  _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(3),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_4.exec_stmt("CREATE DATABASE db2")
        self.__server_4.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add(self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid" :  _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(4),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_5.exec_stmt("CREATE DATABASE db2")
        self.__server_5.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add(self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid" :  _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(5),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add(self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.check_xmlrpc_command_result(status, returns=1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_table(1, "db2.t2", "userID")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_shard(
            1,
            "GROUPID2,GROUPID3,GROUPID4,GROUPID5",
            "ENABLED"
        )
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.prune_shard("db1.t1")
        self.check_xmlrpc_command_result(status)

    def test_move_shard_1(self):
        '''Test the move of shard 1 and the global server configuration
        after that. The test moves shard 1 from GROUPID2 to GROUPID6.
        After the move is done, it verifies the row count on GROUPID6 to
        check that the group has all the tuples from the earlier group. It
        then fires an INSERT on the global group and verifies that all the
        shards have received the inserted tuples. GROUPID2 should not have
        received the values since the shard has been moved away from it.
        '''
        row_cnt_shard_before_move_db1_t1 = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db2_t2 = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])
        status = self.proxy.sharding.move_shard("1", "GROUPID6")
        self.check_xmlrpc_command_result(status)
        self.__server_6.connect()
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])
        self.assertTrue(
            row_cnt_shard_before_move_db1_t1 == row_cnt_shard_after_move_db1_t1
        )
        self.assertTrue(
            row_cnt_shard_before_move_db2_t2 == row_cnt_shard_after_move_db2_t2
        )
        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)

        try:
            #Verify that the data is not there in the first shard.
            global_table_count = self.__server_2.exec_stmt(
                        "SELECT COUNT(*) FROM global.global_table",
                        {"fetch" : True}
                    )
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass

        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_move_shard_2(self):
        '''Test the move of shard 2 and the global server configuration
        after that. The test moves shard 2 from GROUPID3 to GROUPID6.
        After the move is done, it verifies the row count on GROUPID6 to
        check that the group has all the tuples from the earlier group. It
        then fires an INSERT on the global group and verifies that all the
        shards have received the inserted tuples. GROUPID3 should not have
        received the values since the shard has been moved away from it.
        '''
        row_cnt_shard_before_move_db1_t1 = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db2_t2 = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])
        status = self.proxy.sharding.move_shard("2", "GROUPID6")
        self.check_xmlrpc_command_result(status)
        self.__server_6.connect()
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])
        self.assertTrue(
            row_cnt_shard_before_move_db1_t1 == row_cnt_shard_after_move_db1_t1
        )
        self.assertTrue(
            row_cnt_shard_before_move_db2_t2 == row_cnt_shard_after_move_db2_t2
        )
        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)

        try:
            #Verify that the data is not there in the second shard.
            global_table_count = self.__server_3.exec_stmt(
                        "SELECT COUNT(*) FROM global.global_table",
                        {"fetch" : True}
                    )
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass

        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_move_shard_3(self):
        '''Test the move of shard 3 and the global server configuration
        after that. The test moves shard 3 from GROUPID4 to GROUPID6.
        After the move is done, it verifies the row count on GROUPID6 to
        check that the group has all the tuples from the earlier group. It
        then fires an INSERT on the global group and verifies that all the
        shards have received the inserted tuples. GROUPID4 should not have
        received the values since the shard has been moved away from it.
        '''
        row_cnt_shard_before_move_db1_t1 = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db2_t2 = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])
        status = self.proxy.sharding.move_shard("3", "GROUPID6")
        self.check_xmlrpc_command_result(status)
        self.__server_6.connect()
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])
        self.assertTrue(
            row_cnt_shard_before_move_db1_t1 == row_cnt_shard_after_move_db1_t1
        )
        self.assertTrue(
            row_cnt_shard_before_move_db2_t2 == row_cnt_shard_after_move_db2_t2
        )
        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        try:
            #Verify that the data is not there in the third shard's
            #original group.
            global_table_count = self.__server_4.exec_stmt(
                        "SELECT COUNT(*) FROM global.global_table",
                        {"fetch" : True}
                    )
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard
        #new group.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_move_shard_4(self):
        '''Test the move of shard 4 and the global server configuration
        after that. The test moves shard 4 from GROUPID5 to GROUPID6.
        After the move is done, it verifies the row count on GROUPID6 to
        check that the group has all the tuples from the earlier group. It
        then fires an INSERT on the global group and verifies that all the
        shards have received the inserted tuples. GROUPID5 should not have
        received the values since the shard has been moved away from it.
        '''
        row_cnt_shard_before_move_db1_t1 = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db2_t2 = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])
        status = self.proxy.sharding.move_shard("4", "GROUPID6")
        self.check_xmlrpc_command_result(status)
        self.__server_6.connect()
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])
        self.assertTrue(
            row_cnt_shard_before_move_db1_t1 == row_cnt_shard_after_move_db1_t1
        )
        self.assertTrue(
            row_cnt_shard_before_move_db2_t2 == row_cnt_shard_after_move_db2_t2
        )
        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        try:
            #Verify that the data is not there in the fourth shard's
            #original group.
            global_table_count = self.__server_5.exec_stmt(
                        "SELECT COUNT(*) FROM global.global_table",
                        {"fetch" : True}
                    )
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard's new
        #group.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def tearDown(self):
        """Clean up the existing environment
        """
        tests.utils.cleanup_environment()
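# The HASH tests above never show how a key is routed to a shard, so the
# following is a rough sketch under the assumption that the scheme hashes the
# key with MD5 and that each shard owns the key space starting at its
# lower_bound, wrapping around for digests below every bound. It illustrates
# the idea only and is not the lookup Fabric actually performs.
import hashlib

def pick_hash_shard_sketch(key, lower_bounds_to_group):
    """Map a sharding key to a group given {hex lower_bound: group_id}."""
    digest = hashlib.md5(str(key).encode("utf-8")).hexdigest().upper()
    candidates = [bound for bound in lower_bounds_to_group if bound <= digest]
    chosen = max(candidates) if candidates else max(lower_bounds_to_group)
    return lower_bounds_to_group[chosen]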
Example #38
0
def _check_shard_information(shard_id, destn_group_id, mysqldump_binary,
                             mysqlclient_binary, split_value, config_file, prune_limit, cmd,
                             update_only):
    """Verify the sharding information before starting a re-sharding operation.

    :param shard_id: The ID of the shard that is being moved or split.
    :param destn_group_id: The Destination group ID.
    :param mysqldump_binary: The path to the mysqldump binary.
    :param mysqlclient_binary: The path to the mysqlclient binary.
    :param split_value: The point at which the sharding definition
                        should be split.
    :param config_file: The complete path to the fabric configuration
                        file.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates if it is a split or a move being executed.
    :param update_only: Whether the operation only updates the state store.
    """
    if not _services_utils.is_valid_binary(mysqldump_binary):
        raise _errors.ShardingError(
                _services_sharding.MYSQLDUMP_NOT_FOUND % mysqldump_binary)

    if not _services_utils.is_valid_binary(mysqlclient_binary):
        raise _errors.ShardingError(
                _services_sharding.MYSQLCLIENT_NOT_FOUND % mysqlclient_binary)

    if cmd == "SPLIT":
        range_sharding_spec, _, shard_mappings, _ = \
            _services_sharding.verify_and_fetch_shard(shard_id)
        upper_bound = \
            SHARDING_SPECIFICATION_HANDLER[shard_mappings[0].type_name].\
                        get_upper_bound(
                            range_sharding_spec.lower_bound,
                            range_sharding_spec.shard_mapping_id,
                            shard_mappings[0].type_name
                          )
        #Check whether the underlying sharding scheme is HASH. When a shard
        #is split, all the tables that are part of the shard have the same
        #sharding scheme, so all the shard mappings associated with this
        #shard_id are of the same sharding type. Hence it is safe to use one
        #of the shard mappings.
        if shard_mappings[0].type_name == "HASH":
            if split_value is not None:
                raise _errors.ShardingError(
                    _services_sharding.NO_LOWER_BOUND_FOR_HASH_SHARDING
                )
            if upper_bound is None:
                #While splitting a range, retrieve the next upper bound and
                #find the mid-point. If the next upper_bound is unavailable,
                #pick the maximum value present in the shard instead.
                upper_bound = HashShardingSpecification.fetch_max_key(shard_id)

            #Calculate the split value.
            split_value = \
                SHARDING_DATATYPE_HANDLER[shard_mappings[0].type_name].\
                split_value(
                    range_sharding_spec.lower_bound,
                    upper_bound
                )
        elif split_value is not None:
            if not (SHARDING_DATATYPE_HANDLER[shard_mappings[0].type_name].\
                    is_valid_split_value(
                        split_value, range_sharding_spec.lower_bound,
                        upper_bound
                    )
                ):
                raise _errors.ShardingError(
                    _services_sharding.INVALID_LOWER_BOUND_VALUE %
                    (split_value, )
                )
        elif split_value is None:
            raise _errors.ShardingError(
                _services_sharding.SPLIT_VALUE_NOT_DEFINED
            )

    #Ensure that the group does not already contain a shard.
    if Shards.lookup_shard_id(destn_group_id) is not None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_MOVE_DESTINATION_NOT_EMPTY %
            (destn_group_id, )
        )

    #Fetch the group information for the source shard that
    #needs to be moved.
    source_shard = Shards.fetch(shard_id)
    if source_shard is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_NOT_FOUND % (shard_id, ))

    #Fetch the group_id and the group that hosts the source shard.
    source_group_id = source_shard.group_id

    destn_group = Group.fetch(destn_group_id)
    if destn_group is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_NOT_FOUND %
            (destn_group_id, ))

    if not update_only:
        _events.trigger_within_procedure(
            BACKUP_SOURCE_SHARD, shard_id, source_group_id, destn_group_id,
            mysqldump_binary, mysqlclient_binary, split_value, config_file,
            prune_limit, cmd, update_only
        )
    else:
        _events.trigger_within_procedure(
            SETUP_RESHARDING_SWITCH, shard_id, source_group_id, destn_group_id,
            split_value, prune_limit, cmd, update_only
        )
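# For a HASH shard the split point is not supplied by the caller; the code
# above derives it from the shard's lower_bound and the next shard's
# lower_bound (or the largest key present when no next bound exists). Below is
# a minimal sketch of that midpoint computation, assuming the bounds are
# hexadecimal strings of the kind an MD5-based scheme would store.
def hash_split_value_sketch(lower_bound, upper_bound):
    """Return the hexadecimal midpoint between two hexadecimal bounds."""
    width = max(len(lower_bound), len(upper_bound))
    midpoint = (int(lower_bound, 16) + int(upper_bound, 16)) // 2
    return format(midpoint, "X").zfill(width)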
Example #39
0
def _setup_shard_switch_split(shard_id, source_group_id, destination_group_id,
                              split_value, prune_limit, cmd, update_only):
    """Setup the moved shard to map to the new group.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destination_group_id: The ID of the group to which the shard
                                 needs to be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation.
    :param update_only: Only update the state store and skip provisioning.
    """
    #Fetch the Range sharding specification.
    range_sharding_spec, source_shard, shard_mappings, shard_mapping_defn = \
            _services_sharding.verify_and_fetch_shard(shard_id)

    #Disable the old shard
    source_shard.disable()

    #Remove the old shard.
    range_sharding_spec.remove()
    source_shard.remove()

    destination_group = Group.fetch(destination_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destination_group_id, ))
    destn_group_master = MySQLServer.fetch(destination_group.master)
    if destn_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    destn_group_master.connect()

    #Make the destination group as read only to disable updates until the
    #connectors update their caches, thus avoiding inconsistency.
    destn_group_master.read_only = True

    #Add the new shards. Generate new shard IDs for the shard being
    #split and also for the shard that is created as a result of the split.
    new_shard_1 = Shards.add(source_shard.group_id, "DISABLED")
    new_shard_2 = Shards.add(destination_group_id, "DISABLED")

    #Both of the shard mappings associated with this shard_id should
    #be of the same sharding type. Hence it is safe to use one of the
    #shard mappings.
    if shard_mappings[0].type_name == "HASH":
        #In the case of a split involving a HASH sharding scheme,
        #the shard that is split gets a new shard_id, while the shard
        #created by the split gets the newly computed lower_bound and
        #also a new shard_id. Note that the shard that is split retains
        #its original lower_bound.
        HashShardingSpecification.add_hash_split(
            range_sharding_spec.shard_mapping_id, new_shard_1.shard_id,
            range_sharding_spec.lower_bound)
        HashShardingSpecification.add_hash_split(
            range_sharding_spec.shard_mapping_id, new_shard_2.shard_id,
            split_value)
    else:
        #Add the new ranges. Note that the shard being split retains
        #its lower_bound, while the new shard gets the computed split
        #value as its lower_bound.
        RangeShardingSpecification.add(range_sharding_spec.shard_mapping_id,
                                       range_sharding_spec.lower_bound,
                                       new_shard_1.shard_id)
        RangeShardingSpecification.add(range_sharding_spec.shard_mapping_id,
                                       split_value, new_shard_2.shard_id)

    #The sleep ensures that the connectors have refreshed their caches with
    #the new shards that have been added as a result of the split.
    time.sleep(_utils.TTL)

    #The source shard group master would have been marked as read only
    #during the sync. Remove the read_only flag.
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    source_group_master = MySQLServer.fetch(source_group.master)
    if source_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    source_group_master.connect()

    #Kill all the existing connections on the servers
    source_group.kill_connections_on_servers()

    #Allow connections on the source group master
    source_group_master.read_only = False

    #Allow connections on the destination group master
    destn_group_master.read_only = False

    #Setup replication for the new group from the global server
    _group_replication.setup_group_replication \
            (shard_mapping_defn[2], destination_group_id)

    #Enable the split shards
    new_shard_1.enable()
    new_shard_2.enable()

    #Trigger changing the mappings for the shard that was copied
    if not update_only:
        _events.trigger_within_procedure(PRUNE_SHARDS, new_shard_1.shard_id,
                                         new_shard_2.shard_id, prune_limit)
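# The switchover above relies on a simple protocol: hold the affected masters
# read-only while the new shard layout is published, wait one connector cache
# TTL so every connector has re-read it, and only then accept writes again.
# The context manager below is an illustrative restatement of that protocol
# under those assumptions; Fabric performs the same steps inline, as shown
# above, rather than through such a helper.
import contextlib
import time

@contextlib.contextmanager
def frozen_for_resharding_sketch(masters, ttl):
    """Keep the given masters read-only while the shard layout changes."""
    for master in masters:
        master.read_only = True
    try:
        yield
    finally:
        # Give connectors one TTL to refresh their shard caches before
        # re-enabling writes.
        time.sleep(ttl)
        for master in masters:
            master.read_only = False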
Example #40
0
def setup_group_replication(group_master_id, group_slave_id):
    """Sets up replication between the masters of the two groups and
    updates the references to the groups in each other.

    :param group_master_id: The group whose master will act as the master
                            in the replication setup.
    :param group_slave_id: The group whose master will act as the slave
                           in the replication setup.
    """
    group_master = Group.fetch(group_master_id)
    group_slave = Group.fetch(group_slave_id)

    if group_master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_master_id, ))

    if group_slave is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_slave_id, ))

    if group_master.master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % "")

    if group_slave.master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % "")

    #Master is the master of the Global Group. We replicate from here to
    #the masters of all the shard Groups.
    master = MySQLServer.fetch(group_master.master)
    if master is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % \
        (group_master.master, ))

    #Get the master of the shard Group.
    slave = MySQLServer.fetch(group_slave.master)
    if slave is None:
        raise _errors.GroupError \
        (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % \
        (group_slave.master, ))

    if not server_running(master):
        #The server is already down. We cannot connect to it to set up
        #replication.
        raise _errors.GroupError \
        (GROUP_MASTER_NOT_RUNNING % (group_master.group_id, ))

    try:
        master.connect()
    except _errors.DatabaseError as error:
        #Server is not accessible, unable to connect to the server.
        raise _errors.GroupError(
            GROUP_REPLICATION_SERVER_ERROR % (group_master.master, error)
        )

    if not server_running(slave):
        #The server is already down. We cannot connect to it to set up
        #replication.
        raise _errors.GroupError \
            (GROUP_MASTER_NOT_RUNNING % (group_slave.group_id, ))

    try:
        slave.connect()
    except _errors.DatabaseError as error:
        raise _errors.GroupError(
            GROUP_REPLICATION_SERVER_ERROR % (group_slave.master, error)
        )

    _replication.stop_slave(slave, wait=True)

    #clear references to old masters in the slave
    _replication.reset_slave(slave,  clean=True)

    _replication.switch_master(slave, master, master.user, master.passwd)

    _replication.start_slave(slave, wait=True)

    try:
        group_master.add_slave_group_id(group_slave_id)
        group_slave.add_master_group_id(group_master_id)
    except _errors.DatabaseError:
        #If there is an error while adding a reference to
        #the slave group or a master group, it means that
        #the slave group was already added and the error
        #is happening because the group was already registered.
        #Ignore this error.
        pass
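# A short usage sketch for the function above, mirroring how the resharding
# code drives it: the master of the global group replicates to the master of
# every shard group attached to it. The group IDs below are placeholders, not
# identifiers defined elsewhere in this file.
def attach_shard_group_sketch(global_group_id="GLOBAL-GROUP",
                              shard_group_id="SHARD-GROUP-1"):
    # Global master -> shard group master, plus the cross references that
    # setup_group_replication() records on both Group objects.
    setup_group_replication(global_group_id, shard_group_id)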
Example #41
0
def _setup_shard_switch_move(shard_id, source_group_id, destination_group_id,
                             update_only):
    """Setup the moved shard to map to the new group.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destination_group_id: The ID of the group to which the shard
                                needs to be moved.
    :param update_only: Only update the state store and skip provisioning.
    """
    #Fetch the Range sharding specification. When we start implementing
    #heterogeneous sharding schemes, we will need to determine the type of
    #sharding scheme and use that to select the sharding implementation.
    _, source_shard, _, shard_mapping_defn = \
        _services_sharding.verify_and_fetch_shard(shard_id)

    destination_group = Group.fetch(destination_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destination_group_id, ))
    destn_group_master = MySQLServer.fetch(destination_group.master)
    if destn_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    destn_group_master.connect()
    #Set the destination group master to read_only
    destn_group_master.read_only = True

    #Setup replication between the shard group and the global group.
    _group_replication.setup_group_replication \
            (shard_mapping_defn[2], destination_group_id)
    #set the shard to point to the new group.
    source_shard.group_id = destination_group_id
    #Stop the replication between the global server and the original
    #group associated with the shard.
    _group_replication.stop_group_slave\
            (shard_mapping_defn[2], source_group_id,  True)

    #The sleep ensures that the connectors have refreshed their caches with
    #the new shard layout that results from the move.
    time.sleep(_utils.TTL)

    #Reset the read only flag on the source server.
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)

    if not update_only:
        master.connect()
        master.read_only = False
        #Kill all the existing connections on the servers
        source_group.kill_connections_on_servers()
        #allow updates in the destination group master
        destn_group_master.read_only = False
Example #42
0
class TestShardingServices(tests.utils.TestCase):
    def setUp(self):
        """Configure the existing environment
        """
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid": _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(0),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid": _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(1),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)

        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid": _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(2),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add(self.__server_3)

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add(self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid": _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(3),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add(self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid": _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(4),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add(self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid": _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(5),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add(self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

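        # Define a HASH sharding scheme: GROUPID1 acts as the global group and
        # GROUPID2 through GROUPID6 hold the five shards of db1.t1.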
        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.check_xmlrpc_command_result(status, returns=1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID1")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_shard(
            1, "GROUPID2,GROUPID3,"
            "GROUPID4,GROUPID5,GROUPID6", "ENABLED")
        self.check_xmlrpc_command_result(status)

    def tearDown(self):
        """Clean up the existing environment
        """
        tests.utils.cleanup_environment()

    def test_add_shard_cannot_add_when_shards_exist_exception(self):
        status = self.proxy.sharding.add_shard(1, "NON_EXISTENT_GROUP",
                                               "ENABLED")
        self.check_xmlrpc_command_result(status, has_error=True)

    def test_add_shard_invalid_group_exception(self):
        #Remove all the existing shards in the system, since adding a shard
        #is not allowed while shards already exist in the system.
        self.proxy.sharding.disable_shard(1)
        self.proxy.sharding.remove_shard(1)

        self.proxy.sharding.disable_shard(2)
        self.proxy.sharding.remove_shard(2)

        self.proxy.sharding.disable_shard(3)
        self.proxy.sharding.remove_shard(3)

        self.proxy.sharding.disable_shard(4)
        self.proxy.sharding.remove_shard(4)

        self.proxy.sharding.disable_shard(5)
        self.proxy.sharding.remove_shard(5)

        #Use an invalid group ID (WRONG_GROUP)
        status = self.proxy.sharding.add_shard(4, "WRONG_GROUP", "ENABLED")
        self.check_xmlrpc_command_result(status, has_error=True)

    def test_add_shard_invalid_state_exception(self):
        #Remove all the existing shards in the system, since adding a shard
        #is not allowed while shards already exist in the system.
        self.proxy.sharding.disable_shard(1)
        self.proxy.sharding.remove_shard(1)

        self.proxy.sharding.disable_shard(2)
        self.proxy.sharding.remove_shard(2)

        self.proxy.sharding.disable_shard(3)
        self.proxy.sharding.remove_shard(3)

        self.proxy.sharding.disable_shard(4)
        self.proxy.sharding.remove_shard(4)

        self.proxy.sharding.disable_shard(5)
        self.proxy.sharding.remove_shard(5)

        #WRONG_STATE is an invalid description of the shard state.
        status = self.proxy.sharding.add_shard(4, "GROUP4", "WRONG_STATE")
        self.check_xmlrpc_command_result(status, has_error=True)

    def test_add_shard_invalid_shard_mapping(self):
        status = self.proxy.sharding.add_shard(25000, "GROUPID4", "ENABLED")
        self.check_xmlrpc_command_result(status, has_error=True)

    def test_remove_shard_mapping_shards_exist(self):
        #It is not an error to remove tables from a shard mapping.
        status = self.proxy.sharding.remove_table("db1.t1")
        self.check_xmlrpc_command_result(status)

    def test_remove_sharding_specification(self):
        status = self.proxy.sharding.disable_shard(1)
        self.check_xmlrpc_command_result(status)
        status = self.proxy.sharding.remove_shard(1)
        self.check_xmlrpc_command_result(status)
        status = self.proxy.sharding.disable_shard(2)
        self.check_xmlrpc_command_result(status)
        status = self.proxy.sharding.remove_shard(2)
        self.check_xmlrpc_command_result(status)
        status = self.proxy.sharding.disable_shard(3)
        self.check_xmlrpc_command_result(status)
        status = self.proxy.sharding.remove_shard(3)
        self.check_xmlrpc_command_result(status)
        status = self.proxy.sharding.disable_shard(4)
        self.check_xmlrpc_command_result(status)
        status = self.proxy.sharding.remove_shard(4)
        self.check_xmlrpc_command_result(status)
        status = self.proxy.sharding.disable_shard(5)
        self.check_xmlrpc_command_result(status)
        status = self.proxy.sharding.remove_shard(5)
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.lookup_servers("db1.t1", 500, "LOCAL")
        self.check_xmlrpc_simple(status, {}, has_error=True)

    def test_remove_sharding_specification_exception(self):
        status = self.proxy.sharding.remove_shard(1)
        self.check_xmlrpc_command_result(status, has_error=True)

    def test_lookup_shard_mapping(self):
        status = self.proxy.sharding.lookup_table("db1.t1")
        self.check_xmlrpc_simple(
            status, {
                "mapping_id": 1,
                "table_name": "db1.t1",
                "column_name": "userID1",
                "type_name": "HASH",
                "global_group": "GROUPID1"
            })

    def test_list(self):
        status = self.proxy.sharding.list_tables("HASH")
        self.check_xmlrpc_simple(
            status, {
                "mapping_id": 1,
                "table_name": "db1.t1",
                "column_name": "userID1",
                "type_name": "HASH",
                "global_group": "GROUPID1"
            })

    def test_list_exception(self):
        status = self.proxy.sharding.list_tables("Wrong")
        self.check_xmlrpc_simple(status, {}, has_error=True)

    def test_lookup(self):
        shard_hit_count = {}
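        # Count how many of the lookups over keys 1..199 resolve to each
        # shard server, to check that the hash spreads keys across all shards.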

        for i in range(1, 200):
            status = self.proxy.sharding.lookup_servers("db1.t1", i, "LOCAL")
            for info in self.check_xmlrpc_iter(status):
                uuid = info['server_uuid']
                if uuid not in shard_hit_count:
                    shard_hit_count[uuid] = 0
                shard_hit_count[uuid] += 1

        self.assertEqual(len(shard_hit_count), 5)
        for key, count in shard_hit_count.items():
            self.assertTrue(count > 0)

    def test_lookup_disabled_exception(self):
        #Since with hashing it is difficult to guess which shard a value will
        #fall into, disable all the shards.
        self.proxy.sharding.disable_shard(1)
        self.proxy.sharding.disable_shard(2)
        self.proxy.sharding.disable_shard(3)
        self.proxy.sharding.disable_shard(4)
        self.proxy.sharding.disable_shard(5)
        status = self.proxy.sharding.lookup_servers("db1.t1", 500, "LOCAL")
        self.check_xmlrpc_simple(status, {}, has_error=True)

    def test_lookup_wrong_table_exception(self):
        status = self.proxy.sharding.lookup_servers("Wrong", 500, "LOCAL")
        self.check_xmlrpc_simple(status, {}, has_error=True)

    def test_list_shard_mappings(self):
        status = self.proxy.sharding.list_definitions()
        self.check_xmlrpc_simple(status, {
            'mapping_id': 1,
            'type_name': "HASH",
            'global_group_id': "GROUPID1",
        },
                                 rowcount=1)

    def test_enable_shard_exception(self):
        status = self.proxy.sharding.enable_shard(25000)
        self.check_xmlrpc_command_result(status, has_error=True)

    def test_disable_shard_exception(self):
        status = self.proxy.sharding.disable_shard(25000)
        self.check_xmlrpc_command_result(status, has_error=True)
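The error-path tests above repeat the same disable-then-remove sequence for shards 1 through 5 before exercising an invalid call. As a minimal sketch (the helper name and the proxy argument are assumptions, not part of the original test case), that cleanup could be factored into a single function:

def _remove_all_shards(proxy, shard_ids=range(1, 6)):
    """Disable and then remove every shard (hypothetical helper).

    Assumes the XML-RPC proxy.sharding interface used by the tests above.
    """
    for shard_id in shard_ids:
        # A shard has to be disabled before it can be removed.
        proxy.sharding.disable_shard(shard_id)
        proxy.sharding.remove_shard(shard_id)

A test would then call _remove_all_shards(self.proxy) once instead of ten paired calls.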
Example #43
0
def _setup_shard_switch_move(shard_id, source_group_id, destination_group_id,
                             update_only):
    """Setup the moved shard to map to the new group.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destination_group_id: The ID of the group to which the shard
                                needs to be moved.
    :param update_only: Only update the state store and skip provisioning.
    """
    #Fetch the Range sharding specification. When we start implementing
    #heterogeneous sharding schemes, we need to find out the type of
    #sharding scheme and use that to select the sharding implementation.
    _, source_shard, _, shard_mapping_defn = \
        _services_sharding.verify_and_fetch_shard(shard_id)

    destination_group = Group.fetch(destination_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destination_group_id, ))
    destn_group_master = MySQLServer.fetch(destination_group.master)
    if destn_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    destn_group_master.connect()
    #Set the destination group master to read_only
    destn_group_master.read_only = True

    #Setup replication between the shard group and the global group.
    _group_replication.setup_group_replication \
            (shard_mapping_defn[2], destination_group_id)
    #set the shard to point to the new group.
    source_shard.group_id = destination_group_id
    #Stop the replication between the global server and the original
    #group associated with the shard.
    _group_replication.stop_group_slave\
            (shard_mapping_defn[2], source_group_id,  True)

    #The sleep ensures that the connectors have refreshed their caches with
    #the updated shard information before writes are re-enabled.
    time.sleep(_utils.TTL)

    #Reset the read only flag on the source server.
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)

    if not update_only:
        master.connect()
        master.read_only = False
        #Kill all the existing connections on the servers
        source_group.kill_connections_on_servers()
        #allow updates in the destination group master
        destn_group_master.read_only = False
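The switch-over above follows a strict ordering: fence the destination master as read-only, attach the destination group as a slave of the global group, repoint the shard metadata, detach the old group, wait one connector TTL, and only then re-enable writes. A rough, hypothetical sketch of that ordering (the attach/detach/kill callables and the ttl value stand in for the Fabric helpers and _utils.TTL used above):

import time

def switch_moved_shard(shard, source_master, destn_master, destination_group_id,
                       attach_to_global, detach_from_global,
                       kill_source_connections, ttl):
    """Outline of the move switch-over; all arguments are assumptions."""
    destn_master.read_only = True           # fence writes on the new master
    attach_to_global()                      # global-group updates now reach the new group
    shard.group_id = destination_group_id   # repoint the shard metadata
    detach_from_global()                    # the old group stops receiving global updates
    time.sleep(ttl)                         # let connectors refresh their routing caches
    source_master.read_only = False         # release the fence on the old master
    kill_source_connections()               # drop stale client connections to the old group
    destn_master.read_only = False          # writes may now flow to the new master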
Example #44
0
class TestHashSplitGlobal(unittest.TestCase):
    """Contains unit tests for testing the shard split operation and for
    verifying that the global server configuration remains intact after
    the shard split.
    """

    def assertStatus(self, status, expect):
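        # Collect any diagnosis messages reported by the job steps and check
        # that the final step carries the expected success value.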
        items = (item['diagnosis'] for item in status[1] if item['diagnosis'])
        self.assertEqual(status[1][-1]["success"], expect, "\n".join(items))

    def setUp(self):
        """Configure the existing environment
        """
        tests.utils.cleanup_environment()
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid" :  _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__server_1.connect()

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid" :  _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_2.exec_stmt("CREATE DATABASE db2")
        self.__server_2.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db3")
        self.__server_2.exec_stmt("CREATE DATABASE db3")
        self.__server_2.exec_stmt("CREATE TABLE db3.t3"
                                  "(userID INT, Department INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db3.t3 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid" :  _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(2),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add( self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_3.exec_stmt("CREATE DATABASE db2")
        self.__server_3.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db3")
        self.__server_3.exec_stmt("CREATE DATABASE db3")
        self.__server_3.exec_stmt("CREATE TABLE db3.t3"
                                  "(userID INT, Department INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db3.t3 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add( self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid" :  _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(3),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_4.exec_stmt("CREATE DATABASE db2")
        self.__server_4.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db3")
        self.__server_4.exec_stmt("CREATE DATABASE db3")
        self.__server_4.exec_stmt("CREATE TABLE db3.t3"
                                  "(userID INT, Department INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db3.t3 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add( self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid" :  _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(4),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_5.exec_stmt("CREATE DATABASE db2")
        self.__server_5.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db3")
        self.__server_5.exec_stmt("CREATE DATABASE db3")
        self.__server_5.exec_stmt("CREATE TABLE db3.t3"
                                  "(userID INT, Department INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db3.t3 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add( self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid" :  _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(5),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add( self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_define_shard_mapping).")
        self.assertEqual(status[2], 1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")
        status = self.proxy.sharding.add_table(1, "db2.t2", "userID")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")
        status = self.proxy.sharding.add_table(1, "db3.t3", "userID")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_shard(1, "GROUPID2,GROUPID3,GROUPID4,GROUPID5", "ENABLED")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard).")

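        # Each shard server was loaded with the full 1..1000 data set above, so
        # prune away the rows that do not hash to the shard owned by its group.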
        status = self.proxy.sharding.prune_shard("db1.t1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_prune_shard_tables).")
        status = self.proxy.sharding.prune_shard("db2.t2")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_prune_shard_tables).")
        status = self.proxy.sharding.prune_shard("db3.t3")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_prune_shard_tables).")

    def test_split_shard_1(self):
        '''Test the split of shard 1 and the global server configuration
        afterwards. The test splits shard 1 between GROUPID2 and GROUPID6.
        Once the split is done, it verifies the row counts on GROUPID6 to
        check that the group received tuples from the original group. It then
        inserts rows on the global group and verifies that all the shards
        receive the inserted tuples.
        '''
        row_cnt_shard_db1_t1 = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_db2_t2 = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_db3_t3 = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM db3.t3",
                    {"fetch" : True}
                )
        row_cnt_shard_db1_t1 = int(row_cnt_shard_db1_t1[0][0])
        row_cnt_shard_db2_t2 = int(row_cnt_shard_db2_t2[0][0])
        row_cnt_shard_db3_t3 = int(row_cnt_shard_db3_t3[0][0])

        status = self.proxy.sharding.split_shard("1", "GROUPID6")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_prune_shard_tables_after_split).")

        self.__server_6.connect()

        row_cnt_shard_after_split_1_db1_t1 = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db2_t2 = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db3_t3 = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM db3.t3",
                    {"fetch" : True}
                )

        row_cnt_shard_after_split_1_db1_t1_a = \
            int(row_cnt_shard_after_split_1_db1_t1[0][0])
        row_cnt_shard_after_split_1_db2_t2_a = \
            int(row_cnt_shard_after_split_1_db2_t2[0][0])
        row_cnt_shard_after_split_1_db3_t3_a = \
            int(row_cnt_shard_after_split_1_db3_t3[0][0])

        row_cnt_shard_after_split_1_db1_t1_b = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db2_t2_b = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db3_t3_b = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db3.t3",
                    {"fetch" : True}
                )

        row_cnt_shard_after_split_1_db1_t1_b = \
            int(row_cnt_shard_after_split_1_db1_t1_b[0][0])
        row_cnt_shard_after_split_1_db2_t2_b = \
            int(row_cnt_shard_after_split_1_db2_t2_b[0][0])
        row_cnt_shard_after_split_1_db3_t3_b = \
            int(row_cnt_shard_after_split_1_db3_t3_b[0][0])

        self.assertTrue(
            row_cnt_shard_db1_t1 ==
            (row_cnt_shard_after_split_1_db1_t1_a +
            row_cnt_shard_after_split_1_db1_t1_b)
        )
        self.assertTrue(
            row_cnt_shard_db2_t2 ==
            (row_cnt_shard_after_split_1_db2_t2_a +
            row_cnt_shard_after_split_1_db2_t2_b)
        )
        self.assertTrue(
            row_cnt_shard_db3_t3 ==
            (row_cnt_shard_after_split_1_db3_t3_a +
            row_cnt_shard_after_split_1_db3_t3_b)
        )

        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_split_shard_2(self):
        '''Test the split of shard 2 and the global server configuration
        afterwards. The test splits shard 2 between GROUPID3 and GROUPID6.
        Once the split is done, it verifies the row counts on GROUPID6 to
        check that the group received tuples from the original group. It then
        inserts rows on the global group and verifies that all the shards
        receive the inserted tuples.
        '''
        row_cnt_shard_db1_t1 = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_db2_t2 = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_db3_t3 = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM db3.t3",
                    {"fetch" : True}
                )
        row_cnt_shard_db1_t1 = int(row_cnt_shard_db1_t1[0][0])
        row_cnt_shard_db2_t2 = int(row_cnt_shard_db2_t2[0][0])
        row_cnt_shard_db3_t3 = int(row_cnt_shard_db3_t3[0][0])

        status = self.proxy.sharding.split_shard("2", "GROUPID6")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_prune_shard_tables_after_split).")

        self.__server_6.connect()

        row_cnt_shard_after_split_1_db1_t1 = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db2_t2 = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db3_t3 = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM db3.t3",
                    {"fetch" : True}
                )

        row_cnt_shard_after_split_1_db1_t1_a = \
            int(row_cnt_shard_after_split_1_db1_t1[0][0])
        row_cnt_shard_after_split_1_db2_t2_a = \
            int(row_cnt_shard_after_split_1_db2_t2[0][0])
        row_cnt_shard_after_split_1_db3_t3_a = \
            int(row_cnt_shard_after_split_1_db3_t3[0][0])

        row_cnt_shard_after_split_1_db1_t1_b = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db2_t2_b = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db3_t3_b = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db3.t3",
                    {"fetch" : True}
                )

        row_cnt_shard_after_split_1_db1_t1_b = \
            int(row_cnt_shard_after_split_1_db1_t1_b[0][0])
        row_cnt_shard_after_split_1_db2_t2_b = \
            int(row_cnt_shard_after_split_1_db2_t2_b[0][0])
        row_cnt_shard_after_split_1_db3_t3_b = \
            int(row_cnt_shard_after_split_1_db3_t3_b[0][0])

        self.assertTrue(
            row_cnt_shard_db1_t1 ==
            (row_cnt_shard_after_split_1_db1_t1_a +
            row_cnt_shard_after_split_1_db1_t1_b)
        )
        self.assertTrue(
            row_cnt_shard_db2_t2 ==
            (row_cnt_shard_after_split_1_db2_t2_a +
            row_cnt_shard_after_split_1_db2_t2_b)
        )
        self.assertTrue(
            row_cnt_shard_db3_t3 ==
            (row_cnt_shard_after_split_1_db3_t3_a +
            row_cnt_shard_after_split_1_db3_t3_b)
        )

        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_split_shard_3(self):
        '''Test the split of shard 3 and the global server configuration
        afterwards. The test splits shard 3 between GROUPID4 and GROUPID6.
        Once the split is done, it verifies the row counts on GROUPID6 to
        check that the group received tuples from the original group. It then
        inserts rows on the global group and verifies that all the shards
        receive the inserted tuples.
        '''
        row_cnt_shard_db1_t1 = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_db2_t2 = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_db3_t3 = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM db3.t3",
                    {"fetch" : True}
                )
        row_cnt_shard_db1_t1 = int(row_cnt_shard_db1_t1[0][0])
        row_cnt_shard_db2_t2 = int(row_cnt_shard_db2_t2[0][0])
        row_cnt_shard_db3_t3 = int(row_cnt_shard_db3_t3[0][0])

        status = self.proxy.sharding.split_shard("3", "GROUPID6")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_prune_shard_tables_after_split).")

        self.__server_6.connect()

        row_cnt_shard_after_split_1_db1_t1 = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db2_t2 = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db3_t3 = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM db3.t3",
                    {"fetch" : True}
                )

        row_cnt_shard_after_split_1_db1_t1_a = \
            int(row_cnt_shard_after_split_1_db1_t1[0][0])
        row_cnt_shard_after_split_1_db2_t2_a = \
            int(row_cnt_shard_after_split_1_db2_t2[0][0])
        row_cnt_shard_after_split_1_db3_t3_a = \
            int(row_cnt_shard_after_split_1_db3_t3[0][0])

        row_cnt_shard_after_split_1_db1_t1_b = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db2_t2_b = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db3_t3_b = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db3.t3",
                    {"fetch" : True}
                )

        row_cnt_shard_after_split_1_db1_t1_b = \
            int(row_cnt_shard_after_split_1_db1_t1_b[0][0])
        row_cnt_shard_after_split_1_db2_t2_b = \
            int(row_cnt_shard_after_split_1_db2_t2_b[0][0])
        row_cnt_shard_after_split_1_db3_t3_b = \
            int(row_cnt_shard_after_split_1_db3_t3_b[0][0])

        self.assertTrue(
            row_cnt_shard_db1_t1 ==
            (row_cnt_shard_after_split_1_db1_t1_a +
            row_cnt_shard_after_split_1_db1_t1_b)
        )
        self.assertTrue(
            row_cnt_shard_db2_t2 ==
            (row_cnt_shard_after_split_1_db2_t2_a +
            row_cnt_shard_after_split_1_db2_t2_b)
        )
        self.assertTrue(
            row_cnt_shard_db3_t3 ==
            (row_cnt_shard_after_split_1_db3_t3_a +
            row_cnt_shard_after_split_1_db3_t3_b)
        )

        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_split_shard_4(self):
        '''Test the split of shard 4 and the global server configuration
        afterwards. The test splits shard 4 between GROUPID5 and GROUPID6.
        Once the split is done, it verifies the row counts on GROUPID6 to
        check that the group received tuples from the original group. It then
        inserts rows on the global group and verifies that all the shards
        receive the inserted tuples.
        '''
        row_cnt_shard_db1_t1 = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_db2_t2 = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_db3_t3 = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db3.t3",
                    {"fetch" : True}
                )
        row_cnt_shard_db1_t1 = int(row_cnt_shard_db1_t1[0][0])
        row_cnt_shard_db2_t2 = int(row_cnt_shard_db2_t2[0][0])
        row_cnt_shard_db3_t3 = int(row_cnt_shard_db3_t3[0][0])

        status = self.proxy.sharding.split_shard("4", "GROUPID6")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_prune_shard_tables_after_split).")

        self.__server_6.connect()

        row_cnt_shard_after_split_1_db1_t1 = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db2_t2 = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db3_t3 = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db3.t3",
                    {"fetch" : True}
                )

        row_cnt_shard_after_split_1_db1_t1_a = \
            int(row_cnt_shard_after_split_1_db1_t1[0][0])
        row_cnt_shard_after_split_1_db2_t2_a = \
            int(row_cnt_shard_after_split_1_db2_t2[0][0])
        row_cnt_shard_after_split_1_db3_t3_a = \
            int(row_cnt_shard_after_split_1_db3_t3[0][0])

        row_cnt_shard_after_split_1_db1_t1_b = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db2_t2_b = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_split_1_db3_t3_b = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db3.t3",
                    {"fetch" : True}
                )

        row_cnt_shard_after_split_1_db1_t1_b = \
            int(row_cnt_shard_after_split_1_db1_t1_b[0][0])
        row_cnt_shard_after_split_1_db2_t2_b = \
            int(row_cnt_shard_after_split_1_db2_t2_b[0][0])
        row_cnt_shard_after_split_1_db3_t3_b = \
            int(row_cnt_shard_after_split_1_db3_t3_b[0][0])

        self.assertTrue(
            row_cnt_shard_db1_t1 ==
            (row_cnt_shard_after_split_1_db1_t1_a +
            row_cnt_shard_after_split_1_db1_t1_b)
        )
        self.assertTrue(
            row_cnt_shard_db2_t2 ==
            (row_cnt_shard_after_split_1_db2_t2_a +
            row_cnt_shard_after_split_1_db2_t2_b)
        )
        self.assertTrue(
            row_cnt_shard_db3_t3 ==
            (row_cnt_shard_after_split_1_db3_t3_a +
            row_cnt_shard_after_split_1_db3_t3_b)
        )
        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_hash_dump(self):
        """Test the dump of the HASH sharding information for the table.
        """
        shardinginformation_1 = [0, 0, 0,
            [['db1', 't1', 'userID',
            'E2996A7B8509367020B55A4ACD2AE46A',
            '1', 'HASH', 'GROUPID2', 'GROUPID1'],
            ['db1', 't1', 'userID',
            'E88F547DF45C99F27646C76316FB21DF',
            '2', 'HASH', 'GROUPID3', 'GROUPID1'],
            ['db1', 't1', 'userID',
            '7A2E76448FF04233F3851A492BEF1090',
            '3', 'HASH', 'GROUPID4', 'GROUPID1'],
            ['db1', 't1', 'userID',
            '97427AA63E300F56536710F5D73A35FA',
            '4', 'HASH', 'GROUPID5', 'GROUPID1']]]
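        # Each row above lists database, table, column, hash boundary,
        # shard_id, sharding type, shard group and global group.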
        self.assertEqual(
            self.proxy.dump.sharding_information(0, "db1.t1"),
            shardinginformation_1
        )

    def tearDown(self):
        """Clean up the existing environment
        """
        self.proxy.sharding.disable_shard(1)
        self.proxy.sharding.remove_shard(1)

        self.proxy.sharding.disable_shard(2)
        self.proxy.sharding.remove_shard(2)

        self.proxy.sharding.disable_shard(3)
        self.proxy.sharding.remove_shard(3)

        self.proxy.sharding.disable_shard(4)
        self.proxy.sharding.remove_shard(4)

        self.proxy.sharding.disable_shard(5)
        self.proxy.sharding.remove_shard(5)

        tests.utils.cleanup_environment()
        tests.utils.teardown_xmlrpc(self.manager, self.proxy)
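Each split test above verifies the same invariant: for every sharded table, the row count before the split equals the sum of the rows left on the original group and the rows moved to GROUPID6. A hedged sketch of a helper expressing that check (the helper itself is an assumption; it only reuses the exec_stmt API shown above):

def assert_split_preserved_rows(test, old_server, new_server, tables, counts_before):
    """Check that a shard split neither lost nor duplicated rows (hypothetical)."""
    for table in tables:
        query = "SELECT COUNT(*) FROM %s" % table
        old_count = int(old_server.exec_stmt(query, {"fetch": True})[0][0])
        new_count = int(new_server.exec_stmt(query, {"fetch": True})[0][0])
        # The original rows must be partitioned between the two shard servers.
        test.assertEqual(counts_before[table], old_count + new_count)

test_split_shard_1, for example, could record the counts for db1.t1, db2.t2 and db3.t3 before the split and make one call with the original and the GROUPID6 server after it.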
Example #45
0
class TestShardingPrune(unittest.TestCase):

    def assertStatus(self, status, expect):
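        # Collect any diagnosis messages reported by the job steps and check
        # that the final step carries the expected success value.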
        items = (item['diagnosis'] for item in status[1] if item['diagnosis'])
        self.assertEqual(status[1][-1]["success"], expect, "\n".join(items))

    def setUp(self):
        """Creates the following topology for testing,

        GROUPID1 - Global Group
        GROUPID2 - shard 1
        GROUPID3 - shard 2
        GROUPID4 - shard 3
        GROUPID5 - shard 4
        GROUPID6 - shard 5

        Each group contains a single server.
        """
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid" :  _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid" :  _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 601):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))


        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid" :  _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(2),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add( self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 601):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add( self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid" :  _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(3),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 601):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add( self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid" :  _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(4),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 601):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add( self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid" :  _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(5),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)
        self.__server_6.connect()
        self.__server_6.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_6.exec_stmt("CREATE DATABASE db1")
        self.__server_6.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 601):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add( self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        status = self.proxy.sharding.create_definition("RANGE", "GROUPID1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_define_shard_mapping).")
        self.assertEqual(status[2], 1)
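        # GROUPID1 acts as the global group for this shard mapping; the
        # remaining groups will each host one RANGE shard of db1.t1.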

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_shard(
            1,
            "GROUPID2/1,GROUPID3/101,GROUPID4/201,"
            "GROUPID5/301,GROUPID6/401",
            "ENABLED"
        )
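        # Each entry in the shard specification above has the form
        # GROUP_ID/LOWER_BOUND, so GROUPID2 serves keys from 1, GROUPID3
        # from 101, GROUPID4 from 201, GROUPID5 from 301 and GROUPID6
        # from 401 upwards.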
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard).")

    def test_prune_shard(self):
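        # Pruning should delete every row that falls outside a shard's key
        # range, so the five RANGE shards end up holding userIDs 1-100,
        # 101-200, 201-300, 301-400 and 401-600 respectively; the checks
        # below verify the row counts and the MIN/MAX boundaries per shard.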
        status = self.proxy.sharding.prune_shard("db1.t1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_prune_shard_tables).")

        status = self.proxy.sharding.lookup_servers("db1.t1", 1,  "LOCAL")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        obtained_server_list = status[2]
        shard_uuid = obtained_server_list[0][0]
        shard_server = MySQLServer.fetch(shard_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt(
                                    "SELECT COUNT(*) FROM db1.t1",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 100)
        rows = shard_server.exec_stmt(
                                    "SELECT MAX(userID) FROM db1.t1",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 100)
        rows = shard_server.exec_stmt(
                                    "SELECT MIN(userID) FROM db1.t1",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 1)

        status = self.proxy.sharding.lookup_servers("db1.t1", 101,  "LOCAL")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        obtained_server_list = status[2]
        shard_uuid = obtained_server_list[0][0]
        shard_server = MySQLServer.fetch(shard_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt(
                                    "SELECT COUNT(*) FROM db1.t1",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 100)
        rows = shard_server.exec_stmt(
                                    "SELECT MAX(userID) FROM db1.t1",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 200)
        rows = shard_server.exec_stmt(
                                    "SELECT MIN(userID) FROM db1.t1",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 101)

        status = self.proxy.sharding.lookup_servers("db1.t1", 202,  "LOCAL")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        obtained_server_list = status[2]
        shard_uuid = obtained_server_list[0][0]
        shard_server = MySQLServer.fetch(shard_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt(
                                    "SELECT COUNT(*) FROM db1.t1",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 100)
        rows = shard_server.exec_stmt(
                                    "SELECT MAX(userID) FROM db1.t1",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 300)
        rows = shard_server.exec_stmt(
                                    "SELECT MIN(userID) FROM db1.t1",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 201)

        status = self.proxy.sharding.lookup_servers("db1.t1", 303,  "LOCAL")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        obtained_server_list = status[2]
        shard_uuid = obtained_server_list[0][0]
        shard_server = MySQLServer.fetch(shard_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt(
                                    "SELECT COUNT(*) FROM db1.t1",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 100)
        rows = shard_server.exec_stmt(
                                    "SELECT MAX(userID) FROM db1.t1",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 400)
        rows = shard_server.exec_stmt(
                                    "SELECT MIN(userID) FROM db1.t1",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 301)

        status = self.proxy.sharding.lookup_servers("db1.t1", 404,  "LOCAL")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        obtained_server_list = status[2]
        shard_uuid = obtained_server_list[0][0]
        shard_server = MySQLServer.fetch(shard_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt(
                                    "SELECT COUNT(*) FROM db1.t1",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 200)
        rows = shard_server.exec_stmt(
                                    "SELECT MAX(userID) FROM db1.t1",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 600)
        rows = shard_server.exec_stmt(
                                    "SELECT MIN(userID) FROM db1.t1",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 401)

    def tearDown(self):
        self.__server_2.exec_stmt("DROP TABLE db1.t1")
        self.__server_2.exec_stmt("DROP DATABASE db1")
        self.__server_3.exec_stmt("DROP TABLE db1.t1")
        self.__server_3.exec_stmt("DROP DATABASE db1")
        self.__server_4.exec_stmt("DROP TABLE db1.t1")
        self.__server_4.exec_stmt("DROP DATABASE db1")
        self.__server_5.exec_stmt("DROP TABLE db1.t1")
        self.__server_5.exec_stmt("DROP DATABASE db1")
        self.__server_6.exec_stmt("DROP TABLE db1.t1")
        self.__server_6.exec_stmt("DROP DATABASE db1")

        status = self.proxy.sharding.disable_shard("1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_disable_shard).")

        status = self.proxy.sharding.disable_shard("2")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_disable_shard).")

        status = self.proxy.sharding.disable_shard("3")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_disable_shard).")

        status = self.proxy.sharding.disable_shard("4")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_disable_shard).")

        status = self.proxy.sharding.disable_shard("5")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_disable_shard).")

        status = self.proxy.sharding.remove_shard("1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_remove_shard).")

        status = self.proxy.sharding.remove_shard("2")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_remove_shard).")

        status = self.proxy.sharding.remove_shard("3")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_remove_shard).")

        status = self.proxy.sharding.remove_shard("4")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_remove_shard).")

        status = self.proxy.sharding.remove_shard("5")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_remove_shard).")

        status = self.proxy.sharding.remove_table("db1.t1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_remove_shard_mapping).")

        status = self.proxy.sharding.remove_definition("1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_remove_shard_mapping_defn).")

        self.proxy.group.demote("GROUPID1")
        self.proxy.group.demote("GROUPID2")
        self.proxy.group.demote("GROUPID3")
        self.proxy.group.demote("GROUPID4")
        self.proxy.group.demote("GROUPID5")
        self.proxy.group.demote("GROUPID6")

        for group_id in ("GROUPID1", "GROUPID2", "GROUPID3",
            "GROUPID4", "GROUPID5", "GROUPID6"):
            status = self.proxy.group.lookup_servers(group_id)
            self.assertEqual(status[0], True)
            self.assertEqual(status[1], "")
            obtained_server_list = status[2]
            status = \
                self.proxy.group.remove(
                    group_id, obtained_server_list[0]["server_uuid"]
                )
            self.assertStatus(status, _executor.Job.SUCCESS)
            self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
            self.assertEqual(status[1][-1]["description"],
                             "Executed action (_remove_server).")
            status = self.proxy.group.destroy(group_id)
            self.assertStatus(status, _executor.Job.SUCCESS)
            self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
            self.assertEqual(status[1][-1]["description"],
                             "Executed action (_destroy_group).")

        tests.utils.cleanup_environment()
        tests.utils.teardown_xmlrpc(self.manager, self.proxy)
Example #46
    def test_switchover_with_no_master(self):
        """Ensure that a switchover/failover happens when masters in the
        shard and global groups are dead.
        """
        # Check that a shard group has its master pointing to the master
        # in the global group.
        global_group = Group.fetch("GROUPID1")
        shard_group = Group.fetch("GROUPID2")
        other_shard_group = Group.fetch("GROUPID3")
        global_master = fetch_test_server(global_group.master)
        global_master.connect()
        shard_master = fetch_test_server(shard_group.master)
        shard_master.connect()
        other_shard_master = fetch_test_server(other_shard_group.master)
        other_shard_master.connect()
        self.assertEqual(_replication.slave_has_master(shard_master),
                         str(global_group.master))
        self.assertEqual(_replication.slave_has_master(other_shard_master),
                         str(global_group.master))

        # Demote the master in the global group and check that a
        # shard group points to None.
        global_group = Group.fetch("GROUPID1")
        self.assertEqual(global_group.master, global_master.uuid)
        self.proxy.group.demote("GROUPID1")
        global_group = Group.fetch("GROUPID1")
        self.assertEqual(global_group.master, None)
        self.assertEqual(_replication.slave_has_master(shard_master), None)
        self.assertEqual(_replication.slave_has_master(other_shard_master),
                         None)

        # Demote the master in a shard group and promote the master
        # in the global group.
        global_group = Group.fetch("GROUPID1")
        self.assertEqual(global_group.master, None)
        shard_group = Group.fetch("GROUPID2")
        self.assertEqual(shard_group.master, shard_master.uuid)
        self.proxy.group.demote("GROUPID2")
        shard_group = Group.fetch("GROUPID2")
        self.assertEqual(shard_group.master, None)
        self.proxy.group.promote("GROUPID1", str(global_master.uuid))
        global_group = Group.fetch("GROUPID1")
        self.assertEqual(global_group.master, global_master.uuid)
        self.assertEqual(_replication.slave_has_master(shard_master), None)
        self.assertEqual(_replication.slave_has_master(other_shard_master),
                         str(global_group.master))

        # Promote the master in the previous shard group and check that
        # everything is back to normal.
        global_group = Group.fetch("GROUPID1")
        self.assertEqual(global_group.master, global_master.uuid)
        self.assertEqual(_replication.slave_has_master(shard_master), None)
        shard_group = Group.fetch("GROUPID2")
        self.assertEqual(shard_group.master, None)
        self.proxy.group.promote("GROUPID2", str(shard_master.uuid))
        self.assertEqual(_replication.slave_has_master(shard_master),
                         str(global_group.master))
        self.assertEqual(_replication.slave_has_master(other_shard_master),
                         str(global_group.master))
        shard_group = Group.fetch("GROUPID2")
        self.assertEqual(shard_group.master, shard_master.uuid)

        # Demote the master in the global group, check that a shard group
        # points to None, promote it again and check that everything is back
        # to normal.
        global_group = Group.fetch("GROUPID1")
        self.assertEqual(global_group.master, global_master.uuid)
        shard_group = Group.fetch("GROUPID2")
        self.assertEqual(shard_group.master, shard_master.uuid)
        self.proxy.group.demote("GROUPID1")
        global_group = Group.fetch("GROUPID1")
        self.assertEqual(global_group.master, None)
        self.assertEqual(_replication.slave_has_master(shard_master), None)
        self.proxy.group.promote("GROUPID1", str(global_master.uuid))
        global_group = Group.fetch("GROUPID1")
        self.assertEqual(global_group.master, global_master.uuid)
        self.assertEqual(_replication.slave_has_master(shard_master),
                         str(global_group.master))
        self.assertEqual(_replication.slave_has_master(other_shard_master),
                         str(global_group.master))
class TestHashMoveGlobal(unittest.TestCase):
    """Contains unit tests for the shard move operation, verifying that the
    global server configuration remains constant after the shard move
    operation.
    """

    def assertStatus(self, status, expect):
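        # Collect any per-step diagnosis messages so that a failing job
        # reports why it failed alongside the success flag of its last step.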
        items = (item['diagnosis'] for item in status[1] if item['diagnosis'])
        self.assertEqual(status[1][-1]["success"], expect, "\n".join(items))

    def setUp(self):
        """Configure the existing environment
        """
        tests.utils.cleanup_environment()
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid" :  _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__server_1.connect()

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid" :  _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_2.exec_stmt("CREATE DATABASE db2")
        self.__server_2.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid" :  _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(2),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add( self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_3.exec_stmt("CREATE DATABASE db2")
        self.__server_3.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add( self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid" :  _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(3),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_4.exec_stmt("CREATE DATABASE db2")
        self.__server_4.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add( self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid" :  _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(4),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_5.exec_stmt("CREATE DATABASE db2")
        self.__server_5.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add( self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid" :  _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(5),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add( self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_define_shard_mapping).")
        self.assertEqual(status[2], 1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_table(1, "db2.t2", "userID")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_shard(
            1,
            "GROUPID2,GROUPID3,GROUPID4,GROUPID5",
            "ENABLED"
        )
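        # For HASH sharding only the group names are listed: no lower bounds
        # are supplied, since the hash ranges are presumably derived from the
        # shards themselves rather than given explicitly as in the RANGE case.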
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard).")

        status = self.proxy.sharding.prune_shard("db1.t1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_prune_shard_tables).")

    def test_move_shard_1(self):
        '''Test the move of shard 1 and the global server configuration
        after it. The test moves shard 1 from GROUPID2 to GROUPID6.
        After the move is done, it verifies the row count on GROUPID6 to check
        that the group has all the tuples from the earlier group. It then
        inserts rows on the global group and verifies that every group hosting
        a shard has received them. GROUPID2 should not have received the rows
        since the shard has been moved away from it.
        '''
        row_cnt_shard_before_move_db1_t1 = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db2_t2 = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])
        status = self.proxy.sharding.move_shard("1", "GROUPID6")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_setup_resharding_switch).")
        self.__server_6.connect()
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])
        self.assertTrue(
            row_cnt_shard_before_move_db1_t1 == row_cnt_shard_after_move_db1_t1
        )
        self.assertTrue(
            row_cnt_shard_before_move_db2_t2 == row_cnt_shard_after_move_db2_t2
        )
        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
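        # The sleep above gives replication from the global group time to
        # reach the shard groups before the per-shard checks below.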

        try:
            #Verify that the data is not there in the first shard.
            global_table_count = self.__server_2.exec_stmt(
                        "SELECT COUNT(*) FROM global.global_table",
                        {"fetch" : True}
                    )
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass

        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_move_shard_2(self):
        '''Test the move of shard 2 and the global server configuration
        after it. The test moves shard 2 from GROUPID3 to GROUPID6.
        After the move is done, it verifies the row count on GROUPID6 to check
        that the group has all the tuples from the earlier group. It then
        inserts rows on the global group and verifies that every group hosting
        a shard has received them. GROUPID3 should not have received the rows
        since the shard has been moved away from it.
        '''
        row_cnt_shard_before_move_db1_t1 = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db2_t2 = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])
        status = self.proxy.sharding.move_shard("2", "GROUPID6")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_setup_resharding_switch).")
        self.__server_6.connect()
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])
        self.assertTrue(
            row_cnt_shard_before_move_db1_t1 == row_cnt_shard_after_move_db1_t1
        )
        self.assertTrue(
            row_cnt_shard_before_move_db2_t2 == row_cnt_shard_after_move_db2_t2
        )
        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)

        try:
            #Verify that the data is not there in the second shard.
            global_table_count = self.__server_3.exec_stmt(
                        "SELECT COUNT(*) FROM global.global_table",
                        {"fetch" : True}
                    )
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass

        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_move_shard_3(self):
        '''Test the move of shard 3 and the global server configuration
        after it. The test moves shard 3 from GROUPID4 to GROUPID6.
        After the move is done, it verifies the row count on GROUPID6 to check
        that the group has all the tuples from the earlier group. It then
        inserts rows on the global group and verifies that every group hosting
        a shard has received them. GROUPID4 should not have received the rows
        since the shard has been moved away from it.
        '''
        row_cnt_shard_before_move_db1_t1 = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db2_t2 = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])
        status = self.proxy.sharding.move_shard("3", "GROUPID6")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_setup_resharding_switch).")
        self.__server_6.connect()
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])
        self.assertTrue(
            row_cnt_shard_before_move_db1_t1 == row_cnt_shard_after_move_db1_t1
        )
        self.assertTrue(
            row_cnt_shard_before_move_db2_t2 == row_cnt_shard_after_move_db2_t2
        )
        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        try:
            #Verify that the data is not there in the third shard's
            #original group.
            global_table_count = self.__server_4.exec_stmt(
                        "SELECT COUNT(*) FROM global.global_table",
                        {"fetch" : True}
                    )
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard's
        #new group.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_move_shard_4(self):
        '''Test the move of shard 4 and the global server configuration
        after it. The test moves shard 4 from GROUPID5 to GROUPID6.
        After the move is done, it verifies the row count on GROUPID6 to check
        that the group has all the tuples from the earlier group. It then
        inserts rows on the global group and verifies that every group hosting
        a shard has received them. GROUPID5 should not have received the rows
        since the shard has been moved away from it.
        '''
        row_cnt_shard_before_move_db1_t1 = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db2_t2 = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])
        status = self.proxy.sharding.move_shard("4", "GROUPID6")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_setup_resharding_switch).")
        self.__server_6.connect()
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])
        self.assertTrue(
            row_cnt_shard_before_move_db1_t1 == row_cnt_shard_after_move_db1_t1
        )
        self.assertTrue(
            row_cnt_shard_before_move_db2_t2 == row_cnt_shard_after_move_db2_t2
        )
        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        try:
            #Verify that the data is not there in the fourth shard's
            #original group.
            global_table_count = self.__server_5.exec_stmt(
                        "SELECT COUNT(*) FROM global.global_table",
                        {"fetch" : True}
                    )
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard's new
        #group.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def tearDown(self):
        """Clean up the existing environment
        """
        self.proxy.sharding.disable_shard(1)
        self.proxy.sharding.remove_shard(1)

        self.proxy.sharding.disable_shard(2)
        self.proxy.sharding.remove_shard(2)

        self.proxy.sharding.disable_shard(3)
        self.proxy.sharding.remove_shard(3)

        self.proxy.sharding.disable_shard(4)
        self.proxy.sharding.remove_shard(4)

        self.proxy.sharding.disable_shard(5)
        self.proxy.sharding.remove_shard(5)

        tests.utils.cleanup_environment()
        tests.utils.teardown_xmlrpc(self.manager, self.proxy)
Example #48
class TestHashSharding(unittest.TestCase):
    def assertStatus(self, status, expect):
        items = (item['diagnosis'] for item in status[1] if item['diagnosis'])
        self.assertEqual(status[1][-1]["success"], expect, "\n".join(items))

    def setUp(self):
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid" :  _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid" :  _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 501):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))


        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid" :  _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(2),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add( self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 501):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add( self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid" :  _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(3),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 501):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add( self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid" :  _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(4),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 501):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add( self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid" :  _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(5),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)
        self.__server_6.connect()
        self.__server_6.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_6.exec_stmt("CREATE DATABASE db1")
        self.__server_6.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 501):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add( self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        self.__shard_mapping_list = ShardMapping.list_shard_mapping_defn()
        self.assertEqual(self.__shard_mapping_list, [])

        self.__shard_mapping_id_1 = ShardMapping.define("HASH", "GROUPID1")

        self.__shard_mapping_1 = ShardMapping.add(
                                    self.__shard_mapping_id_1,
                                    "db1.t1",
                                    "userID"
                                )
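        # This test case exercises the sharding layer through the
        # ShardMapping, Shards and HashShardingSpecification classes
        # directly, rather than through the XML-RPC proxy used by the
        # other test cases above.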

        self.__shard_1 = Shards.add("GROUPID2")
        self.__shard_2 = Shards.add("GROUPID3")
        self.__shard_3 = Shards.add("GROUPID4")
        self.__shard_4 = Shards.add("GROUPID5")
        self.__shard_5 = Shards.add("GROUPID6")

        self.__hash_sharding_specification_1 = HashShardingSpecification.add(
            self.__shard_mapping_1.shard_mapping_id,
            self.__shard_1.shard_id
        )

        self.__hash_sharding_specification_2 = HashShardingSpecification.add(
            self.__shard_mapping_1.shard_mapping_id,
            self.__shard_2.shard_id
        )

        self.__hash_sharding_specification_3 = HashShardingSpecification.add(
            self.__shard_mapping_1.shard_mapping_id,
            self.__shard_3.shard_id
        )

        self.__hash_sharding_specification_4 = HashShardingSpecification.add(
            self.__shard_mapping_1.shard_mapping_id,
            self.__shard_4.shard_id
        )

        self.__hash_sharding_specification_5 = HashShardingSpecification.add(
            self.__shard_mapping_1.shard_mapping_id,
            self.__shard_5.shard_id
        )
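        # All five shards are registered under the same shard mapping; no
        # explicit bounds are passed, the hash ranges being assigned
        # internally (presumably from each shard's identity).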

    def test_hash_lookup(self):
        """Test the hash sharding lookup.
        """
        shard_1_cnt = 0
        shard_2_cnt = 0
        shard_3_cnt = 0
        shard_4_cnt = 0
        shard_5_cnt = 0

        #Lookup a range of keys to ensure that all the shards are
        #utilized.
        for i in range(0,  1000):
            hash_sharding_spec_1 = HashShardingSpecification.lookup(
                                        i,
                                        self.__shard_mapping_id_1,
                                         "HASH"
                                    )
            if self.__shard_1.shard_id == hash_sharding_spec_1.shard_id:
                shard_1_cnt = shard_1_cnt + 1
            elif self.__shard_2.shard_id == hash_sharding_spec_1.shard_id:
                shard_2_cnt = shard_2_cnt + 1
            elif self.__shard_3.shard_id == hash_sharding_spec_1.shard_id:
                shard_3_cnt = shard_3_cnt + 1
            elif self.__shard_4.shard_id == hash_sharding_spec_1.shard_id:
                shard_4_cnt = shard_4_cnt + 1
            elif self.__shard_5.shard_id == hash_sharding_spec_1.shard_id:
                shard_5_cnt = shard_5_cnt + 1

        #The following will ensure that all five hash shards are used
        #to store the keys and that the keys are not skewed towards one shard.
        self.assertTrue(shard_1_cnt > 0)
        self.assertTrue(shard_2_cnt > 0)
        self.assertTrue(shard_3_cnt > 0)
        self.assertTrue(shard_4_cnt > 0)
        self.assertTrue(shard_5_cnt > 0)

    def test_hash_remove(self):
        """Test the removal of hash shards.
        """
        hash_sharding_specification_1 = HashShardingSpecification.fetch(1)
        hash_sharding_specification_2 = HashShardingSpecification.fetch(2)
        hash_sharding_specification_3 = HashShardingSpecification.fetch(3)
        hash_sharding_specification_4 = HashShardingSpecification.fetch(4)
        hash_sharding_specification_5 = HashShardingSpecification.fetch(5)
        hash_sharding_specification_1.remove()
        hash_sharding_specification_2.remove()
        hash_sharding_specification_3.remove()
        hash_sharding_specification_4.remove()
        hash_sharding_specification_5.remove()

        self.__shard_1.remove()
        self.__shard_2.remove()
        self.__shard_3.remove()
        self.__shard_4.remove()
        self.__shard_5.remove()

        for i in range(10):
            hash_sharding_spec = HashShardingSpecification.lookup(
                i, self.__shard_mapping_id_1, "HASH"
            )
            self.assertEqual(hash_sharding_spec, None)

    def test_list_shard_mapping(self):
        """Test the listing of HASH shards in a shard mapping.
        """
        expected_shard_mapping_list1 = [1, "HASH", "GROUPID1"]
        obtained_shard_mapping_list = ShardMapping.list_shard_mapping_defn()
        self.assertEqual(set(expected_shard_mapping_list1),
                         set(obtained_shard_mapping_list[0]))

    def test_shard_mapping_list_mappings(self):
        """Test the listing of all HASH shards in a shard mapping.
        """
        shard_mappings = ShardMapping.list("HASH")
        self.assertTrue(ShardingUtils.compare_shard_mapping(
            self.__shard_mapping_1, shard_mappings[0]))

    def test_fetch_sharding_scheme(self):
        """Test the fetch method of the HASH sharding scheme.
        """
        hash_sharding_specification_1 = HashShardingSpecification.fetch(1)
        hash_sharding_specification_2 = HashShardingSpecification.fetch(2)
        hash_sharding_specification_3 = HashShardingSpecification.fetch(3)
        hash_sharding_specification_4 = HashShardingSpecification.fetch(4)
        hash_sharding_specification_5 = HashShardingSpecification.fetch(5)
        hash_sharding_specifications = HashShardingSpecification.list(1)

        #list() does not return the hash sharding specifications in order
        #of shard_id, hence a direct comparison is not possible.
        self.assertTrue(
            self.hash_sharding_specification_in_list(
                hash_sharding_specification_1,
                hash_sharding_specifications
            )
        )
        self.assertTrue(
            self.hash_sharding_specification_in_list(
                hash_sharding_specification_2,
                hash_sharding_specifications
            )
        )
        self.assertTrue(
            self.hash_sharding_specification_in_list(
                hash_sharding_specification_3,
                hash_sharding_specifications
            )
        )
        self.assertTrue(
            self.hash_sharding_specification_in_list(
                hash_sharding_specification_4,
                hash_sharding_specifications
            )
        )
        self.assertTrue(
            self.hash_sharding_specification_in_list(
                hash_sharding_specification_5,
                hash_sharding_specifications
            )
        )

    def test_prune_shard(self):
        #Each shard server starts out with all 500 rows.
        for server in (self.__server_2, self.__server_3, self.__server_4,
                       self.__server_5, self.__server_6):
            rows = server.exec_stmt("SELECT COUNT(*) FROM db1.t1",
                                    {"fetch" : True})
            self.assertEqual(int(rows[0][0]), 500)

        status = self.proxy.sharding.prune_shard("db1.t1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_prune_shard_tables).")

        #After pruning, each server keeps only the rows that hash to its
        #shard, and together the shards still hold all 500 rows.
        counts = []
        for server in (self.__server_2, self.__server_3, self.__server_4,
                       self.__server_5, self.__server_6):
            rows = server.exec_stmt("SELECT COUNT(*) FROM db1.t1",
                                    {"fetch" : True})
            cnt = int(rows[0][0])
            self.assertTrue(cnt < 500)
            counts.append(cnt)
        self.assertEqual(sum(counts), 500)

    def test_prune_lookup(self):
        #After pruning, every userID left on a shard server must map back,
        #through the hash lookup, to the shard stored on that server.
        self.proxy.sharding.prune_shard("db1.t1")
        expected = [
            (self.__server_2, 1), (self.__server_3, 2), (self.__server_4, 3),
            (self.__server_5, 4), (self.__server_6, 5),
        ]
        for server, shard_id in expected:
            rows = server.exec_stmt("SELECT userID FROM db1.t1",
                                    {"fetch" : True})
            for (val, ) in rows:
                hash_sharding_spec = HashShardingSpecification.lookup(
                    val, self.__shard_mapping_id_1, "HASH"
                )
                self.assertEqual(hash_sharding_spec.shard_id, shard_id)


    def hash_sharding_specification_in_list(self,
                                            hash_sharding_spec,
                                            hash_sharding_specification_list
                                            ):
        """Verify if the given hash sharding specification is present in
        the list of hash sharding specifications.

        :param hash_sharding_spec: The hash sharding specification that
            needs to be looked up.
        :param hash_sharding_specification_list: The list of hash sharding
            specifications.

        :return: True if the hash sharding specification is present.
                False otherwise.
        """
        for spec in hash_sharding_specification_list:
            if ShardingUtils.compare_hash_specifications(
                hash_sharding_spec, spec
            ):
                return True
        return False

    def tearDown(self):
        """Tear down the state store setup.
        """
        self.__server_2.exec_stmt("DROP TABLE db1.t1")
        self.__server_2.exec_stmt("DROP DATABASE db1")
        self.__server_3.exec_stmt("DROP TABLE db1.t1")
        self.__server_3.exec_stmt("DROP DATABASE db1")
        self.__server_4.exec_stmt("DROP TABLE db1.t1")
        self.__server_4.exec_stmt("DROP DATABASE db1")
        self.__server_5.exec_stmt("DROP TABLE db1.t1")
        self.__server_5.exec_stmt("DROP DATABASE db1")
        self.__server_6.exec_stmt("DROP TABLE db1.t1")
        self.__server_6.exec_stmt("DROP DATABASE db1")

        tests.utils.cleanup_environment()
        tests.utils.teardown_xmlrpc(self.manager, self.proxy)
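
The hash-lookup tests above boil down to one routing rule: hash the key and send it to the shard whose lower bound is the greatest one not exceeding the digest. Below is a minimal sketch of that idea with made-up MD5 hex lower bounds; the helper name and the bucketing scheme are assumptions for illustration, not HashShardingSpecification's actual implementation.

import hashlib

def pick_hash_shard(key, lower_bounds):
    """Return the shard id owning the key.

    :param lower_bounds: dict mapping an uppercase MD5 hex lower bound to
        a shard id. The key goes to the greatest bound <= MD5(key); if the
        digest is below every bound, it wraps around to the highest bound.
    """
    digest = hashlib.md5(str(key).encode("utf-8")).hexdigest().upper()
    candidates = [bound for bound in lower_bounds if bound <= digest]
    chosen = max(candidates) if candidates else max(lower_bounds)
    return lower_bounds[chosen]

#Two shards whose (made-up) lower bounds split the MD5 space in half.
#Mirrors the distribution check in test_hash_lookup above: keys end up
#spread over both shards rather than piling into one.
bounds = {"0" * 32: 1, "8" + "0" * 31: 2}
hits = {1: 0, 2: 0}
for key in range(1000):
    hits[pick_hash_shard(key, bounds)] += 1
assert hits[1] > 0 and hits[2] > 0
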
Example #49
0
    def setUp(self):
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid" :  _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid" :  _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 501):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))


        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid" :  _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(2),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add( self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 501):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add( self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid" :  _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(3),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 501):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add( self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid" :  _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(4),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 501):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add( self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid" :  _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(5),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)
        self.__server_6.connect()
        self.__server_6.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_6.exec_stmt("CREATE DATABASE db1")
        self.__server_6.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 501):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add( self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        self.__shard_mapping_list = ShardMapping.list_shard_mapping_defn()
        self.assertEqual(self.__shard_mapping_list, [])

        self.__shard_mapping_id_1 = ShardMapping.define("HASH", "GROUPID1")

        self.__shard_mapping_1 = ShardMapping.add(
                                    self.__shard_mapping_id_1,
                                    "db1.t1",
                                    "userID"
                                )

        self.__shard_1 = Shards.add("GROUPID2")
        self.__shard_2 = Shards.add("GROUPID3")
        self.__shard_3 = Shards.add("GROUPID4")
        self.__shard_4 = Shards.add("GROUPID5")
        self.__shard_5 = Shards.add("GROUPID6")

        self.__hash_sharding_specification_1 = HashShardingSpecification.add(
            self.__shard_mapping_1.shard_mapping_id,
            self.__shard_1.shard_id
        )

        self.__hash_sharding_specification_2 = HashShardingSpecification.add(
            self.__shard_mapping_1.shard_mapping_id,
            self.__shard_2.shard_id
        )

        self.__hash_sharding_specification_3 = HashShardingSpecification.add(
            self.__shard_mapping_1.shard_mapping_id,
            self.__shard_3.shard_id
        )

        self.__hash_sharding_specification_4 = HashShardingSpecification.add(
            self.__shard_mapping_1.shard_mapping_id,
            self.__shard_4.shard_id
        )

        self.__hash_sharding_specification_5 = HashShardingSpecification.add(
            self.__shard_mapping_1.shard_mapping_id,
            self.__shard_5.shard_id
        )
Example #50
0
def _setup_shard_switch_split(shard_id, source_group_id, destination_group_id,
                              split_value, prune_limit, cmd, update_only):
    """Setup the moved shard to map to the new group.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destination_group_id: The ID of the group to which the shard
                                 needs to be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation.
    :param update_only: Only update the state store and skip provisioning.
    """
    #Fetch the Range sharding specification.
    range_sharding_spec, source_shard, shard_mappings, shard_mapping_defn = \
            _services_sharding.verify_and_fetch_shard(shard_id)

    #Disable the old shard
    source_shard.disable()

    #Remove the old shard.
    range_sharding_spec.remove()
    source_shard.remove()

    destination_group = Group.fetch(destination_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destination_group_id, ))
    destn_group_master = MySQLServer.fetch(destination_group.master)
    if destn_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    destn_group_master.connect()

    #Mark the destination group master as read-only to block updates until
    #the connectors have refreshed their caches, thus avoiding inconsistency.
    destn_group_master.read_only = True

    #Add the new shards. Generate new shard IDs for the shard being
    #split and also for the shard that is created as a result of the split.
    new_shard_1 = Shards.add(source_shard.group_id, "DISABLED")
    new_shard_2 = Shards.add(destination_group_id, "DISABLED")

    #Both of the shard mappings associated with this shard_id should
    #be of the same sharding type. Hence it is safe to use one of the
    #shard mappings.
    if shard_mappings[0].type_name == "HASH":
        #In the case of a split involving a HASH sharding scheme, the shard
        #that is split gets a new shard_id but retains its original
        #lower_bound, while the shard created by the split gets the computed
        #lower_bound (the split value) and a new shard_id.
        HashShardingSpecification.add_hash_split(
            range_sharding_spec.shard_mapping_id,
            new_shard_1.shard_id,
            range_sharding_spec.lower_bound
        )
        HashShardingSpecification.add_hash_split(
            range_sharding_spec.shard_mapping_id,
            new_shard_2.shard_id,
            split_value
        )
    else:
        #Add the new ranges. Note that the shard being split retains its
        #lower_bound, while the new shard gets the computed lower_bound
        #(the split value).
        RangeShardingSpecification.add(
            range_sharding_spec.shard_mapping_id,
            range_sharding_spec.lower_bound,
            new_shard_1.shard_id
        )
        RangeShardingSpecification.add(
            range_sharding_spec.shard_mapping_id,
            split_value,
            new_shard_2.shard_id
        )

    #The sleep ensures that the connectors have refreshed their caches with
    #the new shards that have been added as a result of the split.
    time.sleep(_utils.TTL)

    #The source shard group master would have been marked as read only
    #during the sync. Remove the read_only flag.
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    source_group_master = MySQLServer.fetch(source_group.master)
    if source_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    source_group_master.connect()

    #Kill all the existing connections on the servers
    source_group.kill_connections_on_servers()

    #Allow connections on the source group master
    source_group_master.read_only = False

    #Allow connections on the destination group master
    destn_group_master.read_only = False

    #Setup replication for the new group from the global server
    _group_replication.setup_group_replication \
            (shard_mapping_defn[2], destination_group_id)

    #Enable the split shards
    new_shard_1.enable()
    new_shard_2.enable()

    #Trigger changing the mappings for the shard that was copied
    if not update_only:
        _events.trigger_within_procedure(
            PRUNE_SHARDS, new_shard_1.shard_id, new_shard_2.shard_id, prune_limit
        )
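
The split bookkeeping above relies on a simple routing invariant: after the split, the original shard keeps its lower_bound and the new shard's lower_bound is the split value, so a lookup sends each key to the shard with the greatest lower bound not exceeding it. The following is a small sketch of that invariant with made-up bounds and a hypothetical helper name; it is illustrative only, not the service's actual lookup code.

def route_by_range(key, lower_bounds):
    """Return the shard id that owns the key: the entry whose lower bound
    is the greatest one not exceeding the key.

    :param lower_bounds: dict mapping an integer lower bound to a shard id.
    """
    candidates = [bound for bound in lower_bounds if bound <= key]
    if not candidates:
        raise ValueError("key %r is below every lower bound" % (key, ))
    return lower_bounds[max(candidates)]

#Before the split a single shard (id 1) owns everything from 0 upwards.
#Splitting it at split_value=1000 keeps lower bound 0 on shard 1 and gives
#the new shard (id 2) the split value as its lower bound.
after_split = {0: 1, 1000: 2}
assert route_by_range(500, after_split) == 1
assert route_by_range(1500, after_split) == 2
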
Example #51
0
    def setUp(self):
        """Configure the existing environment
        """
        self.manager, self.proxy = tests.utils.setup_xmlrpc()
        self.__options_1 = {
            "uuid": _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": "server_1.mysql.com:3060",
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__options_2 = {
            "uuid": _uuid.UUID("{aa75a12a-98d1-414c-96af-9e9d4b179678}"),
            "address": "server_2.mysql.com:3060",
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        self.__group_1.add_server(self.__server_2)

        self.__options_3 = {
            "uuid": _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(0),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }
        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add(self.__server_3)

        self.__options_4 = {
            "uuid": _uuid.UUID("{dd75a12a-98d1-414c-96af-9e9d4b179678}"),
            "address": "server_4.mysql.com:3060",
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_3)
        self.__group_2.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_3)

        self.__options_5 = {
            "uuid": _uuid.UUID("{ee75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(2),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }
        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)

        self.__options_6 = {
            "uuid": _uuid.UUID("{ff75a12a-98d1-414c-96af-9e9d4b179678}"),
            "address": "server_6.mysql.com:3060",
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)
        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add(self.__group_3)
        self.__group_3.add_server(self.__server_5)
        self.__group_3.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_5)

        self.__options_1_host,  self.__options_1_port = \
            server_utils.split_host_port(self.__options_1["address"], 13001)
        self.__options_2_host,  self.__options_2_port = \
            server_utils.split_host_port(self.__options_2["address"], 13001)
        self.__options_3_host,  self.__options_3_port = \
            server_utils.split_host_port(self.__options_3["address"], 13001)
        self.__options_4_host,  self.__options_4_port = \
            server_utils.split_host_port(self.__options_4["address"], 13001)
        self.__options_5_host,  self.__options_5_port = \
            server_utils.split_host_port(self.__options_5["address"], 13001)
        self.__options_6_host,  self.__options_6_port = \
            server_utils.split_host_port(self.__options_6["address"], 13001)

        group_4 = Group("GROUPID4", "4TH description.")
        Group.add(group_4)
        group_5 = Group("GROUPID5", "5TH description.")
        Group.add(group_5)
        group_6 = Group("GROUPID6", "6TH description.")
        Group.add(group_6)
        group_7 = Group("GROUPID7", "7TH description.")
        Group.add(group_7)
        group_8 = Group("GROUPID8", "8TH description.")
        Group.add(group_8)
        group_9 = Group("GROUPID9", "9TH description.")
        Group.add(group_9)
        group_10 = Group("GROUPID10", "10TH description.")
        Group.add(group_10)
        group_11 = Group("GROUPID11", "11TH description.")
        Group.add(group_11)
        group_12 = Group("GROUPID12", "12TH description.")
        Group.add(group_12)
        group_13 = Group("GROUPID13", "13TH description.")
        Group.add(group_13)
        group_14 = Group("GROUPID14", "14TH description.")
        Group.add(group_14)

        self.__shard_mapping_list = ShardMapping.list_shard_mapping_defn()
        self.assertEqual(self.__shard_mapping_list, [])
        self.__shard_mapping_id_1 = ShardMapping.define("RANGE", "GROUPID10")
        self.__shard_mapping_id_2 = ShardMapping.define("RANGE", "GROUPID11")
        self.__shard_mapping_id_3 = ShardMapping.define("RANGE", "GROUPID12")
        #Test with sharding type values in lower case
        self.__shard_mapping_id_4 = ShardMapping.define("range", "GROUPID13")
        self.__shard_mapping_id_5 = ShardMapping.define("range", "GROUPID14")

        self.__shard_mapping_1 = \
            ShardMapping.add(self.__shard_mapping_id_1, "db1.t1", "userID1")
        self.__shard_mapping_2 = \
            ShardMapping.add(self.__shard_mapping_id_2, "db2.t2", "userID2")
        self.__shard_mapping_3 = \
            ShardMapping.add(self.__shard_mapping_id_3, "db3.t3", "userID3")
        self.__shard_mapping_4 = \
            ShardMapping.add(self.__shard_mapping_id_4, "db4.t4", "userID4")

        self.__shard_mapping_5 = \
            ShardMapping.add(self.__shard_mapping_id_5, "prune_db.prune_table",
                             "userID")

        self.__shard_id_1 = Shards.add("GROUPID1", "ENABLED")
        self.__shard_id_2 = Shards.add("GROUPID10", "ENABLED")
        self.__shard_id_3 = Shards.add("GROUPID11", "DISABLED")
        self.__shard_id_4 = Shards.add("GROUPID4", "ENABLED")
        self.__shard_id_5 = Shards.add("GROUPID5", "ENABLED")
        self.__shard_id_6 = Shards.add("GROUPID6", "ENABLED")
        self.__shard_id_7 = Shards.add("GROUPID7", "ENABLED")
        self.__shard_id_8 = Shards.add("GROUPID8", "ENABLED")
        self.__shard_id_9 = Shards.add("GROUPID9", "ENABLED")
        self.__shard_id_10 = Shards.add("GROUPID2", "ENABLED")
        self.__shard_id_11 = Shards.add("GROUPID3", "ENABLED")

        self.__range_sharding_specification_1 = RangeShardingSpecification.add(
            self.__shard_mapping_1.shard_mapping_id, 0,
            self.__shard_id_1.shard_id)
        self.__range_sharding_specification_2 = RangeShardingSpecification.add(
            self.__shard_mapping_1.shard_mapping_id, 1001,
            self.__shard_id_2.shard_id)
        self.__range_sharding_specification_3 = RangeShardingSpecification.add(
            self.__shard_mapping_1.shard_mapping_id, 2001,
            self.__shard_id_3.shard_id)

        self.__range_sharding_specification_4 = RangeShardingSpecification.add(
            self.__shard_mapping_2.shard_mapping_id, 3001,
            self.__shard_id_4.shard_id)
        self.__range_sharding_specification_5 = RangeShardingSpecification.add(
            self.__shard_mapping_2.shard_mapping_id, 4001,
            self.__shard_id_5.shard_id)

        self.__range_sharding_specification_6 = RangeShardingSpecification.add(
            self.__shard_mapping_3.shard_mapping_id, 6001,
            self.__shard_id_6.shard_id)
        self.__range_sharding_specification_7 = RangeShardingSpecification.add(
            self.__shard_mapping_3.shard_mapping_id, 7001,
            self.__shard_id_7.shard_id)

        self.__range_sharding_specification_8 = RangeShardingSpecification.add(
            self.__shard_mapping_4.shard_mapping_id, 8001,
            self.__shard_id_8.shard_id)
        self.__range_sharding_specification_9 = RangeShardingSpecification.add(
            self.__shard_mapping_4.shard_mapping_id, 10001,
            self.__shard_id_9.shard_id)

        self.__range_sharding_specification_10 = RangeShardingSpecification.add(
            self.__shard_mapping_5.shard_mapping_id, 100,
            self.__shard_id_10.shard_id)
        self.__range_sharding_specification_11 = RangeShardingSpecification.add(
            self.__shard_mapping_5.shard_mapping_id, 201,
            self.__shard_id_11.shard_id)

        READ_ONLY = MySQLServer.get_mode_idx(MySQLServer.READ_ONLY)
        READ_WRITE = MySQLServer.get_mode_idx(MySQLServer.READ_WRITE)

        SECONDARY = MySQLServer.get_status_idx(MySQLServer.SECONDARY)
        PRIMARY = MySQLServer.get_status_idx(MySQLServer.PRIMARY)

        self.__setofservers = tests.utils.make_servers_result(
            [[
                str(self.__server_1.uuid), 'GROUPID1', self.__options_1_host,
                self.__options_1_port, READ_ONLY, SECONDARY, 1.0
            ],
             [
                 str(self.__server_2.uuid), 'GROUPID1', self.__options_2_host,
                 self.__options_2_port, READ_ONLY, SECONDARY, 1.0
             ],
             [
                 str(self.__server_3.uuid), 'GROUPID2', self.__options_3_host,
                 self.__options_3_port, READ_WRITE, PRIMARY, 1.0
             ],
             [
                 str(self.__server_4.uuid), 'GROUPID2', self.__options_4_host,
                 self.__options_4_port, READ_ONLY, SECONDARY, 1.0
             ],
             [
                 str(self.__server_5.uuid), 'GROUPID3', self.__options_5_host,
                 self.__options_5_port, READ_WRITE, PRIMARY, 1.0
             ],
             [
                 str(self.__server_6.uuid), 'GROUPID3', self.__options_6_host,
                 self.__options_6_port, READ_ONLY, SECONDARY, 1.0
             ]])

        self.__setofservers_1 = tests.utils.make_servers_result(
            [[
                str(self.__server_1.uuid), 'GROUPID1', self.__options_1_host,
                self.__options_1_port, READ_ONLY, SECONDARY, 1.0
            ],
             [
                 str(self.__server_2.uuid), 'GROUPID1', self.__options_2_host,
                 self.__options_2_port, READ_ONLY, SECONDARY, 1.0
             ]])

        self.__setofservers_2 = tests.utils.make_servers_result(
            [[
                str(self.__server_1.uuid), 'GROUPID1', self.__options_1_host,
                self.__options_1_port, READ_ONLY, SECONDARY, 1.0
            ],
             [
                 str(self.__server_2.uuid), 'GROUPID1', self.__options_2_host,
                 self.__options_2_port, READ_ONLY, SECONDARY, 1.0
             ],
             [
                 str(self.__server_3.uuid), 'GROUPID2', self.__options_3_host,
                 self.__options_3_port, READ_WRITE, PRIMARY, 1.0
             ],
             [
                 str(self.__server_4.uuid), 'GROUPID2', self.__options_4_host,
                 self.__options_4_port, READ_ONLY, SECONDARY, 1.0
             ]])

        self.__setofservers_3 = tests.utils.make_servers_result(
            [[
                str(self.__server_1.uuid), 'GROUPID1', self.__options_1_host,
                self.__options_1_port, READ_ONLY, SECONDARY, 1.0
            ],
             [
                 str(self.__server_2.uuid), 'GROUPID1', self.__options_2_host,
                 self.__options_2_port, READ_ONLY, SECONDARY, 1.0
             ],
             [
                 str(self.__server_3.uuid), 'GROUPID2', self.__options_3_host,
                 self.__options_3_port, READ_WRITE, PRIMARY, 1.0
             ],
             [
                 str(self.__server_4.uuid), 'GROUPID2', self.__options_4_host,
                 self.__options_4_port, READ_ONLY, SECONDARY, 1.0
             ],
             [
                 str(self.__server_5.uuid), 'GROUPID3', self.__options_5_host,
                 self.__options_5_port, READ_WRITE, PRIMARY, 1.0
             ],
             [
                 str(self.__server_6.uuid), 'GROUPID3', self.__options_6_host,
                 self.__options_6_port, READ_ONLY, SECONDARY, 1.0
             ]])

        self.__setoftables = tests.utils.make_tables_result(
            [['db1', 't1', 'userID1', '1'], ['db2', 't2', 'userID2', '2'],
             ['db3', 't3', 'userID3', '3'], ['db4', 't4', 'userID4', '4'],
             ['prune_db', 'prune_table', 'userID', '5']])
        self.__setoftables_1 = tests.utils.make_tables_result(
            [['db1', 't1', 'userID1', '1']])
        self.__setoftables_2 = tests.utils.make_tables_result(
            [['db1', 't1', 'userID1', '1'], ['db2', 't2', 'userID2', '2']])
        self.__setoftables_3 = tests.utils.make_tables_result(
            [['db1', 't1', 'userID1', '1'], ['db2', 't2', 'userID2', '2'],
             ['db3', 't3', 'userID3', '3']])
        self.__setofshardmaps = tests.utils.make_mapping_result(
            [['1', 'RANGE', 'GROUPID10'], ['2', 'RANGE', 'GROUPID11'],
             ['3', 'RANGE', 'GROUPID12'], ['4', 'RANGE', 'GROUPID13'],
             ['5', 'RANGE', 'GROUPID14']])
        self.__setofshardmaps_1 = tests.utils.make_mapping_result(
            [['1', 'RANGE', 'GROUPID10']])
        self.__setofshardmaps_2 = tests.utils.make_mapping_result(
            [['1', 'RANGE', 'GROUPID10'], ['2', 'RANGE', 'GROUPID11']])
        self.__setofshardmaps_3 = tests.utils.make_mapping_result(
            [['1', 'RANGE', 'GROUPID10'], ['2', 'RANGE', 'GROUPID11'],
             ['3', 'RANGE', 'GROUPID12']])
        self.__setofshardindexes = tests.utils.make_index_result(
            [['0', '1', '1', 'GROUPID1'], ['1001', '1', '2', 'GROUPID10'],
             ['3001', '2', '4', 'GROUPID4'], ['4001', '2', '5', 'GROUPID5'],
             ['6001', '3', '6', 'GROUPID6'], ['7001', '3', '7', 'GROUPID7'],
             ['8001', '4', '8', 'GROUPID8'], ['10001', '4', '9', 'GROUPID9'],
             ['100', '5', '10', 'GROUPID2'], ['201', '5', '11', 'GROUPID3']])
        self.__setofshardindexes_1 = tests.utils.make_index_result(
            [['0', '1', '1', 'GROUPID1'], ['1001', '1', '2', 'GROUPID10']])
        self.__setofshardindexes_3 = tests.utils.make_index_result(
            [['0', '1', '1', 'GROUPID1'], ['1001', '1', '2', 'GROUPID10'],
             ['3001', '2', '4', 'GROUPID4'], ['4001', '2', '5', 'GROUPID5'],
             ['6001', '3', '6', 'GROUPID6'], ['7001', '3', '7', 'GROUPID7']])
        self.__setofshardindexes_5 = tests.utils.make_index_result(
            [['0', '1', '1', 'GROUPID1'], ['1001', '1', '2', 'GROUPID10'],
             ['3001', '2', '4', 'GROUPID4'], ['4001', '2', '5', 'GROUPID5'],
             ['6001', '3', '6', 'GROUPID6'], ['7001', '3', '7', 'GROUPID7'],
             ['8001', '4', '8', 'GROUPID8'], ['10001', '4', '9', 'GROUPID9'],
             ['100', '5', '10', 'GROUPID2'], ['201', '5', '11', 'GROUPID3']])
        self.__shardinginformation_1 = tests.utils.make_info_result(
            [[
                'db1', 't1', 'userID1', '0', '1', 'RANGE', 'GROUPID1',
                'GROUPID10'
            ],
             [
                 'db1', 't1', 'userID1', '1001', '2', 'RANGE', 'GROUPID10',
                 'GROUPID10'
             ]])
        self.__shardinginformation_2 = tests.utils.make_info_result(
            [[
                'db1', 't1', 'userID1', '0', '1', 'RANGE', 'GROUPID1',
                'GROUPID10'
            ],
             [
                 'db1', 't1', 'userID1', '1001', '2', 'RANGE', 'GROUPID10',
                 'GROUPID10'
             ],
             [
                 'db2', 't2', 'userID2', '3001', '4', 'RANGE', 'GROUPID4',
                 'GROUPID11'
             ],
             [
                 'db2', 't2', 'userID2', '4001', '5', 'RANGE', 'GROUPID5',
                 'GROUPID11'
             ]])
        self.__shardinginformation_3 = tests.utils.make_info_result(
            [[
                'db1', 't1', 'userID1', '0', '1', 'RANGE', 'GROUPID1',
                'GROUPID10'
            ],
             [
                 'db1', 't1', 'userID1', '1001', '2', 'RANGE', 'GROUPID10',
                 'GROUPID10'
             ],
             [
                 'db2', 't2', 'userID2', '3001', '4', 'RANGE', 'GROUPID4',
                 'GROUPID11'
             ],
             [
                 'db2', 't2', 'userID2', '4001', '5', 'RANGE', 'GROUPID5',
                 'GROUPID11'
             ],
             [
                 'db3', 't3', 'userID3', '6001', '6', 'RANGE', 'GROUPID6',
                 'GROUPID12'
             ],
             [
                 'db3', 't3', 'userID3', '7001', '7', 'RANGE', 'GROUPID7',
                 'GROUPID12'
             ]])
class TestShardingServices(unittest.TestCase):

    def assertStatus(self, status, expect):
        items = (item['diagnosis'] for item in status[1] if item['diagnosis'])
        self.assertEqual(status[1][-1]["success"], expect, "\n".join(items))

    def setUp(self):
        """Configure the existing environment
        """
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid" :  _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid" :  _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)

        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid" :  _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(2),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add( self.__server_3)

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add( self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid" :  _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(3),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add( self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_4.exec_stmt("CREATE DATABASE db2")
        self.__server_4.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID2 INT, name VARCHAR(30))")
        for i in range(1, 201):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))


        self.__options_5 = {
            "uuid" :  _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(4),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add( self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_5.exec_stmt("CREATE DATABASE db2")
        self.__server_5.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID2 INT, name VARCHAR(30))")
        for i in range(1, 201):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__options_6 = {
            "uuid" :  _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(5),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add( self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_define_shard_mapping).")
        self.assertEqual(status[2], 1)

        status = self.proxy.sharding.create_definition("RANGE", "GROUPID1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_define_shard_mapping).")
        self.assertEqual(status[2], 2)

        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_define_shard_mapping).")
        self.assertEqual(status[2], 3)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_table(2, "db2.t2", "userID2")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_table(3, "db3.t3", "userID3")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_shard(1, "GROUPID2,GROUPID3",
            "ENABLED")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard).")

        status = self.proxy.sharding.add_shard(2, "GROUPID4/0,GROUPID5/101",
            "ENABLED")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard).")

        status = self.proxy.sharding.add_shard(3, "GROUPID6", "ENABLED")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard).")

    def test_list_shard_defns(self):
        expected_shard_mapping_list1 = [
            [1, "HASH", "GROUPID1"],
            [2, "RANGE", "GROUPID1"],
            [3, "HASH", "GROUPID1"],
        ]
        status = self.proxy.sharding.list_definitions()
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        obtained_shard_mapping_list = status[2]
        self.assertEqual(set(expected_shard_mapping_list1[0]),
                         set(obtained_shard_mapping_list[0]))
        self.assertEqual(set(expected_shard_mapping_list1[1]),
                         set(obtained_shard_mapping_list[1]))
        self.assertEqual(set(expected_shard_mapping_list1[2]),
                         set(obtained_shard_mapping_list[2]))

    def test_list_shard_mappings(self):
        status = self.proxy.sharding.list_tables("HASH")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        self.assertEqual(status[2], [
                                     {"shard_mapping_id":1,
                                     "table_name":"db1.t1",
                                     "column_name":"userID1",
                                     "type_name":"HASH",
                                     "global_group":"GROUPID1"},
                                     {"shard_mapping_id":3,
                                     "table_name":"db3.t3",
                                     "column_name":"userID3",
                                     "type_name":"HASH",
                                     "global_group":"GROUPID1"}
                                ])
        status = self.proxy.sharding.list_tables("RANGE")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        self.assertEqual(status[2], [
                                     {"shard_mapping_id":2,
                                     "table_name":"db2.t2",
                                     "column_name":"userID2",
                                     "type_name":"RANGE",
                                     "global_group":"GROUPID1"}
                                ])

    def test_shard_prune(self):
        status = self.proxy.sharding.prune_shard("db2.t2")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_prune_shard_tables).")

        status = self.proxy.sharding.lookup_servers("db2.t2", 1,  "LOCAL")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        obtained_server_list = status[2]
        shard_uuid = obtained_server_list[0][0]
        shard_server = MySQLServer.fetch(shard_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt(
                                    "SELECT COUNT(*) FROM db2.t2",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 100)
        rows = shard_server.exec_stmt(
                                    "SELECT MAX(userID2) FROM db2.t2",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 100)
        rows = shard_server.exec_stmt(
                                    "SELECT MIN(userID2) FROM db2.t2",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 1)

        status = self.proxy.sharding.lookup_servers("db2.t2", 101,  "LOCAL")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        obtained_server_list = status[2]
        shard_uuid = obtained_server_list[0][0]
        shard_server = MySQLServer.fetch(shard_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt(
                                    "SELECT COUNT(*) FROM db2.t2",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 100)
        rows = shard_server.exec_stmt(
                                    "SELECT MAX(userID2) FROM db2.t2",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 200)
        rows = shard_server.exec_stmt(
                                    "SELECT MIN(userID2) FROM db2.t2",
                                    {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 101)

    def tearDown(self):
        """Clean up the existing environment
        """
        self.proxy.sharding.disable_shard(1)
        self.proxy.sharding.remove_shard(1)

        self.proxy.sharding.disable_shard(2)
        self.proxy.sharding.remove_shard(2)

        self.proxy.sharding.disable_shard(3)
        self.proxy.sharding.remove_shard(3)

        self.proxy.sharding.disable_shard(4)
        self.proxy.sharding.remove_shard(4)

        self.proxy.sharding.disable_shard(5)
        self.proxy.sharding.remove_shard(5)

        tests.utils.cleanup_environment()
        tests.utils.teardown_xmlrpc(self.manager, self.proxy)
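
test_shard_prune above depends on pruning deleting, from each shard server, every row whose sharding key belongs to a different shard: with the RANGE shards added as "GROUPID4/0,GROUPID5/101", the first shard keeps userID2 1-100 and the second keeps 101-200, and the service batches its DELETEs according to prune_limit. A rough sketch of the per-shard DELETE such a prune could use follows; the bounds and helper are assumed for illustration, not the _prune_shard_tables implementation.

def prune_statement(table, column, lower_bound, next_lower_bound=None):
    """Build a DELETE that keeps only the rows belonging to one RANGE
    shard: rows below the shard's lower bound, or at/above the next
    shard's lower bound, are removed. The last shard has no upper limit.
    """
    stmt = "DELETE FROM %s WHERE %s < %s" % (table, column, lower_bound)
    if next_lower_bound is not None:
        stmt += " OR %s >= %s" % (column, next_lower_bound)
    return stmt

#Shard covering [0, 101) on GROUPID4 and shard [101, ...) on GROUPID5.
assert prune_statement("db2.t2", "userID2", 0, 101) == \
    "DELETE FROM db2.t2 WHERE userID2 < 0 OR userID2 >= 101"
assert prune_statement("db2.t2", "userID2", 101) == \
    "DELETE FROM db2.t2 WHERE userID2 < 101"
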
Example #53
0
class TestShardingPrune(tests.utils.TestCase):
    def setUp(self):
        """Creates the following topology for testing,

        GROUPID1 - localhost:13001, localhost:13002 - Global Group
        GROUPID2 - localhost:13003, localhost:13004 - shard 1
        GROUPID3 - localhost:13005, localhost:13006 - shard 2
        """
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid": _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(0),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid": _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(1),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid": _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(2),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add(self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add(self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid": _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(3),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add(self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid": _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(4),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add(self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid": _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(5),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)
        self.__server_6.connect()
        self.__server_6.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_6.exec_stmt("CREATE DATABASE db1")
        self.__server_6.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add(self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        status = self.proxy.sharding.create_definition("RANGE", "GROUPID1")
        self.check_xmlrpc_command_result(status, returns=1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.check_xmlrpc_command_result(status)

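        # Five RANGE shards, one per group: GROUPID2 starts at key 1,
        # GROUPID3 at 101, GROUPID4 at 1001, GROUPID5 at 10001 and
        # GROUPID6 at 100001.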
        status = self.proxy.sharding.add_shard(
            1, "GROUPID2/1,GROUPID3/101,GROUPID4/1001,"
            "GROUPID5/10001,GROUPID6/100001", "ENABLED")
        self.check_xmlrpc_command_result(status)

    def test_prune_table_not_exists(self):
        """Delete the data in the database and verify that the prune does
        not fail once the database has been removed.
        """
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_6.exec_stmt("DROP DATABASE IF EXISTS db1")
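        # The prune is expected to complete without raising even though db1
        # no longer exists on any shard server; the result is not asserted.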
        status = self.proxy.sharding.prune_shard("db1.t1")

    def test_prune_shard(self):
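        # Prune all shards, then verify that each shard group keeps only the
        # rows whose userID falls inside its own key range.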
        status = self.proxy.sharding.prune_shard("db1.t1")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.lookup_servers("db1.t1", 1, "LOCAL")
        info = self.check_xmlrpc_simple(status, {}, rowcount=1)
        shard_server = fetch_test_server(info['server_uuid'])
        shard_server.connect()
        rows = shard_server.exec_stmt("SELECT COUNT(*) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 70)
        rows = shard_server.exec_stmt("SELECT MAX(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 70)
        rows = shard_server.exec_stmt("SELECT MIN(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 1)

        status = self.proxy.sharding.lookup_servers("db1.t1", 101, "LOCAL")
        info = self.check_xmlrpc_simple(status, {}, rowcount=1)
        shard_server = fetch_test_server(info['server_uuid'])
        shard_server.connect()
        rows = shard_server.exec_stmt("SELECT COUNT(*) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 200)
        rows = shard_server.exec_stmt("SELECT MAX(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 300)
        rows = shard_server.exec_stmt("SELECT MIN(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 101)

        status = self.proxy.sharding.lookup_servers("db1.t1", 1202, "LOCAL")
        info = self.check_xmlrpc_simple(status, {}, rowcount=1)
        shard_server = fetch_test_server(info['server_uuid'])
        shard_server.connect()
        rows = shard_server.exec_stmt("SELECT COUNT(*) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 200)
        rows = shard_server.exec_stmt("SELECT MAX(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 1200)
        rows = shard_server.exec_stmt("SELECT MIN(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 1001)

        status = self.proxy.sharding.lookup_servers("db1.t1", 11000, "LOCAL")
        info = self.check_xmlrpc_simple(status, {}, rowcount=1)
        shard_server = fetch_test_server(info['server_uuid'])
        shard_server.connect()
        rows = shard_server.exec_stmt("SELECT COUNT(*) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 200)
        rows = shard_server.exec_stmt("SELECT MAX(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 10200)
        rows = shard_server.exec_stmt("SELECT MIN(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 10001)

        status = self.proxy.sharding.lookup_servers("db1.t1", 100100, "LOCAL")
        info = self.check_xmlrpc_simple(status, {}, rowcount=1)
        shard_server = fetch_test_server(info['server_uuid'])
        shard_server.connect()
        rows = shard_server.exec_stmt("SELECT COUNT(*) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 200)
        rows = shard_server.exec_stmt("SELECT MAX(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 100200)
        rows = shard_server.exec_stmt("SELECT MIN(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 100001)

    def tearDown(self):
        """Clean up the existing environment
        """
        tests.utils.cleanup_environment()
Example #54
0
    def setUp(self):
        """Creates the following topology for testing,

        GROUPID1 - localhost:13001, localhost:13002 - Global Group
        GROUPID2 - localhost:13003, localhost:13004 - shard 1
        GROUPID3 - localhost:13005, localhost:13006 - shard 2
        """
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid" :  _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid" :  _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 601):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))


        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid" :  _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(2),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add(self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 601):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add(self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid" :  _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(3),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 601):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add(self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid" :  _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(4),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 601):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add(self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid" :  _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(5),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)
        self.__server_6.connect()
        self.__server_6.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_6.exec_stmt("CREATE DATABASE db1")
        self.__server_6.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 601):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add(self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        status = self.proxy.sharding.create_definition("RANGE", "GROUPID1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_define_shard_mapping).")
        self.assertEqual(status[2], 1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_shard(
            1,
            "GROUPID2/1,GROUPID3/101,GROUPID4/201,"
            "GROUPID5/301,GROUPID6/401",
            "ENABLED"
        )
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard).")
class TestShardingPrune(tests.utils.TestCase):
    """Contains unit tests for testing the shard move operation and for
    verifying that the global server configuration remains constant after
    the shard move configuration.
    """
    def check_xmlrpc_command_success(self, packet, expect, has_error):
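        """Decode the XML-RPC packet returned by a command and assert the
        outcome.

        :param packet: Raw XML-RPC packet returned by the command.
        :param expect: Expected command result, or None to skip that check.
        :param has_error: Whether the command is expected to report an error.
        """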
        result = _xmlrpc._decode(packet)
        self.assertEqual(bool(result.error), has_error,
                         "Had error '%s'" % result.error)

        # Check that it returned the expected result (if one is
        # expected), which is in the first result set (containing
        # execution result), first row (there is just one for
        # procedures), and last column (containing the result).
        if expect is not None:
            self.assertEqual(result.results[0][0][3], expect)

        # If the call was successful, check that there are at least two
        # result sets and that the second result set contains at least
        # one job.
        if not has_error:
            self.assertTrue(len(result.results) > 1, str(result))
            self.assertNotEqual(result.results[1].rowcount, 0,
                                "had %d result sets" % len(result.results))

    def setUp(self):
        """Creates the topology for testing.
        """
        tests.utils.cleanup_environment()
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid": _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(0),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__server_1.connect()
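        # db2.t2 references db1.t1 through a FOREIGN KEY, so foreign key
        # checks are disabled on every server before loading and
        # re-sharding the data.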
        self.__server_1.exec_stmt("SET FOREIGN_KEY_CHECKS = OFF")

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid": _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(1),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("SET FOREIGN_KEY_CHECKS = OFF")
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 71, 10):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301, 10):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201, 10):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201, 10):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201, 10):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_2.exec_stmt("CREATE DATABASE db2")
        self.__server_2.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71, 10):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(101, 301, 10):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(1001, 1201, 10):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(10001, 10201, 10):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(100001, 100201, 10):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))

        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid": _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(2),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add(self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("SET FOREIGN_KEY_CHECKS = OFF")
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 71, 10):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301, 10):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201, 10):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201, 10):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201, 10):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_3.exec_stmt("CREATE DATABASE db2")
        self.__server_3.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71, 10):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(101, 301, 10):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(1001, 1201, 10):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(10001, 10201, 10):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(100001, 100201, 10):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add(self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid": _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(3),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("SET FOREIGN_KEY_CHECKS = OFF")
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 71, 10):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301, 10):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201, 10):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201, 10):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201, 10):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_4.exec_stmt("CREATE DATABASE db2")
        self.__server_4.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71, 10):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(101, 301, 10):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(1001, 1201, 10):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(10001, 10201, 10):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(100001, 100201, 10):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add(self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid": _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(4),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("SET FOREIGN_KEY_CHECKS = OFF")
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 71, 10):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301, 10):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201, 10):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201, 10):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201, 10):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_5.exec_stmt("CREATE DATABASE db2")
        self.__server_5.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71, 10):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(101, 301, 10):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(1001, 1201, 10):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(10001, 10201, 10):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        for i in range(100001, 100201, 10):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add(self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid": _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(5),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)
        self.__server_6.connect()
        self.__server_6.exec_stmt("SET FOREIGN_KEY_CHECKS = OFF")

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add(self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

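        # Four RANGE shards are created on GROUPID2-GROUPID5; GROUPID6 holds
        # no shard data and serves as the destination group for the
        # move_shard tests below.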
        packet = self.proxy.sharding.create_definition("RANGE", "GROUPID1")
        self.check_xmlrpc_command_success(packet, 1, False)

        packet = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.check_xmlrpc_command_success(packet, None, False)

        packet = self.proxy.sharding.add_table(1, "db2.t2", "userID")
        self.check_xmlrpc_command_success(packet, None, False)

        packet = self.proxy.sharding.add_shard(
            1, "GROUPID2/1,GROUPID3/101,GROUPID4/1001,GROUPID5/10001",
            "ENABLED")
        self.check_xmlrpc_command_success(packet, None, False)

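        # Prune each shard down to its own key range so that the move tests
        # start from the row counts they assert.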
        packet = self.proxy.sharding.prune_shard("db1.t1")
        self.check_xmlrpc_command_success(packet, None, False)

    def test_move_shard_1(self):
        '''Test the move of shard 1 and the global server configuration
        after that. The test moves shard 1 from GROUPID2 to GROUPID6.
        Once the move is done, it verifies the row counts on GROUPID6 to
        check that the group has received all the tuples from the earlier
        group. It then inserts rows on the global group and verifies that
        every shard group has received them. GROUPID2 should not receive the
        values since the shard has been moved away from it.
        '''
        row_cnt_shard_before_move_db1_t1 = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_before_move_db2_t2 = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])

        self.assertEqual(row_cnt_shard_before_move_db1_t1, 7)
        self.assertEqual(row_cnt_shard_before_move_db2_t2, 7)
        packet = self.proxy.sharding.move_shard("1", "GROUPID6")
        self.check_xmlrpc_command_success(packet, None, False)
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])

        self.assertEqual(row_cnt_shard_after_move_db1_t1,
                         row_cnt_shard_before_move_db1_t1)
        self.assertEqual(row_cnt_shard_after_move_db2_t2,
                         row_cnt_shard_before_move_db2_t2)

        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        try:
            #Verify that the data is not there in the first shard's old
            #group.
            global_table_count = self.__server_2.exec_stmt(
                "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertEqual(global_table_count, 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the first shard's new
        #group.
        global_table_count = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_move_shard_2(self):
        '''Test the move of shard 2 and the global server configuration
        after that. The test moves shard 2 from GROUPID3 to GROUPID6.
        Once the move is done, it verifies the row counts on GROUPID6 to
        check that the group has received all the tuples from the earlier
        group. It then inserts rows on the global group and verifies that
        every shard group has received them. GROUPID3 should not receive the
        values since the shard has been moved away from it.
        '''
        row_cnt_shard_before_move_db1_t1 = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_before_move_db2_t2 = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])

        self.assertEqual(row_cnt_shard_before_move_db1_t1, 20)
        self.assertEqual(row_cnt_shard_before_move_db2_t2, 20)
        packet = self.proxy.sharding.move_shard("2", "GROUPID6")
        self.check_xmlrpc_command_success(packet, None, False)
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])

        self.assertEqual(row_cnt_shard_after_move_db1_t1,
                         row_cnt_shard_before_move_db1_t1)
        self.assertEqual(row_cnt_shard_after_move_db2_t2,
                         row_cnt_shard_before_move_db2_t2)
        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        try:
            #Verify that the data is not there in the second shard's
            #old group.
            global_table_count = self.__server_3.exec_stmt(
                "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertEqual(global_table_count, 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the second shard's new
        #group.
        global_table_count = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_move_shard_3(self):
        '''Test the move of shard 3 and the global server configuration
        after that. The test moves shard 3 from GROUPID4 to GROUPID6.
        Once the move is done, it verifies the row counts on GROUPID6 to
        check that the group has received all the tuples from the earlier
        group. It then inserts rows on the global group and verifies that
        every shard group has received them. GROUPID4 should not receive the
        values since the shard has been moved away from it.
        '''
        row_cnt_shard_before_move_db1_t1 = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_before_move_db2_t2 = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])

        self.assertEqual(row_cnt_shard_before_move_db1_t1, 20)
        self.assertEqual(row_cnt_shard_before_move_db2_t2, 20)
        packet = self.proxy.sharding.move_shard("3", "GROUPID6")
        self.check_xmlrpc_command_success(packet, None, False)
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])

        self.assertEqual(row_cnt_shard_after_move_db1_t1,
                         row_cnt_shard_before_move_db1_t1)
        self.assertEqual(row_cnt_shard_after_move_db2_t2,
                         row_cnt_shard_before_move_db2_t2)
        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        try:
            #Verify that the data is not there in the third shard's
            #original group.
            global_table_count = self.__server_4.exec_stmt(
                "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertEqual(global_table_count, 10)
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard
        #new group.
        global_table_count = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_move_shard_4(self):
        '''Test the move of shard 4 and the global server configuration
        after that. The test moves shard 4 from GROUPID5 to GROUPID6.
        Once the move is done, it verifies the row counts on GROUPID6 to
        check that the group has received all the tuples from the earlier
        group. It then inserts rows on the global group and verifies that
        every shard group has received them. GROUPID5 should not receive the
        values since the shard has been moved away from it.
        '''
        row_cnt_shard_before_move_db1_t1 = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_before_move_db2_t2 = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])

        self.assertEqual(row_cnt_shard_before_move_db1_t1, 40)
        self.assertEqual(row_cnt_shard_before_move_db2_t2, 40)
        packet = self.proxy.sharding.move_shard("4", "GROUPID6")
        self.check_xmlrpc_command_success(packet, None, False)
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])

        self.assertEqual(row_cnt_shard_after_move_db1_t1,
                         row_cnt_shard_before_move_db1_t1)
        self.assertEqual(row_cnt_shard_after_move_db2_t2,
                         row_cnt_shard_before_move_db2_t2)
        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        try:
            #Verify that the data is not there in the fourth shard's
            #original group.
            global_table_count = self.__server_5.exec_stmt(
                "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertEqual(global_table_count, 10)
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard's new
        #group.
        global_table_count = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def tearDown(self):
        """Clean up the existing environment
        """
        tests.utils.cleanup_environment()

    def setUp(self):
        """Configure the existing environment
        """
        tests.utils.cleanup_environment()
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid" :  _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__server_1.connect()

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid" :  _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_2.exec_stmt("CREATE DATABASE db2")
        self.__server_2.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid": _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(2),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add(self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_3.exec_stmt("CREATE DATABASE db2")
        self.__server_3.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add(self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid": _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(3),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_4.exec_stmt("CREATE DATABASE db2")
        self.__server_4.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add(self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid": _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(4),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_5.exec_stmt("CREATE DATABASE db2")
        self.__server_5.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                  "VALUES(%s, %s)" % (i, i))

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add(self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid": _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(5),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add(self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

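        #Define a HASH shard mapping with GROUPID1 as the global group,
        #register db1.t1 and db2.t2 under it, add four shards on
        #GROUPID2-GROUPID5 and prune db1.t1 so that each group keeps only
        #the rows of its own shard.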
        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_define_shard_mapping).")
        self.assertEqual(status[2], 1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_table(1, "db2.t2", "userID")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_shard(
            1,
            "GROUPID2,GROUPID3,GROUPID4,GROUPID5",
            "ENABLED"
        )
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard).")

        status = self.proxy.sharding.prune_shard("db1.t1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_prune_shard_tables).")
class TestHashSplitGlobal(tests.utils.TestCase):
    """Contains unit tests for testing the shard split operation and for
    verifying that the global server configuration remains constant after
    the shard split configuration.
    """
    def setUp(self):
        """Configure the existing environment
        """
        tests.utils.cleanup_environment()
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        self.__options_1 = {
            "uuid": _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(0),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__server_1.connect()

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        self.__options_2 = {
            "uuid": _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(1),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_2.exec_stmt("CREATE DATABASE db2")
        self.__server_2.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db3")
        self.__server_2.exec_stmt("CREATE DATABASE db3")
        self.__server_2.exec_stmt("CREATE TABLE db3.t3"
                                  "(userID INT, Department INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db3.t3 "
                                      "VALUES(%s, %s)" % (i, i))

        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        self.__options_3 = {
            "uuid": _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(2),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add(self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_3.exec_stmt("CREATE DATABASE db2")
        self.__server_3.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db3")
        self.__server_3.exec_stmt("CREATE DATABASE db3")
        self.__server_3.exec_stmt("CREATE TABLE db3.t3"
                                  "(userID INT, Department INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db3.t3 "
                                      "VALUES(%s, %s)" % (i, i))

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add(self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        self.__options_4 = {
            "uuid": _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(3),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_4.exec_stmt("CREATE DATABASE db2")
        self.__server_4.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db3")
        self.__server_4.exec_stmt("CREATE DATABASE db3")
        self.__server_4.exec_stmt("CREATE TABLE db3.t3"
                                  "(userID INT, Department INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db3.t3 "
                                      "VALUES(%s, %s)" % (i, i))

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add(self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        self.__options_5 = {
            "uuid": _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(4),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_5.exec_stmt("CREATE DATABASE db2")
        self.__server_5.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db3")
        self.__server_5.exec_stmt("CREATE DATABASE db3")
        self.__server_5.exec_stmt("CREATE TABLE db3.t3"
                                  "(userID INT, Department INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db3.t3 "
                                      "VALUES(%s, %s)" % (i, i))

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add(self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        self.__options_6 = {
            "uuid": _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(5),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }

        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add(self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

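        #Same sharding setup as in the previous test case, but with a third
        #sharded table (db3.t3) and with all three tables pruned after the
        #shards are added.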
        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.check_xmlrpc_command_result(status, returns=1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.check_xmlrpc_command_result(status)
        status = self.proxy.sharding.add_table(1, "db2.t2", "userID")
        self.check_xmlrpc_command_result(status)
        status = self.proxy.sharding.add_table(1, "db3.t3", "userID")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_shard(
            1, "GROUPID2,GROUPID3,GROUPID4,GROUPID5", "ENABLED")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.prune_shard("db1.t1")
        self.check_xmlrpc_command_result(status)
        status = self.proxy.sharding.prune_shard("db2.t2")
        self.check_xmlrpc_command_result(status)
        status = self.proxy.sharding.prune_shard("db3.t3")
        self.check_xmlrpc_command_result(status)

    def test_split_shard_1(self):
        '''Test the split of shard 1 and verify the global replication
        afterwards. The test splits shard 1 between GROUPID2 and GROUPID6.
        Once the split is done, it compares the row counts on GROUPID2 and
        GROUPID6 to check that the new group received part of the original
        group's rows. It then inserts rows on the global group and verifies
        that every shard group receives them.
        '''
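        #Row counts on GROUPID2's master before the split; these serve as
        #the baseline for the post-split comparison below.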
        row_cnt_shard_db1_t1 = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_db2_t2 = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_db3_t3 = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM db3.t3", {"fetch": True})
        row_cnt_shard_db1_t1 = int(row_cnt_shard_db1_t1[0][0])
        row_cnt_shard_db2_t2 = int(row_cnt_shard_db2_t2[0][0])
        row_cnt_shard_db3_t3 = int(row_cnt_shard_db3_t3[0][0])

        status = self.proxy.sharding.split_shard("1", "GROUPID6")
        self.check_xmlrpc_command_result(status)

        self.__server_6.connect()

        self.__server_2.connect()
        row_cnt_shard_after_split_1_db1_t1 = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_after_split_1_db2_t2 = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_after_split_1_db3_t3 = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM db3.t3", {"fetch": True})

        row_cnt_shard_after_split_1_db1_t1_a = \
            int(row_cnt_shard_after_split_1_db1_t1[0][0])
        row_cnt_shard_after_split_1_db2_t2_a = \
            int(row_cnt_shard_after_split_1_db2_t2[0][0])
        row_cnt_shard_after_split_1_db3_t3_a = \
            int(row_cnt_shard_after_split_1_db3_t3[0][0])

        row_cnt_shard_after_split_1_db1_t1_b = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_after_split_1_db2_t2_b = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_after_split_1_db3_t3_b = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db3.t3", {"fetch": True})

        row_cnt_shard_after_split_1_db1_t1_b = \
            int(row_cnt_shard_after_split_1_db1_t1_b[0][0])
        row_cnt_shard_after_split_1_db2_t2_b = \
            int(row_cnt_shard_after_split_1_db2_t2_b[0][0])
        row_cnt_shard_after_split_1_db3_t3_b = \
            int(row_cnt_shard_after_split_1_db3_t3_b[0][0])

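        #The split must neither lose nor duplicate rows: for each table the
        #pre-split count has to equal the sum of the rows left on GROUPID2
        #and the rows moved to GROUPID6.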
        self.assertTrue(
            row_cnt_shard_db1_t1 == (row_cnt_shard_after_split_1_db1_t1_a +
                                     row_cnt_shard_after_split_1_db1_t1_b))
        self.assertTrue(
            row_cnt_shard_db2_t2 == (row_cnt_shard_after_split_1_db2_t2_a +
                                     row_cnt_shard_after_split_1_db2_t2_b))
        self.assertTrue(
            row_cnt_shard_db3_t3 == (row_cnt_shard_after_split_1_db3_t3_a +
                                     row_cnt_shard_after_split_1_db3_t3_b))

        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_split_shard_2(self):
        '''Test the split of shard 2 and verify the global replication
        afterwards. The test splits shard 2 between GROUPID3 and GROUPID6.
        Once the split is done, it compares the row counts on GROUPID3 and
        GROUPID6 to check that the new group received part of the original
        group's rows. It then inserts rows on the global group and verifies
        that every shard group receives them.
        '''
        row_cnt_shard_db1_t1 = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_db2_t2 = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_db3_t3 = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM db3.t3", {"fetch": True})
        row_cnt_shard_db1_t1 = int(row_cnt_shard_db1_t1[0][0])
        row_cnt_shard_db2_t2 = int(row_cnt_shard_db2_t2[0][0])
        row_cnt_shard_db3_t3 = int(row_cnt_shard_db3_t3[0][0])

        status = self.proxy.sharding.split_shard("2", "GROUPID6")
        self.check_xmlrpc_command_result(status)

        self.__server_6.connect()

        self.__server_3.connect()
        row_cnt_shard_after_split_1_db1_t1 = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_after_split_1_db2_t2 = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_after_split_1_db3_t3 = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM db3.t3", {"fetch": True})

        row_cnt_shard_after_split_1_db1_t1_a = \
            int(row_cnt_shard_after_split_1_db1_t1[0][0])
        row_cnt_shard_after_split_1_db2_t2_a = \
            int(row_cnt_shard_after_split_1_db2_t2[0][0])
        row_cnt_shard_after_split_1_db3_t3_a = \
            int(row_cnt_shard_after_split_1_db3_t3[0][0])

        row_cnt_shard_after_split_1_db1_t1_b = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_after_split_1_db2_t2_b = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_after_split_1_db3_t3_b = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db3.t3", {"fetch": True})

        row_cnt_shard_after_split_1_db1_t1_b = \
            int(row_cnt_shard_after_split_1_db1_t1_b[0][0])
        row_cnt_shard_after_split_1_db2_t2_b = \
            int(row_cnt_shard_after_split_1_db2_t2_b[0][0])
        row_cnt_shard_after_split_1_db3_t3_b = \
            int(row_cnt_shard_after_split_1_db3_t3_b[0][0])

        self.assertTrue(
            row_cnt_shard_db1_t1 == (row_cnt_shard_after_split_1_db1_t1_a +
                                     row_cnt_shard_after_split_1_db1_t1_b))
        self.assertTrue(
            row_cnt_shard_db2_t2 == (row_cnt_shard_after_split_1_db2_t2_a +
                                     row_cnt_shard_after_split_1_db2_t2_b))
        self.assertTrue(
            row_cnt_shard_db3_t3 == (row_cnt_shard_after_split_1_db3_t3_a +
                                     row_cnt_shard_after_split_1_db3_t3_b))

        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_split_shard_3(self):
        '''Test the split of shard 3 and verify the global replication
        afterwards. The test splits shard 3 between GROUPID4 and GROUPID6.
        Once the split is done, it compares the row counts on GROUPID4 and
        GROUPID6 to check that the new group received part of the original
        group's rows. It then inserts rows on the global group and verifies
        that every shard group receives them.
        '''
        row_cnt_shard_db1_t1 = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_db2_t2 = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_db3_t3 = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM db3.t3", {"fetch": True})
        row_cnt_shard_db1_t1 = int(row_cnt_shard_db1_t1[0][0])
        row_cnt_shard_db2_t2 = int(row_cnt_shard_db2_t2[0][0])
        row_cnt_shard_db3_t3 = int(row_cnt_shard_db3_t3[0][0])

        status = self.proxy.sharding.split_shard("3", "GROUPID6")
        self.check_xmlrpc_command_result(status)

        self.__server_6.connect()

        self.__server_4.connect()
        row_cnt_shard_after_split_1_db1_t1 = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_after_split_1_db2_t2 = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_after_split_1_db3_t3 = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM db3.t3", {"fetch": True})

        row_cnt_shard_after_split_1_db1_t1_a = \
            int(row_cnt_shard_after_split_1_db1_t1[0][0])
        row_cnt_shard_after_split_1_db2_t2_a = \
            int(row_cnt_shard_after_split_1_db2_t2[0][0])
        row_cnt_shard_after_split_1_db3_t3_a = \
            int(row_cnt_shard_after_split_1_db3_t3[0][0])

        row_cnt_shard_after_split_1_db1_t1_b = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_after_split_1_db2_t2_b = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_after_split_1_db3_t3_b = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db3.t3", {"fetch": True})

        row_cnt_shard_after_split_1_db1_t1_b = \
            int(row_cnt_shard_after_split_1_db1_t1_b[0][0])
        row_cnt_shard_after_split_1_db2_t2_b = \
            int(row_cnt_shard_after_split_1_db2_t2_b[0][0])
        row_cnt_shard_after_split_1_db3_t3_b = \
            int(row_cnt_shard_after_split_1_db3_t3_b[0][0])

        self.assertTrue(
            row_cnt_shard_db1_t1 == (row_cnt_shard_after_split_1_db1_t1_a +
                                     row_cnt_shard_after_split_1_db1_t1_b))
        self.assertTrue(
            row_cnt_shard_db2_t2 == (row_cnt_shard_after_split_1_db2_t2_a +
                                     row_cnt_shard_after_split_1_db2_t2_b))
        self.assertTrue(
            row_cnt_shard_db3_t3 == (row_cnt_shard_after_split_1_db3_t3_a +
                                     row_cnt_shard_after_split_1_db3_t3_b))

        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_split_shard_4(self):
        '''Test the split of shard 4 and verify the global replication
        afterwards. The test splits shard 4 between GROUPID5 and GROUPID6.
        Once the split is done, it compares the row counts on GROUPID5 and
        GROUPID6 to check that the new group received part of the original
        group's rows. It then inserts rows on the global group and verifies
        that every shard group receives them.
        '''
        row_cnt_shard_db1_t1 = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_db2_t2 = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_db3_t3 = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM db3.t3", {"fetch": True})
        row_cnt_shard_db1_t1 = int(row_cnt_shard_db1_t1[0][0])
        row_cnt_shard_db2_t2 = int(row_cnt_shard_db2_t2[0][0])
        row_cnt_shard_db3_t3 = int(row_cnt_shard_db3_t3[0][0])

        status = self.proxy.sharding.split_shard("4", "GROUPID6")
        self.check_xmlrpc_command_result(status)

        self.__server_6.connect()

        self.__server_5.connect()
        row_cnt_shard_after_split_1_db1_t1 = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_after_split_1_db2_t2 = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_after_split_1_db3_t3 = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM db3.t3", {"fetch": True})

        row_cnt_shard_after_split_1_db1_t1_a = \
            int(row_cnt_shard_after_split_1_db1_t1[0][0])
        row_cnt_shard_after_split_1_db2_t2_a = \
            int(row_cnt_shard_after_split_1_db2_t2[0][0])
        row_cnt_shard_after_split_1_db3_t3_a = \
            int(row_cnt_shard_after_split_1_db3_t3[0][0])

        row_cnt_shard_after_split_1_db1_t1_b = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
        row_cnt_shard_after_split_1_db2_t2_b = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2", {"fetch": True})
        row_cnt_shard_after_split_1_db3_t3_b = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db3.t3", {"fetch": True})

        row_cnt_shard_after_split_1_db1_t1_b = \
            int(row_cnt_shard_after_split_1_db1_t1_b[0][0])
        row_cnt_shard_after_split_1_db2_t2_b = \
            int(row_cnt_shard_after_split_1_db2_t2_b[0][0])
        row_cnt_shard_after_split_1_db3_t3_b = \
            int(row_cnt_shard_after_split_1_db3_t3_b[0][0])

        self.assertTrue(
            row_cnt_shard_db1_t1 == (row_cnt_shard_after_split_1_db1_t1_a +
                                     row_cnt_shard_after_split_1_db1_t1_b))
        self.assertTrue(
            row_cnt_shard_db2_t2 == (row_cnt_shard_after_split_1_db2_t2_a +
                                     row_cnt_shard_after_split_1_db2_t2_b))
        self.assertTrue(
            row_cnt_shard_db3_t3 == (row_cnt_shard_after_split_1_db3_t3_a +
                                     row_cnt_shard_after_split_1_db3_t3_b))
        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)
        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)
        #Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table", {"fetch": True})
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_hash_dump(self):
        """Test the dump of the HASH sharding information for the table.
        """
        shardinginformation_1 = tests.utils.make_info_result(
            [[
                'db1', 't1', 'userID', 'E2996A7B8509367020B55A4ACD2AE46A', '1',
                'HASH', 'GROUPID2', 'GROUPID1'
            ],
             [
                 'db1', 't1', 'userID', 'E88F547DF45C99F27646C76316FB21DF',
                 '2', 'HASH', 'GROUPID3', 'GROUPID1'
             ],
             [
                 'db1', 't1', 'userID', '7A2E76448FF04233F3851A492BEF1090',
                 '3', 'HASH', 'GROUPID4', 'GROUPID1'
             ],
             [
                 'db1', 't1', 'userID', '97427AA63E300F56536710F5D73A35FA',
                 '4', 'HASH', 'GROUPID5', 'GROUPID1'
             ]])

        packet = self.proxy.dump.sharding_information(0, "db1.t1")
        self.check_xmlrpc_result(packet, shardinginformation_1)

    def tearDown(self):
        """Clean up the existing environment
        """
        tests.utils.cleanup_environment()
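

#Illustrative sketch (not part of the original test suite): the tests above
#repeat the same "SELECT COUNT(*)" plus int() conversion many times. A small
#module-level helper along these lines could factor that pattern out,
#assuming that exec_stmt called with {"fetch": True} returns the rows as
#sequences, which is how the calls above use it:
def _row_count(server, table):
    """Return the number of rows in *table* on *server*."""
    rows = server.exec_stmt(
        "SELECT COUNT(*) FROM %s" % (table, ), {"fetch": True}
    )
    return int(rows[0][0])
#Usage inside the tests could then be, for example:
#    row_cnt_shard_db1_t1 = _row_count(self.__server_2, "db1.t1")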