def setUp(self):
    """Set up the fixture: discover the target server's UUID, build a
    MySQLServer from the shared OPTIONS and register it in the state store.
    """
    discovered = MySQLServer.discover_uuid(OPTIONS["address"])
    OPTIONS["uuid"] = _uuid.UUID(discovered)
    self.server = MySQLServer(**OPTIONS)
    MySQLServer.add(self.server)
def test_managment(self):
    """Exercise adding servers to a group and removing them again."""
    server_1 = MySQLServer(
        uuid=_uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
        address="server_1.mysql.com:3060",
    )
    MySQLServer.add(server_1)
    server_2 = MySQLServer(
        uuid=_uuid.UUID("{aa75a12a-98d1-414c-96af-9e9d4b179678}"),
        address="server_2.mysql.com:3060",
    )
    MySQLServer.add(server_2)

    group_1 = Group("oracle.com", "First description.")
    Group.add(group_1)

    # Both servers join the group; adding a duplicate must be rejected.
    group_1.add_server(server_1)
    group_1.add_server(server_2)
    self.assertRaises(AssertionError, group_1.add_server, server_1)
    self.assertEqual(len(group_1.servers()), 2)

    # Both servers leave the group; removing twice must be rejected.
    group_1.remove_server(server_1)
    group_1.remove_server(server_2)
    self.assertRaises(AssertionError, group_1.remove_server, server_1)
    self.assertEqual(len(group_1.servers()), 0)
def _setup_replication(shard_id, source_group_id, destn_group_id, split_value,
                       prune_limit, cmd):
    """Setup replication between the source and the destination groups and
    ensure that they are in sync.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be done in
                        one batch.
    :param cmd: Indicates the type of re-sharding operation

    :raises: ShardingError if either group or either group's master
             cannot be found.
    """
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    destination_group = Group.fetch(destn_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    # Replication runs from the source group's master to the
    # destination group's master.
    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    master.connect()

    slave = MySQLServer.fetch(destination_group.master)
    if slave is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    slave.connect()

    # Stop and reset any slave that might be running on the slave server.
    _utils.set_offline_mode(slave, True)  ### TODO: if forced offline_mode
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)

    # Change the master to the shard group master.
    _replication.switch_master(slave, master, master.repl_user,
                               master.repl_pass)

    # Start the slave so that syncing of the data begins.
    _replication.start_slave(slave, wait=True)
    _utils.set_offline_mode(slave, False)  ### TODO: if forced offline_mode

    # Setup sync between the source and the destination groups by
    # chaining the next step of the procedure.
    _events.trigger_within_procedure(
        SETUP_SYNC, shard_id, source_group_id, destn_group_id, split_value,
        prune_limit, cmd
    )
class TestMySQLMaster(tests.utils.TestCase):
    """Unit tests for master-side replication helpers.

    Exercises get_master_status() and check_master_issues() against a
    live MySQL server described by OPTIONS_MASTER.
    """
    def setUp(self):
        """Configure the existing environment."""
        uuid = MySQLServer.discover_uuid(OPTIONS_MASTER["address"])
        OPTIONS_MASTER["uuid"] = _uuid.UUID(uuid)
        self.master = MySQLServer(**OPTIONS_MASTER)
        self.master.connect()
        reset_master(self.master)
        # Toggle read_only to exercise the property setter.
        self.master.read_only = True
        self.master.read_only = False

    def tearDown(self):
        """Clean up the existing environment."""
        cleanup_environment()
        self.master.disconnect()

    def test_master_binary_log(self):
        """Test the get_master_status() function."""
        # Note this is only being tested with the binary log.
        master = self.master

        # The first binlog file is named <basename>-bin.000001.
        # Raw string fixes the invalid '\w' escape in the original
        # pattern; '\.' pins the literal dot.
        check = re.compile(r'\w+-bin\.000001')
        ret = get_master_status(master)
        self.assertNotEqual(check.match(ret[0][0]), None)

        # After a reset the position is that of the format descriptor
        # event, which differs across MySQL versions (151 or 154).
        reset_master(master)
        ret = get_master_status(master)
        self.assertIn(ret[0][1], (151, 154))

    def test_master_health(self):
        """Test the check_master_issues() function."""
        # Note this is only being tested with the binary log.
        master = self.master

        # Check health as a master before calling connect: only the
        # "not running" issue should be reported.
        master.disconnect()
        error, result = check_master_issues(master)
        expected_result = {
            'is_gtid_not_enabled': False,
            'is_slave_updates_not_enabled': False,
            'is_not_running': True,
            'is_binlog_not_enabled': False,
            'no_rpl_user': False
        }
        self.assertEqual(error, True)
        self.assertEqual(result, expected_result)

        # Check health as a master after calling connect.
        master.connect()
        error, _ = check_master_issues(master)
        self.assertEqual(error, False)
def test_max_connections(self):
    """The server must report a non-zero max_connections variable."""
    discovered = MySQLServer.discover_uuid(OPTIONS["address"])
    server = MySQLServer(_uuid.UUID(discovered), OPTIONS["address"])
    server.connect()
    value = server.get_variable("max_connections")
    self.assertNotEqual(int(value), 0)
def setUp(self):
    """Register the first configured MySQL instance as self.server."""
    address = tests.utils.MySQLInstances().get_address(0)
    uuid = MySQLServer.discover_uuid(address)
    self.server = MySQLServer(_uuid.UUID(uuid), address)
    MySQLServer.add(self.server)
def test_wrong_uuid(self):
    """Connecting with a uuid that does not match the server must raise
    UuidError.
    """
    OPTIONS["uuid"] = _uuid.UUID("FD0AC9BB-1431-11E2-8137-11DEF124DCC5")
    bogus = MySQLServer(**OPTIONS)
    self.assertRaises(_errors.UuidError, bogus.connect)
    bogus.disconnect()
    # Drop any pooled connections created under the bogus uuid.
    ConnectionPool().purge_connections(OPTIONS["uuid"])
def test_wrong_uuid(self):
    """A server configured with a mismatched uuid must fail to connect
    with UuidError.
    """
    OPTIONS["uuid"] = _uuid.UUID("FD0AC9BB-1431-11E2-8137-11DEF124DCC5")
    mismatched = MySQLServer(**OPTIONS)
    self.assertRaises(_errors.UuidError, mismatched.connect)
    mismatched.disconnect()
    # Release any connections the manager created for this server.
    ConnectionManager().purge_connections(mismatched)
def setUp(self):
    """Connect to the master, reset it, and exercise the read_only
    property setter once in each direction.
    """
    discovered = MySQLServer.discover_uuid(OPTIONS_MASTER["address"])
    OPTIONS_MASTER["uuid"] = _uuid.UUID(discovered)
    self.master = MySQLServer(**OPTIONS_MASTER)
    self.master.connect()
    reset_master(self.master)
    self.master.read_only = True
    self.master.read_only = False
def setUp(self):
    """Build and register a MySQLServer for the first configured
    MySQL instance.
    """
    instance_address = tests.utils.MySQLInstances().get_address(0)
    server_uuid = MySQLServer.discover_uuid(instance_address)
    self.server = MySQLServer(_uuid.UUID(server_uuid), instance_address)
    MySQLServer.add(self.server)
def _setup_sync(shard_id, source_group_id, destn_group_id, split_value,
                prune_limit, cmd):
    """Sync the source and the destination groups.

    Waits until the destination group's master has caught up with the
    source group's master, tears the temporary replication down, and
    chains the next re-sharding step.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be done in
                        one batch.
    :param cmd: Indicates the type of re-sharding operation

    :raises: ShardingError if either group or either group's master
             cannot be found.
    """
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    destination_group = Group.fetch(destn_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    master.connect()

    slave = MySQLServer.fetch(destination_group.master)
    if slave is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    slave.connect()

    # Synchronize until the slave catches up with the master.
    _replication.synchronize_with_read_only(slave, master)

    # Reset replication once the syncing is done.
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)

    # Trigger changing the mappings for the shard that was copied.
    _events.trigger_within_procedure(
        SETUP_RESHARDING_SWITCH, shard_id, source_group_id, destn_group_id,
        split_value, prune_limit, cmd
    )
def setUp(self):
    """Configure the existing environment.

    Registers two MySQL servers and populates server 1 with a
    backup_db database containing a table, a trigger, an index and an
    event, so backup/restore tests can verify that every object type
    survives a dump-and-load cycle.
    """
    # Paths of the dump/client binaries are injected by the test runner.
    from __main__ import mysqldump_path, mysqlclient_path
    self.manager, self.proxy = tests.utils.setup_xmlrpc()

    # First server: the placeholder uuid is replaced below by the
    # server's real, discovered uuid.
    self.__options_1 = {
        "uuid" : _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
        "address": MySQLInstances().get_address(0),
        "user" : MySQLInstances().user,
        "passwd" : MySQLInstances().passwd,
    }
    uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
    self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
    self.__server_1 = MySQLServer(**self.__options_1)
    MySQLServer.add(self.__server_1)
    self.__server_1.connect()

    # Second server, configured the same way.
    self.__options_2 = {
        "uuid" : _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
        "address": MySQLInstances().get_address(1),
        "user" : MySQLInstances().user,
        "passwd" : MySQLInstances().passwd,
    }
    uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
    self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
    self.__server_2 = MySQLServer(**self.__options_2)
    MySQLServer.add(self.__server_2)
    self.__server_2.connect()

    # Build the fixture schema on server 1 from scratch.
    self.__server_1.exec_stmt("DROP DATABASE IF EXISTS backup_db")
    self.__server_1.exec_stmt("CREATE DATABASE backup_db")
    self.__server_1.exec_stmt("CREATE TABLE backup_db.backup_table"
                              "(userID INT, name VARCHAR(30))")
    # trigger_table counts rows inserted into backup_table (see the
    # AFTER INSERT trigger below).
    self.__server_1.exec_stmt("CREATE TABLE backup_db.trigger_table"
                              "(count INT)")
    self.__server_1.exec_stmt("INSERT INTO backup_db.trigger_table "
                              "VALUES(0)")
    self.__server_1.exec_stmt("CREATE TRIGGER backup_db.backup_table_ai"
                              " AFTER INSERT ON backup_db.backup_table"
                              " FOR EACH ROW UPDATE backup_db.trigger_table"
                              " SET count = count + 1")
    self.__server_1.exec_stmt("INSERT INTO backup_db.backup_table "
                              "VALUES(101, 'TEST 1')")
    self.__server_1.exec_stmt("INSERT INTO backup_db.backup_table "
                              "VALUES(202, 'TEST 2')")
    self.__server_1.exec_stmt("CREATE INDEX i1 ON"
                              " backup_db.backup_table (userID)")
    self.__server_1.exec_stmt("CREATE EVENT backup_db.ev1"
                              " ON SCHEDULE EVERY 1 DAY DO SELECT 1")

    self.mysqldump_path = mysqldump_path
    self.mysqlclient_path = mysqlclient_path
def test_address(self):
    """A bare host must gain the default MySQL port automatically,
    while an explicit host:port must be kept verbatim.
    """
    default_port = _server_utils.MYSQL_DEFAULT_PORT

    bare = MySQLServer(_uuid.uuid4(), "address")
    self.assertEqual(bare.address,
                     "{0}:{1}".format("address", default_port))

    explicit = MySQLServer(_uuid.uuid4(), "address:port")
    self.assertEqual(explicit.address,
                     "{0}:{1}".format("address", "port"))
class TestMySQLMaster(unittest.TestCase):
    """Unit tests for master-side replication helpers.

    Exercises get_master_status() and check_master_issues() against a
    live MySQL server described by OPTIONS_MASTER.
    """
    def setUp(self):
        """Configure the existing environment."""
        uuid = MySQLServer.discover_uuid(OPTIONS_MASTER["address"])
        OPTIONS_MASTER["uuid"] = _uuid.UUID(uuid)
        self.master = MySQLServer(**OPTIONS_MASTER)
        self.master.connect()
        reset_master(self.master)
        # Toggle read_only to exercise the property setter.
        self.master.read_only = True
        self.master.read_only = False

    def tearDown(self):
        """Clean up the existing environment."""
        cleanup_environment()
        self.master.disconnect()

    def test_master_binary_log(self):
        """Test the get_master_status() function."""
        # Note this is only being tested with the binary log.
        master = self.master

        # The first binlog file is named <basename>-bin.000001.
        # Raw string fixes the invalid '\w' escape in the original
        # pattern; '\.' pins the literal dot.
        check = re.compile(r'\w+-bin\.000001')
        ret = get_master_status(master)
        self.assertNotEqual(check.match(ret[0][0]), None)

        # After a reset the position is that of the format descriptor
        # event (151 for this server version).
        reset_master(master)
        ret = get_master_status(master)
        self.assertEqual(int(ret[0][1]), 151)

    def test_master_health(self):
        """Test the check_master_issues() function."""
        # Note this is only being tested with the binary log.
        master = self.master

        # Before connect only the "not running" issue is reported.
        master.disconnect()
        ret = check_master_issues(master)
        self.assertEqual(ret, {'is_running': False})

        # After connect a healthy master reports no issues at all.
        master.connect()
        ret = check_master_issues(master)
        self.assertEqual(ret, {})
def test_persister_id(self):
    """Registering the backing-store server itself must be rejected
    with ServerError.
    """
    instances = tests.utils.MySQLInstances()
    address = instances.state_store_address
    user = instances.store_user
    passwd = instances.store_passwd

    # Build a MySQLServer pointing at the persister and try to add it.
    uuid = MySQLServer.discover_uuid(
        address=address, user=user, passwd=passwd
    )
    persister_server = MySQLServer(_uuid.UUID(uuid), address, user, passwd)
    self.assertRaises(_errors.ServerError, MySQLServer.add, persister_server)
def test_shard_split_fail_GTID_EXECUTED(self):
    """A shard split must fail and roll back when the shard's master has
    extra transactions outside the sharded schema.
    """
    # Make the split path fail (flag read by the test harness).
    self.split_fail = True

    status = self.proxy.group.lookup_servers("GROUPID3")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    # Find the PRIMARY of the group and connect to it.
    for obtained_server in obtained_server_list:
        if obtained_server["status"] == "PRIMARY":
            shard_uuid = obtained_server["server_uuid"]
            shard_server = MySQLServer.fetch(shard_uuid)
            shard_server.connect()
            break

    # Write extra, non-sharded data directly on the shard master so its
    # GTID_EXECUTED diverges from what the split expects.
    shard_server.exec_stmt("DROP DATABASE IF EXISTS Extra")
    shard_server.exec_stmt("CREATE DATABASE Extra")
    shard_server.exec_stmt("CREATE TABLE Extra.Extra_Table"
                           "(userID INT, name VARCHAR(30))")
    shard_server.exec_stmt("INSERT INTO Extra.Extra_Table "
                           "VALUES(101, 'TEST 1')")
    shard_server.exec_stmt("INSERT INTO Extra.Extra_Table "
                           "VALUES(102, 'TEST 2')")
    shard_server.exec_stmt("INSERT INTO Extra.Extra_Table "
                           "VALUES(103, 'TEST 3')")
    shard_server.exec_stmt("INSERT INTO Extra.Extra_Table "
                           "VALUES(701, 'TEST 4')")

    # The split must error out and the last job step must be the
    # backup-restore rollback action.
    status = self.proxy.sharding.split_shard("1", "GROUPID3", "600")
    self.assertStatus(status, _executor.Job.ERROR)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Tried to execute action (_restore_shard_backup).")
def drop_shard_range_trigger(group_id, sharding_type, table_name,
                             column_name):
    """Drop the range-check triggers from the shard table.

    :param group_id: The ID of the group on which the trigger definition
                     is applied. The trigger was created on the master of
                     this group.
    :param sharding_type: The datatype supported by the shards. Used to
                          name the trigger.
    :param table_name: The name of the table. This is used to name the
                       trigger being dropped.
    :param column_name: The name of the column in the table being sharded.
                        This is used to create the name of the trigger.
    """
    global_group = Group.fetch(group_id)
    master_server = MySQLServer.fetch(global_group.master)
    master_server.connect()

    db, table = table_name.split(".")
    # Drop the INSERT trigger first, then the UPDATE trigger — the same
    # order in which they were created.
    for prefix in (_TRIGGER_PREFIX_INSERT, _TRIGGER_PREFIX_UPDATE):
        drop_stmt = _DROP_TRIGGER_DEFN.format(
            trigger_name=db + "." + prefix + table
        )
        master_server.exec_stmt(drop_stmt)
def _run(self):
    """Function that verifies servers' availabilities.

    Loops while self.__check is set: fetches the watched group, pings
    each server, quarantines servers that fail consecutive checks, and
    reports a failure once a server has been unreachable for the
    configured number of detections.
    """
    ignored_status = [MySQLServer.FAULTY]
    # Map: server uuid -> consecutive failed detection count.
    quarantine = {}
    interval = FailureDetector._DETECTION_INTERVAL
    detections = FailureDetector._DETECTIONS
    detection_timeout = FailureDetector._DETECTION_TIMEOUT
    _persistence.init_thread()

    while self.__check:
        try:
            unreachable = set()
            group = Group.fetch(self.__group_id)
            if group is not None:
                for server in group.servers():
                    if server.status in ignored_status or \
                        MySQLServer.is_alive(server, detection_timeout):
                        # Faulty servers keep no pooled connections.
                        if server.status == MySQLServer.FAULTY:
                            self.__connection_manager.purge_connections(
                                server
                            )
                        continue

                    unreachable.add(server.uuid)

                    _LOGGER.warning(
                        "Server (%s) in group (%s) is unreachable.",
                        server.uuid, self.__group_id
                    )

                    unstable = False
                    failed_attempts = 0
                    if server.uuid not in quarantine:
                        quarantine[server.uuid] = failed_attempts = 1
                    else:
                        failed_attempts = quarantine[server.uuid] + 1
                        quarantine[server.uuid] = failed_attempts
                    if failed_attempts >= detections:
                        unstable = True

                    can_set_faulty = group.can_set_server_faulty(
                        server, get_time()
                    )
                    if unstable and can_set_faulty:
                        self._spawn_report_failure(server)

            # Servers that answered this round leave quarantine.
            # Iterate over a snapshot: deleting from a dict while
            # iterating its live key view raises RuntimeError on
            # Python 3.
            for uuid in list(quarantine.keys()):
                if uuid not in unreachable:
                    del quarantine[uuid]

        except (_errors.ExecutorError, _errors.DatabaseError):
            # Transient store/executor problems: retry next round.
            pass
        except Exception as error:
            _LOGGER.exception(error)

        time.sleep(interval / detections)

    _persistence.deinit_thread()
def setUp(self):
    """Prepare a clean master and a clean slave for replication tests."""
    master_uuid = MySQLServer.discover_uuid(OPTIONS_MASTER["address"])
    OPTIONS_MASTER["uuid"] = _uuid.UUID(master_uuid)
    self.master = MySQLServer(**OPTIONS_MASTER)
    self.master.connect()
    reset_master(self.master)

    slave_uuid = MySQLServer.discover_uuid(OPTIONS_SLAVE["address"])
    OPTIONS_SLAVE["uuid"] = _uuid.UUID(slave_uuid)
    self.slave = MySQLServer(**OPTIONS_SLAVE)
    self.slave.connect()
    # Make sure the slave starts with no replication state at all.
    stop_slave(self.slave, wait=True)
    reset_master(self.slave)
    reset_slave(self.slave, clean=True)
def test_update_only(self): """Test the shard split but without provisioning. """ # Get group information before the shard_move operation status = self.proxy.sharding.lookup_servers("db1.t1", 500, "LOCAL") local_list_before = status[2] status = self.proxy.sharding.lookup_servers("1", 500, "GLOBAL") global_list_before = status[2] # Do the shard split and compare group information. status = self.proxy.sharding.split_shard("1", "GROUPID3", "600", True) self.assertStatus(status, _executor.Job.SUCCESS) self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE) self.assertEqual(status[1][-1]["description"], "Executed action (_setup_resharding_switch).") status = self.proxy.sharding.lookup_servers("db1.t1", 601, "LOCAL") local_list_after = status[2] self.assertNotEqual(local_list_before, local_list_after) status = self.proxy.sharding.lookup_servers("1", 601, "GLOBAL") global_list_after = status[2] self.assertEqual(global_list_before, global_list_after) # The group has changed but no data was transfered. shard_server = MySQLServer.fetch(local_list_after[0][0]) shard_server.connect() self.assertRaises( DatabaseError, shard_server.exec_stmt, "SELECT NAME FROM db1.t1", {"fetch" : True} )
def test_shard_prune(self):
    """Pruning db2.t2 must leave each shard holding only the 100 rows
    that belong to its own range.
    """
    status = self.proxy.sharding.prune_shard("db2.t2")
    self.assertStatus(status, _executor.Job.SUCCESS)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Executed action (_prune_shard_tables).")

    # First shard: rows 1..100.
    status = self.proxy.sharding.lookup_servers("db2.t2", 1, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    shard_uuid = obtained_server_list[0][0]
    shard_server = MySQLServer.fetch(shard_uuid)
    shard_server.connect()
    # assertEqual (not assertTrue on a comparison) so a failure shows
    # both the expected and the actual value.
    rows = shard_server.exec_stmt("SELECT COUNT(*) FROM db2.t2",
                                  {"fetch" : True})
    self.assertEqual(int(rows[0][0]), 100)
    rows = shard_server.exec_stmt("SELECT MAX(userID2) FROM db2.t2",
                                  {"fetch" : True})
    self.assertEqual(int(rows[0][0]), 100)
    rows = shard_server.exec_stmt("SELECT MIN(userID2) FROM db2.t2",
                                  {"fetch" : True})
    self.assertEqual(int(rows[0][0]), 1)

    # Second shard: rows 101..200.
    status = self.proxy.sharding.lookup_servers("db2.t2", 101, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    shard_uuid = obtained_server_list[0][0]
    shard_server = MySQLServer.fetch(shard_uuid)
    shard_server.connect()
    rows = shard_server.exec_stmt("SELECT COUNT(*) FROM db2.t2",
                                  {"fetch" : True})
    self.assertEqual(int(rows[0][0]), 100)
    rows = shard_server.exec_stmt("SELECT MAX(userID2) FROM db2.t2",
                                  {"fetch" : True})
    self.assertEqual(int(rows[0][0]), 200)
    rows = shard_server.exec_stmt("SELECT MIN(userID2) FROM db2.t2",
                                  {"fetch" : True})
    self.assertEqual(int(rows[0][0]), 101)
def test_server_id(self):
    """Two MySQLServer objects built from identical options must compare
    equal and hash to the same value.
    """
    uuid = MySQLServer.discover_uuid(OPTIONS["address"])
    OPTIONS["uuid"] = _uuid.UUID(uuid)
    first = MySQLServer(**OPTIONS)
    second = MySQLServer(**OPTIONS)

    # Distinct objects, same logical server.
    self.assertEqual(first, second)

    # Because they hash alike, a dict keyed by both keeps one entry.
    hash_info = {first: first, second: second}
    self.assertEqual(len(hash_info), 1)
def test_is_connected(self):
    """is_alive() must work without a connection, and is_connected()
    must become True only after connect().
    """
    server = self.server
    # Alive (reachable) does not imply connected.
    self.assertTrue(MySQLServer.is_alive(server))
    self.assertFalse(server.is_connected())
    # After an explicit connect the flag flips.
    server.connect()
    self.assertTrue(server.is_connected())
def _fetch_master_of_group(group_id):
    """Return a connected reference to the master of the group.

    :param group_id: ID of the group whose master needs to be fetched.
    :return: MySQLServer object referring to the group master.
    """
    group = Group.fetch(group_id)
    master = MySQLServer.fetch(group.master)
    master.connect()
    return master
def test_managment(self):
    """Add two servers to a group, verify duplicate handling, then
    remove them again.
    """
    server_1 = MySQLServer(
        uuid=_uuid.UUID("bb75b12b-98d1-414c-96af-9e9d4b179678"),
        address="server_1.mysql.com:3060",
    )
    server_2 = MySQLServer(
        uuid=_uuid.UUID("aa75a12a-98d1-414c-96af-9e9d4b179678"),
        address="server_2.mysql.com:3060",
    )
    for server in (server_1, server_2):
        MySQLServer.add(server)

    group_1 = Group("oracle.com", "First description.")
    Group.add(group_1)

    # Membership: duplicates are rejected, count reflects both adds.
    group_1.add_server(server_1)
    group_1.add_server(server_2)
    self.assertRaises(AssertionError, group_1.add_server, server_1)
    self.assertEqual(len(group_1.servers()), 2)

    # Removal: removing an absent server is rejected, group empties.
    group_1.remove_server(server_1)
    group_1.remove_server(server_2)
    self.assertRaises(AssertionError, group_1.remove_server, server_1)
    self.assertEqual(len(group_1.servers()), 0)
def stop_group_slave(group_master_id, group_slave_id, clear_ref):
    """Stop the slave on the slave group.

    This utility method is the complement of the setup_group_replication
    method and is used to stop the replication on the slave group. Given
    a master group ID and the slave group ID the method stops the slave
    on the slave group and updates the references on both the master and
    the slave group.

    :param group_master_id: The id of the master group.
    :param group_slave_id: The id of the slave group.
    :param clear_ref: The parameter indicates if the stop_group_slave
                      needs to clear the references to the group's slaves.
                      For example when you do a disable shard the shard
                      group still retains the references to its slaves,
                      since when enabled it needs to enable the
                      replication.
    :raises: GroupError if either group or the slave group's master
             cannot be found.
    """
    master_group = Group.fetch(group_master_id)
    slave_group = Group.fetch(group_slave_id)

    if master_group is None:
        raise _errors.GroupError(
            GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_master_id, ))
    if slave_group is None:
        raise _errors.GroupError(
            GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_slave_id, ))

    slave_group_master = MySQLServer.fetch(slave_group.master)
    if slave_group_master is None:
        raise _errors.GroupError(
            GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR %
            (slave_group.master, ))

    if not server_running(slave_group_master):
        # The server is already down. We cannot connect to it to stop
        # replication.
        return
    try:
        slave_group_master.connect()
    except _errors.DatabaseError:
        # Server is not accessible, unable to connect to the server.
        # Best-effort: give up silently rather than fail the operation.
        return

    # Stop replication on the master of the group and clear the
    # references, if clear_ref has been set.
    _replication.stop_slave(slave_group_master, wait=True)
    _replication.reset_slave(slave_group_master, clean=True)
    if clear_ref:
        slave_group.remove_master_group_id()
        master_group.remove_slave_group_id(group_slave_id)
def test_persister_id(self):
    """MySQLServer.add must refuse to register the persister (state
    store) server itself.
    """
    # Locate the backing store and its root credentials.
    instances = tests.utils.MySQLInstances()
    store_address = instances.state_store_address
    store_user = instances.root_user
    store_passwd = instances.root_passwd

    discovered = MySQLServer.discover_uuid(
        address=store_address, user=store_user, passwd=store_passwd
    )
    store_server = MySQLServer(
        _uuid.UUID(discovered), store_address, store_user, store_passwd
    )
    self.assertRaises(_errors.ServerError, MySQLServer.add, store_server)
def stop_group_slaves(master_group_id):
    """Stop the group slaves for the given master group.

    This will be used for use cases that required all the slaves
    replicating from this group to be stopped. An example use case would
    be disabling a shard.

    :param master_group_id: The master group ID.
    :raises: GroupError if the master group cannot be found.
    """
    master_group = Group.fetch(master_group_id)
    if master_group is None:
        raise _errors.GroupError(
            GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (master_group_id, )
        )

    # Stop the replication on all of the registered slaves for the group.
    for slave_group_id in master_group.slave_group_ids:
        slave_group = Group.fetch(slave_group_id)

        # Fetch the Slave Group and the master of the Slave Group.
        slave_group_master = MySQLServer.fetch(slave_group.master)
        if slave_group_master is None:
            # Lazy %-args instead of eager "%" formatting: the message
            # is only interpolated if the record is actually emitted.
            _LOGGER.warning(
                GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR,
                slave_group.master
            )
            continue

        if not server_running(slave_group_master):
            # The server is already down. we cannot connect to it to stop
            # replication.
            continue

        try:
            slave_group_master.connect()
            _replication.stop_slave(slave_group_master, wait=True)
            # Reset the slave to remove the reference of the master so
            # that when the server is used as a slave next it does not
            # complaint about having a different master.
            _replication.reset_slave(slave_group_master, clean=True)
        except _errors.DatabaseError as error:
            # Server is not accessible, unable to connect to the server.
            _LOGGER.warning(
                "Error while unconfiguring group replication between "
                "(%s) and (%s): (%s).",
                master_group_id, slave_group.group_id, error
            )
            continue
def stop_group_slaves(master_group_id):
    """Stop replication on every slave group registered under the given
    master group.

    Needed by operations (e.g. disabling a shard) that require all groups
    replicating from this one to stop doing so.

    :param master_group_id: The master group ID.
    """
    master_group = Group.fetch(master_group_id)
    if master_group is None:
        raise _errors.GroupError(
            GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (master_group_id, )
        )

    for slave_group_id in master_group.slave_group_ids:
        # Resolve the slave group and the server acting as its master.
        slave_group = Group.fetch(slave_group_id)
        group_master = MySQLServer.fetch(slave_group.master)
        if group_master is None:
            _LOGGER.warning(GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR,
                            slave_group.master)
            continue

        if not server_running(group_master):
            # Nothing to do: the server is down and cannot be reached
            # to stop replication.
            continue

        try:
            group_master.connect()
            _replication.stop_slave(group_master, wait=True)
            # Clear the stored master reference so this server can be
            # re-used as a slave later without complaining about a
            # stale master.
            _replication.reset_slave(group_master, clean=True)
        except _errors.DatabaseError as error:
            # Best-effort: log and move on to the next slave group.
            _LOGGER.warning(
                "Error while unconfiguring group replication between "
                "(%s) and (%s): (%s).",
                master_group_id, slave_group.group_id, error)
            continue
def add_shard_range_trigger(group_id, sharding_type, table_name,
                            column_name):
    """Add a trigger on the shard table to ensure that values inserted
    fall within the valid shard ranges.

    :param group_id: The ID of the group on which the trigger definition
                     is applied. The trigger is created on the master of
                     this group.
    :param sharding_type: The datatype supported by the shards. Used to
                          select and name the trigger.
    :param table_name: The name of the table. This is used to name the
                       trigger being created.
    :param column_name: The name of the column in the table being
                        sharded. This is used to create the name of the
                        trigger.
    """
    global_group = Group.fetch(group_id)
    master_server = MySQLServer.fetch(global_group.master)
    master_server.connect()

    db, table = table_name.split(".")
    # The template depends only on the sharding type; look it up once
    # (the original fetched it twice).
    trigger_tmpl = _TRIGGER_DEFN[sharding_type]

    # Create an INSERT trigger on the sharded table.
    trigger_name = db + "." + _TRIGGER_PREFIX_INSERT + table
    create_insert_trigger = trigger_tmpl.format(
        trigger_name=trigger_name,
        operation="INSERT",
        table_name=table_name,
        column_name="NEW." + column_name
    )
    master_server.exec_stmt(create_insert_trigger)

    # Create an UPDATE trigger on the sharded table.
    trigger_name = db + "." + _TRIGGER_PREFIX_UPDATE + table
    create_update_trigger = trigger_tmpl.format(
        trigger_name=trigger_name,
        operation="UPDATE",
        table_name=table_name,
        column_name="NEW." + column_name
    )
    master_server.exec_stmt(create_update_trigger)
def _setup_shard_switch_move(shard_id, source_group_id, destination_group_id, update_only): """Setup the moved shard to map to the new group. :param shard_id: The shard ID of the shard that needs to be moved. :param source_group_id: The group_id of the source shard. :param destination_group_id: The ID of the group to which the shard needs to be moved. :update_only: Only update the state store and skip provisioning. """ #Fetch the Range sharding specification. When we start implementing #heterogenous sharding schemes, we need to find out the type of #sharding scheme and we should use that to find out the sharding #implementation. _, source_shard, _, shard_mapping_defn = \ _services_sharding._verify_and_fetch_shard(shard_id) #Setup replication between the shard group and the global group. _group_replication.setup_group_replication \ (shard_mapping_defn[2], destination_group_id) #set the shard to point to the new group. source_shard.group_id = destination_group_id #Stop the replication between the global server and the original #group associated with the shard. _group_replication.stop_group_slave\ (shard_mapping_defn[2], source_group_id, True) #Reset the read only flag on the source server. source_group = Group.fetch(source_group_id) if source_group is None: raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND % (source_group_id, )) master = MySQLServer.fetch(source_group.master) if master is None: raise _errors.ShardingError( _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND) if not update_only: master.connect() master.read_only = False
def test_sync_readonly_servers(self):
    """Synchronize a read-only secondary with the shard server and verify
    that the replicated rows arrive.
    """
    status = self.proxy.group.lookup_servers("GROUPID3")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    # Pick a SECONDARY from the first two entries and connect to it.
    for idx in range(0, 2):
        if obtained_server_list[idx]["status"] == MySQLServer.SECONDARY:
            slave_uuid = obtained_server_list[idx]["server_uuid"]
            slave_server = MySQLServer.fetch(slave_uuid)
            slave_server.connect()
    # Replicate GROUPID2 -> GROUPID3, wait for the slave to catch up
    # (3s delay / 5 attempts), then tear the replication down again.
    _group_replication.setup_group_replication("GROUPID2", "GROUPID3")
    _replication.synchronize_with_read_only(
        slave_server, self.shard_server, 3, 5
    )
    _group_replication.stop_group_slave("GROUPID2", "GROUPID3", True)
    # The shard server must now be able to serve the replicated rows.
    try:
        rows = self.shard_server.exec_stmt(
            "SELECT NAME FROM db1.t1",
            {"fetch" : True})
    except _errors.DatabaseError:
        raise Exception("Enable Shard failed to enable shard.")
    self.assertEqual(len(rows), 15)
def test_connection_pool(self): """Test connection pool. """ # Configuration uuid = MySQLServer.discover_uuid(OPTIONS["address"]) OPTIONS["uuid"] = uuid = _uuid.UUID(uuid) server_1 = MySQLServer(**OPTIONS) server_2 = MySQLServer(**OPTIONS) cnx_pool = ConnectionPool() # Purge connections and check the number of connections in # the pool. cnx_pool.purge_connections(uuid) self.assertEqual(cnx_pool.get_number_connections(uuid), 0) # Connect and check the number of connections in the pool. server_1.connect() server_2.connect() self.assertEqual(cnx_pool.get_number_connections(uuid), 0) # Delete one of the servers and check the number of # connections in the pool. del server_1 self.assertEqual(cnx_pool.get_number_connections(uuid), 1) # Delete one of the servers and check the number of # connections in the pool. del server_2 self.assertEqual(cnx_pool.get_number_connections(uuid), 2) # Purge connections and check the number of connections in # the pool. However, call purge_connections twice. cnx_pool.purge_connections(uuid) self.assertEqual(cnx_pool.get_number_connections(uuid), 0) cnx_pool.purge_connections(uuid) self.assertEqual(cnx_pool.get_number_connections(uuid), 0)
def setUp(self): """Creates the topology for testing. """ tests.utils.cleanup_environment() self.manager, self.proxy = tests.utils.setup_xmlrpc() self.__options_1 = { "uuid" : _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"), "address" : MySQLInstances().get_address(0), "user" : MySQLInstances().user, "passwd": MySQLInstances().passwd, } uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"]) self.__options_1["uuid"] = _uuid.UUID(uuid_server1) self.__server_1 = MySQLServer(**self.__options_1) MySQLServer.add(self.__server_1) self.__server_1.connect() self.__group_1 = Group("GROUPID1", "First description.") Group.add(self.__group_1) self.__group_1.add_server(self.__server_1) tests.utils.configure_decoupled_master(self.__group_1, self.__server_1) self.__options_2 = { "uuid" : _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"), "address" : MySQLInstances().get_address(1), "user" : MySQLInstances().user, "passwd": MySQLInstances().passwd, } uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"]) self.__options_2["uuid"] = _uuid.UUID(uuid_server2) self.__server_2 = MySQLServer(**self.__options_2) MySQLServer.add(self.__server_2) self.__server_2.connect() self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1") self.__server_2.exec_stmt("CREATE DATABASE db1") self.__server_2.exec_stmt("CREATE TABLE db1.t1" "(userID VARCHAR(20) PRIMARY KEY, name VARCHAR(30))") for i in range(1, 71): self.__server_2.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("a"+str(i), i)) for i in range(101, 301): self.__server_2.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("b"+str(i), i)) for i in range(1001, 1201): self.__server_2.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("c"+str(i), i)) for i in range(10001, 10201): self.__server_2.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("d"+str(i), i)) for i in range(100001, 100201): self.__server_2.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % 
("e"+str(i), i)) self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db2") self.__server_2.exec_stmt("CREATE DATABASE db2") self.__server_2.exec_stmt("CREATE TABLE db2.t2" "(userID VARCHAR(20), salary INT, " "CONSTRAINT FOREIGN KEY(userID) " "REFERENCES db1.t1(userID))") for i in range(1, 71): self.__server_2.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("a"+str(i), i)) for i in range(101, 301): self.__server_2.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("b"+str(i), i)) for i in range(1001, 1201): self.__server_2.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("c"+str(i), i)) for i in range(10001, 10201): self.__server_2.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("d"+str(i), i)) for i in range(100001, 100201): self.__server_2.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("e"+str(i), i)) self.__group_2 = Group("GROUPID2", "Second description.") Group.add(self.__group_2) self.__group_2.add_server(self.__server_2) tests.utils.configure_decoupled_master(self.__group_2, self.__server_2) self.__options_3 = { "uuid" : _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"), "address" : MySQLInstances().get_address(2), "user" : MySQLInstances().user, "passwd": MySQLInstances().passwd, } uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"]) self.__options_3["uuid"] = _uuid.UUID(uuid_server3) self.__server_3 = MySQLServer(**self.__options_3) MySQLServer.add( self.__server_3) self.__server_3.connect() self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1") self.__server_3.exec_stmt("CREATE DATABASE db1") self.__server_3.exec_stmt("CREATE TABLE db1.t1" "(userID VARCHAR(20)PRIMARY KEY, name VARCHAR(30))") for i in range(1, 71): self.__server_3.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("a"+str(i), i)) for i in range(101, 301): self.__server_3.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("b"+str(i), i)) for i in range(1001, 1201): self.__server_3.exec_stmt("INSERT INTO db1.t1 " 
"VALUES('%s', 'TEST %s')" % ("c"+str(i), i)) for i in range(10001, 10201): self.__server_3.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("d"+str(i), i)) for i in range(100001, 100201): self.__server_3.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("e"+str(i), i)) self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db2") self.__server_3.exec_stmt("CREATE DATABASE db2") self.__server_3.exec_stmt("CREATE TABLE db2.t2" "(userID VARCHAR(20), salary INT, " "CONSTRAINT FOREIGN KEY(userID) " "REFERENCES db1.t1(userID))") for i in range(1, 71): self.__server_3.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("a"+str(i), i)) for i in range(101, 301): self.__server_3.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("b"+str(i), i)) for i in range(1001, 1201): self.__server_3.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("c"+str(i), i)) for i in range(10001, 10201): self.__server_3.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("d"+str(i), i)) for i in range(100001, 100201): self.__server_3.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("e"+str(i), i)) self.__group_3 = Group("GROUPID3", "Third description.") Group.add( self.__group_3) self.__group_3.add_server(self.__server_3) tests.utils.configure_decoupled_master(self.__group_3, self.__server_3) self.__options_4 = { "uuid" : _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"), "address" : MySQLInstances().get_address(3), "user" : MySQLInstances().user, "passwd": MySQLInstances().passwd, } uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"]) self.__options_4["uuid"] = _uuid.UUID(uuid_server4) self.__server_4 = MySQLServer(**self.__options_4) MySQLServer.add(self.__server_4) self.__server_4.connect() self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1") self.__server_4.exec_stmt("CREATE DATABASE db1") self.__server_4.exec_stmt("CREATE TABLE db1.t1" "(userID VARCHAR(20)PRIMARY KEY, name VARCHAR(30))") for i in range(1, 71): 
self.__server_4.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("a"+str(i), i)) for i in range(101, 301): self.__server_4.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("b"+str(i), i)) for i in range(1001, 1201): self.__server_4.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("c"+str(i), i)) for i in range(10001, 10201): self.__server_4.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("d"+str(i), i)) for i in range(100001, 100201): self.__server_4.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("e"+str(i), i)) self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db2") self.__server_4.exec_stmt("CREATE DATABASE db2") self.__server_4.exec_stmt("CREATE TABLE db2.t2" "(userID VARCHAR(20), salary INT, " "CONSTRAINT FOREIGN KEY(userID) " "REFERENCES db1.t1(userID))") for i in range(1, 71): self.__server_4.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("a"+str(i), i)) for i in range(101, 301): self.__server_4.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("b"+str(i), i)) for i in range(1001, 1201): self.__server_4.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("c"+str(i), i)) for i in range(10001, 10201): self.__server_4.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("d"+str(i), i)) for i in range(100001, 100201): self.__server_4.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("e"+str(i), i)) self.__group_4 = Group("GROUPID4", "Fourth description.") Group.add( self.__group_4) self.__group_4.add_server(self.__server_4) tests.utils.configure_decoupled_master(self.__group_4, self.__server_4) self.__options_5 = { "uuid" : _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"), "address" : MySQLInstances().get_address(4), "user" : MySQLInstances().user, "passwd": MySQLInstances().passwd, } uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"]) self.__options_5["uuid"] = _uuid.UUID(uuid_server5) self.__server_5 = MySQLServer(**self.__options_5) 
MySQLServer.add(self.__server_5) self.__server_5.connect() self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1") self.__server_5.exec_stmt("CREATE DATABASE db1") self.__server_5.exec_stmt("CREATE TABLE db1.t1" "(userID VARCHAR(20)PRIMARY KEY, name VARCHAR(30))") for i in range(1, 71): self.__server_5.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("a"+str(i), i)) for i in range(101, 301): self.__server_5.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("b"+str(i), i)) for i in range(1001, 1201): self.__server_5.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("c"+str(i), i)) for i in range(10001, 10201): self.__server_5.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("d"+str(i), i)) for i in range(100001, 100201): self.__server_5.exec_stmt("INSERT INTO db1.t1 " "VALUES('%s', 'TEST %s')" % ("e"+str(i), i)) self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db2") self.__server_5.exec_stmt("CREATE DATABASE db2") self.__server_5.exec_stmt("CREATE TABLE db2.t2" "(userID VARCHAR(20), salary INT, " "CONSTRAINT FOREIGN KEY(userID) " "REFERENCES db1.t1(userID))") for i in range(1, 71): self.__server_5.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("a"+str(i), i)) for i in range(101, 301): self.__server_5.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("b"+str(i), i)) for i in range(1001, 1201): self.__server_5.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("c"+str(i), i)) for i in range(10001, 10201): self.__server_5.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("d"+str(i), i)) for i in range(100001, 100201): self.__server_5.exec_stmt("INSERT INTO db2.t2 " "VALUES('%s', %s)" % ("e"+str(i), i)) self.__group_5 = Group("GROUPID5", "Fifth description.") Group.add( self.__group_5) self.__group_5.add_server(self.__server_5) tests.utils.configure_decoupled_master(self.__group_5, self.__server_5) self.__options_6 = { "uuid" : _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"), "address" : 
MySQLInstances().get_address(5), "user" : MySQLInstances().user, "passwd": MySQLInstances().passwd, } uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"]) self.__options_6["uuid"] = _uuid.UUID(uuid_server6) self.__server_6 = MySQLServer(**self.__options_6) MySQLServer.add(self.__server_6) self.__server_6.connect() self.__group_6 = Group("GROUPID6", "Sixth description.") Group.add( self.__group_6) self.__group_6.add_server(self.__server_6) tests.utils.configure_decoupled_master(self.__group_6, self.__server_6) status = self.proxy.sharding.create_definition("RANGE_STRING", "GROUPID1") self.check_xmlrpc_command_result(status, returns=1) status = self.proxy.sharding.add_table(1, "db1.t1", "userID") self.check_xmlrpc_command_result(status) status = self.proxy.sharding.add_table(1, "db2.t2", "userID") self.check_xmlrpc_command_result(status) status = self.proxy.sharding.add_shard( 1, "GROUPID2/a,GROUPID3/b,GROUPID4/c,GROUPID5/d", "ENABLED" ) self.check_xmlrpc_command_result(status) status = self.proxy.sharding.prune_shard("db1.t1") self.check_xmlrpc_command_result(status)
class TestBackupMySQLDump(unittest.TestCase):
    """Test taking a backup from a source group to a destination group.

    The source group handles the source shard and the destination group
    handles the destination shard. The backup and restore helps take up a
    backup of the source shard and setup the destination shard.
    """
    def setUp(self):
        """Configure the existing environment
        """
        from __main__ import mysqldump_path, mysqlclient_path
        # Start from a clean slate, as every other test class in this
        # module does; without it state leaks in from a previous class.
        tests.utils.cleanup_environment()
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        # Source server: the backup image is taken from here.
        self.__options_1 = {
            "uuid" : _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__server_1.connect()

        # Destination server: the backup image is restored here.
        self.__options_2 = {
            "uuid" : _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address":MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()

        # Populate the source with a table, an AFTER INSERT trigger, an
        # index and an event so the dump/restore covers several kinds of
        # schema objects, not just rows.
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS backup_db")
        self.__server_1.exec_stmt("CREATE DATABASE backup_db")
        self.__server_1.exec_stmt("CREATE TABLE backup_db.backup_table"
                                  "(userID INT, name VARCHAR(30))")
        self.__server_1.exec_stmt("CREATE TABLE backup_db.trigger_table"
                                  "(count INT)")
        self.__server_1.exec_stmt("INSERT INTO backup_db.trigger_table "
                                  "VALUES(0)")
        self.__server_1.exec_stmt("CREATE TRIGGER backup_db.backup_table_ai"
                                  " AFTER INSERT ON backup_db.backup_table"
                                  " FOR EACH ROW UPDATE backup_db.trigger_table"
                                  " SET count = count + 1")
        self.__server_1.exec_stmt("INSERT INTO backup_db.backup_table "
                                  "VALUES(101, 'TEST 1')")
        self.__server_1.exec_stmt("INSERT INTO backup_db.backup_table "
                                  "VALUES(202, 'TEST 2')")
        self.__server_1.exec_stmt("CREATE INDEX i1 ON"
                                  " backup_db.backup_table (userID)")
        self.__server_1.exec_stmt("CREATE EVENT backup_db.ev1"
                                  " ON SCHEDULE EVERY 1 DAY DO SELECT 1")
        self.mysqldump_path = mysqldump_path
        self.mysqlclient_path = mysqlclient_path

    def test_backup(self):
        """Back up the source server, restore onto the destination and
        verify the restored rows.
        """
        image = MySQLDump.backup(self.__server_1,
                                 MySQLInstances().backup_user,
                                 MySQLInstances().backup_passwd,
                                 self.mysqldump_path)
        MySQLDump.restore_fabric_server(self.__server_2,
                                        MySQLInstances().restore_user,
                                        MySQLInstances().restore_passwd,
                                        image,
                                        self.mysqlclient_path)
        # ORDER BY makes the row order deterministic; a bare SELECT has
        # no guaranteed order, which made the positional asserts flaky.
        rows = self.__server_2.exec_stmt(
            "SELECT NAME FROM backup_db.backup_table ORDER BY userID",
            {"fetch" : True})
        self.assertEqual(len(rows), 2)
        self.assertEqual(rows[0][0], 'TEST 1')
        self.assertEqual(rows[1][0], 'TEST 2')

    def tearDown(self):
        """Clean up the existing environment
        """
        tests.utils.cleanup_environment()
class TestHashMoveGlobal(unittest.TestCase):
    """Contains unit tests for testing the shard move operation and for
    verifying that the global server configuration remains constant after
    the shard move configuration.
    """

    def assertStatus(self, status, expect):
        """Assert that the last job in *status* finished with *expect*,
        reporting any collected diagnoses on failure.
        """
        items = (item['diagnosis'] for item in status[1] if item['diagnosis'])
        self.assertEqual(status[1][-1]["success"], expect, "\n".join(items))

    def _check_job(self, status, action_name):
        """Assert that the last job succeeded, completed, and executed
        the action *action_name*.
        """
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (%s)." % (action_name, ))

    def _add_server(self, instance_index, uuid_placeholder):
        """Create and register a MySQLServer for the MySQL instance
        *instance_index*; the placeholder UUID is replaced with the one
        discovered from the running server.

        The server is NOT connected here.  Returns (options, server).
        """
        options = {
            "uuid" : _uuid.UUID(uuid_placeholder),
            "address" : MySQLInstances().get_address(instance_index),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        options["uuid"] = _uuid.UUID(
            MySQLServer.discover_uuid(options["address"]))
        server = MySQLServer(**options)
        MySQLServer.add(server)
        return options, server

    def _add_group(self, group_id, description, master):
        """Create a group, add *master* to it and promote it as the
        (decoupled) master.  Returns the group.
        """
        group = Group(group_id, description)
        Group.add(group)
        group.add_server(master)
        tests.utils.configure_decoupled_master(group, master)
        return group

    def _load_shard_data(self, server):
        """Create db1.t1 and db2.t2 on *server* with 1000 rows each."""
        server.exec_stmt("DROP DATABASE IF EXISTS db1")
        server.exec_stmt("CREATE DATABASE db1")
        server.exec_stmt("CREATE TABLE db1.t1"
                         "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            server.exec_stmt("INSERT INTO db1.t1 "
                             "VALUES(%s, 'TEST %s')" % (i, i))
        server.exec_stmt("DROP DATABASE IF EXISTS db2")
        server.exec_stmt("CREATE DATABASE db2")
        # db2.t2 carries a foreign key into db1.t1, so both tables must
        # be moved together during re-sharding.
        server.exec_stmt("CREATE TABLE db2.t2"
                         "(userID INT, salary INT, "
                         "CONSTRAINT FOREIGN KEY(userID) "
                         "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            server.exec_stmt("INSERT INTO db2.t2 "
                             "VALUES(%s, %s)" % (i, i))

    def _row_count(self, server, table):
        """Return SELECT COUNT(*) of *table* on *server* as an int."""
        rows = server.exec_stmt(
            "SELECT COUNT(*) FROM %s" % (table, ), {"fetch" : True})
        return int(rows[0][0])

    def setUp(self):
        """Configure the existing environment
        """
        tests.utils.cleanup_environment()
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        # GROUPID1 is the global group; its server holds no shard data.
        self.__options_1, self.__server_1 = self._add_server(
            0, "{aa75b12b-98d1-414c-96af-9e9d4b179678}")
        self.__server_1.connect()
        self.__group_1 = self._add_group(
            "GROUPID1", "First description.", self.__server_1)

        # GROUPID2..GROUPID5 host the four hash shards, each pre-loaded
        # with 1000 rows in db1.t1 and db2.t2.
        self.__options_2, self.__server_2 = self._add_server(
            1, "{aa45b12b-98d1-414c-96af-9e9d4b179678}")
        self.__server_2.connect()
        self._load_shard_data(self.__server_2)
        self.__group_2 = self._add_group(
            "GROUPID2", "Second description.", self.__server_2)

        self.__options_3, self.__server_3 = self._add_server(
            2, "{bb75b12b-98d1-414c-96af-9e9d4b179678}")
        self.__server_3.connect()
        self._load_shard_data(self.__server_3)
        self.__group_3 = self._add_group(
            "GROUPID3", "Third description.", self.__server_3)

        self.__options_4, self.__server_4 = self._add_server(
            3, "{bb45b12b-98d1-414c-96af-9e9d4b179678}")
        self.__server_4.connect()
        self._load_shard_data(self.__server_4)
        self.__group_4 = self._add_group(
            "GROUPID4", "Fourth description.", self.__server_4)

        self.__options_5, self.__server_5 = self._add_server(
            4, "{cc75b12b-98d1-414c-96af-9e9d4b179678}")
        self.__server_5.connect()
        self._load_shard_data(self.__server_5)
        self.__group_5 = self._add_group(
            "GROUPID5", "Fifth description.", self.__server_5)

        # GROUPID6 is the empty move destination; its server is only
        # registered here and connected on demand by the tests.
        self.__options_6, self.__server_6 = self._add_server(
            5, "{cc45b12b-98d1-414c-96af-9e9d4b179678}")
        self.__group_6 = self._add_group(
            "GROUPID6", "Sixth description.", self.__server_6)

        # Define a HASH mapping over both tables and add four shards.
        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self._check_job(status, "_define_shard_mapping")
        self.assertEqual(status[2], 1)
        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self._check_job(status, "_add_shard_mapping")
        status = self.proxy.sharding.add_table(1, "db2.t2", "userID")
        self._check_job(status, "_add_shard_mapping")
        status = self.proxy.sharding.add_shard(
            1, "GROUPID2,GROUPID3,GROUPID4,GROUPID5", "ENABLED"
        )
        self._check_job(status, "_add_shard")
        status = self.proxy.sharding.prune_shard("db1.t1")
        self._check_job(status, "_prune_shard_tables")

    def _move_shard_and_verify(self, shard_id, source_server):
        """Move shard *shard_id* from *source_server*'s group to GROUPID6
        and verify the outcome.

        Checks that GROUPID6 received every tuple the source had, that the
        source group is detached from the global server (global updates no
        longer reach it), and that every remaining shard group receives
        rows inserted on the global group.
        """
        before_t1 = self._row_count(source_server, "db1.t1")
        before_t2 = self._row_count(source_server, "db2.t2")

        status = self.proxy.sharding.move_shard(shard_id, "GROUPID6")
        self._check_job(status, "_setup_resharding_switch")

        # The destination group must now hold every tuple the source had.
        self.__server_6.connect()
        self.assertEqual(before_t1, self._row_count(self.__server_6, "db1.t1"))
        self.assertEqual(before_t2, self._row_count(self.__server_6, "db2.t2"))

        # Insert rows on the global group and give replication time to
        # fan them out to the shard groups.
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        time.sleep(5)

        # The source group had its shard moved away, so it must no longer
        # be wired to the global server.
        try:
            source_server.exec_stmt(
                "SELECT COUNT(*) FROM global.global_table",
                {"fetch" : True}
            )
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass

        # Every remaining shard group received the ten global rows.
        all_servers = (self.__server_2, self.__server_3, self.__server_4,
                       self.__server_5, self.__server_6)
        for server in all_servers:
            if server is source_server:
                continue
            self.assertEqual(
                self._row_count(server, "global.global_table"), 10)

    def test_move_shard_1(self):
        '''Test the move of shard 1 and the global server configuration
        after that. The test moves shard 1 from GROUPID2 to GROUPID6.
        '''
        self._move_shard_and_verify("1", self.__server_2)

    def test_move_shard_2(self):
        '''Test the move of shard 2 and the global server configuration
        after that. The test moves shard 2 from GROUPID3 to GROUPID6.
        '''
        self._move_shard_and_verify("2", self.__server_3)

    def test_move_shard_3(self):
        '''Test the move of shard 3 and the global server configuration
        after that. The test moves shard 3 from GROUPID4 to GROUPID6.
        '''
        self._move_shard_and_verify("3", self.__server_4)

    def test_move_shard_4(self):
        '''Test the move of shard 4 and the global server configuration
        after that. The test moves shard 4 from GROUPID5 to GROUPID6.
        '''
        self._move_shard_and_verify("4", self.__server_5)

    def tearDown(self):
        """Clean up the existing environment
        """
        # Disable and drop every shard before tearing the topology down.
        for shard_id in range(1, 6):
            self.proxy.sharding.disable_shard(shard_id)
            self.proxy.sharding.remove_shard(shard_id)
        tests.utils.cleanup_environment()
        tests.utils.teardown_xmlrpc(self.manager, self.proxy)
class TestShardingPrune(unittest.TestCase):
    """Tests pruning of a RANGE sharded table.

    setUp builds one global group (GROUPID1) and five shard groups
    (GROUPID2..GROUPID6), loads the same 600 rows (userID 1..600) into
    every shard server and registers the ranges 1, 101, 201, 301 and
    401.  prune_shard must then delete, on each shard, every row whose
    userID falls outside that shard's range.
    """

    #Group ids used for all six groups; also iterated during teardown.
    _GROUP_IDS = ("GROUPID1", "GROUPID2", "GROUPID3",
                  "GROUPID4", "GROUPID5", "GROUPID6")

    def assertStatus(self, status, expect):
        """Assert that the last job reported in *status* finished with
        the *expect* result, showing collected diagnoses on failure.
        """
        items = (item['diagnosis'] for item in status[1] if item['diagnosis'])
        self.assertEqual(status[1][-1]["success"], expect, "\n".join(items))

    def _check_job(self, status, description):
        """Assert *status* is a successful, complete job whose last
        executed action matches *description*.
        """
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"], description)

    def _create_server(self, address_index, load_data=True):
        """Create and register one MySQL server, optionally loading the
        shard test data.

        :param address_index: Index into MySQLInstances' address list.
        :param load_data: When True, create db1.t1 and insert rows with
                          userID 1..600.
        :return: The registered MySQLServer instance.
        """
        options = {
            "address" : MySQLInstances().get_address(address_index),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        #The server's real UUID is discovered from the running instance.
        options["uuid"] = _uuid.UUID(
            MySQLServer.discover_uuid(options["address"])
        )
        server = MySQLServer(**options)
        MySQLServer.add(server)
        if load_data:
            server.connect()
            server.exec_stmt("DROP DATABASE IF EXISTS db1")
            server.exec_stmt("CREATE DATABASE db1")
            server.exec_stmt("CREATE TABLE db1.t1"
                             "(userID INT, name VARCHAR(30))")
            for i in range(1, 601):
                server.exec_stmt("INSERT INTO db1.t1 "
                                 "VALUES(%s, 'TEST %s')" % (i, i))
        return server

    def _create_group(self, group_id, description, server):
        """Create *group_id*, add *server* to it and make it master."""
        group = Group(group_id, description)
        Group.add(group)
        group.add_server(server)
        tests.utils.configure_decoupled_master(group, server)
        return group

    def setUp(self):
        """Creates the following topology for testing,

        GROUPID1 - localhost:13001, localhost:13002 - Global Group
        GROUPID2 - localhost:13003, localhost:13004 - shard 1
        GROUPID3 - localhost:13005, localhost:13006 - shard 2
        """
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        #The global group carries no shard data, hence no test rows.
        self.__server_1 = self._create_server(0, load_data=False)
        self.__group_1 = self._create_group(
            "GROUPID1", "First description.", self.__server_1)

        self.__server_2 = self._create_server(1)
        self.__group_2 = self._create_group(
            "GROUPID2", "Second description.", self.__server_2)

        self.__server_3 = self._create_server(2)
        self.__group_3 = self._create_group(
            "GROUPID3", "Third description.", self.__server_3)

        self.__server_4 = self._create_server(3)
        self.__group_4 = self._create_group(
            "GROUPID4", "Fourth description.", self.__server_4)

        self.__server_5 = self._create_server(4)
        self.__group_5 = self._create_group(
            "GROUPID5", "Fifth description.", self.__server_5)

        self.__server_6 = self._create_server(5)
        self.__group_6 = self._create_group(
            "GROUPID6", "Sixth description.", self.__server_6)

        #Define a RANGE sharding scheme rooted at the global group.
        status = self.proxy.sharding.create_definition("RANGE", "GROUPID1")
        self._check_job(status, "Executed action (_define_shard_mapping).")
        self.assertEqual(status[2], 1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self._check_job(status, "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_shard(
            1,
            "GROUPID2/1,GROUPID3/101,GROUPID4/201,"
            "GROUPID5/301,GROUPID6/401",
            "ENABLED"
        )
        self._check_job(status, "Executed action (_add_shard).")

    def _verify_shard(self, key, count, min_id, max_id):
        """Look up the shard server for *key* and assert its row count
        and userID bounds after pruning.
        """
        status = self.proxy.sharding.lookup_servers("db1.t1", key, "LOCAL")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        obtained_server_list = status[2]
        shard_uuid = obtained_server_list[0][0]
        shard_server = MySQLServer.fetch(shard_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1", {"fetch" : True})
        self.assertEqual(int(rows[0][0]), count)
        rows = shard_server.exec_stmt(
            "SELECT MAX(userID) FROM db1.t1", {"fetch" : True})
        self.assertEqual(int(rows[0][0]), max_id)
        rows = shard_server.exec_stmt(
            "SELECT MIN(userID) FROM db1.t1", {"fetch" : True})
        self.assertEqual(int(rows[0][0]), min_id)

    def test_prune_shard(self):
        """Prune db1.t1 and verify that each shard retains exactly the
        rows belonging to its range.
        """
        status = self.proxy.sharding.prune_shard("db1.t1")
        self._check_job(status, "Executed action (_prune_shard_tables).")

        #Ranges: 1-100, 101-200, 201-300, 301-400 and 401-600 (the last
        #shard is open-ended, so it keeps 200 of the 600 rows).
        self._verify_shard(1, 100, 1, 100)
        self._verify_shard(101, 100, 101, 200)
        self._verify_shard(202, 100, 201, 300)
        self._verify_shard(303, 100, 301, 400)
        self._verify_shard(404, 200, 401, 600)

    def tearDown(self):
        """Drop the test data and unwind the shards, the shard mapping
        and the groups created in setUp.
        """
        for server in (self.__server_2, self.__server_3, self.__server_4,
                       self.__server_5, self.__server_6):
            server.exec_stmt("DROP TABLE db1.t1")
            server.exec_stmt("DROP DATABASE db1")

        #Shards must be disabled before they can be removed.
        for shard_id in ("1", "2", "3", "4", "5"):
            status = self.proxy.sharding.disable_shard(shard_id)
            self._check_job(status, "Executed action (_disable_shard).")

        for shard_id in ("1", "2", "3", "4", "5"):
            status = self.proxy.sharding.remove_shard(shard_id)
            self._check_job(status, "Executed action (_remove_shard).")

        status = self.proxy.sharding.remove_table("db1.t1")
        self._check_job(status, "Executed action (_remove_shard_mapping).")

        status = self.proxy.sharding.remove_definition("1")
        self._check_job(status, "Executed action (_remove_shard_mapping_defn).")

        #Demote every group master before tearing the groups down.
        for group_id in self._GROUP_IDS:
            self.proxy.group.demote(group_id)

        for group_id in self._GROUP_IDS:
            status = self.proxy.group.lookup_servers(group_id)
            self.assertEqual(status[0], True)
            self.assertEqual(status[1], "")
            obtained_server_list = status[2]
            status = self.proxy.group.remove(
                group_id, obtained_server_list[0]["server_uuid"]
            )
            self._check_job(status, "Executed action (_remove_server).")
            status = self.proxy.group.destroy(group_id)
            self._check_job(status, "Executed action (_destroy_group).")

        tests.utils.cleanup_environment()
        tests.utils.teardown_xmlrpc(self.manager, self.proxy)
def _setup_shard_switch_split(shard_id,  source_group_id, destination_group_id,
                              split_value, prune_limit, cmd, update_only):
    """Setup the moved shard to map to the new group.

    Retires the shard being split, registers the two shards produced by
    the split, waits for connectors to refresh their shard caches, and
    re-enables writes on both group masters.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destination_group_id: The ID of the group to which the shard
                                 needs to be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be done in
                        one batch.
    :param cmd: Indicates the type of re-sharding operation.
    :param update_only: Only update the state store and skip provisioning.

    NOTE(review): ``cmd`` is not referenced in this body — presumably
    kept so all resharding handlers share one signature; confirm.
    """
    #Fetch the Range sharding specification.
    range_sharding_spec, source_shard, shard_mappings, shard_mapping_defn = \
        _services_sharding.verify_and_fetch_shard(shard_id)

    #Disable the old shard
    source_shard.disable()

    #Remove the old shard.
    range_sharding_spec.remove()
    source_shard.remove()

    destination_group = Group.fetch(destination_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destination_group_id, ))
    destn_group_master = MySQLServer.fetch(destination_group.master)
    if destn_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    destn_group_master.connect()

    #Make the destination group as read only to disable updates until the
    #connectors update their caches, thus avoiding inconsistency.
    destn_group_master.read_only = True

    #Add the new shards. Generate new shard IDs for the shard being
    #split and also for the shard that is created as a result of the split.
    #Both start DISABLED and are enabled only after replication is set up.
    new_shard_1 = Shards.add(source_shard.group_id, "DISABLED")
    new_shard_2 = Shards.add(destination_group_id, "DISABLED")

    #Both of the shard mappings associated with this shard_id should
    #be of the same sharding type. Hence it is safe to use one of the
    #shard mappings.
    if shard_mappings[0].type_name == "HASH":
        #In the case of a split involving a HASH sharding scheme,
        #the shard that is split gets a new shard_id, while the split
        #gets the new computed lower_bound and also a new shard id.
        #NOTE: How the shard that is split retains its lower_bound.
        HashShardingSpecification.add_hash_split(
            range_sharding_spec.shard_mapping_id,
            new_shard_1.shard_id,
            range_sharding_spec.lower_bound
        )
        HashShardingSpecification.add_hash_split(
            range_sharding_spec.shard_mapping_id,
            new_shard_2.shard_id,
            split_value
        )
    else:
        #Add the new ranges. Note that the shard being split retains
        #its lower_bound, while the new shard gets the computed,
        #lower_bound.
        RangeShardingSpecification.add(
            range_sharding_spec.shard_mapping_id,
            range_sharding_spec.lower_bound,
            new_shard_1.shard_id
        )
        RangeShardingSpecification.add(
            range_sharding_spec.shard_mapping_id,
            split_value,
            new_shard_2.shard_id
        )

    #The sleep ensures that the connector have refreshed their caches with the
    #new shards that have been added as a result of the split.
    time.sleep(_utils.TTL)

    #The source shard group master would have been marked as read only
    #during the sync. Remove the read_only flag.
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))
    source_group_master = MySQLServer.fetch(source_group.master)
    if source_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    source_group_master.connect()

    #Kill all the existing connections on the servers
    source_group.kill_connections_on_servers()

    #Allow connections on the source group master
    source_group_master.read_only = False

    #Allow connections on the destination group master
    destn_group_master.read_only = False

    #Setup replication for the new group from the global server
    #NOTE(review): shard_mapping_defn[2] appears to be the global group id
    #of the mapping definition — confirm against verify_and_fetch_shard.
    _group_replication.setup_group_replication \
            (shard_mapping_defn[2],  destination_group_id)

    #Enable the split shards
    new_shard_1.enable()
    new_shard_2.enable()

    #Trigger changing the mappings for the shard that was copied
    if not update_only:
        _events.trigger_within_procedure(
            PRUNE_SHARDS, new_shard_1.shard_id, new_shard_2.shard_id,
            prune_limit
        )
    def _run(self):
        """Function that verifies servers' availabilities.

        Runs until self.__check is cleared.  Each pass over the group:
        FAULTY servers only get their connections killed; reachable
        servers optionally get a deep replication check; unreachable
        servers are quarantined and, after enough consecutive failed
        detections, marked FAULTY and a REPORT_FAILURE event is fired.
        """
        from mysql.fabric.server import (
            Group,
            MySQLServer,
            ConnectionManager,
        )
        ignored_status = [MySQLServer.FAULTY]
        quarantine = {}     # server uuid -> consecutive failed detections
        interval = FailureDetector._DETECTION_INTERVAL
        detections = FailureDetector._DETECTIONS
        detection_timeout = FailureDetector._DETECTION_TIMEOUT
        connection_manager = ConnectionManager()
        slave_deep_checks = FailureDetector._SLAVE_DEEP_CHECKS

        #This thread needs its own persistence (state-store) session.
        _persistence.init_thread()

        while self.__check:
            try:
                unreachable = set()
                group = Group.fetch(self.__group_id)
                if group is not None:
                    for server in group.servers():
                        if server.status in ignored_status:
                            ### Server is FAULTY
                            connection_manager.kill_connections(server)
                            continue
                        else:
                            ### Server is Not FAULTY
                            if MySQLServer.is_alive(server, detection_timeout):
                                ### Server is alive
                                ### check depends on `slave_deep_checks` parameter
                                if slave_deep_checks:
                                    ### When server is alive and status != FAULTY
                                    is_master = (group.master == server.uuid)
                                    if not is_master:
                                        ### Checking master is dead or alive.
                                        #NOTE(review): group.master could be
                                        #None here (no master set); confirm
                                        #fetch/is_alive tolerate that.
                                        master_server = MySQLServer.fetch(group.master)
                                        if MySQLServer.is_alive(master_server, detection_timeout):
                                            ### Checking is replication valid or not if master is alive.
                                            server.connect()
                                            slave_issues, why_slave_issues = \
                                                _replication.check_slave_issues(server)
                                            if slave_issues:
                                                if (why_slave_issues['io_error'] and \
                                                    why_slave_issues['io_errno'] == 2003):
                                                    ### Nothing to do during reconnecting, just logging
                                                    #(2003: "Can't connect to MySQL server")
                                                    _LOGGER.info(why_slave_issues)
                                                else:
                                                    ### If slave threads are not running, set status to SPARE
                                                    server.status = MySQLServer.SPARE
                                            ### Done slave_issues.
                                            server.disconnect()
                                        ### Endif MySQLServer.is_alive(master_server, detection_timeout)
                                    ### Endif not is_master
                                ### Endif slave_deep_checks
                                continue
                            ### Else MySQLServer.is_alive(server, detection_timeout)
                            else:
                                unreachable.add(server.uuid)
                                _LOGGER.warning(
                                    "Server (%s) in group (%s) is unreachable.",
                                    server.uuid, self.__group_id
                                )
                                #Count consecutive detections; a server is
                                #"unstable" once it fails `detections` times.
                                unstable = False
                                failed_attempts = 0
                                if server.uuid not in quarantine:
                                    quarantine[server.uuid] = failed_attempts = 1
                                else:
                                    failed_attempts = quarantine[server.uuid] + 1
                                    quarantine[server.uuid] = failed_attempts
                                if failed_attempts >= detections:
                                    unstable = True

                                can_set_faulty = group.can_set_server_faulty(
                                    server, get_time()
                                )
                                if unstable and can_set_faulty:
                                    # We have to make this transactional and make the
                                    # failover (i.e. report failure) robust to failures.
                                    # Otherwise, a master might be set to faulty and
                                    # a new one never promoted.
                                    server.status = MySQLServer.FAULTY
                                    connection_manager.kill_connections(server)

                                    procedures = trigger("REPORT_FAILURE", None,
                                        str(server.uuid),
                                        threading.current_thread().name,
                                        MySQLServer.FAULTY, False
                                    )
                                    executor = _executor.Executor()
                                    for procedure in procedures:
                                        executor.wait_for_procedure(procedure)
                            ### Endif MySQLServer.is_alive(server, detection_timeout)
                        ### Endif server.status in ignored_status
                    ### End for server in group.servers()
                ### Endif group is not None

                #Drop quarantine entries for servers that came back.
                #NOTE(review): deleting while iterating .keys() is safe on
                #Python 2 (keys() returns a list) but raises RuntimeError
                #on Python 3 — confirm target interpreter.
                for uuid in quarantine.keys():
                    if uuid not in unreachable:
                        del quarantine[uuid]
            except (_errors.ExecutorError, _errors.DatabaseError):
                #Transient store/executor errors: retry on the next pass.
                pass
            except Exception as error:
                _LOGGER.exception(error)
            time.sleep(interval)

        _persistence.deinit_thread()
class TestHashMoveGlobal(tests.utils.TestCase):
    """Contains unit tests for testing the shard move operation and for
    verifying that the global server configuration remains constant after
    the shard move configuration.
    """

    def setUp(self):
        """Configure the existing environment.

        Builds one global group (GROUPID1), four populated shard groups
        (GROUPID2..GROUPID5, each with 1000 rows in db1.t1 and db2.t2),
        and one empty destination group (GROUPID6), then creates a HASH
        sharding definition over db1.t1/db2.t2 and prunes the shards.
        """
        # Start from a clean Fabric state and grab an XML-RPC proxy.
        tests.utils.cleanup_environment()
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        # GROUPID1: the global group; server 1 is its (decoupled) master.
        self.__options_1 = {
            "uuid" :  _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        # The hard-coded uuid above is a placeholder; the real server uuid
        # is discovered from the running instance.
        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__server_1.connect()

        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        # GROUPID2: first shard group, pre-loaded with 1000 rows each in
        # db1.t1 and db2.t2 (t2 carries a FK to t1).
        self.__options_2 = {
            "uuid" :  _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_2.exec_stmt("CREATE DATABASE db2")
        self.__server_2.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))

        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        # GROUPID3: second shard group, same fixture data as GROUPID2.
        self.__options_3 = {
            "uuid" :  _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(2),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add(self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_3.exec_stmt("CREATE DATABASE db2")
        self.__server_3.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))

        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add(self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        # GROUPID4: third shard group, same fixture data.
        self.__options_4 = {
            "uuid" :  _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(3),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_4.exec_stmt("CREATE DATABASE db2")
        self.__server_4.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))

        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add(self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        # GROUPID5: fourth shard group, same fixture data.
        self.__options_5 = {
            "uuid" :  _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(4),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_5.exec_stmt("CREATE DATABASE db2")
        self.__server_5.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))

        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add(self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        # GROUPID6: empty destination group for the shard moves.  Note:
        # server 6 is registered but not connected here; the tests connect
        # it after the move.
        self.__options_6 = {
            "uuid" :  _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address"  : MySQLInstances().get_address(5),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)

        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add(self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        # HASH-shard db1.t1 and db2.t2 on userID across GROUPID2..5,
        # with GROUPID1 as the global group.
        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.check_xmlrpc_command_result(status, returns=1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_table(1, "db2.t2", "userID")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_shard(
            1,
            "GROUPID2,GROUPID3,GROUPID4,GROUPID5",
            "ENABLED"
        )
        self.check_xmlrpc_command_result(status)

        # Remove rows that do not hash to each shard's own range.
        status = self.proxy.sharding.prune_shard("db1.t1")
        self.check_xmlrpc_command_result(status)

    def test_move_shard_1(self):
        '''Test the move of shard 1 and the global server configuration
        after that. The test moves shard 1 from GROUPID2 to GROUPID6.
        After the move is done, it verifies the count on GROUPID6 to check
        that the group has all the tuples from the earlier group. Now it
        fires an INSERT on the global group and verifies that all the
        shards have got the inserted tuples. GROUPID2 should not have
        received the values since it has had the shard moved away from it.
        '''
        # Row counts in the shard's current group before the move.
        row_cnt_shard_before_move_db1_t1 = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db2_t2 = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])

        status = self.proxy.sharding.move_shard("1", "GROUPID6")
        self.check_xmlrpc_command_result(status)

        self.__server_6.connect()
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])

        # The move must neither lose nor duplicate rows.
        self.assertTrue(
            row_cnt_shard_before_move_db1_t1 ==
            row_cnt_shard_after_move_db1_t1
        )
        self.assertTrue(
            row_cnt_shard_before_move_db2_t2 ==
            row_cnt_shard_after_move_db2_t2
        )

        # Enter data into the global server.
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        # Give global-group replication time to fan out to the shards.
        # NOTE(review): fixed 5s wait — assumed sufficient; confirm.
        time.sleep(5)

        try:
            # Verify that the data is not there in the first shard.  The
            # non-DatabaseError Exception below propagates and fails the
            # test if the query unexpectedly succeeds.
            global_table_count = self.__server_2.exec_stmt(
                        "SELECT COUNT(*) FROM global.global_table",
                        {"fetch" : True}
                    )
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass

        # Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        # Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        # Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        # Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_move_shard_2(self):
        '''Test the move of shard 2 and the global server configuration
        after that. The test moves shard 2 from GROUPID3 to GROUPID6.
        After the move is done, it verifies the count on GROUPID6 to check
        that the group has all the tuples from the earlier group. Now it
        fires an INSERT on the global group and verifies that all the
        shards have got the inserted tuples. GROUPID3 should not have
        received the values since it has had the shard moved away from it.
        '''
        row_cnt_shard_before_move_db1_t1 = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db2_t2 = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])

        status = self.proxy.sharding.move_shard("2", "GROUPID6")
        self.check_xmlrpc_command_result(status)

        self.__server_6.connect()
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])

        # The move must neither lose nor duplicate rows.
        self.assertTrue(
            row_cnt_shard_before_move_db1_t1 ==
            row_cnt_shard_after_move_db1_t1
        )
        self.assertTrue(
            row_cnt_shard_before_move_db2_t2 ==
            row_cnt_shard_after_move_db2_t2
        )

        # Enter data into the global server.
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        # NOTE(review): fixed 5s wait — assumed sufficient; confirm.
        time.sleep(5)

        try:
            # Verify that the data is not there in the second shard.
            global_table_count = self.__server_3.exec_stmt(
                        "SELECT COUNT(*) FROM global.global_table",
                        {"fetch" : True}
                    )
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass

        # Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        # Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        # Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        # Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_move_shard_3(self):
        '''Test the move of shard 3 and the global server configuration
        after that. The test moves shard 3 from GROUPID4 to GROUPID6.
        After the move is done, it verifies the count on GROUPID6 to check
        that the group has all the tuples from the earlier group. Now it
        fires an INSERT on the global group and verifies that all the
        shards have got the inserted tuples. GROUPID4 should not have
        received the values since it has had the shard moved away from it.
        '''
        row_cnt_shard_before_move_db1_t1 = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db2_t2 = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])

        status = self.proxy.sharding.move_shard("3", "GROUPID6")
        self.check_xmlrpc_command_result(status)

        self.__server_6.connect()
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])

        # The move must neither lose nor duplicate rows.
        self.assertTrue(
            row_cnt_shard_before_move_db1_t1 ==
            row_cnt_shard_after_move_db1_t1
        )
        self.assertTrue(
            row_cnt_shard_before_move_db2_t2 ==
            row_cnt_shard_after_move_db2_t2
        )

        # Enter data into the global server.
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        # NOTE(review): fixed 5s wait — assumed sufficient; confirm.
        time.sleep(5)

        try:
            # Verify that the data is not there in the third shard's
            # original group.
            global_table_count = self.__server_4.exec_stmt(
                        "SELECT COUNT(*) FROM global.global_table",
                        {"fetch" : True}
                    )
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass

        # Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        # Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        # Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        # Verify that the data is there in the third shard's new group.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def test_move_shard_4(self):
        '''Test the move of shard 4 and the global server configuration
        after that. The test moves shard 4 from GROUPID5 to GROUPID6.
        After the move is done, it verifies the count on GROUPID6 to check
        that the group has all the tuples from the earlier group. Now it
        fires an INSERT on the global group and verifies that all the
        shards have got the inserted tuples. GROUPID5 should not have
        received the values since it has had the shard moved away from it.
        '''
        row_cnt_shard_before_move_db1_t1 = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db2_t2 = self.__server_5.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_before_move_db1_t1 = \
            int(row_cnt_shard_before_move_db1_t1[0][0])
        row_cnt_shard_before_move_db2_t2 = \
            int(row_cnt_shard_before_move_db2_t2[0][0])

        status = self.proxy.sharding.move_shard("4", "GROUPID6")
        self.check_xmlrpc_command_result(status)

        self.__server_6.connect()
        row_cnt_shard_after_move_db1_t1 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db1.t1",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db2_t2 = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM db2.t2",
                    {"fetch" : True}
                )
        row_cnt_shard_after_move_db1_t1 = \
            int(row_cnt_shard_after_move_db1_t1[0][0])
        row_cnt_shard_after_move_db2_t2 = \
            int(row_cnt_shard_after_move_db2_t2[0][0])

        # The move must neither lose nor duplicate rows.
        self.assertTrue(
            row_cnt_shard_before_move_db1_t1 ==
            row_cnt_shard_after_move_db1_t1
        )
        self.assertTrue(
            row_cnt_shard_before_move_db2_t2 ==
            row_cnt_shard_after_move_db2_t2
        )

        # Enter data into the global server.
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        # NOTE(review): fixed 5s wait — assumed sufficient; confirm.
        time.sleep(5)

        try:
            # Verify that the data is not there in the fourth shard's
            # original group.
            global_table_count = self.__server_5.exec_stmt(
                        "SELECT COUNT(*) FROM global.global_table",
                        {"fetch" : True}
                    )
            raise Exception("Server should not be connected to global server")
        except _errors.DatabaseError:
            pass

        # Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        # Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        # Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        # Verify that the data is there in the fourth shard's new group.
        global_table_count = self.__server_6.exec_stmt(
                    "SELECT COUNT(*) FROM global.global_table",
                    {"fetch" : True}
                )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def tearDown(self):
        """Clean up the existing environment
        """
        tests.utils.cleanup_environment()
def _run(self): """Function that verifies servers' availabilities. """ from mysql.fabric.server import ( Group, MySQLServer, ConnectionManager, ) ignored_status = [MySQLServer.FAULTY] quarantine = {} interval = FailureDetector._DETECTION_INTERVAL detections = FailureDetector._DETECTIONS detection_timeout = FailureDetector._DETECTION_TIMEOUT connection_manager = ConnectionManager() _persistence.init_thread() while self.__check: try: unreachable = set() group = Group.fetch(self.__group_id) if group is not None: for server in group.servers(): if server.status in ignored_status or \ MySQLServer.is_alive(server, detection_timeout): if server.status == MySQLServer.FAULTY: connection_manager.kill_connections(server) continue unreachable.add(server.uuid) _LOGGER.warning( "Server (%s) in group (%s) is unreachable.", server.uuid, self.__group_id ) unstable = False failed_attempts = 0 if server.uuid not in quarantine: quarantine[server.uuid] = failed_attempts = 1 else: failed_attempts = quarantine[server.uuid] + 1 quarantine[server.uuid] = failed_attempts if failed_attempts >= detections: unstable = True can_set_faulty = group.can_set_server_faulty( server, get_time() ) if unstable and can_set_faulty: # We have to make this transactional and make the # failover (i.e. report failure) robust to failures. # Otherwise, a master might be set to faulty and # a new one never promoted. server.status = MySQLServer.FAULTY connection_manager.kill_connections(server) procedures = trigger("REPORT_FAILURE", None, str(server.uuid), threading.current_thread().name, MySQLServer.FAULTY, False ) executor = _executor.Executor() for procedure in procedures: executor.wait_for_procedure(procedure) for uuid in quarantine.keys(): if uuid not in unreachable: del quarantine[uuid] except (_errors.ExecutorError, _errors.DatabaseError): pass except Exception as error: _LOGGER.exception(error) time.sleep(interval / detections) _persistence.deinit_thread()
def setUp(self):
    """Configure the existing environment.

    Builds three server groups (GROUPID1..3, mixing one reachable server
    and one dummy-address server per group where noted), eleven extra
    empty groups (GROUPID4..14), five RANGE shard mappings over them,
    eleven shards with their range specifications, and the expected
    XML-RPC result sets the list/lookup tests compare against.
    """
    self.manager, self.proxy = tests.utils.setup_xmlrpc()

    # GROUPID1: two servers with dummy (unresolvable) addresses; the
    # group is left without a master.
    self.__options_1 = {
        "uuid": _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
        "address": "server_1.mysql.com:3060",
        "user": MySQLInstances().user,
        "passwd": MySQLInstances().passwd,
    }
    self.__server_1 = MySQLServer(**self.__options_1)
    MySQLServer.add(self.__server_1)

    self.__options_2 = {
        "uuid": _uuid.UUID("{aa75a12a-98d1-414c-96af-9e9d4b179678}"),
        "address": "server_2.mysql.com:3060",
        "user": MySQLInstances().user,
        "passwd": MySQLInstances().passwd,
    }
    self.__server_2 = MySQLServer(**self.__options_2)
    MySQLServer.add(self.__server_2)

    self.__group_1 = Group("GROUPID1", "First description.")
    Group.add(self.__group_1)
    self.__group_1.add_server(self.__server_1)
    self.__group_1.add_server(self.__server_2)

    # GROUPID2: server 3 is a real instance (uuid discovered from it),
    # server 4 is a dummy; server 3 becomes the master.
    self.__options_3 = {
        "uuid": _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
        "address": MySQLInstances().get_address(0),
        "user": MySQLInstances().user,
        "passwd": MySQLInstances().passwd,
    }
    uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
    self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
    self.__server_3 = MySQLServer(**self.__options_3)
    MySQLServer.add(self.__server_3)

    self.__options_4 = {
        "uuid": _uuid.UUID("{dd75a12a-98d1-414c-96af-9e9d4b179678}"),
        "address": "server_4.mysql.com:3060",
        "user": MySQLInstances().user,
        "passwd": MySQLInstances().passwd,
    }
    self.__server_4 = MySQLServer(**self.__options_4)
    MySQLServer.add(self.__server_4)

    self.__group_2 = Group("GROUPID2", "Second description.")
    Group.add(self.__group_2)
    self.__group_2.add_server(self.__server_3)
    self.__group_2.add_server(self.__server_4)
    tests.utils.configure_decoupled_master(self.__group_2, self.__server_3)

    # GROUPID3: server 5 real (master), server 6 dummy.
    self.__options_5 = {
        "uuid": _uuid.UUID("{ee75b12b-98d1-414c-96af-9e9d4b179678}"),
        "address": MySQLInstances().get_address(2),
        "user": MySQLInstances().user,
        "passwd": MySQLInstances().passwd,
    }
    uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
    self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
    self.__server_5 = MySQLServer(**self.__options_5)
    MySQLServer.add(self.__server_5)

    self.__options_6 = {
        "uuid": _uuid.UUID("{ff75a12a-98d1-414c-96af-9e9d4b179678}"),
        "address": "server_6.mysql.com:3060",
        "user": MySQLInstances().user,
        "passwd": MySQLInstances().passwd,
    }
    self.__server_6 = MySQLServer(**self.__options_6)
    MySQLServer.add(self.__server_6)

    self.__group_3 = Group("GROUPID3", "Third description.")
    Group.add(self.__group_3)
    self.__group_3.add_server(self.__server_5)
    self.__group_3.add_server(self.__server_6)
    tests.utils.configure_decoupled_master(self.__group_3, self.__server_5)

    # Split each address into host/port (13001 is the fallback port)
    # for building the expected result sets below.
    self.__options_1_host, self.__options_1_port = \
        server_utils.split_host_port(self.__options_1["address"], 13001)
    self.__options_2_host, self.__options_2_port = \
        server_utils.split_host_port(self.__options_2["address"], 13001)
    self.__options_3_host, self.__options_3_port = \
        server_utils.split_host_port(self.__options_3["address"], 13001)
    self.__options_4_host, self.__options_4_port = \
        server_utils.split_host_port(self.__options_4["address"], 13001)
    self.__options_5_host, self.__options_5_port = \
        server_utils.split_host_port(self.__options_5["address"], 13001)
    self.__options_6_host, self.__options_6_port = \
        server_utils.split_host_port(self.__options_6["address"], 13001)

    # Empty groups used as shard-mapping global groups and shard homes.
    group_4 = Group("GROUPID4", "4TH description.")
    Group.add(group_4)
    group_5 = Group("GROUPID5", "5TH description.")
    Group.add(group_5)
    group_6 = Group("GROUPID6", "6TH description.")
    Group.add(group_6)
    group_7 = Group("GROUPID7", "7TH description.")
    Group.add(group_7)
    group_8 = Group("GROUPID8", "8TH description.")
    Group.add(group_8)
    group_9 = Group("GROUPID9", "9TH description.")
    Group.add(group_9)
    group_10 = Group("GROUPID10", "10TH description.")
    Group.add(group_10)
    group_11 = Group("GROUPID11", "11TH description.")
    Group.add(group_11)
    group_12 = Group("GROUPID12", "12TH description.")
    Group.add(group_12)
    group_13 = Group("GROUPID13", "13TH description.")
    Group.add(group_13)
    group_14 = Group("GROUPID14", "14TH description.")
    Group.add(group_14)

    # No shard mappings may exist before we define ours.
    self.__shard_mapping_list = ShardMapping.list_shard_mapping_defn()
    self.assertEquals(self.__shard_mapping_list, [])

    self.__shard_mapping_id_1 = ShardMapping.define("RANGE", "GROUPID10")
    self.__shard_mapping_id_2 = ShardMapping.define("RANGE", "GROUPID11")
    self.__shard_mapping_id_3 = ShardMapping.define("RANGE", "GROUPID12")
    #Test with sharding type values in lower case
    self.__shard_mapping_id_4 = ShardMapping.define("range", "GROUPID13")
    self.__shard_mapping_id_5 = ShardMapping.define("range", "GROUPID14")

    self.__shard_mapping_1 = \
        ShardMapping.add(self.__shard_mapping_id_1, "db1.t1", "userID1")
    self.__shard_mapping_2 = \
        ShardMapping.add(self.__shard_mapping_id_2, "db2.t2", "userID2")
    self.__shard_mapping_3 = \
        ShardMapping.add(self.__shard_mapping_id_3, "db3.t3", "userID3")
    self.__shard_mapping_4 = \
        ShardMapping.add(self.__shard_mapping_id_4, "db4.t4", "userID4")
    self.__shard_mapping_5 = \
        ShardMapping.add(self.__shard_mapping_id_5,
                         "prune_db.prune_table", "userID")

    # Eleven shards; note shard 3 is DISABLED.
    self.__shard_id_1 = Shards.add("GROUPID1", "ENABLED")
    self.__shard_id_2 = Shards.add("GROUPID10", "ENABLED")
    self.__shard_id_3 = Shards.add("GROUPID11", "DISABLED")
    self.__shard_id_4 = Shards.add("GROUPID4", "ENABLED")
    self.__shard_id_5 = Shards.add("GROUPID5", "ENABLED")
    self.__shard_id_6 = Shards.add("GROUPID6", "ENABLED")
    self.__shard_id_7 = Shards.add("GROUPID7", "ENABLED")
    self.__shard_id_8 = Shards.add("GROUPID8", "ENABLED")
    self.__shard_id_9 = Shards.add("GROUPID9", "ENABLED")
    self.__shard_id_10 = Shards.add("GROUPID2", "ENABLED")
    self.__shard_id_11 = Shards.add("GROUPID3", "ENABLED")

    # Range lower bounds per mapping: each RangeShardingSpecification
    # binds (mapping, lower_bound) -> shard.
    self.__range_sharding_specification_1 = RangeShardingSpecification.add(
        self.__shard_mapping_1.shard_mapping_id,
        0,
        self.__shard_id_1.shard_id)
    self.__range_sharding_specification_2 = RangeShardingSpecification.add(
        self.__shard_mapping_1.shard_mapping_id,
        1001,
        self.__shard_id_2.shard_id)
    self.__range_sharding_specification_3 = RangeShardingSpecification.add(
        self.__shard_mapping_1.shard_mapping_id,
        2001,
        self.__shard_id_3.shard_id)
    self.__range_sharding_specification_4 = RangeShardingSpecification.add(
        self.__shard_mapping_2.shard_mapping_id,
        3001,
        self.__shard_id_4.shard_id)
    self.__range_sharding_specification_5 = RangeShardingSpecification.add(
        self.__shard_mapping_2.shard_mapping_id,
        4001,
        self.__shard_id_5.shard_id)
    self.__range_sharding_specification_6 = RangeShardingSpecification.add(
        self.__shard_mapping_3.shard_mapping_id,
        6001,
        self.__shard_id_6.shard_id)
    self.__range_sharding_specification_7 = RangeShardingSpecification.add(
        self.__shard_mapping_3.shard_mapping_id,
        7001,
        self.__shard_id_7.shard_id)
    self.__range_sharding_specification_8 = RangeShardingSpecification.add(
        self.__shard_mapping_4.shard_mapping_id,
        8001,
        self.__shard_id_8.shard_id)
    self.__range_sharding_specification_9 = RangeShardingSpecification.add(
        self.__shard_mapping_4.shard_mapping_id,
        10001,
        self.__shard_id_9.shard_id)
    self.__range_sharding_specification_10 = RangeShardingSpecification.add(
        self.__shard_mapping_5.shard_mapping_id,
        100,
        self.__shard_id_10.shard_id)
    self.__range_sharding_specification_11 = RangeShardingSpecification.add(
        self.__shard_mapping_5.shard_mapping_id,
        201,
        self.__shard_id_11.shard_id)

    # Numeric indexes for mode/status as they appear in XML-RPC results.
    READ_ONLY = MySQLServer.get_mode_idx(MySQLServer.READ_ONLY)
    READ_WRITE = MySQLServer.get_mode_idx(MySQLServer.READ_WRITE)

    SECONDARY = MySQLServer.get_status_idx(MySQLServer.SECONDARY)
    PRIMARY = MySQLServer.get_status_idx(MySQLServer.PRIMARY)

    # Expected "list servers" results: all six servers ...
    self.__setofservers = tests.utils.make_servers_result([
        [
            str(self.__server_1.uuid), 'GROUPID1',
            self.__options_1_host, self.__options_1_port,
            READ_ONLY, SECONDARY, 1.0
        ],
        [
            str(self.__server_2.uuid), 'GROUPID1',
            self.__options_2_host, self.__options_2_port,
            READ_ONLY, SECONDARY, 1.0
        ],
        [
            str(self.__server_3.uuid), 'GROUPID2',
            self.__options_3_host, self.__options_3_port,
            READ_WRITE, PRIMARY, 1.0
        ],
        [
            str(self.__server_4.uuid), 'GROUPID2',
            self.__options_4_host, self.__options_4_port,
            READ_ONLY, SECONDARY, 1.0
        ],
        [
            str(self.__server_5.uuid), 'GROUPID3',
            self.__options_5_host, self.__options_5_port,
            READ_WRITE, PRIMARY, 1.0
        ],
        [
            str(self.__server_6.uuid), 'GROUPID3',
            self.__options_6_host, self.__options_6_port,
            READ_ONLY, SECONDARY, 1.0
        ]])
    # ... only GROUPID1's servers ...
    self.__setofservers_1 = tests.utils.make_servers_result([
        [
            str(self.__server_1.uuid), 'GROUPID1',
            self.__options_1_host, self.__options_1_port,
            READ_ONLY, SECONDARY, 1.0
        ],
        [
            str(self.__server_2.uuid), 'GROUPID1',
            self.__options_2_host, self.__options_2_port,
            READ_ONLY, SECONDARY, 1.0
        ]])
    # ... GROUPID1 and GROUPID2 ...
    self.__setofservers_2 = tests.utils.make_servers_result([
        [
            str(self.__server_1.uuid), 'GROUPID1',
            self.__options_1_host, self.__options_1_port,
            READ_ONLY, SECONDARY, 1.0
        ],
        [
            str(self.__server_2.uuid), 'GROUPID1',
            self.__options_2_host, self.__options_2_port,
            READ_ONLY, SECONDARY, 1.0
        ],
        [
            str(self.__server_3.uuid), 'GROUPID2',
            self.__options_3_host, self.__options_3_port,
            READ_WRITE, PRIMARY, 1.0
        ],
        [
            str(self.__server_4.uuid), 'GROUPID2',
            self.__options_4_host, self.__options_4_port,
            READ_ONLY, SECONDARY, 1.0
        ]])
    # ... and all three groups.
    self.__setofservers_3 = tests.utils.make_servers_result([
        [
            str(self.__server_1.uuid), 'GROUPID1',
            self.__options_1_host, self.__options_1_port,
            READ_ONLY, SECONDARY, 1.0
        ],
        [
            str(self.__server_2.uuid), 'GROUPID1',
            self.__options_2_host, self.__options_2_port,
            READ_ONLY, SECONDARY, 1.0
        ],
        [
            str(self.__server_3.uuid), 'GROUPID2',
            self.__options_3_host, self.__options_3_port,
            READ_WRITE, PRIMARY, 1.0
        ],
        [
            str(self.__server_4.uuid), 'GROUPID2',
            self.__options_4_host, self.__options_4_port,
            READ_ONLY, SECONDARY, 1.0
        ],
        [
            str(self.__server_5.uuid), 'GROUPID3',
            self.__options_5_host, self.__options_5_port,
            READ_WRITE, PRIMARY, 1.0
        ],
        [
            str(self.__server_6.uuid), 'GROUPID3',
            self.__options_6_host, self.__options_6_port,
            READ_ONLY, SECONDARY, 1.0
        ]])

    # Expected "list tables" results, cumulative per mapping.
    self.__setoftables = tests.utils.make_tables_result(
        [['db1', 't1', 'userID1', '1'],
         ['db2', 't2', 'userID2', '2'],
         ['db3', 't3', 'userID3', '3'],
         ['db4', 't4', 'userID4', '4'],
         ['prune_db', 'prune_table', 'userID', '5']])
    self.__setoftables_1 = tests.utils.make_tables_result(
        [['db1', 't1', 'userID1', '1']])
    self.__setoftables_2 = tests.utils.make_tables_result(
        [['db1', 't1', 'userID1', '1'],
         ['db2', 't2', 'userID2', '2']])
    self.__setoftables_3 = tests.utils.make_tables_result(
        [['db1', 't1', 'userID1', '1'],
         ['db2', 't2', 'userID2', '2'],
         ['db3', 't3', 'userID3', '3']])

    # Expected shard-mapping results; type is normalized to upper case
    # even for the mappings defined with "range".
    self.__setofshardmaps = tests.utils.make_mapping_result(
        [['1', 'RANGE', 'GROUPID10'],
         ['2', 'RANGE', 'GROUPID11'],
         ['3', 'RANGE', 'GROUPID12'],
         ['4', 'RANGE', 'GROUPID13'],
         ['5', 'RANGE', 'GROUPID14']])
    self.__setofshardmaps_1 = tests.utils.make_mapping_result(
        [['1', 'RANGE', 'GROUPID10']])
    self.__setofshardmaps_2 = tests.utils.make_mapping_result(
        [['1', 'RANGE', 'GROUPID10'],
         ['2', 'RANGE', 'GROUPID11']])
    self.__setofshardmaps_3 = tests.utils.make_mapping_result(
        [['1', 'RANGE', 'GROUPID10'],
         ['2', 'RANGE', 'GROUPID11'],
         ['3', 'RANGE', 'GROUPID12']])

    # Expected shard-index results (lower_bound, mapping, shard, group).
    # NOTE(review): shard 3 (lower bound 2001) is absent — presumably
    # because it was added DISABLED; confirm against the command.
    self.__setofshardindexes = tests.utils.make_index_result(
        [['0', '1', '1', 'GROUPID1'],
         ['1001', '1', '2', 'GROUPID10'],
         ['3001', '2', '4', 'GROUPID4'],
         ['4001', '2', '5', 'GROUPID5'],
         ['6001', '3', '6', 'GROUPID6'],
         ['7001', '3', '7', 'GROUPID7'],
         ['8001', '4', '8', 'GROUPID8'],
         ['10001', '4', '9', 'GROUPID9'],
         ['100', '5', '10', 'GROUPID2'],
         ['201', '5', '11', 'GROUPID3']])
    self.__setofshardindexes_1 = tests.utils.make_index_result(
        [['0', '1', '1', 'GROUPID1'],
         ['1001', '1', '2', 'GROUPID10']])
    self.__setofshardindexes_3 = tests.utils.make_index_result(
        [['0', '1', '1', 'GROUPID1'],
         ['1001', '1', '2', 'GROUPID10'],
         ['3001', '2', '4', 'GROUPID4'],
         ['4001', '2', '5', 'GROUPID5'],
         ['6001', '3', '6', 'GROUPID6'],
         ['7001', '3', '7', 'GROUPID7']])
    self.__setofshardindexes_5 = tests.utils.make_index_result(
        [['0', '1', '1', 'GROUPID1'],
         ['1001', '1', '2', 'GROUPID10'],
         ['3001', '2', '4', 'GROUPID4'],
         ['4001', '2', '5', 'GROUPID5'],
         ['6001', '3', '6', 'GROUPID6'],
         ['7001', '3', '7', 'GROUPID7'],
         ['8001', '4', '8', 'GROUPID8'],
         ['10001', '4', '9', 'GROUPID9'],
         ['100', '5', '10', 'GROUPID2'],
         ['201', '5', '11', 'GROUPID3']])

    # Expected "lookup sharding information" results, cumulative.
    self.__shardinginformation_1 = tests.utils.make_info_result(
        [[
            'db1', 't1', 'userID1', '0', '1',
            'RANGE', 'GROUPID1', 'GROUPID10'
        ],
        [
            'db1', 't1', 'userID1', '1001', '2',
            'RANGE', 'GROUPID10', 'GROUPID10'
        ]])
    self.__shardinginformation_2 = tests.utils.make_info_result(
        [[
            'db1', 't1', 'userID1', '0', '1',
            'RANGE', 'GROUPID1', 'GROUPID10'
        ],
        [
            'db1', 't1', 'userID1', '1001', '2',
            'RANGE', 'GROUPID10', 'GROUPID10'
        ],
        [
            'db2', 't2', 'userID2', '3001', '4',
            'RANGE', 'GROUPID4', 'GROUPID11'
        ],
        [
            'db2', 't2', 'userID2', '4001', '5',
            'RANGE', 'GROUPID5', 'GROUPID11'
        ]])
    self.__shardinginformation_3 = tests.utils.make_info_result(
        [[
            'db1', 't1', 'userID1', '0', '1',
            'RANGE', 'GROUPID1', 'GROUPID10'
        ],
        [
            'db1', 't1', 'userID1', '1001', '2',
            'RANGE', 'GROUPID10', 'GROUPID10'
        ],
        [
            'db2', 't2', 'userID2', '3001', '4',
            'RANGE', 'GROUPID4', 'GROUPID11'
        ],
        [
            'db2', 't2', 'userID2', '4001', '5',
            'RANGE', 'GROUPID5', 'GROUPID11'
        ],
        [
            'db3', 't3', 'userID3', '6001', '6',
            'RANGE', 'GROUPID6', 'GROUPID12'
        ],
        [
            'db3', 't3', 'userID3', '7001', '7',
            'RANGE', 'GROUPID7', 'GROUPID12'
        ]])
class TestShardingPrune(tests.utils.TestCase):
    """Integration tests for pruning RANGE-sharded data.

    Five shard groups (GROUPID2..GROUPID6) each start with rows in all
    five key ranges; pruning must leave each shard holding only the rows
    that belong to its own range.
    """

    def setUp(self):
        """Creates the following topology for testing,

        GROUPID1 - localhost:13001, localhost:13002 - Global Group
        GROUPID2 - localhost:13003, localhost:13004 - shard 1
        GROUPID3 - localhost:13005, localhost:13006 - shard 2
        """
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        # Global group GROUPID1: single server, no shard data is loaded here.
        self.__options_1 = {
            "uuid": _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(0),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }
        # The placeholder UUID above is immediately replaced by the real
        # UUID discovered from the running server.
        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        # Shard group GROUPID2: loaded with rows from every key range so
        # that pruning has something to delete.
        self.__options_2 = {
            "uuid": _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(1),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }
        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        # Rows in the five ranges: 1-70, 101-300, 1001-1200, 10001-10200
        # and 100001-100200 (one range per shard after pruning).
        for i in range(1, 71):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        # Shard group GROUPID3: same full data load as GROUPID2.
        self.__options_3 = {
            "uuid": _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(2),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }
        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add(self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add(self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        # Shard group GROUPID4: same full data load.
        self.__options_4 = {
            "uuid": _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(3),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }
        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add(self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        # Shard group GROUPID5: same full data load.
        self.__options_5 = {
            "uuid": _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(4),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }
        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add(self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        # Shard group GROUPID6: same full data load.
        self.__options_6 = {
            "uuid": _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address": MySQLInstances().get_address(5),
            "user": MySQLInstances().user,
            "passwd": MySQLInstances().passwd
        }
        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)
        self.__server_6.connect()
        self.__server_6.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_6.exec_stmt("CREATE DATABASE db1")
        self.__server_6.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(101, 301):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(1001, 1201):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(10001, 10201):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        for i in range(100001, 100201):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add(self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        # Define a RANGE sharding scheme: one shard per group, with the
        # lower bound of each range given after the '/'.
        status = self.proxy.sharding.create_definition("RANGE", "GROUPID1")
        self.check_xmlrpc_command_result(status, returns=1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_shard(
            1,
            "GROUPID2/1,GROUPID3/101,GROUPID4/1001,"
            "GROUPID5/10001,GROUPID6/100001",
            "ENABLED"
        )
        self.check_xmlrpc_command_result(status)

    def test_prune_table_not_exists(self):
        """Delete the data in the database and verify that the prune
        does not fail once the database has been removed.
        """
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_6.exec_stmt("DROP DATABASE IF EXISTS db1")
        # NOTE(review): the returned status is deliberately not asserted
        # here; the test only verifies that prune_shard does not raise.
        # Confirm whether the result should also be checked.
        status = self.proxy.sharding.prune_shard("db1.t1")

    def test_prune_shard(self):
        """Prune db1.t1 and verify that every shard retains exactly the
        row count and userID boundaries of its own key range.
        """
        status = self.proxy.sharding.prune_shard("db1.t1")
        self.check_xmlrpc_command_result(status)

        # Shard for key 1 (GROUPID2): range 1-70 -> 70 rows.
        status = self.proxy.sharding.lookup_servers("db1.t1", 1, "LOCAL")
        info = self.check_xmlrpc_simple(status, {}, rowcount=1)
        shard_server = fetch_test_server(info['server_uuid'])
        shard_server.connect()
        rows = shard_server.exec_stmt("SELECT COUNT(*) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 70)
        rows = shard_server.exec_stmt("SELECT MAX(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 70)
        rows = shard_server.exec_stmt("SELECT MIN(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 1)

        # Shard for key 101 (GROUPID3): range 101-300 -> 200 rows.
        status = self.proxy.sharding.lookup_servers("db1.t1", 101, "LOCAL")
        info = self.check_xmlrpc_simple(status, {}, rowcount=1)
        shard_server = fetch_test_server(info['server_uuid'])
        shard_server.connect()
        rows = shard_server.exec_stmt("SELECT COUNT(*) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 200)
        rows = shard_server.exec_stmt("SELECT MAX(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 300)
        rows = shard_server.exec_stmt("SELECT MIN(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 101)

        # Shard for key 1202 (GROUPID4): range 1001-1200 -> 200 rows.
        status = self.proxy.sharding.lookup_servers("db1.t1", 1202, "LOCAL")
        info = self.check_xmlrpc_simple(status, {}, rowcount=1)
        shard_server = fetch_test_server(info['server_uuid'])
        shard_server.connect()
        rows = shard_server.exec_stmt("SELECT COUNT(*) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 200)
        rows = shard_server.exec_stmt("SELECT MAX(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 1200)
        rows = shard_server.exec_stmt("SELECT MIN(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 1001)

        # Shard for key 11000 (GROUPID5): range 10001-10200 -> 200 rows.
        status = self.proxy.sharding.lookup_servers("db1.t1", 11000, "LOCAL")
        info = self.check_xmlrpc_simple(status, {}, rowcount=1)
        shard_server = fetch_test_server(info['server_uuid'])
        shard_server.connect()
        rows = shard_server.exec_stmt("SELECT COUNT(*) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 200)
        rows = shard_server.exec_stmt("SELECT MAX(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 10200)
        rows = shard_server.exec_stmt("SELECT MIN(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 10001)

        # Shard for key 100100 (GROUPID6): range 100001-100200 -> 200 rows.
        status = self.proxy.sharding.lookup_servers("db1.t1", 100100, "LOCAL")
        info = self.check_xmlrpc_simple(status, {}, rowcount=1)
        shard_server = fetch_test_server(info['server_uuid'])
        shard_server.connect()
        rows = shard_server.exec_stmt("SELECT COUNT(*) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 200)
        rows = shard_server.exec_stmt("SELECT MAX(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 100200)
        rows = shard_server.exec_stmt("SELECT MIN(userID) FROM db1.t1",
                                      {"fetch": True})
        self.assertTrue(int(rows[0][0]) == 100001)

    def tearDown(self):
        """Clean up the existing environment
        """
        tests.utils.cleanup_environment()
def _setup_shard_switch_move(shard_id, source_group_id, destination_group_id,
                             update_only):
    """Setup the moved shard to map to the new group.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destination_group_id: The ID of the group to which the shard
                                needs to be moved.
    :update_only: Only update the state store and skip provisioning.
    """
    #Fetch the Range sharding specification. When we start implementing
    #heterogenous sharding schemes, we need to find out the type of
    #sharding scheme and we should use that to find out the sharding
    #implementation.
    _, source_shard, _, shard_mapping_defn = \
        _services_sharding.verify_and_fetch_shard(shard_id)

    destination_group = Group.fetch(destination_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destination_group_id, ))
    destn_group_master = MySQLServer.fetch(destination_group.master)
    if destn_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    destn_group_master.connect()

    #Set the destination group master to read_only so no writes land on it
    #while the shard ownership is being switched.
    destn_group_master.read_only = True

    #Setup replication between the shard group and the global group.
    #shard_mapping_defn[2] holds the global group ID of the mapping.
    _group_replication.setup_group_replication \
            (shard_mapping_defn[2], destination_group_id)
    #set the shard to point to the new group.
    source_shard.group_id = destination_group_id
    #Stop the replication between the global server and the original
    #group associated with the shard.
    _group_replication.stop_group_slave\
            (shard_mapping_defn[2], source_group_id, True)

    #The sleep ensures that the connector have refreshed their caches with the
    #new shards that have been added as a result of the split.
    time.sleep(_utils.TTL)

    #Reset the read only flag on the source server.
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))
    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)

    # NOTE(review): only the source master reset is skipped for
    # update_only; the connection kill and destination unlock below run
    # unconditionally — confirm this matches the intended provisioning
    # semantics (the flattened source makes the block nesting ambiguous).
    if not update_only:
        master.connect()
        master.read_only = False

    #Kill all the existing connections on the servers
    source_group.kill_connections_on_servers()

    #allow updates in the destination group master
    destn_group_master.read_only = False
def setUp(self):
    """Creates the following topology for testing,

    GROUPID1 - localhost:13001, localhost:13002 - Global Group
    GROUPID2 - localhost:13003, localhost:13004 - shard 1
    GROUPID3 - localhost:13005, localhost:13006 - shard 2
    """
    self.manager, self.proxy = tests.utils.setup_xmlrpc()

    # Global group GROUPID1: single server, no shard data is loaded here.
    self.__options_1 = {
        "uuid": _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
        "address": MySQLInstances().get_address(0),
        "user": MySQLInstances().user,
        "passwd": MySQLInstances().passwd
    }
    # The placeholder UUID above is immediately replaced by the real UUID
    # discovered from the running server.
    uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
    self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
    self.__server_1 = MySQLServer(**self.__options_1)
    MySQLServer.add(self.__server_1)
    self.__group_1 = Group("GROUPID1", "First description.")
    Group.add(self.__group_1)
    self.__group_1.add_server(self.__server_1)
    tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

    # Shard group GROUPID2: loaded with rows in all five key ranges
    # (1-70, 101-300, 1001-1200, 10001-10200, 100001-100200).
    self.__options_2 = {
        "uuid": _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
        "address": MySQLInstances().get_address(1),
        "user": MySQLInstances().user,
        "passwd": MySQLInstances().passwd
    }
    uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
    self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
    self.__server_2 = MySQLServer(**self.__options_2)
    MySQLServer.add(self.__server_2)
    self.__server_2.connect()
    self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
    self.__server_2.exec_stmt("CREATE DATABASE db1")
    self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                              "(userID INT, name VARCHAR(30))")
    for i in range(1, 71):
        self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(101, 301):
        self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(1001, 1201):
        self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(10001, 10201):
        self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(100001, 100201):
        self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    self.__group_2 = Group("GROUPID2", "Second description.")
    Group.add(self.__group_2)
    self.__group_2.add_server(self.__server_2)
    tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

    # Shard group GROUPID3: same full data load as GROUPID2.
    self.__options_3 = {
        "uuid": _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
        "address": MySQLInstances().get_address(2),
        "user": MySQLInstances().user,
        "passwd": MySQLInstances().passwd
    }
    uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
    self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
    self.__server_3 = MySQLServer(**self.__options_3)
    MySQLServer.add(self.__server_3)
    self.__server_3.connect()
    self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
    self.__server_3.exec_stmt("CREATE DATABASE db1")
    self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                              "(userID INT, name VARCHAR(30))")
    for i in range(1, 71):
        self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(101, 301):
        self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(1001, 1201):
        self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(10001, 10201):
        self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(100001, 100201):
        self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    self.__group_3 = Group("GROUPID3", "Third description.")
    Group.add(self.__group_3)
    self.__group_3.add_server(self.__server_3)
    tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

    # Shard group GROUPID4: same full data load.
    self.__options_4 = {
        "uuid": _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
        "address": MySQLInstances().get_address(3),
        "user": MySQLInstances().user,
        "passwd": MySQLInstances().passwd
    }
    uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
    self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
    self.__server_4 = MySQLServer(**self.__options_4)
    MySQLServer.add(self.__server_4)
    self.__server_4.connect()
    self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
    self.__server_4.exec_stmt("CREATE DATABASE db1")
    self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                              "(userID INT, name VARCHAR(30))")
    for i in range(1, 71):
        self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(101, 301):
        self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(1001, 1201):
        self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(10001, 10201):
        self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(100001, 100201):
        self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    self.__group_4 = Group("GROUPID4", "Fourth description.")
    Group.add(self.__group_4)
    self.__group_4.add_server(self.__server_4)
    tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

    # Shard group GROUPID5: same full data load.
    self.__options_5 = {
        "uuid": _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
        "address": MySQLInstances().get_address(4),
        "user": MySQLInstances().user,
        "passwd": MySQLInstances().passwd
    }
    uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
    self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
    self.__server_5 = MySQLServer(**self.__options_5)
    MySQLServer.add(self.__server_5)
    self.__server_5.connect()
    self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
    self.__server_5.exec_stmt("CREATE DATABASE db1")
    self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                              "(userID INT, name VARCHAR(30))")
    for i in range(1, 71):
        self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(101, 301):
        self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(1001, 1201):
        self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(10001, 10201):
        self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(100001, 100201):
        self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    self.__group_5 = Group("GROUPID5", "Fifth description.")
    Group.add(self.__group_5)
    self.__group_5.add_server(self.__server_5)
    tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

    # Shard group GROUPID6: same full data load.
    self.__options_6 = {
        "uuid": _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
        "address": MySQLInstances().get_address(5),
        "user": MySQLInstances().user,
        "passwd": MySQLInstances().passwd
    }
    uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
    self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
    self.__server_6 = MySQLServer(**self.__options_6)
    MySQLServer.add(self.__server_6)
    self.__server_6.connect()
    self.__server_6.exec_stmt("DROP DATABASE IF EXISTS db1")
    self.__server_6.exec_stmt("CREATE DATABASE db1")
    self.__server_6.exec_stmt("CREATE TABLE db1.t1"
                              "(userID INT, name VARCHAR(30))")
    for i in range(1, 71):
        self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(101, 301):
        self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(1001, 1201):
        self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(10001, 10201):
        self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    for i in range(100001, 100201):
        self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                  "VALUES(%s, 'TEST %s')" % (i, i))
    self.__group_6 = Group("GROUPID6", "Sixth description.")
    Group.add(self.__group_6)
    self.__group_6.add_server(self.__server_6)
    tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

    # Define a RANGE sharding scheme: one shard per group, lower bound of
    # each range given after the '/'.
    status = self.proxy.sharding.create_definition("RANGE", "GROUPID1")
    self.check_xmlrpc_command_result(status, returns=1)

    status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
    self.check_xmlrpc_command_result(status)

    status = self.proxy.sharding.add_shard(
        1,
        "GROUPID2/1,GROUPID3/101,GROUPID4/1001,"
        "GROUPID5/10001,GROUPID6/100001",
        "ENABLED"
    )
    self.check_xmlrpc_command_result(status)
class TestShardingServices(unittest.TestCase):
    """Tests for the sharding XML-RPC services: definition listing,
    table-mapping listing and shard pruning over mixed HASH/RANGE
    sharding definitions.
    """

    def assertStatus(self, status, expect):
        # Collect any non-empty diagnosis messages so a failure shows why
        # the job did not reach the expected state.
        items = (item['diagnosis'] for item in status[1] if item['diagnosis'])
        self.assertEqual(status[1][-1]["success"], expect, "\n".join(items))

    def setUp(self):
        """Configure the existing environment
        """
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        # Global group GROUPID1.
        self.__options_1 = {
            "uuid" : _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        # The placeholder UUID above is immediately replaced by the real
        # UUID discovered from the running server.
        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        # Shard group GROUPID2 (no data loaded).
        self.__options_2 = {
            "uuid" : _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        # Shard group GROUPID3 (no data loaded).
        self.__options_3 = {
            "uuid" : _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(2),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add( self.__server_3)
        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add( self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        # Shard group GROUPID4: loaded with db2.t2 rows 1-200 (RANGE
        # mapping 2 splits them between GROUPID4 and GROUPID5 at key 101).
        self.__options_4 = {
            "uuid" : _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(3),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add( self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_4.exec_stmt("CREATE DATABASE db2")
        self.__server_4.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID2 INT, name VARCHAR(30))")
        for i in range(1, 201):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))

        # Shard group GROUPID5: also loaded with db2.t2 rows 1-200.
        self.__options_5 = {
            "uuid" : _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(4),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add( self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_5.exec_stmt("CREATE DATABASE db2")
        self.__server_5.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID2 INT, name VARCHAR(30))")
        for i in range(1, 201):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))

        # Shard group GROUPID6 (no data loaded).
        self.__options_6 = {
            "uuid" : _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(5),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)
        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add( self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        # Three sharding definitions: HASH (id 1), RANGE (id 2), HASH (id 3).
        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_define_shard_mapping).")
        self.assertEqual(status[2], 1)

        status = self.proxy.sharding.create_definition("RANGE", "GROUPID1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_define_shard_mapping).")
        self.assertEqual(status[2], 2)

        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_define_shard_mapping).")
        self.assertEqual(status[2], 3)

        # Map one table onto each definition.
        status = self.proxy.sharding.add_table(1, "db1.t1", "userID1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_table(2, "db2.t2", "userID2")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_table(3, "db3.t3", "userID3")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        # Create the shards for each mapping.
        status = self.proxy.sharding.add_shard(1, "GROUPID2,GROUPID3",
                                               "ENABLED")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard).")

        status = self.proxy.sharding.add_shard(2, "GROUPID4/0,GROUPID5/101",
                                               "ENABLED")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard).")

        status = self.proxy.sharding.add_shard(3, "GROUPID6", "ENABLED")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard).")

    def test_list_shard_defns(self):
        """Verify that the three definitions created in setUp are listed."""
        expected_shard_mapping_list1 = [
            [1, "HASH", "GROUPID1"],
            [2, "RANGE", "GROUPID1"],
            [3, "HASH", "GROUPID1"]
        ]
        status = self.proxy.sharding.list_definitions()
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        obtained_shard_mapping_list = status[2]
        # Compared as sets: element order within each row is not checked.
        self.assertEqual(set(expected_shard_mapping_list1[0]),
                         set(obtained_shard_mapping_list[0]))
        self.assertEqual(set(expected_shard_mapping_list1[1]),
                         set(obtained_shard_mapping_list[1]))
        self.assertEqual(set(expected_shard_mapping_list1[2]),
                         set(obtained_shard_mapping_list[2]))

    def test_list_shard_mappings(self):
        """Verify table mappings filtered by sharding type."""
        status = self.proxy.sharding.list_tables("HASH")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        self.assertEqual(status[2], [
            {"shard_mapping_id":1, "table_name":"db1.t1",
             "column_name":"userID1", "type_name":"HASH",
             "global_group":"GROUPID1"},
            {"shard_mapping_id":3, "table_name":"db3.t3",
             "column_name":"userID3", "type_name":"HASH",
             "global_group":"GROUPID1"}
        ])

        status = self.proxy.sharding.list_tables("RANGE")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        self.assertEqual(status[2], [
            {"shard_mapping_id":2, "table_name":"db2.t2",
             "column_name":"userID2", "type_name":"RANGE",
             "global_group":"GROUPID1"}
        ])

    def test_shard_prune(self):
        """Prune db2.t2 and check each RANGE shard keeps only its rows."""
        status = self.proxy.sharding.prune_shard("db2.t2")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_prune_shard_tables).")

        # Shard for key 1 (GROUPID4): keys below 101 -> rows 1-100.
        status = self.proxy.sharding.lookup_servers("db2.t2", 1, "LOCAL")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        obtained_server_list = status[2]
        shard_uuid = obtained_server_list[0][0]
        shard_server = MySQLServer.fetch(shard_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2",
            {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 100)
        rows = shard_server.exec_stmt(
            "SELECT MAX(userID2) FROM db2.t2",
            {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 100)
        rows = shard_server.exec_stmt(
            "SELECT MIN(userID2) FROM db2.t2",
            {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 1)

        # Shard for key 101 (GROUPID5): keys from 101 -> rows 101-200.
        status = self.proxy.sharding.lookup_servers("db2.t2", 101, "LOCAL")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        obtained_server_list = status[2]
        shard_uuid = obtained_server_list[0][0]
        shard_server = MySQLServer.fetch(shard_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2",
            {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 100)
        rows = shard_server.exec_stmt(
            "SELECT MAX(userID2) FROM db2.t2",
            {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 200)
        rows = shard_server.exec_stmt(
            "SELECT MIN(userID2) FROM db2.t2",
            {"fetch" : True})
        self.assertTrue(int(rows[0][0]) == 101)

    def tearDown(self):
        """Clean up the existing environment
        """
        # Best-effort removal of all shards created in setUp; results are
        # intentionally not checked so cleanup always proceeds.
        self.proxy.sharding.disable_shard(1)
        self.proxy.sharding.remove_shard(1)
        self.proxy.sharding.disable_shard(2)
        self.proxy.sharding.remove_shard(2)
        self.proxy.sharding.disable_shard(3)
        self.proxy.sharding.remove_shard(3)
        self.proxy.sharding.disable_shard(4)
        self.proxy.sharding.remove_shard(4)
        self.proxy.sharding.disable_shard(5)
        self.proxy.sharding.remove_shard(5)
        tests.utils.cleanup_environment()
        tests.utils.teardown_xmlrpc(self.manager, self.proxy)
def configure_servers(options, config):
    """Check if some MySQL's addresses were specified and the number is
    greater than NUMBER_OF_SERVERS.

    Prepares the backing store and every managed server listed in
    ``options.servers``: wipes user databases and creates the store,
    server, backup and restore test users each one needs.

    :param options: Parsed command-line options (``user``, ``password``,
                    ``servers``).
    :param config: Configuration object with "storage" and "servers"
                   sections.
    :return: ``True`` on success; ``False`` when configuration fails or
             fewer than NUMBER_OF_SERVERS addresses were supplied.
    """
    import tests.utils as _test_utils
    from mysql.fabric.server import (
        MySQLServer,
        ConnectionManager,
    )
    from mysql.fabric.backup import (
        MySQLDump,
    )

    try:
        instances = _test_utils.MySQLInstances()

        # The administrative user as given in --user and --password
        # options. In simple use cases "root" is used.
        instances.user = options.user
        instances.passwd = options.password

        # Backing store - "fabric_store/storepw".
        instances.state_store_address = config.get("storage", "address")
        instances.store_user = config.get("storage", "user")
        instances.store_passwd = config.get("storage", "password")
        instances.store_db = config.get("storage", "database")

        # Server user - "fabric_server/serverpw".
        instances.server_user = config.get("servers", "user")
        instances.server_passwd = config.get("servers", "password")

        # Backup user - "fabric_backup/backuppw".
        instances.backup_user = config.get("servers", "backup_user")
        instances.backup_passwd = config.get("servers", "backup_password")

        # Restore user - "fabric_restore/restorepw".
        instances.restore_user = config.get("servers", "restore_user")
        instances.restore_passwd = config.get("servers", "restore_password")

        # Set up the backing store and its dedicated user.
        from mysql.fabric import persistence

        discovered = MySQLServer.discover_uuid(
            address=instances.state_store_address,
            user=instances.user,
            passwd=instances.passwd
        )
        store_server = MySQLServer(
            _uuid.UUID(discovered),
            address=instances.state_store_address,
            user=instances.user,
            passwd=instances.passwd
        )
        store_server.connect()

        # Precautionary cleanup.
        store_server.exec_stmt(
            "DROP DATABASE IF EXISTS %s" % (instances.store_db,)
        )

        # Create store user.
        _test_utils.create_test_user(
            store_server,
            instances.store_user,
            instances.store_passwd,
            [(persistence.required_privileges(),
              "{db}.*".format(db=instances.store_db))]
        )

        # Set up managed servers.
        if options.servers:
            for server_address in options.servers.split():
                instances.add_address(server_address)

                discovered = MySQLServer.discover_uuid(
                    address=server_address,
                    user=instances.user,
                    passwd=instances.passwd
                )
                managed = MySQLServer(
                    _uuid.UUID(discovered),
                    address=server_address,
                    user=instances.user,
                    passwd=instances.passwd
                )
                managed.connect()
                managed.set_session_binlog(False)
                managed.read_only = False

                # Drop user databases; FK checks are disabled so drop
                # order does not matter.
                managed.set_foreign_key_checks(False)
                for row in managed.exec_stmt("SHOW DATABASES"):
                    if row[0] not in MySQLServer.NO_USER_DATABASES:
                        managed.exec_stmt(
                            "DROP DATABASE IF EXISTS %s" % (row[0],)
                        )
                managed.set_foreign_key_checks(True)

                # Create server user.
                _test_utils.create_test_user(
                    managed,
                    instances.server_user,
                    instances.server_passwd,
                    [(MySQLServer.SERVER_PRIVILEGES, "*.*"),
                     (MySQLServer.SERVER_PRIVILEGES_DB, "mysql_fabric.*")]
                )
                # Create backup user.
                _test_utils.create_test_user(
                    managed,
                    instances.backup_user,
                    instances.backup_passwd,
                    [(MySQLDump.BACKUP_PRIVILEGES, "*.*")]
                )
                # Create restore user.
                _test_utils.create_test_user(
                    managed,
                    instances.restore_user,
                    instances.restore_passwd,
                    [(MySQLDump.RESTORE_PRIVILEGES, "*.*")]
                )

                managed.set_session_binlog(True)
                managed.disconnect()
                ConnectionManager().purge_connections(managed)

        if instances.get_number_addresses() < NUMBER_OF_SERVERS:
            sys.stderr.write(
                "<<<<<<<<<< Some unit tests need {0} MySQL "
                "Instances. >>>>>>>>>>\n".format(NUMBER_OF_SERVERS)
            )
            return False
    except Exception as error:
        sys.stderr.write(
            "Error configuring servers: {0}\n".format(str(error))
        )
        import traceback
        traceback.print_exc()
        return False
    return True
    def setUp(self):
        """Creates the following topology for testing,

        GROUPID1 - localhost:13001, localhost:13002 - Global Group
        GROUPID2 - localhost:13003, localhost:13004 - shard 1
        GROUPID3 - localhost:13005, localhost:13006 - shard 2
        """
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        # GROUPID1: the global group; carries no application data here.
        self.__options_1 = {
            "uuid" : _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }
        # The placeholder UUID above is replaced by the one discovered
        # from the live server.
        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        # GROUPID2: first shard group, loaded with rows 1..600.
        self.__options_2 = {
            "uuid" : _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }
        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 601):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        # GROUPID3: second shard group, same 600-row data set.
        self.__options_3 = {
            "uuid" : _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(2),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }
        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add( self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 601):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add( self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        # GROUPID4: third shard group, same 600-row data set.
        self.__options_4 = {
            "uuid" : _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(3),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }
        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 601):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add( self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        # GROUPID5: fourth shard group, same 600-row data set.
        self.__options_5 = {
            "uuid" : _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(4),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }
        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 601):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add( self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        # GROUPID6: fifth shard group, same 600-row data set.
        self.__options_6 = {
            "uuid" : _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(5),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd
        }
        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)
        self.__server_6.connect()
        self.__server_6.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_6.exec_stmt("CREATE DATABASE db1")
        self.__server_6.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 601):
            self.__server_6.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add( self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        # Define a RANGE sharding scheme over db1.t1 and register five
        # shards, one per shard group, covering keys from 1 upwards.
        status = self.proxy.sharding.create_definition("RANGE", "GROUPID1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_define_shard_mapping).")
        self.assertEqual(status[2], 1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_shard(
            1,
            "GROUPID2/1,GROUPID3/101,GROUPID4/201,"
            "GROUPID5/301,GROUPID6/401",
            "ENABLED"
        )
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard).")
class TestShardingPrune(tests.utils.TestCase):
    """Contains unit tests for testing the shard split operation and for
    verifying that the global server configuration remains constant after
    the shard split configuration.
    """
    def setUp(self):
        """Creates the topology for testing.

        Builds six groups: GROUPID1 is the global group, GROUPID2-5 hold
        a RANGE_STRING-sharded copy of db1.t1 / db2.t2 (keys prefixed
        'a'..'e'), and GROUPID6 starts empty as a split target.
        """
        tests.utils.cleanup_environment()
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        # GROUPID1: global group.
        self.__options_1 = {
            "uuid" : _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }
        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__server_1.connect()
        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        # GROUPID2: shard group; db1.t1 rows keyed 'a1'..'e100200' and
        # db2.t2 referencing them through a foreign key.
        self.__options_2 = {
            "uuid" : _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }
        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID VARCHAR(20) PRIMARY KEY, name VARCHAR(30))")
        # Five key prefixes a..e; 70 'a' rows, 200 of each of the rest.
        for i in range(1, 71):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("e"+str(i), i))
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_2.exec_stmt("CREATE DATABASE db2")
        self.__server_2.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID VARCHAR(20), salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("e"+str(i), i))
        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        # GROUPID3: same data set as GROUPID2.
        self.__options_3 = {
            "uuid" : _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(2),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }
        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add( self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID VARCHAR(20)PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("e"+str(i), i))
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_3.exec_stmt("CREATE DATABASE db2")
        self.__server_3.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID VARCHAR(20), salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("e"+str(i), i))
        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add( self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        # GROUPID4: same data set as GROUPID2.
        self.__options_4 = {
            "uuid" : _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(3),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }
        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID VARCHAR(20)PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("e"+str(i), i))
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_4.exec_stmt("CREATE DATABASE db2")
        self.__server_4.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID VARCHAR(20), salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("e"+str(i), i))
        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add( self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        # GROUPID5: same data set as GROUPID2.
        self.__options_5 = {
            "uuid" : _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(4),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }
        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID VARCHAR(20)PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 71):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES('%s', 'TEST %s')" % ("e"+str(i), i))
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_5.exec_stmt("CREATE DATABASE db2")
        self.__server_5.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID VARCHAR(20), salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 71):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("a"+str(i), i))
        for i in range(101, 301):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("b"+str(i), i))
        for i in range(1001, 1201):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("c"+str(i), i))
        for i in range(10001, 10201):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("d"+str(i), i))
        for i in range(100001, 100201):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES('%s', %s)" % ("e"+str(i), i))
        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add( self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        # GROUPID6: empty group, used as the target of the shard split.
        self.__options_6 = {
            "uuid" : _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(5),
            "user" : MySQLInstances().user,
            "passwd": MySQLInstances().passwd,
        }
        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)
        self.__server_6.connect()
        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add( self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        # RANGE_STRING sharding over both tables: shards keyed by the
        # lower bounds 'a', 'b', 'c', 'd'; then prune so each group keeps
        # only its own key range.
        status = self.proxy.sharding.create_definition("RANGE_STRING",
                                                       "GROUPID1")
        self.check_xmlrpc_command_result(status, returns=1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_table(1, "db2.t2", "userID")
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_shard(
            1,
            "GROUPID2/a,GROUPID3/b,GROUPID4/c,GROUPID5/d",
            "ENABLED"
        )
        self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.prune_shard("db1.t1")
        self.check_xmlrpc_command_result(status)

    def test_split_shard_4(self):
        '''Test the split of shard 4 and the global server configuration
        after that. The test splits shard 4 between GROUPID5 and GROUPID6.
        After the split is done, it verifies the count on GROUPID6 to check
        that the group has tuples from the earlier group. Now it fires
        an INSERT on the global group and verifies that all the shards have
        got the inserted tuples.
        '''
        # Shard 4 (GROUPID5) holds the 'd' and 'e' ranges: 200 + 200 rows
        # per table before the split.
        row_cnt_shard_before_split_db1_t1 = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1",
            {"fetch" : True}
        )
        row_cnt_shard_before_split_db2_t2 = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2",
            {"fetch" : True}
        )
        row_cnt_shard_before_split_db1_t1 = \
            int(row_cnt_shard_before_split_db1_t1[0][0])
        row_cnt_shard_before_split_db2_t2 = \
            int(row_cnt_shard_before_split_db2_t2[0][0])
        self.assertEqual(row_cnt_shard_before_split_db1_t1, 400)
        self.assertEqual(row_cnt_shard_before_split_db2_t2, 400)

        # Split shard 4 at key 'e', moving the 'e' range to GROUPID6.
        status = self.proxy.sharding.split_shard("4", "GROUPID6", "e")
        self.check_xmlrpc_command_result(status)

        # Re-establish the connection after the split before querying.
        self.__server_5.connect()

        # GROUPID5 keeps the 200 'd' rows per table...
        row_cnt_shard_after_split_db1_t1_a = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1",
            {"fetch" : True}
        )
        row_cnt_shard_after_split_db1_t1_a = \
            int(row_cnt_shard_after_split_db1_t1_a[0][0])
        self.assertEqual(row_cnt_shard_after_split_db1_t1_a, 200)
        row_cnt_shard_after_split_db2_t2_a = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2",
            {"fetch" : True}
        )
        row_cnt_shard_after_split_db2_t2_a = \
            int(row_cnt_shard_after_split_db2_t2_a[0][0])
        self.assertEqual(row_cnt_shard_after_split_db2_t2_a, 200)

        # ...and GROUPID6 receives the 200 'e' rows per table.
        row_cnt_shard_after_split_db1_t1_b = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db1.t1",
            {"fetch" : True}
        )
        row_cnt_shard_after_split_db1_t1_b = \
            int(row_cnt_shard_after_split_db1_t1_b[0][0])
        self.assertEqual(row_cnt_shard_after_split_db1_t1_b, 200)
        row_cnt_shard_after_split_db2_t2_b = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM db2.t2",
            {"fetch" : True}
        )
        row_cnt_shard_after_split_db2_t2_b = \
            int(row_cnt_shard_after_split_db2_t2_b[0][0])
        self.assertEqual(row_cnt_shard_after_split_db2_t2_b, 200)

        #Enter data into the global server
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID VARCHAR(20), name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                      "VALUES('%s', 'TEST %s')" % (i, i))
        # Fixed wait for global-group replication to reach every shard
        # group; presumably long enough on the test machines — no
        # explicit sync is performed here.
        time.sleep(5)

        #Verify that the data is there in the first shard.
        global_table_count = self.__server_2.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table",
            {"fetch" : True}
        )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        #Verify that the data is there in the second shard.
        global_table_count = self.__server_3.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table",
            {"fetch" : True}
        )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        #Verify that the data is there in the third shard.
        global_table_count = self.__server_4.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table",
            {"fetch" : True}
        )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        #Verify that the data is there in the fourth shard.
        global_table_count = self.__server_5.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table",
            {"fetch" : True}
        )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

        #Verify that the data is there in the fifth shard.
        global_table_count = self.__server_6.exec_stmt(
            "SELECT COUNT(*) FROM global.global_table",
            {"fetch" : True}
        )
        global_table_count = int(global_table_count[0][0])
        self.assertTrue(global_table_count == 10)

    def tearDown(self):
        """Clean up the existing environment
        """
        tests.utils.cleanup_environment()
    def setUp(self):
        """Configure the existing environment

        Builds six groups: GROUPID1 is the global group, GROUPID2-5 each
        hold a full copy of db1.t1 / db2.t2 (rows 1..1000), and GROUPID6
        starts empty. A HASH sharding scheme is then defined over both
        tables and pruned so each group keeps only its own hash bucket.
        """
        tests.utils.cleanup_environment()
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        # GROUPID1: global group.
        self.__options_1 = {
            "uuid" : _uuid.UUID("{aa75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(0),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server1 = MySQLServer.discover_uuid(self.__options_1["address"])
        self.__options_1["uuid"] = _uuid.UUID(uuid_server1)
        self.__server_1 = MySQLServer(**self.__options_1)
        MySQLServer.add(self.__server_1)
        self.__server_1.connect()
        self.__group_1 = Group("GROUPID1", "First description.")
        Group.add(self.__group_1)
        self.__group_1.add_server(self.__server_1)
        tests.utils.configure_decoupled_master(self.__group_1, self.__server_1)

        # GROUPID2: shard group with 1000 rows in db1.t1 and db2.t2
        # (db2.t2 references db1.t1 through a foreign key).
        self.__options_2 = {
            "uuid" : _uuid.UUID("{aa45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(1),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server2 = MySQLServer.discover_uuid(self.__options_2["address"])
        self.__options_2["uuid"] = _uuid.UUID(uuid_server2)
        self.__server_2 = MySQLServer(**self.__options_2)
        MySQLServer.add(self.__server_2)
        self.__server_2.connect()
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_2.exec_stmt("CREATE DATABASE db1")
        self.__server_2.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_2.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_2.exec_stmt("CREATE DATABASE db2")
        self.__server_2.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_2.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        self.__group_2 = Group("GROUPID2", "Second description.")
        Group.add(self.__group_2)
        self.__group_2.add_server(self.__server_2)
        tests.utils.configure_decoupled_master(self.__group_2, self.__server_2)

        # GROUPID3: same data set as GROUPID2.
        self.__options_3 = {
            "uuid" : _uuid.UUID("{bb75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(2),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server3 = MySQLServer.discover_uuid(self.__options_3["address"])
        self.__options_3["uuid"] = _uuid.UUID(uuid_server3)
        self.__server_3 = MySQLServer(**self.__options_3)
        MySQLServer.add( self.__server_3)
        self.__server_3.connect()
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_3.exec_stmt("CREATE DATABASE db1")
        self.__server_3.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_3.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_3.exec_stmt("CREATE DATABASE db2")
        self.__server_3.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_3.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        self.__group_3 = Group("GROUPID3", "Third description.")
        Group.add( self.__group_3)
        self.__group_3.add_server(self.__server_3)
        tests.utils.configure_decoupled_master(self.__group_3, self.__server_3)

        # GROUPID4: same data set as GROUPID2.
        self.__options_4 = {
            "uuid" : _uuid.UUID("{bb45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(3),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server4 = MySQLServer.discover_uuid(self.__options_4["address"])
        self.__options_4["uuid"] = _uuid.UUID(uuid_server4)
        self.__server_4 = MySQLServer(**self.__options_4)
        MySQLServer.add(self.__server_4)
        self.__server_4.connect()
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_4.exec_stmt("CREATE DATABASE db1")
        self.__server_4.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_4.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_4.exec_stmt("CREATE DATABASE db2")
        self.__server_4.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_4.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        self.__group_4 = Group("GROUPID4", "Fourth description.")
        Group.add( self.__group_4)
        self.__group_4.add_server(self.__server_4)
        tests.utils.configure_decoupled_master(self.__group_4, self.__server_4)

        # GROUPID5: same data set as GROUPID2.
        self.__options_5 = {
            "uuid" : _uuid.UUID("{cc75b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(4),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server5 = MySQLServer.discover_uuid(self.__options_5["address"])
        self.__options_5["uuid"] = _uuid.UUID(uuid_server5)
        self.__server_5 = MySQLServer(**self.__options_5)
        MySQLServer.add(self.__server_5)
        self.__server_5.connect()
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db1")
        self.__server_5.exec_stmt("CREATE DATABASE db1")
        self.__server_5.exec_stmt("CREATE TABLE db1.t1"
                                  "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db1.t1 "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        self.__server_5.exec_stmt("DROP DATABASE IF EXISTS db2")
        self.__server_5.exec_stmt("CREATE DATABASE db2")
        self.__server_5.exec_stmt("CREATE TABLE db2.t2"
                                  "(userID INT, salary INT, "
                                  "CONSTRAINT FOREIGN KEY(userID) "
                                  "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            self.__server_5.exec_stmt("INSERT INTO db2.t2 "
                                      "VALUES(%s, %s)" % (i, i))
        self.__group_5 = Group("GROUPID5", "Fifth description.")
        Group.add( self.__group_5)
        self.__group_5.add_server(self.__server_5)
        tests.utils.configure_decoupled_master(self.__group_5, self.__server_5)

        # GROUPID6: empty group. NOTE(review): unlike the other servers,
        # self.__server_6.connect() is never called here — presumably
        # intentional since no statements are executed on it; confirm.
        self.__options_6 = {
            "uuid" : _uuid.UUID("{cc45b12b-98d1-414c-96af-9e9d4b179678}"),
            "address" : MySQLInstances().get_address(5),
            "user" : MySQLInstances().user,
            "passwd" : MySQLInstances().passwd,
        }
        uuid_server6 = MySQLServer.discover_uuid(self.__options_6["address"])
        self.__options_6["uuid"] = _uuid.UUID(uuid_server6)
        self.__server_6 = MySQLServer(**self.__options_6)
        MySQLServer.add(self.__server_6)
        self.__group_6 = Group("GROUPID6", "Sixth description.")
        Group.add( self.__group_6)
        self.__group_6.add_server(self.__server_6)
        tests.utils.configure_decoupled_master(self.__group_6, self.__server_6)

        # HASH sharding over both tables across GROUPID2-5, then prune
        # so each group keeps only the rows hashing to its own shard.
        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_define_shard_mapping).")
        self.assertEqual(status[2], 1)

        status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_table(1, "db2.t2", "userID")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard_mapping).")

        status = self.proxy.sharding.add_shard(
            1,
            "GROUPID2,GROUPID3,GROUPID4,GROUPID5",
            "ENABLED"
        )
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_add_shard).")

        status = self.proxy.sharding.prune_shard("db1.t1")
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"],
                         "Executed action (_prune_shard_tables).")
class TestHashSplitGlobal(tests.utils.TestCase):
    """Unit tests for the HASH shard split operation, verifying that the
    global server configuration remains intact after the split.

    The fixture builds one global group (GROUPID1), four shard groups
    (GROUPID2..GROUPID5) each pre-loaded with an identical copy of the
    data, and one empty group (GROUPID6) that is the destination of
    every split performed by the tests.
    """

    def _spawn_server(self, instance_idx, connect=True):
        """Create and register a MySQLServer backed by instance
        *instance_idx*, optionally connecting to it.

        :param instance_idx: Index passed to MySQLInstances().get_address.
        :param connect: Whether to connect to the server immediately.
        :return: The registered MySQLServer object.

        The server UUID is discovered from the running instance, so no
        placeholder UUID is required (the original hard-coded UUIDs were
        always overwritten by the discovered value).
        """
        instances = MySQLInstances()
        address = instances.get_address(instance_idx)
        options = {
            "uuid": _uuid.UUID(MySQLServer.discover_uuid(address)),
            "address": address,
            "user": instances.user,
            "passwd": instances.passwd,
        }
        server = MySQLServer(**options)
        MySQLServer.add(server)
        if connect:
            server.connect()
        return server

    @staticmethod
    def _setup_group(group_id, description, server):
        """Create group *group_id*, add *server* to it and make the
        server the group's decoupled master.

        :return: The registered Group object.
        """
        group = Group(group_id, description)
        Group.add(group)
        group.add_server(server)
        tests.utils.configure_decoupled_master(group, server)
        return group

    @staticmethod
    def _load_shard_data(server):
        """Create db1.t1, db2.t2 and db3.t3 on *server* and insert 1000
        rows into each table.

        Every database is dropped first if it already exists.  The
        original fixture only dropped db2 on one of the four shard
        servers, which could break setUp when a stale db2 was left
        behind by a previous run.
        """
        server.exec_stmt("DROP DATABASE IF EXISTS db1")
        server.exec_stmt("CREATE DATABASE db1")
        server.exec_stmt("CREATE TABLE db1.t1"
                         "(userID INT PRIMARY KEY, name VARCHAR(30))")
        for i in range(1, 1001):
            server.exec_stmt("INSERT INTO db1.t1 "
                             "VALUES(%s, 'TEST %s')" % (i, i))
        server.exec_stmt("DROP DATABASE IF EXISTS db2")
        server.exec_stmt("CREATE DATABASE db2")
        server.exec_stmt("CREATE TABLE db2.t2"
                         "(userID INT, salary INT, "
                         "CONSTRAINT FOREIGN KEY(userID) "
                         "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            server.exec_stmt("INSERT INTO db2.t2 "
                             "VALUES(%s, %s)" % (i, i))
        server.exec_stmt("DROP DATABASE IF EXISTS db3")
        server.exec_stmt("CREATE DATABASE db3")
        server.exec_stmt("CREATE TABLE db3.t3"
                         "(userID INT, Department INT, "
                         "CONSTRAINT FOREIGN KEY(userID) "
                         "REFERENCES db1.t1(userID))")
        for i in range(1, 1001):
            server.exec_stmt("INSERT INTO db3.t3 "
                             "VALUES(%s, %s)" % (i, i))

    @staticmethod
    def _count_rows(server, table):
        """Return the number of rows in *table* on *server* as an int."""
        rows = server.exec_stmt(
            "SELECT COUNT(*) FROM %s" % (table, ), {"fetch": True})
        return int(rows[0][0])

    def setUp(self):
        """Configure the existing environment
        """
        tests.utils.cleanup_environment()
        self.manager, self.proxy = tests.utils.setup_xmlrpc()

        # Global group master: holds no shard data itself.
        self.__server_1 = self._spawn_server(0)
        self.__group_1 = self._setup_group(
            "GROUPID1", "First description.", self.__server_1)

        # Four shard groups, each pre-loaded with identical data.
        self.__server_2 = self._spawn_server(1)
        self._load_shard_data(self.__server_2)
        self.__group_2 = self._setup_group(
            "GROUPID2", "Second description.", self.__server_2)

        self.__server_3 = self._spawn_server(2)
        self._load_shard_data(self.__server_3)
        self.__group_3 = self._setup_group(
            "GROUPID3", "Third description.", self.__server_3)

        self.__server_4 = self._spawn_server(3)
        self._load_shard_data(self.__server_4)
        self.__group_4 = self._setup_group(
            "GROUPID4", "Fourth description.", self.__server_4)

        self.__server_5 = self._spawn_server(4)
        self._load_shard_data(self.__server_5)
        self.__group_5 = self._setup_group(
            "GROUPID5", "Fifth description.", self.__server_5)

        # Destination group for the splits.  Its server is registered
        # but not connected here; the tests connect after the split.
        self.__server_6 = self._spawn_server(5, connect=False)
        self.__group_6 = self._setup_group(
            "GROUPID6", "Sixth description.", self.__server_6)

        # Define the HASH sharding scheme over the three tables and
        # spread shards 1..4 over GROUPID2..GROUPID5.
        status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
        self.check_xmlrpc_command_result(status, returns=1)

        for table in ("db1.t1", "db2.t2", "db3.t3"):
            status = self.proxy.sharding.add_table(1, table, "userID")
            self.check_xmlrpc_command_result(status)

        status = self.proxy.sharding.add_shard(
            1, "GROUPID2,GROUPID3,GROUPID4,GROUPID5", "ENABLED")
        self.check_xmlrpc_command_result(status)

        for table in ("db1.t1", "db2.t2", "db3.t3"):
            status = self.proxy.sharding.prune_shard(table)
            self.check_xmlrpc_command_result(status)

    def _split_and_verify(self, shard_id, source_server):
        """Split *shard_id* into GROUPID6 and verify the outcome.

        For every sharded table, checks that the post-split row counts
        on *source_server* and on GROUPID6's server add up to the
        pre-split count.  Then inserts rows into a table on the global
        group and verifies they reach every shard group.
        """
        tables = ("db1.t1", "db2.t2", "db3.t3")
        counts_before = [self._count_rows(source_server, table)
                         for table in tables]

        status = self.proxy.sharding.split_shard(str(shard_id), "GROUPID6")
        self.check_xmlrpc_command_result(status)

        # Connect to both servers before querying row counts.
        self.__server_6.connect()
        source_server.connect()

        for table, count_before in zip(tables, counts_before):
            kept = self._count_rows(source_server, table)
            moved = self._count_rows(self.__server_6, table)
            self.assertEqual(count_before, kept + moved)

        # Enter data into the global server and verify that it shows up
        # on every shard group.
        self.__server_1.exec_stmt("DROP DATABASE IF EXISTS global")
        self.__server_1.exec_stmt("CREATE DATABASE global")
        self.__server_1.exec_stmt("CREATE TABLE global.global_table"
                                  "(userID INT, name VARCHAR(30))")
        for i in range(1, 11):
            self.__server_1.exec_stmt("INSERT INTO global.global_table "
                                      "VALUES(%s, 'TEST %s')" % (i, i))
        # Fixed wait for replication to catch up; a synchronization
        # primitive would be more robust if one is available — TODO.
        time.sleep(5)
        for server in (self.__server_2, self.__server_3, self.__server_4,
                       self.__server_5, self.__server_6):
            self.assertEqual(
                self._count_rows(server, "global.global_table"), 10)

    def test_split_shard_1(self):
        '''Test the split of shard 1 and the global server configuration
        after that.  The test splits shard 1 between GROUPID2 and
        GROUPID6, verifies the row counts, then fires an INSERT on the
        global group and verifies all the shards got the tuples.
        '''
        self._split_and_verify(1, self.__server_2)

    def test_split_shard_2(self):
        '''Test the split of shard 2 and the global server configuration
        after that.  The test splits shard 2 between GROUPID3 and
        GROUPID6, verifies the row counts, then fires an INSERT on the
        global group and verifies all the shards got the tuples.
        '''
        self._split_and_verify(2, self.__server_3)

    def test_split_shard_3(self):
        '''Test the split of shard 3 and the global server configuration
        after that.  The test splits shard 3 between GROUPID4 and
        GROUPID6, verifies the row counts, then fires an INSERT on the
        global group and verifies all the shards got the tuples.
        '''
        self._split_and_verify(3, self.__server_4)

    def test_split_shard_4(self):
        '''Test the split of shard 4 and the global server configuration
        after that.  The test splits shard 4 between GROUPID5 and
        GROUPID6, verifies the row counts, then fires an INSERT on the
        global group and verifies all the shards got the tuples.
        '''
        self._split_and_verify(4, self.__server_5)

    def test_hash_dump(self):
        """Test the dump of the HASH sharding information for the table.
        """
        shardinginformation_1 = tests.utils.make_info_result(
            [['db1', 't1', 'userID',
              'E2996A7B8509367020B55A4ACD2AE46A', '1', 'HASH',
              'GROUPID2', 'GROUPID1'],
             ['db1', 't1', 'userID',
              'E88F547DF45C99F27646C76316FB21DF', '2', 'HASH',
              'GROUPID3', 'GROUPID1'],
             ['db1', 't1', 'userID',
              '7A2E76448FF04233F3851A492BEF1090', '3', 'HASH',
              'GROUPID4', 'GROUPID1'],
             ['db1', 't1', 'userID',
              '97427AA63E300F56536710F5D73A35FA', '4', 'HASH',
              'GROUPID5', 'GROUPID1']])
        packet = self.proxy.dump.sharding_information(0, "db1.t1")
        self.check_xmlrpc_result(packet, shardinginformation_1)

    def tearDown(self):
        """Clean up the existing environment
        """
        tests.utils.cleanup_environment()