def test_prune_lookup_shard2(self):
    """Verify that after the prune the lookup of any pruned value in
    the shard results in looking up the same shard.

    For every shard server, each userID2 value remaining in db2.t2
    after the prune must hash back to that server's own shard ID.
    """
    status = self.proxy.sharding.prune_shard("db2.t2")
    self.check_xmlrpc_command_result(status)
    #(server, expected shard id) pairs for the five shard servers.
    server_expected_shard = [
        (self.__server_2, 1),
        (self.__server_3, 2),
        (self.__server_4, 3),
        (self.__server_5, 4),
        (self.__server_6, 5),
    ]
    for server, expected_shard_id in server_expected_shard:
        rows = server.exec_stmt(
            "SELECT userID2 FROM db2.t2", {"fetch" : True}
        )
        #Check every surviving row: the original expression
        #rows[0:len(rows)][0] inspected only the first row.
        for row in rows:
            hash_sharding_spec = HashShardingSpecification.lookup(
                row[0], 1, "HASH"
            )
            self.assertEqual(hash_sharding_spec.shard_id,
                             expected_shard_id)
def test_prune_lookup_shard2(self):
    """Verify that after the prune the lookup of any pruned value in
    the shard results in looking up the same shard.

    For every shard server, each userID2 value remaining in db2.t2
    after the prune must hash back to that server's own shard ID.
    """
    #Verify the prune command succeeded (the sibling variant of this
    #test checks the status; this one previously discarded it).
    status = self.proxy.sharding.prune_shard("db2.t2")
    self.check_xmlrpc_command_result(status)
    #(server, expected shard id) pairs for the five shard servers.
    server_expected_shard = [
        (self.__server_2, 1),
        (self.__server_3, 2),
        (self.__server_4, 3),
        (self.__server_5, 4),
        (self.__server_6, 5),
    ]
    for server, expected_shard_id in server_expected_shard:
        rows = server.exec_stmt(
            "SELECT userID2 FROM db2.t2", {"fetch" : True}
        )
        #Check every surviving row: the original expression
        #rows[0:len(rows)][0] inspected only the first row.
        for row in rows:
            hash_sharding_spec = HashShardingSpecification.lookup(
                row[0], 1, "HASH"
            )
            self.assertEqual(hash_sharding_spec.shard_id,
                             expected_shard_id)
def test_hash_lookup(self):
    """Test the hash sharding lookup.

    Look up 1000 consecutive keys, tally how many land on each of the
    five shards, and require every shard to receive at least one key
    so that the hash does not skew all values into a single shard.
    """
    shards = (self.__shard_1, self.__shard_2, self.__shard_3,
              self.__shard_4, self.__shard_5)
    hit_count = dict((shard.shard_id, 0) for shard in shards)
    for key in range(0, 1000):
        spec = HashShardingSpecification.lookup(
            key, self.__shard_mapping_id_1, "HASH"
        )
        if spec.shard_id in hit_count:
            hit_count[spec.shard_id] += 1
    #Every shard must have been hit at least once.
    for shard in shards:
        self.assertTrue(hit_count[shard.shard_id] > 0)
def test_hash_lookup(self):
    """Test the hash sharding lookup.

    Perform 1000 lookups, record which shard each key resolves to and
    verify that all five shards are used, i.e. the keys are not all
    skewed into one shard.
    """
    resolved_shard_ids = []
    for key in range(0, 1000):
        specification = HashShardingSpecification.lookup(
            key, self.__shard_mapping_id_1, "HASH"
        )
        resolved_shard_ids.append(specification.shard_id)
    #Each shard must appear at least once among the resolved ids.
    for shard in (self.__shard_1, self.__shard_2, self.__shard_3,
                  self.__shard_4, self.__shard_5):
        self.assertTrue(resolved_shard_ids.count(shard.shard_id) > 0)
def verify_and_fetch_shard(shard_id):
    """Find out if the shard_id exists and return the sharding
    specification for it. If it does not exist throw an exception.

    :param shard_id: The ID for the shard whose specification needs
                     to be fetched.

    :return: The sharding specification class representing the shard ID.

    :raises: ShardingError if the shard ID is not found.
    """
    #The underlying specification may be RANGE or HASH; the concrete
    #type is only known after the shard mapping is read, so start with
    #a RANGE fetch.
    range_sharding_spec = RangeShardingSpecification.fetch(shard_id)
    if range_sharding_spec is None:
        raise _errors.ShardingError(SHARD_NOT_FOUND % (shard_id, ))

    mapping_id = range_sharding_spec.shard_mapping_id

    #The shard mappings identify which sharding scheme is in use.
    shard_mappings = ShardMapping.fetch_by_id(mapping_id)
    if shard_mappings is None:
        raise _errors.ShardingError(
            SHARD_MAPPING_NOT_FOUND % (mapping_id, )
        )

    #A single shard mapping definition is shared by all of the shard
    #mappings fetched above.
    shard_mapping_defn = ShardMapping.fetch_shard_mapping_defn(mapping_id)
    if shard_mapping_defn is None:
        raise _errors.ShardingError(
            SHARD_MAPPING_DEFN_NOT_FOUND % (mapping_id, )
        )

    shard = Shards.fetch(shard_id)
    if shard is None:
        raise _errors.ShardingError(SHARD_NOT_FOUND % (shard_id, ))

    #Every fetched mapping shares one sharding type, so inspecting the
    #first is enough to decide which specification to return.
    if shard_mappings[0].type_name == "HASH":
        sharding_spec = HashShardingSpecification.fetch(shard_id)
    else:
        sharding_spec = range_sharding_spec
    return sharding_spec, shard, shard_mappings, shard_mapping_defn
def test_hash_remove(self):
    """Test the removal of hash shards.

    After removing all hash sharding specifications and the shards
    themselves, no key may resolve to a shard any more.
    """
    #Fetch all five specifications before any is removed, preserving
    #the original fetch-then-remove ordering.
    fetched_specs = [HashShardingSpecification.fetch(spec_id)
                     for spec_id in range(1, 6)]
    for spec in fetched_specs:
        spec.remove()
    #Remove the shard definitions themselves.
    for shard in (self.__shard_1, self.__shard_2, self.__shard_3,
                  self.__shard_4, self.__shard_5):
        shard.remove()
    #With everything gone, lookups must fail for any key.
    for key in range(0, 10):
        self.assertEqual(
            HashShardingSpecification.lookup(
                key, self.__shard_mapping_id_1, "HASH"
            ),
            None
        )
def test_fetch_sharding_scheme(self):
    """Test the fetch method of the HASH sharding scheme.

    list() does not return the hashing specifications ordered by
    shard_id, so each fetched specification is checked for membership
    rather than compared positionally.
    """
    fetched_specs = [HashShardingSpecification.fetch(spec_id)
                     for spec_id in range(1, 6)]
    listed_specs = HashShardingSpecification.list(1)
    for spec in fetched_specs:
        self.assertTrue(
            self.hash_sharding_specification_in_list(spec, listed_specs)
        )
def test_prune_lookup(self):
    """Verify that after pruning db1.t1 every userID value remaining
    on a shard server hashes back to that server's own shard ID.
    """
    self.proxy.sharding.prune_shard("db1.t1")
    #(server, expected shard id) pairs for the five shard servers.
    server_expected_shard = [
        (self.__server_2, 1),
        (self.__server_3, 2),
        (self.__server_4, 3),
        (self.__server_5, 4),
        (self.__server_6, 5),
    ]
    for server, expected_shard_id in server_expected_shard:
        rows = server.exec_stmt("SELECT userID FROM db1.t1",
                                {"fetch": True})
        #Check every surviving row: the original expression
        #rows[0:len(rows)][0] inspected only the first row.
        for row in rows:
            hash_sharding_spec = HashShardingSpecification.lookup(
                row[0], self.__shard_mapping_id_1, "HASH")
            self.assertEqual(hash_sharding_spec.shard_id,
                             expected_shard_id)
def verify_and_fetch_shard(shard_id): """Find out if the shard_id exists and return the sharding specification for it. If it does not exist throw an exception. :param shard_id: The ID for the shard whose specification needs to be fetched. :return: The sharding specification class representing the shard ID. :raises: ShardingError if the shard ID is not found. """ #Here the underlying sharding specification might be a RANGE #or a HASH. The type of sharding specification is obtained from the #shard mapping. range_sharding_spec = RangeShardingSpecification.fetch(shard_id) if range_sharding_spec is None: raise _errors.ShardingError(SHARD_NOT_FOUND % (shard_id, )) #Fetch the shard mappings and use them to find the type of sharding #scheme. shard_mappings = ShardMapping.fetch_by_id( range_sharding_spec.shard_mapping_id) if shard_mappings is None: raise _errors.ShardingError(SHARD_MAPPING_NOT_FOUND % (range_sharding_spec.shard_mapping_id, )) #Fetch the shard mapping definition. There is only one shard mapping #definition associated with all of the shard mappings. shard_mapping_defn = ShardMapping.fetch_shard_mapping_defn( range_sharding_spec.shard_mapping_id) if shard_mapping_defn is None: raise _errors.ShardingError(SHARD_MAPPING_DEFN_NOT_FOUND % (range_sharding_spec.shard_mapping_id, )) shard = Shards.fetch(shard_id) if shard is None: raise _errors.ShardingError(SHARD_NOT_FOUND % (shard_id, )) #Both of the shard_mappings retrieved will be of the same sharding #type. Hence it is safe to use one of them to retireve the sharding type. if shard_mappings[0].type_name == "HASH": return HashShardingSpecification.fetch(shard_id), \ shard, shard_mappings, shard_mapping_defn else: return range_sharding_spec, shard, shard_mappings, shard_mapping_defn
def test_fetch_sharding_scheme(self):
    """Test the fetch method of the HASH sharding scheme.

    list() does not return the hashing specifications in shard_id
    order, so each fetched specification is verified by membership in
    the listed set instead of by position.
    """
    all_specifications = HashShardingSpecification.list(1)
    for shard_id in (1, 2, 3, 4, 5):
        fetched = HashShardingSpecification.fetch(shard_id)
        self.assertTrue(
            self.hash_sharding_specification_in_list(
                fetched, all_specifications
            )
        )
def test_hash_remove(self):
    """Test the removal of hash shards.

    Once every hash sharding specification and every shard definition
    is removed, a lookup for any key must return None.
    """
    #Fetch the five specifications up front, then remove them in order.
    specifications = [
        HashShardingSpecification.fetch(shard_id)
        for shard_id in (1, 2, 3, 4, 5)
    ]
    for specification in specifications:
        specification.remove()
    #Drop the shard definitions themselves.
    for shard in (self.__shard_1, self.__shard_2, self.__shard_3,
                  self.__shard_4, self.__shard_5):
        shard.remove()
    #Lookups must now fail for any key.
    for key in range(0, 10):
        self.assertEqual(
            HashShardingSpecification.lookup(
                key, self.__shard_mapping_id_1, "HASH"
            ),
            None
        )
def _add_shard(shard_mapping_id, groupid_lb_list, state, update_only=False):
    """Add the RANGE shard specification. This represents a single instance
    of a shard specification that maps a key RANGE to a server.

    :param shard_mapping_id: The unique identification for a shard mapping.
    :param groupid_lb_list: The list of group_id, lower_bounds pairs in the
                        format, group_id/lower_bound, group_id/lower_bound... .
    :param state: Indicates whether a given shard is ENABLED or DISABLED
    :param update_only: Only update the state store and skip adding
                        range checks.

    :return: True if the add succeeded.
            False otherwise.
    :raises: ShardingError If the group on which the shard is being
             created does not exist,
             If the shard_mapping_id is not found,
             If adding the shard definition fails,
             If the state of the shard is an invalid value,
             If the range definition is invalid.
    """
    #Resolve the mapping definition; schema_type (RANGE/HASH) lives in
    #position 1 of the returned tuple.
    shard_mapping = ShardMapping.fetch_shard_mapping_defn(shard_mapping_id)
    if shard_mapping is None:
        raise _errors.ShardingError(SHARD_MAPPING_NOT_FOUND % \
                                    (shard_mapping_id, ))
    schema_type = shard_mapping[1]
    #Shards may only be added once per mapping through this path.
    if len(RangeShardingSpecification.list(shard_mapping_id)) != 0:
        raise _errors.ShardingError(SHARDS_ALREADY_EXIST)
    group_id_list, lower_bound_list = \
        _utils.get_group_lower_bound_list(groupid_lb_list)
    #RANGE sharding requires one lower bound per group; HASH sharding
    #auto-generates its bounds, so none may be supplied.
    if (len(group_id_list) != len(lower_bound_list)) and\
        schema_type == "RANGE":
        raise _errors.ShardingError(LOWER_BOUND_GROUP_ID_COUNT_MISMATCH)
    if len(lower_bound_list) != 0 and schema_type == "HASH":
        raise _errors.ShardingError(LOWER_BOUND_AUTO_GENERATED)
    #Validate each supplied lower bound against the datatype handler
    #for this sharding scheme.
    if schema_type in Shards.VALID_RANGE_SHARDING_TYPES:
        for lower_bound in lower_bound_list:
            if not SHARDING_DATATYPE_HANDLER[schema_type].\
                is_valid_lower_bound(lower_bound):
                raise _errors.ShardingError(
                    INVALID_LOWER_BOUND_VALUE % (lower_bound, ))
    state = state.upper()
    if state not in Shards.VALID_SHARD_STATES:
        raise _errors.ShardingError(INVALID_SHARD_STATE % (state, ))
    for index, group_id in enumerate(group_id_list):
        shard = Shards.add(group_id, state)
        shard_id = shard.shard_id
        if schema_type == "HASH":
            #HASH bounds are generated internally by the specification.
            HashShardingSpecification.add(
                shard_mapping_id, shard_id
            )
            _LOGGER.debug(
                "Added Shard (map id = %s, id = %s).",
                shard_mapping_id, shard_id
            )
        else:
            #RANGE (and similar) schemes pair the shard with the
            #user-supplied lower bound at the same index.
            range_sharding_specification = \
                SHARDING_SPECIFICATION_HANDLER[schema_type].add(
                    shard_mapping_id, lower_bound_list[index],
                    shard_id
                )
            _LOGGER.debug(
                "Added Shard (map id = %s, lower bound = %s, id = %s).",
                range_sharding_specification.shard_mapping_id,
                range_sharding_specification.lower_bound,
                range_sharding_specification.shard_id
            )
        if not update_only:
            #If the shard is added in a DISABLED state do not setup
            #replication with the primary of the global group. Basically
            #setup replication only if the shard is ENABLED.
            if state == "ENABLED":
                _setup_shard_group_replication(shard_id)
    if not update_only:
        #Add the shard limits into the metadata present in each of the
        #shards.
        _events.trigger_within_procedure(
            ADD_SHARD_RANGE_CHECK, shard_mapping_id, schema_type
        )
def _create_group_with_server(self, address_index, group_id, description,
                              load_data=True):
    """Create one MySQLServer/Group pair of the test topology.

    :param address_index: Index into MySQLInstances for the address.
    :param group_id: The ID of the group to create.
    :param description: Human readable description of the group.
    :param load_data: When True, (re)create db1.t1 on the server and
                      insert 500 test rows.
    :return: The (server, group) pair that was created.
    """
    options = {
        "address"  : MySQLInstances().get_address(address_index),
        "user"     : MySQLInstances().user,
        "passwd"   : MySQLInstances().passwd,
    }
    #The server UUID is always discovered from the live instance.
    options["uuid"] = _uuid.UUID(
        MySQLServer.discover_uuid(options["address"])
    )
    server = MySQLServer(**options)
    MySQLServer.add(server)
    if load_data:
        server.connect()
        server.exec_stmt("DROP DATABASE IF EXISTS db1")
        server.exec_stmt("CREATE DATABASE db1")
        server.exec_stmt("CREATE TABLE db1.t1"
                         "(userID INT, name VARCHAR(30))")
        for i in range(1, 501):
            server.exec_stmt("INSERT INTO db1.t1 "
                             "VALUES(%s, 'TEST %s')" % (i, i))
    group = Group(group_id, description)
    Group.add(group)
    group.add_server(server)
    tests.utils.configure_decoupled_master(group, server)
    return server, group

def setUp(self):
    """Build the sharded test topology.

    One global group (GROUPID1, no data) and five shard groups
    (GROUPID2-GROUPID6) whose servers are preloaded with 500 rows in
    db1.t1, tied together by a single HASH shard mapping over
    db1.t1(userID) with five shards.
    """
    self.manager, self.proxy = tests.utils.setup_xmlrpc()

    #Global group: hosts the shard mapping, carries no shard data.
    self.__server_1, self.__group_1 = self._create_group_with_server(
        0, "GROUPID1", "First description.", load_data=False
    )
    #Shard groups: one server each, preloaded with test rows.
    self.__server_2, self.__group_2 = self._create_group_with_server(
        1, "GROUPID2", "Second description.")
    self.__server_3, self.__group_3 = self._create_group_with_server(
        2, "GROUPID3", "Third description.")
    self.__server_4, self.__group_4 = self._create_group_with_server(
        3, "GROUPID4", "Fourth description.")
    self.__server_5, self.__group_5 = self._create_group_with_server(
        4, "GROUPID5", "Fifth description.")
    self.__server_6, self.__group_6 = self._create_group_with_server(
        5, "GROUPID6", "Sixth description.")

    #No shard mapping definitions may pre-exist.
    self.__shard_mapping_list = ShardMapping.list_shard_mapping_defn()
    self.assertEqual(self.__shard_mapping_list, [])

    #One HASH shard mapping over db1.t1(userID) covering five shards.
    self.__shard_mapping_id_1 = ShardMapping.define("HASH", "GROUPID1")
    self.__shard_mapping_1 = ShardMapping.add(
        self.__shard_mapping_id_1, "db1.t1", "userID"
    )
    self.__shard_1 = Shards.add("GROUPID2")
    self.__shard_2 = Shards.add("GROUPID3")
    self.__shard_3 = Shards.add("GROUPID4")
    self.__shard_4 = Shards.add("GROUPID5")
    self.__shard_5 = Shards.add("GROUPID6")
    self.__hash_sharding_specification_1 = HashShardingSpecification.add(
        self.__shard_mapping_1.shard_mapping_id, self.__shard_1.shard_id
    )
    self.__hash_sharding_specification_2 = HashShardingSpecification.add(
        self.__shard_mapping_1.shard_mapping_id, self.__shard_2.shard_id
    )
    self.__hash_sharding_specification_3 = HashShardingSpecification.add(
        self.__shard_mapping_1.shard_mapping_id, self.__shard_3.shard_id
    )
    self.__hash_sharding_specification_4 = HashShardingSpecification.add(
        self.__shard_mapping_1.shard_mapping_id, self.__shard_4.shard_id
    )
    self.__hash_sharding_specification_5 = HashShardingSpecification.add(
        self.__shard_mapping_1.shard_mapping_id, self.__shard_5.shard_id
    )
def test_prune_lookup(self):
    """Verify that after pruning db1.t1 every userID value remaining
    on a shard server hashes back to that server's own shard ID.
    """
    self.proxy.sharding.prune_shard("db1.t1")
    #(server, expected shard id) pairs for the five shard servers.
    server_expected_shard = [
        (self.__server_2, 1),
        (self.__server_3, 2),
        (self.__server_4, 3),
        (self.__server_5, 4),
        (self.__server_6, 5),
    ]
    for server, expected_shard_id in server_expected_shard:
        rows = server.exec_stmt(
            "SELECT userID FROM db1.t1", {"fetch" : True})
        #Check every surviving row: the original expression
        #rows[0:len(rows)][0] inspected only the first row.
        for row in rows:
            hash_sharding_spec = HashShardingSpecification.lookup(
                row[0], self.__shard_mapping_id_1, "HASH"
            )
            self.assertEqual(hash_sharding_spec.shard_id,
                             expected_shard_id)
def _check_shard_information(shard_id, destn_group_id, mysqldump_binary,
                             mysqlclient_binary, split_value, config_file,
                             prune_limit, cmd, update_only):
    """Verify the sharding information before starting a re-sharding
    operation.

    :param shard_id: The destination shard ID.
    :param destn_group_id: The Destination group ID.
    :param mysqldump_binary: The path to the mysqldump binary.
    :param mysqlclient_binary: The path to the mysqlclient binary.
    :param split_value: The point at which the sharding definition
                        should be split.
    :param config_file: The complete path to the fabric configuration
                        file.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates if it is a split or a move being executed.
    :param update_only: If the operation is a update only operation.

    :raises: ShardingError on any validation failure.
    """
    #Both helper binaries must exist before any work is scheduled.
    if not _services_utils.is_valid_binary(mysqldump_binary):
        raise _errors.ShardingError(
            _services_sharding.MYSQLDUMP_NOT_FOUND % mysqldump_binary)

    if not _services_utils.is_valid_binary(mysqlclient_binary):
        raise _errors.ShardingError(
            _services_sharding.MYSQLCLIENT_NOT_FOUND % mysqlclient_binary)

    if cmd == "SPLIT":
        range_sharding_spec, _, shard_mappings, _ = \
            _services_sharding.verify_and_fetch_shard(shard_id)
        upper_bound = \
            SHARDING_SPECIFICATION_HANDLER[shard_mappings[0].type_name].\
            get_upper_bound(
                range_sharding_spec.lower_bound,
                range_sharding_spec.shard_mapping_id,
                shard_mappings[0].type_name
            )
        #If the underlying sharding scheme is a HASH. When a shard is
        #split, all the tables that are part of the shard, have the same
        #sharding scheme. All the shard mappings associated with this
        #shard_id will be of the same sharding type. Hence it is safe to
        #use one of the shard mappings.
        if shard_mappings[0].type_name == "HASH":
            #HASH splits compute their own split point; a user-supplied
            #one is an error.
            if split_value is not None:
                raise _errors.ShardingError(
                    _services_sharding.NO_LOWER_BOUND_FOR_HASH_SHARDING
                )
            if upper_bound is None:
                #While splitting a range, retrieve the next upper bound
                #and find the mid-point, in the case where the next
                #upper_bound is unavailable pick the maximum value in
                #the set of values in the shard.
                upper_bound = HashShardingSpecification.fetch_max_key(shard_id)
            #Calculate the split value.
            split_value = \
                SHARDING_DATATYPE_HANDLER[shard_mappings[0].type_name].\
                split_value(
                    range_sharding_spec.lower_bound,
                    upper_bound
                )
        elif split_value is not None:
            #RANGE split with an explicit split point: it must lie
            #within the shard's bounds.
            if not (SHARDING_DATATYPE_HANDLER[shard_mappings[0].type_name].\
                    is_valid_split_value(
                        split_value, range_sharding_spec.lower_bound,
                        upper_bound
                    )
                ):
                raise _errors.ShardingError(
                    _services_sharding.INVALID_LOWER_BOUND_VALUE %
                    (split_value, )
                )
        elif split_value is None:
            #RANGE split without a split point cannot proceed.
            raise _errors.ShardingError(
                _services_sharding.SPLIT_VALUE_NOT_DEFINED
            )

    #Ensure that the group does not already contain a shard.
    if Shards.lookup_shard_id(destn_group_id) is not None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_MOVE_DESTINATION_NOT_EMPTY %
            (destn_group_id, )
        )

    #Fetch the group information for the source shard that
    #needs to be moved.
    source_shard = Shards.fetch(shard_id)
    if source_shard is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_NOT_FOUND % (shard_id, ))

    #Fetch the group_id and the group that hosts the source shard.
    source_group_id = source_shard.group_id

    destn_group = Group.fetch(destn_group_id)
    if destn_group is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_NOT_FOUND % (destn_group_id, ))

    if not update_only:
        #Full operation: back up the source shard first.
        _events.trigger_within_procedure(
            BACKUP_SOURCE_SHARD, shard_id, source_group_id, destn_group_id,
            mysqldump_binary, mysqlclient_binary, split_value,
            config_file, prune_limit, cmd, update_only
        )
    else:
        #Metadata-only operation: skip straight to the switch step.
        _events.trigger_within_procedure(
            SETUP_RESHARDING_SWITCH, shard_id, source_group_id,
            destn_group_id, split_value, prune_limit, cmd, update_only
        )
def _add_shard(shard_mapping_id, groupid_lb_list, state):
    """Add the RANGE shard specification. This represents a single instance
    of a shard specification that maps a key RANGE to a server.

    :param shard_mapping_id: The unique identification for a shard mapping.
    :param groupid_lb_list: The list of group_id, lower_bounds pairs in the
                        format, group_id/lower_bound, group_id/lower_bound... .
    :param state: Indicates whether a given shard is ENABLED or DISABLED

    :return: True if the add succeeded.
            False otherwise.
    :raises: ShardingError If the group on which the shard is being
             created does not exist,
             If the shard_mapping_id is not found,
             If adding the shard definition fails,
             If the state of the shard is an invalid value,
             If the range definition is invalid.
    """
    #Resolve the mapping definition; schema_type (RANGE/HASH) lives in
    #position 1 of the returned tuple.
    shard_mapping = ShardMapping.fetch_shard_mapping_defn(shard_mapping_id)
    if shard_mapping is None:
        raise _errors.ShardingError(SHARD_MAPPING_NOT_FOUND % \
                                    (shard_mapping_id, ))
    schema_type = shard_mapping[1]
    #Shards may only be added once per mapping through this path.
    if len(RangeShardingSpecification.list(shard_mapping_id)) != 0:
        raise _errors.ShardingError(SHARDS_ALREADY_EXIST)
    group_id_list, lower_bound_list = \
        _utils.get_group_lower_bound_list(groupid_lb_list)
    #RANGE sharding requires one lower bound per group; HASH sharding
    #auto-generates its bounds, so none may be supplied.
    if (len(group_id_list) != len(lower_bound_list)) and\
        schema_type == "RANGE":
        raise _errors.ShardingError(LOWER_BOUND_GROUP_ID_COUNT_MISMATCH)
    if len(lower_bound_list) != 0 and schema_type == "HASH":
        raise _errors.ShardingError(LOWER_BOUND_AUTO_GENERATED)
    #Validate each supplied lower bound against the datatype handler
    #for this sharding scheme.
    if schema_type in Shards.VALID_RANGE_SHARDING_TYPES:
        for lower_bound in lower_bound_list:
            if not SHARDING_DATATYPE_HANDLER[schema_type].\
                is_valid_lower_bound(lower_bound):
                raise _errors.ShardingError(INVALID_LOWER_BOUND_VALUE %
                                            (lower_bound, ))
    state = state.upper()
    if state not in Shards.VALID_SHARD_STATES:
        raise _errors.ShardingError(INVALID_SHARD_STATE % (state, ))
    for index, group_id in enumerate(group_id_list):
        shard = Shards.add(group_id, state)
        shard_id = shard.shard_id
        if schema_type == "HASH":
            #HASH bounds are generated internally by the specification.
            HashShardingSpecification.add(shard_mapping_id, shard_id)
            _LOGGER.debug("Added Shard (map id = %s, id = %s).",
                          shard_mapping_id, shard_id)
        else:
            #RANGE (and similar) schemes pair the shard with the
            #user-supplied lower bound at the same index.
            range_sharding_specification = \
                SHARDING_SPECIFICATION_HANDLER[schema_type].add(
                    shard_mapping_id, lower_bound_list[index],
                    shard_id
                )
            _LOGGER.debug(
                "Added Shard (map id = %s, lower bound = %s, id = %s).",
                range_sharding_specification.shard_mapping_id,
                range_sharding_specification.lower_bound,
                range_sharding_specification.shard_id)
        #If the shard is added in a DISABLED state do not setup replication
        #with the primary of the global group. Basically setup replication
        #only if the shard is ENABLED.
        if state == "ENABLED":
            _setup_shard_group_replication(shard_id)
def test_shard_split(self):
    """Split shard 1 into GROUPID3 and verify the result.

    Lookups for keys 1..99 must be served by both halves of the split,
    each half must hold fewer rows than the original shard, and two
    new shard ids (2 and 3) must have been generated.
    """
    split_cnt_1 = 0
    split_cnt_2 = 0
    shard_server_1 = None
    shard_server_2 = None
    expected_address_list_1 = \
        [MySQLInstances().get_address(2), MySQLInstances().get_address(3)]
    expected_address_list_2 = \
        [MySQLInstances().get_address(4), MySQLInstances().get_address(5)]
    status = self.proxy.sharding.split_shard("1", "GROUPID3")
    self.check_xmlrpc_command_result(status)
    for i in range(1, 100):
        status = self.proxy.sharding.lookup_servers("db1.t1", i, "LOCAL")
        #Collect uuids and addresses in a single pass over the result
        #instead of iterating check_xmlrpc_iter(status) twice.
        obtained_uuid_list = []
        obtained_address_list = []
        for info in self.check_xmlrpc_iter(status):
            obtained_uuid_list.append(info['server_uuid'])
            obtained_address_list.append(info['address'])
        try:
            self.assertEqual(set(expected_address_list_1),
                             set(obtained_address_list))
            split_cnt_1 = split_cnt_1 + 1
            if shard_server_1 is None:
                shard_server_1 = fetch_test_server(obtained_uuid_list[0])
        except AssertionError:
            #Not the first half, so it must be the second one.
            self.assertEqual(set(expected_address_list_2),
                             set(obtained_address_list))
            split_cnt_2 = split_cnt_2 + 1
            if shard_server_2 is None:
                shard_server_2 = fetch_test_server(obtained_uuid_list[0])
    #Ensure that both the splits have been utilized.
    self.assertTrue(split_cnt_1 > 0)
    self.assertTrue(split_cnt_2 > 0)
    shard_server_1.connect()
    shard_server_2.connect()
    row_cnt_shard_1 = shard_server_1.exec_stmt(
        "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
    row_cnt_shard_2 = shard_server_2.exec_stmt(
        "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
    #Ensure that the split has happened, the number of values in
    #each shard should be less than the original.
    self.assertTrue(int(row_cnt_shard_1[0][0]) < 100)
    self.assertTrue(int(row_cnt_shard_2[0][0]) < 100)
    #Ensure that two new shard_ids have been generated.
    hash_sharding_specifications = HashShardingSpecification.list(1)
    self.assertTrue(
        ShardingUtils.compare_hash_specifications(
            hash_sharding_specifications[1],
            HashShardingSpecification.fetch(2)))
    self.assertTrue(
        ShardingUtils.compare_hash_specifications(
            hash_sharding_specifications[0],
            HashShardingSpecification.fetch(3)))
def setUp(self):
    """Build the test topology: one global group plus five shard groups.

    GROUPID1 hosts the HASH shard mapping for ``db1.t1`` (keyed on
    ``userID``); GROUPID2..GROUPID6 each get one server loaded with 500
    sample rows and become shards 1..5 of that mapping.
    """
    self.manager, self.proxy = tests.utils.setup_xmlrpc()

    # Global group (GROUPID1): hosts the shard mapping; no test data.
    self.__options_1, self.__server_1 = self.__create_server(
        "{aa75b12b-98d1-414c-96af-9e9d4b179678}", 0)
    self.__group_1 = self.__create_group(
        "GROUPID1", "First description.", self.__server_1)

    # Shard groups: each server is connected and loaded with 500 rows.
    self.__options_2, self.__server_2 = self.__create_server(
        "{aa45b12b-98d1-414c-96af-9e9d4b179678}", 1)
    self.__load_test_data(self.__server_2)
    self.__group_2 = self.__create_group(
        "GROUPID2", "Second description.", self.__server_2)

    self.__options_3, self.__server_3 = self.__create_server(
        "{bb75b12b-98d1-414c-96af-9e9d4b179678}", 2)
    self.__load_test_data(self.__server_3)
    self.__group_3 = self.__create_group(
        "GROUPID3", "Third description.", self.__server_3)

    self.__options_4, self.__server_4 = self.__create_server(
        "{bb45b12b-98d1-414c-96af-9e9d4b179678}", 3)
    self.__load_test_data(self.__server_4)
    self.__group_4 = self.__create_group(
        "GROUPID4", "Fourth description.", self.__server_4)

    self.__options_5, self.__server_5 = self.__create_server(
        "{cc75b12b-98d1-414c-96af-9e9d4b179678}", 4)
    self.__load_test_data(self.__server_5)
    self.__group_5 = self.__create_group(
        "GROUPID5", "Fifth description.", self.__server_5)

    self.__options_6, self.__server_6 = self.__create_server(
        "{cc45b12b-98d1-414c-96af-9e9d4b179678}", 5)
    self.__load_test_data(self.__server_6)
    self.__group_6 = self.__create_group(
        "GROUPID6", "Sixth description.", self.__server_6)

    # No shard mapping may exist before this test defines one.
    self.__shard_mapping_list = ShardMapping.list_shard_mapping_defn()
    self.assertEqual(self.__shard_mapping_list, [])

    self.__shard_mapping_id_1 = ShardMapping.define("HASH", "GROUPID1")
    self.__shard_mapping_1 = ShardMapping.add(
        self.__shard_mapping_id_1, "db1.t1", "userID")

    self.__shard_1 = Shards.add("GROUPID2")
    self.__shard_2 = Shards.add("GROUPID3")
    self.__shard_3 = Shards.add("GROUPID4")
    self.__shard_4 = Shards.add("GROUPID5")
    self.__shard_5 = Shards.add("GROUPID6")

    self.__hash_sharding_specification_1 = HashShardingSpecification.add(
        self.__shard_mapping_1.shard_mapping_id,
        self.__shard_1.shard_id)
    self.__hash_sharding_specification_2 = HashShardingSpecification.add(
        self.__shard_mapping_1.shard_mapping_id,
        self.__shard_2.shard_id)
    self.__hash_sharding_specification_3 = HashShardingSpecification.add(
        self.__shard_mapping_1.shard_mapping_id,
        self.__shard_3.shard_id)
    self.__hash_sharding_specification_4 = HashShardingSpecification.add(
        self.__shard_mapping_1.shard_mapping_id,
        self.__shard_4.shard_id)
    self.__hash_sharding_specification_5 = HashShardingSpecification.add(
        self.__shard_mapping_1.shard_mapping_id,
        self.__shard_5.shard_id)

def __create_server(self, placeholder_uuid, address_index):
    """Create and register the MySQL server at *address_index*.

    :param placeholder_uuid: Initial UUID string; it is replaced by the
                             UUID discovered from the running server.
    :param address_index: Index into MySQLInstances' address list.
    :return: Tuple of (options dict, registered MySQLServer).
    """
    options = {
        "uuid": _uuid.UUID(placeholder_uuid),
        "address": MySQLInstances().get_address(address_index),
        "user": MySQLInstances().user,
        "passwd": MySQLInstances().passwd,
    }
    # The discovered UUID of the live server supersedes the placeholder.
    options["uuid"] = _uuid.UUID(
        MySQLServer.discover_uuid(options["address"]))
    server = MySQLServer(**options)
    MySQLServer.add(server)
    return options, server

def __load_test_data(self, server):
    """Connect *server* and populate db1.t1 with 500 sample rows."""
    server.connect()
    server.exec_stmt("DROP DATABASE IF EXISTS db1")
    server.exec_stmt("CREATE DATABASE db1")
    server.exec_stmt("CREATE TABLE db1.t1"
                     "(userID INT, name VARCHAR(30))")
    for i in range(1, 501):
        server.exec_stmt("INSERT INTO db1.t1 "
                         "VALUES(%s, 'TEST %s')" % (i, i))

def __create_group(self, group_id, description, master):
    """Create a group, add *master* to it and promote it to master.

    :return: The newly registered Group.
    """
    group = Group(group_id, description)
    Group.add(group)
    group.add_server(master)
    tests.utils.configure_decoupled_master(group, master)
    return group
def test_shard_split(self):
    """Split shard "1" into group GROUPID3 and verify the outcome.

    Checks the job completed with the expected final action, that key
    lookups hit both halves of the split, that each half holds fewer
    rows than the original shard, and that two fresh shard ids exist.
    """
    count_split_1 = 0
    count_split_2 = 0
    first_server = None
    second_server = None
    # Server addresses expected to back each half of the split.
    first_addresses = set(
        [MySQLInstances().get_address(2), MySQLInstances().get_address(3)])
    second_addresses = set(
        [MySQLInstances().get_address(4), MySQLInstances().get_address(5)])

    status = self.proxy.sharding.split_shard("1", "GROUPID3")
    self.assertStatus(status, _executor.Job.SUCCESS)
    final_job = status[1][-1]
    self.assertEqual(final_job["state"], _executor.Job.COMPLETE)
    self.assertEqual(
        final_job["description"],
        "Executed action (_prune_shard_tables_after_split).")

    for key in range(1, 100):
        status = self.proxy.sharding.lookup_servers("db1.t1", key, "LOCAL")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        servers = status[2]
        uuids = [servers[0][0], servers[1][0]]
        addresses = set([servers[0][1], servers[1][1]])
        if addresses == first_addresses:
            count_split_1 = count_split_1 + 1
            if first_server is None:
                first_server = MySQLServer.fetch(uuids[0])
        else:
            # Anything that is not the first half must be the second.
            self.assertEqual(second_addresses, addresses)
            count_split_2 = count_split_2 + 1
            if second_server is None:
                second_server = MySQLServer.fetch(uuids[0])

    # Ensure that both the splits have been utilized.
    self.assertTrue(count_split_1 > 0)
    self.assertTrue(count_split_2 > 0)

    first_server.connect()
    second_server.connect()
    first_count = first_server.exec_stmt(
        "SELECT COUNT(*) FROM db1.t1", {"fetch": True})
    second_count = second_server.exec_stmt(
        "SELECT COUNT(*) FROM db1.t1", {"fetch": True})

    # Ensure that the split has happened: the number of values in
    # each half should be less than the original.
    self.assertTrue(int(first_count[0][0]) < 100)
    self.assertTrue(int(second_count[0][0]) < 100)

    # Ensure that two new shard ids have been generated.
    specs = HashShardingSpecification.list(1)
    self.assertTrue(ShardingUtils.compare_hash_specifications(
        specs[1], HashShardingSpecification.fetch(2)))
    self.assertTrue(ShardingUtils.compare_hash_specifications(
        specs[0], HashShardingSpecification.fetch(3)))
def _check_shard_information(shard_id, destn_group_id, mysqldump_binary,
                             mysqlclient_binary, split_value, config_file,
                             prune_limit, cmd, update_only):
    """Verify the sharding information before starting a re-sharding
    operation, then trigger the next step of the procedure.

    :param shard_id: The destination shard ID.
    :param destn_group_id: The Destination group ID.
    :param mysqldump_binary: The path to the mysqldump binary.
    :param mysqlclient_binary: The path to the mysqlclient binary.
    :param split_value: The point at which the sharding definition
                        should be split.  Must be None for HASH
                        sharding (it is computed here); for RANGE
                        sharding it is validated or required.
    :param config_file: The complete path to the fabric configuration
                        file.
    :param prune_limit: The number of DELETEs that should be done in
                        one batch.
    :param cmd: Indicates if it is a split or a move being executed
                ("SPLIT" enables the split-value checks below).
    :param update_only: If the operation is a update only operation.

    :raises ShardingError: If a binary path is invalid, the split value
                           is missing/invalid, the destination group is
                           non-empty or unknown, or the shard is
                           unknown.
    """
    # Both client binaries are needed to copy shard data; fail fast if
    # either path is not an executable.
    if not _services_utils.is_valid_binary(mysqldump_binary):
        raise _errors.ShardingError(_services_sharding.MYSQLDUMP_NOT_FOUND %
                                    mysqldump_binary)
    if not _services_utils.is_valid_binary(mysqlclient_binary):
        raise _errors.ShardingError(_services_sharding.MYSQLCLIENT_NOT_FOUND %
                                    mysqlclient_binary)
    # Split-value validation only applies to SPLIT; a MOVE keeps the
    # shard's bounds untouched.
    if cmd == "SPLIT":
        range_sharding_spec, _, shard_mappings, _ = \
            _services_sharding.verify_and_fetch_shard(shard_id)
        upper_bound = \
            SHARDING_SPECIFICATION_HANDLER[shard_mappings[0].type_name].\
            get_upper_bound(
                range_sharding_spec.lower_bound,
                range_sharding_spec.shard_mapping_id,
                shard_mappings[0].type_name
            )
        # If the underlying sharding scheme is a HASH: when a shard is
        # split, all the tables that are part of the shard have the same
        # sharding scheme.  All the shard mappings associated with this
        # shard_id will be of the same sharding type.  Hence it is safe
        # to use one of the shard mappings.
        if shard_mappings[0].type_name == "HASH":
            # HASH split points are computed, never user-supplied.
            if split_value is not None:
                raise _errors.ShardingError(
                    _services_sharding.NO_LOWER_BOUND_FOR_HASH_SHARDING)
            if upper_bound is None:
                # While splitting a range, retrieve the next upper bound
                # and find the mid-point; in the case where the next
                # upper_bound is unavailable pick the maximum value in
                # the set of values in the shard.
                upper_bound = HashShardingSpecification.fetch_max_key(shard_id)
            # Calculate the split value (mid-point of the bounds).
            split_value = \
                SHARDING_DATATYPE_HANDLER[shard_mappings[0].type_name].\
                split_value(
                    range_sharding_spec.lower_bound,
                    upper_bound
                )
        elif split_value is not None:
            # RANGE sharding with an explicit split point: it must lie
            # within the shard's current bounds.
            if not (SHARDING_DATATYPE_HANDLER[shard_mappings[0].type_name].\
                    is_valid_split_value(
                        split_value, range_sharding_spec.lower_bound,
                        upper_bound
                    )
                ):
                raise _errors.ShardingError(
                    _services_sharding.INVALID_LOWER_BOUND_VALUE %
                    (split_value, ))
        elif split_value is None:
            # RANGE sharding requires the caller to provide the point.
            raise _errors.ShardingError(
                _services_sharding.SPLIT_VALUE_NOT_DEFINED)
    # Ensure that the destination group does not already contain a shard.
    if Shards.lookup_shard_id(destn_group_id) is not None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_MOVE_DESTINATION_NOT_EMPTY %
            (destn_group_id, ))
    # Fetch the group information for the source shard that
    # needs to be moved.
    source_shard = Shards.fetch(shard_id)
    if source_shard is None:
        raise _errors.ShardingError(_services_sharding.SHARD_NOT_FOUND %
                                    (shard_id, ))
    # Fetch the group_id and the group that hosts the source shard.
    source_group_id = source_shard.group_id
    destn_group = Group.fetch(destn_group_id)
    if destn_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))
    # Continue the procedure: a full run backs up the source shard
    # first; update_only skips provisioning and goes straight to the
    # state-store switch.
    if not update_only:
        _events.trigger_within_procedure(BACKUP_SOURCE_SHARD, shard_id,
                                         source_group_id, destn_group_id,
                                         mysqldump_binary, mysqlclient_binary,
                                         split_value, config_file, prune_limit,
                                         cmd, update_only)
    else:
        _events.trigger_within_procedure(SETUP_RESHARDING_SWITCH, shard_id,
                                         source_group_id, destn_group_id,
                                         split_value, prune_limit, cmd,
                                         update_only)
def _setup_shard_switch_split(shard_id, source_group_id, destination_group_id,
                              split_value, prune_limit, cmd, update_only):
    """Setup the moved shard to map to the new group.

    Retires the original shard, registers the two halves of the split
    (one staying on the source group, one on the destination group),
    and re-enables writes once connectors have had time to refresh.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destination_group_id: The ID of the group to which the shard
                                 needs to be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split.  Will be set
                        only for shard split operations.
    :param prune_limit: The number of DELETEs that should be done in
                        one batch.
    :param cmd: Indicates the type of re-sharding operation (not
                consulted in this step).
    :param update_only: Only update the state store and skip
                        provisioning.
    """
    # Fetch the Range sharding specification.
    range_sharding_spec, source_shard, shard_mappings, shard_mapping_defn = \
        _services_sharding.verify_and_fetch_shard(shard_id)

    # Disable the old shard.
    source_shard.disable()

    # Remove the old shard definition; its data range is re-registered
    # below as two new shards.
    range_sharding_spec.remove()
    source_shard.remove()

    destination_group = Group.fetch(destination_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destination_group_id, ))
    destn_group_master = MySQLServer.fetch(destination_group.master)
    if destn_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    destn_group_master.connect()

    # Make the destination group read only to disable updates until the
    # connectors update their caches, thus avoiding inconsistency.
    destn_group_master.read_only = True

    # Add the new shards.  Generate new shard IDs for the shard being
    # split and also for the shard that is created as a result of the
    # split.  Both start DISABLED and are enabled together at the end.
    new_shard_1 = Shards.add(source_shard.group_id, "DISABLED")
    new_shard_2 = Shards.add(destination_group_id, "DISABLED")

    # Both of the shard mappings associated with this shard_id should
    # be of the same sharding type.  Hence it is safe to use one of the
    # shard mappings.
    if shard_mappings[0].type_name == "HASH":
        # In the case of a split involving a HASH sharding scheme,
        # the shard that is split gets a new shard_id, while the split
        # gets the new computed lower_bound and also a new shard id.
        # NOTE: the shard that is split retains its lower_bound.
        HashShardingSpecification.add_hash_split(
            range_sharding_spec.shard_mapping_id,
            new_shard_1.shard_id,
            range_sharding_spec.lower_bound
        )
        HashShardingSpecification.add_hash_split(
            range_sharding_spec.shard_mapping_id,
            new_shard_2.shard_id,
            split_value
        )
    else:
        # Add the new ranges.  Note that the shard being split retains
        # its lower_bound, while the new shard gets the computed
        # lower_bound.
        RangeShardingSpecification.add(
            range_sharding_spec.shard_mapping_id,
            range_sharding_spec.lower_bound,
            new_shard_1.shard_id
        )
        RangeShardingSpecification.add(
            range_sharding_spec.shard_mapping_id,
            split_value,
            new_shard_2.shard_id
        )

    # The sleep ensures that the connectors have refreshed their caches
    # with the new shards that have been added as a result of the split.
    time.sleep(_utils.TTL)

    # The source shard group master would have been marked as read only
    # during the sync.  Remove the read_only flag.
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))
    source_group_master = MySQLServer.fetch(source_group.master)
    if source_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    source_group_master.connect()

    # Kill all the existing connections on the servers.
    source_group.kill_connections_on_servers()

    # Allow connections on the source group master.
    source_group_master.read_only = False

    # Allow connections on the destination group master.
    destn_group_master.read_only = False

    # Setup replication for the new group from the global server.
    # NOTE(review): shard_mapping_defn[2] appears to be the global
    # group id -- confirm against verify_and_fetch_shard.
    _group_replication.setup_group_replication \
        (shard_mapping_defn[2], destination_group_id)

    # Enable the split shards.
    new_shard_1.enable()
    new_shard_2.enable()

    # Trigger changing the mappings for the shard that was copied.
    if not update_only:
        _events.trigger_within_procedure(
            PRUNE_SHARDS, new_shard_1.shard_id, new_shard_2.shard_id,
            prune_limit
        )
def _setup_shard_switch_split(shard_id, source_group_id, destination_group_id,
                              split_value, prune_limit, cmd, update_only):
    """Setup the moved shard to map to the new group.

    Removes the original shard definition, registers the two halves of
    the split on the source and destination groups, and restores write
    access once connectors have had time to refresh their caches.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destination_group_id: The ID of the group to which the shard
                                 needs to be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split.  Will be set
                        only for shard split operations.
    :param prune_limit: The number of DELETEs that should be done in
                        one batch.
    :param cmd: Indicates the type of re-sharding operation (not
                consulted in this step).
    :param update_only: Only update the state store and skip
                        provisioning.
    """
    # Fetch the Range sharding specification.
    range_sharding_spec, source_shard, shard_mappings, shard_mapping_defn = \
        _services_sharding.verify_and_fetch_shard(shard_id)

    # Disable the old shard.
    source_shard.disable()

    # Remove the old shard; its data range is re-registered below as
    # two new shards.
    range_sharding_spec.remove()
    source_shard.remove()

    destination_group = Group.fetch(destination_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destination_group_id, ))
    destn_group_master = MySQLServer.fetch(destination_group.master)
    if destn_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    destn_group_master.connect()

    # Make the destination group read only to disable updates until the
    # connectors update their caches, thus avoiding inconsistency.
    destn_group_master.read_only = True

    # Add the new shards.  Generate new shard IDs for the shard being
    # split and also for the shard that is created as a result of the
    # split.  Both start DISABLED and are enabled together at the end.
    new_shard_1 = Shards.add(source_shard.group_id, "DISABLED")
    new_shard_2 = Shards.add(destination_group_id, "DISABLED")

    # Both of the shard mappings associated with this shard_id should
    # be of the same sharding type.  Hence it is safe to use one of the
    # shard mappings.
    if shard_mappings[0].type_name == "HASH":
        # In the case of a split involving a HASH sharding scheme,
        # the shard that is split gets a new shard_id, while the split
        # gets the new computed lower_bound and also a new shard id.
        # NOTE: the shard that is split retains its lower_bound.
        HashShardingSpecification.add_hash_split(
            range_sharding_spec.shard_mapping_id,
            new_shard_1.shard_id,
            range_sharding_spec.lower_bound)
        HashShardingSpecification.add_hash_split(
            range_sharding_spec.shard_mapping_id,
            new_shard_2.shard_id,
            split_value)
    else:
        # Add the new ranges.  Note that the shard being split retains
        # its lower_bound, while the new shard gets the computed
        # lower_bound.
        RangeShardingSpecification.add(range_sharding_spec.shard_mapping_id,
                                       range_sharding_spec.lower_bound,
                                       new_shard_1.shard_id)
        RangeShardingSpecification.add(range_sharding_spec.shard_mapping_id,
                                       split_value,
                                       new_shard_2.shard_id)

    # The sleep ensures that the connectors have refreshed their caches
    # with the new shards that have been added as a result of the split.
    time.sleep(_utils.TTL)

    # The source shard group master would have been marked as read only
    # during the sync.  Remove the read_only flag.
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))
    source_group_master = MySQLServer.fetch(source_group.master)
    if source_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    source_group_master.connect()

    # Kill all the existing connections on the servers.
    source_group.kill_connections_on_servers()

    # Allow connections on the source group master.
    source_group_master.read_only = False

    # Allow connections on the destination group master.
    destn_group_master.read_only = False

    # Setup replication for the new group from the global server.
    # NOTE(review): shard_mapping_defn[2] appears to be the global
    # group id -- confirm against verify_and_fetch_shard.
    _group_replication.setup_group_replication \
        (shard_mapping_defn[2], destination_group_id)

    # Enable the split shards.
    new_shard_1.enable()
    new_shard_2.enable()

    # Trigger changing the mappings for the shard that was copied.
    if not update_only:
        _events.trigger_within_procedure(PRUNE_SHARDS,
                                         new_shard_1.shard_id,
                                         new_shard_2.shard_id,
                                         prune_limit)