def _setup_replication(shard_id, source_group_id, destn_group_id, split_value,
                       prune_limit, cmd):
    """Configure replication from the source group to the destination
    group so that the shard data starts copying over, then trigger the
    sync step.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs
                           to be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be done in
                        one batch.
    :param cmd: Indicates the type of re-sharding operation
    """
    #Resolve both groups up front and fail fast if either is unknown.
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))
    destination_group = Group.fetch(destn_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    #Connect to the master of the source group (replication source).
    repl_master = MySQLServer.fetch(source_group.master)
    if repl_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    repl_master.connect()

    #Connect to the master of the destination group (replication target).
    repl_slave = MySQLServer.fetch(destination_group.master)
    if repl_slave is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    repl_slave.connect()

    #Take the target offline and wipe any replication state it may still
    #carry before re-pointing it.
    _utils.set_offline_mode(repl_slave, True) ### TODO: if forced offline_mode
    _replication.stop_slave(repl_slave, wait=True)
    _replication.reset_slave(repl_slave, clean=True)

    #Point the target at the shard group master and start replicating so
    #that the data copy begins.
    _replication.switch_master(repl_slave, repl_master, repl_master.repl_user,
                               repl_master.repl_pass)
    _replication.start_slave(repl_slave, wait=True)
    _utils.set_offline_mode(repl_slave, False) ### TODO: if forced offline_mode

    #Hand over to the step that waits for the groups to be in sync.
    _events.trigger_within_procedure(
        SETUP_SYNC, shard_id, source_group_id, destn_group_id, split_value,
        prune_limit, cmd
    )
def _setup_sync(shard_id, source_group_id, destn_group_id, split_value,
                prune_limit, cmd):
    """Wait until the destination group has caught up with the source
    group, dismantle the temporary replication link, and trigger the
    resharding switch.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs
                           to be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be done in
                        one batch.
    :param cmd: Indicates the type of re-sharding operation
    """
    #Both groups must still exist at this point.
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))
    destination_group = Group.fetch(destn_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    #The source group's master is what the destination is replicating from.
    sync_source = MySQLServer.fetch(source_group.master)
    if sync_source is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    sync_source.connect()

    #The destination group's master is the replicating server.
    sync_target = MySQLServer.fetch(destination_group.master)
    if sync_target is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    sync_target.connect()

    #Block until the target has applied everything from the source, then
    #tear the temporary replication relationship down.
    _replication.synchronize_with_read_only(sync_target, sync_source)
    _replication.stop_slave(sync_target, wait=True)
    _replication.reset_slave(sync_target, clean=True)

    #Trigger changing the mappings for the shard that was copied.
    _events.trigger_within_procedure(
        SETUP_RESHARDING_SWITCH, shard_id, source_group_id, destn_group_id,
        split_value, prune_limit, cmd
    )
def _setup_sync(shard_id, source_group_id, destn_group_id, split_value,
                prune_limit, cmd):
    """Synchronize the source and the destination groups, stop the
    temporary replication, and kick off the resharding switch.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs
                           to be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be done in
                        one batch.
    :param cmd: Indicates the type of re-sharding operation
    """
    origin_group = Group.fetch(source_group_id)
    if origin_group is None:
        #The source group vanished from the state store.
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))
    target_group = Group.fetch(destn_group_id)
    if target_group is None:
        #The destination group vanished from the state store.
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    origin_master = MySQLServer.fetch(origin_group.master)
    if origin_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    origin_master.connect()

    target_master = MySQLServer.fetch(target_group.master)
    if target_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    target_master.connect()

    #Synchronize until the replicating server catches up with its master,
    #then reset replication since the copy is complete.
    _replication.synchronize_with_read_only(target_master, origin_master)
    _replication.stop_slave(target_master, wait=True)
    _replication.reset_slave(target_master, clean=True)

    #Continue with updating the shard mappings.
    _events.trigger_within_procedure(
        SETUP_RESHARDING_SWITCH, shard_id, source_group_id, destn_group_id,
        split_value, prune_limit, cmd
    )
def test_shard_split_fail_GTID_EXECUTED(self):
    """Verify that a shard split fails and restores the backup when the
    shard master carries extra transactions (self.split_fail is set so
    the test fixture injects the failure path).
    """
    self.split_fail = True
    #Locate the PRIMARY of the group being split and connect to it.
    status = self.proxy.group.lookup_servers("GROUPID3")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for obtained_server in obtained_server_list:
        if obtained_server["status"] == "PRIMARY":
            shard_uuid = obtained_server["server_uuid"]
            shard_server = MySQLServer.fetch(shard_uuid)
            shard_server.connect()
            break
    #Create transactions outside the shard's own tables; presumably these
    #extra GTIDs are what makes the split abort — TODO confirm against
    #the split implementation.
    shard_server.exec_stmt("DROP DATABASE IF EXISTS Extra")
    shard_server.exec_stmt("CREATE DATABASE Extra")
    shard_server.exec_stmt("CREATE TABLE Extra.Extra_Table"
                           "(userID INT, name VARCHAR(30))")
    shard_server.exec_stmt("INSERT INTO Extra.Extra_Table "
                           "VALUES(101, 'TEST 1')")
    shard_server.exec_stmt("INSERT INTO Extra.Extra_Table "
                           "VALUES(102, 'TEST 2')")
    shard_server.exec_stmt("INSERT INTO Extra.Extra_Table "
                           "VALUES(103, 'TEST 3')")
    shard_server.exec_stmt("INSERT INTO Extra.Extra_Table "
                           "VALUES(701, 'TEST 4')")
    #The split must end in ERROR, with the last job step being the
    #restore of the shard backup.
    status = self.proxy.sharding.split_shard("1", "GROUPID3", "600")
    self.assertStatus(status, _executor.Job.ERROR)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Tried to execute action (_restore_shard_backup).")
def test_update_only(self): """Test the shard split but without provisioning. """ # Get group information before the shard_move operation status = self.proxy.sharding.lookup_servers("db1.t1", 500, "LOCAL") local_list_before = status[2] status = self.proxy.sharding.lookup_servers("1", 500, "GLOBAL") global_list_before = status[2] # Do the shard split and compare group information. status = self.proxy.sharding.split_shard("1", "GROUPID3", "600", True) self.assertStatus(status, _executor.Job.SUCCESS) self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE) self.assertEqual(status[1][-1]["description"], "Executed action (_setup_resharding_switch).") status = self.proxy.sharding.lookup_servers("db1.t1", 601, "LOCAL") local_list_after = status[2] self.assertNotEqual(local_list_before, local_list_after) status = self.proxy.sharding.lookup_servers("1", 601, "GLOBAL") global_list_after = status[2] self.assertEqual(global_list_before, global_list_after) # The group has changed but no data was transfered. shard_server = MySQLServer.fetch(local_list_after[0][0]) shard_server.connect() self.assertRaises( DatabaseError, shard_server.exec_stmt, "SELECT NAME FROM db1.t1", {"fetch" : True} )
def drop_shard_range_trigger(group_id, sharding_type, table_name,
                             column_name):
    """Drop the range-check triggers previously created on a shard table.

    :param group_id: The ID of the group on which the trigger definition
                     is applied. The trigger is dropped on the master of
                     this group.
    :param sharding_type: The datatype supported by the shards. Used to
                          name the trigger.
    :param table_name: The name of the table. This is used to derive the
                       trigger names.
    :param column_name: The name of the column in the table being
                        sharded. This is used to derive the trigger
                        names.
    """
    group = Group.fetch(group_id)
    master = MySQLServer.fetch(group.master)
    master.connect()

    db, table = table_name.split(".")
    #Drop the INSERT trigger and then the UPDATE trigger; both are named
    #after the table with an operation-specific prefix.
    for prefix in (_TRIGGER_PREFIX_INSERT, _TRIGGER_PREFIX_UPDATE):
        master.exec_stmt(_DROP_TRIGGER_DEFN.format(
            trigger_name=db + "." + prefix + table
        ))
def test_shard_prune(self):
    """After pruning, each shard should retain only the rows that fall
    inside its own key range (100 rows per shard here).
    """
    status = self.proxy.sharding.prune_shard("db2.t2")
    self.assertStatus(status, _executor.Job.SUCCESS)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Executed action (_prune_shard_tables).")

    #Shard owning key 1: expected to hold exactly keys 1..100.
    status = self.proxy.sharding.lookup_servers("db2.t2", 1, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    shard_uuid = obtained_server_list[0][0]
    shard_server = MySQLServer.fetch(shard_uuid)
    shard_server.connect()
    rows = shard_server.exec_stmt(
        "SELECT COUNT(*) FROM db2.t2",
        {"fetch" : True})
    self.assertTrue(int(rows[0][0]) == 100)
    rows = shard_server.exec_stmt(
        "SELECT MAX(userID2) FROM db2.t2",
        {"fetch" : True})
    self.assertTrue(int(rows[0][0]) == 100)
    rows = shard_server.exec_stmt(
        "SELECT MIN(userID2) FROM db2.t2",
        {"fetch" : True})
    self.assertTrue(int(rows[0][0]) == 1)

    #Shard owning key 101: expected to hold exactly keys 101..200.
    status = self.proxy.sharding.lookup_servers("db2.t2", 101, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    shard_uuid = obtained_server_list[0][0]
    shard_server = MySQLServer.fetch(shard_uuid)
    shard_server.connect()
    rows = shard_server.exec_stmt(
        "SELECT COUNT(*) FROM db2.t2",
        {"fetch" : True})
    self.assertTrue(int(rows[0][0]) == 100)
    rows = shard_server.exec_stmt(
        "SELECT MAX(userID2) FROM db2.t2",
        {"fetch" : True})
    self.assertTrue(int(rows[0][0]) == 200)
    rows = shard_server.exec_stmt(
        "SELECT MIN(userID2) FROM db2.t2",
        {"fetch" : True})
    self.assertTrue(int(rows[0][0]) == 101)
def _fetch_master_of_group(group_id):
    """Return a connected reference to the master of the given group.

    :param group_id: ID of the group whose master needs to be fetched.
    :return: MySQLServer object referring to the group master.
    """
    group = Group.fetch(group_id)
    group_master = MySQLServer.fetch(group.master)
    group_master.connect()
    return group_master
def _fetch_master_of_group(group_id):
    """Fetch the master server of a group and open a connection to it.

    :param group_id: ID of the group whose master needs to be fetched.
    :return: MySQLServer object referring to the group master.
    """
    #Resolve the group first, then the server currently acting as master.
    containing_group = Group.fetch(group_id)
    master = MySQLServer.fetch(containing_group.master)
    master.connect()
    return master
def stop_group_slave(group_master_id, group_slave_id, clear_ref):
    """Stop replication flowing from a master group into one slave group.

    Complement of the setup_group_replication method: stops the slave
    running on the master of the slave group and, when requested, drops
    the master/slave references stored on both groups.

    :param group_master_id: The id of the master group.
    :param group_slave_id: The id of the slave group.
    :param clear_ref: The parameter indicates if the stop_group_slave
                      needs to clear the references to the group's
                      slaves. For example when you do a disable shard the
                      shard group still retains the references to its
                      slaves, since when enabled it needs to enable the
                      replication.
    """
    master_group = Group.fetch(group_master_id)
    slave_group = Group.fetch(group_slave_id)
    if master_group is None:
        raise _errors.GroupError(
            GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_master_id, ))
    if slave_group is None:
        raise _errors.GroupError(
            GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_slave_id, ))

    replicating_server = MySQLServer.fetch(slave_group.master)
    if replicating_server is None:
        raise _errors.GroupError(
            GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR %
            (slave_group.master, ))

    #A server that is down cannot be contacted to stop replication.
    if not server_running(replicating_server):
        return
    try:
        replicating_server.connect()
    except _errors.DatabaseError:
        #Server is unreachable; there is nothing we can stop.
        return

    #Stop and fully reset the slave, then optionally drop the stored
    #references between the two groups.
    _replication.stop_slave(replicating_server, wait=True)
    _replication.reset_slave(replicating_server, clean=True)
    if clear_ref:
        slave_group.remove_master_group_id()
        master_group.remove_slave_group_id(group_slave_id)
def stop_group_slave(group_master_id, group_slave_id, clear_ref):
    """Halt the replication a slave group receives from a master group.

    This is the inverse of setup_group_replication. It stops the slave
    thread on the slave group's master and can also remove the group
    references kept on both sides.

    :param group_master_id: The id of the master group.
    :param group_slave_id: The id of the slave group.
    :param clear_ref: The parameter indicates if the stop_group_slave
                      needs to clear the references to the group's
                      slaves. For example when you do a disable shard the
                      shard group still retains the references to its
                      slaves, since when enabled it needs to enable the
                      replication.
    """
    #Fetch both groups before validating either of them.
    master_group = Group.fetch(group_master_id)
    slave_group = Group.fetch(group_slave_id)
    if master_group is None:
        raise _errors.GroupError(
            GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_master_id, ))
    if slave_group is None:
        raise _errors.GroupError(
            GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_slave_id, ))

    downstream_master = MySQLServer.fetch(slave_group.master)
    if downstream_master is None:
        raise _errors.GroupError(
            GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR %
            (slave_group.master, ))

    if not server_running(downstream_master):
        #Server already down: cannot connect to stop replication.
        return
    try:
        downstream_master.connect()
    except _errors.DatabaseError:
        #Server not accessible; give up silently as before.
        return

    _replication.stop_slave(downstream_master, wait=True)
    _replication.reset_slave(downstream_master, clean=True)

    #Clear the stored references only when the caller asked for it.
    if clear_ref:
        slave_group.remove_master_group_id()
        master_group.remove_slave_group_id(group_slave_id)
def stop_group_slaves(master_group_id):
    """Stop the group slaves for the given master group.

    This will be used for use cases that required all the slaves
    replicating from this group to be stopped. An example use case would
    be disabling a shard.

    :param master_group_id: The master group ID.
    :raises GroupError: If the master group cannot be found.
    """
    master_group = Group.fetch(master_group_id)
    if master_group is None:
        raise _errors.GroupError(
            GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (master_group_id, ))

    # Stop the replication on all of the registered slaves for the group.
    for slave_group_id in master_group.slave_group_ids:
        slave_group = Group.fetch(slave_group_id)
        # Fetch the Slave Group and the master of the Slave Group
        slave_group_master = MySQLServer.fetch(slave_group.master)
        if slave_group_master is None:
            # Pass the argument lazily instead of %-formatting eagerly,
            # so the message is only built when the warning is emitted
            # (consistent with the other logging calls in this module).
            _LOGGER.warning(
                GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR,
                slave_group.master
            )
            continue
        if not server_running(slave_group_master):
            # The server is already down. we cannot connect to it to stop
            # replication.
            continue
        try:
            slave_group_master.connect()
            _replication.stop_slave(slave_group_master, wait=True)
            # Reset the slave to remove the reference of the master so
            # that when the server is used as a slave next it does not
            # complaint about having a different master.
            _replication.reset_slave(slave_group_master, clean=True)
        except _errors.DatabaseError as error:
            # Server is not accessible, unable to connect to the server.
            _LOGGER.warning(
                "Error while unconfiguring group replication between "
                "(%s) and (%s): (%s).",
                master_group_id, slave_group.group_id, error
            )
            continue
def stop_group_slaves(master_group_id):
    """Stop every slave group replicating from the given master group.

    Used where all slaves of a group must be halted at once, for example
    when a shard is being disabled.

    :param master_group_id: The master group ID.
    """
    master_group = Group.fetch(master_group_id)
    if master_group is None:
        raise _errors.GroupError(
            GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (master_group_id, ))

    #Walk the registered slave groups and stop replication on each one.
    for slave_group_id in master_group.slave_group_ids:
        slave_group = Group.fetch(slave_group_id)
        #Resolve the master server of this slave group.
        slave_group_master = MySQLServer.fetch(slave_group.master)
        if slave_group_master is None:
            _LOGGER.warning(GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR,
                            slave_group.master)
            continue
        #A server that is down cannot be reached to stop replication.
        if not server_running(slave_group_master):
            continue
        try:
            slave_group_master.connect()
            _replication.stop_slave(slave_group_master, wait=True)
            #Clean reset so the server does not complain about a stale
            #master when it is reused as a slave later.
            _replication.reset_slave(slave_group_master, clean=True)
        except _errors.DatabaseError as error:
            #Unreachable server: log the failure and keep going with the
            #remaining slave groups.
            _LOGGER.warning(
                "Error while unconfiguring group replication between "
                "(%s) and (%s): (%s).",
                master_group_id, slave_group.group_id, error)
            continue
def add_shard_range_trigger(group_id, sharding_type, table_name,
                            column_name):
    """Add triggers on the shard table to ensure that values inserted or
    updated fall within the valid shard ranges.

    :param group_id: The ID of the group on which the trigger definition
                     is applied. The trigger is created on the master of
                     this group.
    :param sharding_type: The datatype supported by the shards. Used to
                          select the trigger template.
    :param table_name: The name of the table. This is used to name the
                       trigger being created.
    :param column_name: The name of the column in the table being
                        sharded. This is used to create the name of the
                        trigger.
    """
    group = Group.fetch(group_id)
    master = MySQLServer.fetch(group.master)
    master.connect()

    db, table = table_name.split(".")
    trigger_tmpl = _TRIGGER_DEFN[sharding_type]
    #One trigger per DML operation that could introduce an out-of-range
    #value: INSERT and UPDATE.
    for operation, prefix in (("INSERT", _TRIGGER_PREFIX_INSERT),
                              ("UPDATE", _TRIGGER_PREFIX_UPDATE)):
        master.exec_stmt(trigger_tmpl.format(
            trigger_name=db + "." + prefix + table,
            operation=operation,
            table_name=table_name,
            column_name="NEW" + "." + column_name
        ))
def _setup_shard_switch_move(shard_id, source_group_id, destination_group_id, update_only): """Setup the moved shard to map to the new group. :param shard_id: The shard ID of the shard that needs to be moved. :param source_group_id: The group_id of the source shard. :param destination_group_id: The ID of the group to which the shard needs to be moved. :update_only: Only update the state store and skip provisioning. """ #Fetch the Range sharding specification. When we start implementing #heterogenous sharding schemes, we need to find out the type of #sharding scheme and we should use that to find out the sharding #implementation. _, source_shard, _, shard_mapping_defn = \ _services_sharding._verify_and_fetch_shard(shard_id) #Setup replication between the shard group and the global group. _group_replication.setup_group_replication \ (shard_mapping_defn[2], destination_group_id) #set the shard to point to the new group. source_shard.group_id = destination_group_id #Stop the replication between the global server and the original #group associated with the shard. _group_replication.stop_group_slave\ (shard_mapping_defn[2], source_group_id, True) #Reset the read only flag on the source server. source_group = Group.fetch(source_group_id) if source_group is None: raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND % (source_group_id, )) master = MySQLServer.fetch(source_group.master) if master is None: raise _errors.ShardingError( _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND) if not update_only: master.connect() master.read_only = False
def test_sync_readonly_servers(self):
    """Verify that a read-only secondary can be synchronized with the
    shard master through group replication.
    """
    #Find a SECONDARY of GROUPID3 and connect to it.
    status = self.proxy.group.lookup_servers("GROUPID3")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx]["status"] == MySQLServer.SECONDARY:
            slave_uuid = obtained_server_list[idx]["server_uuid"]
            slave_server = MySQLServer.fetch(slave_uuid)
            slave_server.connect()
    #Replicate from GROUPID2 into GROUPID3, wait for the secondary to
    #catch up, then stop the replication again.
    _group_replication.setup_group_replication("GROUPID2",  "GROUPID3")
    _replication.synchronize_with_read_only(
        slave_server, self.shard_server, 3, 5
    )
    _group_replication.stop_group_slave("GROUPID2",  "GROUPID3",  True)
    #The shard data must now be present (15 rows expected).
    try:
        rows = self.shard_server.exec_stmt(
                                "SELECT NAME FROM db1.t1",
                                {"fetch" : True})
    except _errors.DatabaseError:
        raise Exception("Enable Shard failed to enable shard.")
    self.assertEqual(len(rows), 15)
def test_shard_split(self):
    """Split a hash-sharded shard and verify that both resulting shards
    serve lookups and each holds fewer rows than the original.
    """
    split_cnt_1 = 0
    split_cnt_2 = 0
    shard_server_1 = None
    shard_server_2 = None
    #Addresses of the two groups that should serve the split halves.
    expected_address_list_1 = \
        [MySQLInstances().get_address(2), MySQLInstances().get_address(3)]
    expected_address_list_2 = \
        [MySQLInstances().get_address(4), MySQLInstances().get_address(5)]
    status = self.proxy.sharding.split_shard("1", "GROUPID3")
    self.assertStatus(status, _executor.Job.SUCCESS)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Executed action (_prune_shard_tables_after_split).")
    #Every key must resolve to one of the two expected server sets; count
    #how often each half is hit.
    for i in range(1,  100):
        status = self.proxy.sharding.lookup_servers("db1.t1", i, "LOCAL")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        obtained_server_list = status[2]
        obtained_uuid_list = [obtained_server_list[0][0],
                              obtained_server_list[1][0]]
        obtained_address_list = [obtained_server_list[0][1],
                                 obtained_server_list[1][1]]
        try:
            self.assertEqual(
                set(expected_address_list_1), set(obtained_address_list)
            )
            split_cnt_1 = split_cnt_1 + 1
            if shard_server_1 is None:
                shard_server_1 = MySQLServer.fetch(obtained_uuid_list[0])
        except AssertionError:
            #Not the first half, so it must be the second.
            self.assertEqual(
                set(expected_address_list_2), set(obtained_address_list)
            )
            split_cnt_2 = split_cnt_2 + 1
            if shard_server_2 is None:
                shard_server_2 = MySQLServer.fetch(obtained_uuid_list[0])
    #Ensure that both the splits have been utilized.
    self.assertTrue(split_cnt_1 > 0)
    self.assertTrue(split_cnt_2 > 0)
    shard_server_1.connect()
    shard_server_2.connect()
    row_cnt_shard_1 = shard_server_1.exec_stmt(
        "SELECT COUNT(*) FROM db1.t1",
        {"fetch" : True}
    )
    row_cnt_shard_2 = shard_server_2.exec_stmt(
        "SELECT COUNT(*) FROM db1.t1",
        {"fetch" : True}
    )
    #Ensure that the split has happened, the number of values in
    #each shard should be less than the original.
    self.assertTrue(int(row_cnt_shard_1[0][0]) < 100)
    self.assertTrue(int(row_cnt_shard_2[0][0]) < 100)
    #Ensure that two new shard_ids have been generated.
    hash_sharding_specifications = HashShardingSpecification.list(1)
    self.assertTrue(ShardingUtils.compare_hash_specifications(
        hash_sharding_specifications[1],
        HashShardingSpecification.fetch(2)))
    self.assertTrue(ShardingUtils.compare_hash_specifications(
        hash_sharding_specifications[0],
        HashShardingSpecification.fetch(3)))
def test_global_update_propogation_failover(self):
    """Verify that global-table updates keep propagating to all shards
    across successive promotions in the global group and in the shard
    groups.
    """
    #Identify the current master and a slave of the global group.
    status = self.proxy.sharding.lookup_servers("1", 500,  "GLOBAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            global_master_uuid = obtained_server_list[idx][0]
        else:
            global_slave_uuid = obtained_server_list[idx][0]

    global_master = MySQLServer.fetch(global_master_uuid)
    global_master.connect()

    #Seed the global table with two rows before any failover.
    global_master.exec_stmt("DROP DATABASE IF EXISTS global_db")
    global_master.exec_stmt("CREATE DATABASE global_db")
    global_master.exec_stmt("CREATE TABLE global_db.global_table"
                            "(userID INT, name VARCHAR(30))")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(101, 'TEST 1')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(202, 'TEST 2')")

    #Promote the slave of the global group to master.
    status = self.proxy.group.promote(
        "GROUPID1", global_slave_uuid
    )
    self.assertStatus(status, _executor.Job.SUCCESS)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Executed action (_change_to_candidate).")
    #Crude settling time for replication to reconfigure — fragile.
    sleep(5)

    #Re-resolve the (new) global master and add two more rows.
    status = self.proxy.sharding.lookup_servers("1", 500,  "GLOBAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            global_master_uuid = obtained_server_list[idx][0]
            break

    global_master = MySQLServer.fetch(global_master_uuid)
    global_master.connect()

    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(303, 'TEST 3')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(404, 'TEST 4')")

    #Fail over the first shard group (GROUPID2) to a secondary.
    status = self.proxy.group.lookup_servers("GROUPID2")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx]["status"] == MySQLServer.SECONDARY:
            slave_uuid = obtained_server_list[idx]["server_uuid"]
            break

    status = self.proxy.group.promote("GROUPID2", str(slave_uuid))
    self.assertStatus(status, _executor.Job.SUCCESS)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Executed action (_change_to_candidate).")
    sleep(5)

    #More global updates after the shard-group failover.
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(505, 'TEST 5')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(606, 'TEST 6')")

    #Fail over the second shard group (GROUPID3) as well.
    status = self.proxy.group.lookup_servers("GROUPID3")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx]["status"] == MySQLServer.SECONDARY:
            slave_uuid = obtained_server_list[idx]["server_uuid"]
            break

    status = self.proxy.group.promote("GROUPID3", str(slave_uuid))
    self.assertStatus(status, _executor.Job.SUCCESS)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Executed action (_change_to_candidate).")

    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(505, 'TEST 7')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(606, 'TEST 8')")
    sleep(5)

    #All eight rows must have reached the servers of the first shard.
    status = self.proxy.sharding.lookup_servers("db1.t1", 500, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        shard_uuid = obtained_server_list[idx][0]
        shard_server = MySQLServer.fetch(shard_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt(
                                "SELECT NAME FROM global_db.global_table",
                                {"fetch" : True})
        self.assertEqual(len(rows), 8)
        self.assertEqual(rows[0][0], 'TEST 1')
        self.assertEqual(rows[1][0], 'TEST 2')
        self.assertEqual(rows[2][0], 'TEST 3')
        self.assertEqual(rows[3][0], 'TEST 4')
        self.assertEqual(rows[4][0], 'TEST 5')
        self.assertEqual(rows[5][0], 'TEST 6')
        self.assertEqual(rows[6][0], 'TEST 7')
        self.assertEqual(rows[7][0], 'TEST 8')

    #And the servers of the second shard as well.
    status = self.proxy.sharding.lookup_servers("db1.t1", 1500, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        shard_uuid = obtained_server_list[idx][0]
        shard_server = MySQLServer.fetch(shard_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt(
                                "SELECT NAME FROM global_db.global_table",
                                {"fetch" : True})
        self.assertEqual(len(rows), 8)
        self.assertEqual(rows[0][0], 'TEST 1')
        self.assertEqual(rows[1][0], 'TEST 2')
        self.assertEqual(rows[2][0], 'TEST 3')
        self.assertEqual(rows[3][0], 'TEST 4')
        self.assertEqual(rows[4][0], 'TEST 5')
        self.assertEqual(rows[5][0], 'TEST 6')
        self.assertEqual(rows[6][0], 'TEST 7')
        self.assertEqual(rows[7][0], 'TEST 8')
def test_shard_split(self):
    """Split a range-sharded shard at key 600 and verify row placement,
    propagation of global-table updates, and creation of the two new
    shard specifications.
    """
    status = self.proxy.sharding.split_shard("1", "GROUPID3", "600")
    self.assertStatus(status, _executor.Job.SUCCESS)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Executed action (_prune_shard_tables_after_split).")

    #Keys below the split point keep the first three rows.
    status = self.proxy.sharding.lookup_servers("db1.t1", 500, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        server_uuid = obtained_server_list[idx][0]
        shard_server = MySQLServer.fetch(server_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt(
                                "SELECT NAME FROM db1.t1",
                                {"fetch" : True})
        self.assertEqual(len(rows), 3)
        self.assertEqual(rows[0][0], 'TEST 1')
        self.assertEqual(rows[1][0], 'TEST 2')
        self.assertEqual(rows[2][0], 'TEST 3')

    #Keys above the split point hold the remaining four rows.
    status = self.proxy.sharding.lookup_servers("db1.t1", 800, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        server_uuid = obtained_server_list[idx][0]
        shard_server = MySQLServer.fetch(server_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt(
                                "SELECT NAME FROM db1.t1",
                                {"fetch" : True})
        self.assertEqual(len(rows), 4)
        self.assertEqual(rows[0][0], 'TEST 4')
        self.assertEqual(rows[1][0], 'TEST 5')
        self.assertEqual(rows[2][0], 'TEST 6')
        self.assertEqual(rows[3][0], 'TEST 7')

    #Find the master of the global group and seed the global table.
    status = self.proxy.sharding.lookup_servers("1", 500,  "GLOBAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            global_master_uuid = obtained_server_list[idx][0]
            break

    global_master = MySQLServer.fetch(global_master_uuid)
    global_master.connect()

    global_master.exec_stmt("DROP DATABASE IF EXISTS global_db")
    global_master.exec_stmt("CREATE DATABASE global_db")
    global_master.exec_stmt("CREATE TABLE global_db.global_table"
                            "(userID INT, name VARCHAR(30))")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(101, 'TEST 1')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(202, 'TEST 2')")

    #Promote a new master in the global group and keep inserting.
    status = self.proxy.group.promote("GROUPID1")
    self.assertStatus(status, _executor.Job.SUCCESS)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Executed action (_change_to_candidate).")
    #Crude settling time for replication to reconfigure — fragile.
    sleep(5)

    status = self.proxy.sharding.lookup_servers("1", 500,  "GLOBAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            global_master_uuid = obtained_server_list[idx][0]
            break

    global_master = MySQLServer.fetch(global_master_uuid)
    global_master.connect()

    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(303, 'TEST 3')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(404, 'TEST 4')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(505, 'TEST 5')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(606, 'TEST 6')")
    sleep(5)

    #The global rows must have propagated to the shard masters.
    status = self.proxy.sharding.lookup_servers("db1.t1", 500, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            shard_uuid = obtained_server_list[idx][0]
            shard_server = MySQLServer.fetch(shard_uuid)
            shard_server.connect()
            rows = shard_server.exec_stmt(
                "SELECT NAME FROM global_db.global_table",
                {"fetch" : True}
            )
            self.assertEqual(len(rows), 6)
            self.assertEqual(rows[0][0], 'TEST 1')
            self.assertEqual(rows[1][0], 'TEST 2')
            self.assertEqual(rows[2][0], 'TEST 3')
            self.assertEqual(rows[3][0], 'TEST 4')
            self.assertEqual(rows[4][0], 'TEST 5')
            self.assertEqual(rows[5][0], 'TEST 6')

    #Ensure that two new shard_ids have been generated.
    range_sharding_specifications = RangeShardingSpecification.list(1)
    self.assertTrue(ShardingUtils.compare_range_specifications(
        range_sharding_specifications[0],
        RangeShardingSpecification.fetch(2)))
    self.assertTrue(ShardingUtils.compare_range_specifications(
        range_sharding_specifications[1],
        RangeShardingSpecification.fetch(3)))
def test_shard_move(self):
    """Move shard "1" to GROUPID3 and verify the result.

    Checks that (a) the moved shard's data (7 rows in db1.t1) is present
    on the destination group's servers, and (b) global updates made both
    before and after a global-group switchover still replicate down to
    the moved shard's new group.

    NOTE(review): assumes fixtures created outside this method pre-load
    db1.t1 with 7 rows -- confirm against the test setUp.
    """
    # Trigger the shard move and verify the job ran to completion.
    status = self.proxy.sharding.move_shard("1", "GROUPID3")
    self.assertStatus(status, _executor.Job.SUCCESS)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Executed action (_setup_resharding_switch).")

    # Every server in the group now serving db1.t1 must contain all
    # 7 rows of the moved shard.
    status = self.proxy.sharding.lookup_servers("db1.t1", 500, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        server_uuid = obtained_server_list[idx][0]
        shard_server = MySQLServer.fetch(server_uuid)
        shard_server.connect()
        rows = shard_server.exec_stmt("SELECT NAME FROM db1.t1",
                                      {"fetch": True})
        self.assertEqual(len(rows), 7)
        self.assertEqual(rows[0][0], "TEST 1")
        self.assertEqual(rows[1][0], "TEST 2")
        self.assertEqual(rows[2][0], "TEST 3")
        self.assertEqual(rows[3][0], "TEST 4")
        self.assertEqual(rows[4][0], "TEST 5")
        self.assertEqual(rows[5][0], "TEST 6")
        self.assertEqual(rows[6][0], "TEST 7")

    # Locate the current master of the global group (entry with the
    # master flag set in the lookup result).
    status = self.proxy.sharding.lookup_servers("1", 500, "GLOBAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            global_master_uuid = obtained_server_list[idx][0]
            break
    global_master = MySQLServer.fetch(global_master_uuid)
    global_master.connect()

    # Re-create the global database and seed it with two rows.
    global_master.exec_stmt("DROP DATABASE IF EXISTS global_db")
    global_master.exec_stmt("CREATE DATABASE global_db")
    global_master.exec_stmt("CREATE TABLE global_db.global_table"
                            "(userID INT, name VARCHAR(30))")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(101, 'TEST 1')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(202, 'TEST 2')")

    # Switch over the master of the global group.
    status = self.proxy.group.promote("GROUPID1")
    self.assertStatus(status, _executor.Job.SUCCESS)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Executed action (_change_to_candidate).")
    sleep(5)

    # Find the new global master and add four more rows there.
    status = self.proxy.sharding.lookup_servers("1", 500, "GLOBAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            global_master_uuid = obtained_server_list[idx][0]
            break
    global_master = MySQLServer.fetch(global_master_uuid)
    global_master.connect()
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(303, 'TEST 3')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(404, 'TEST 4')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(505, 'TEST 5')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(606, 'TEST 6')")
    sleep(5)

    # All six global rows must have replicated to the moved shard's
    # new group.
    status = self.proxy.sharding.lookup_servers("db1.t1", 500, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            shard_uuid = obtained_server_list[idx][0]
            shard_server = MySQLServer.fetch(shard_uuid)
            shard_server.connect()
            rows = shard_server.exec_stmt(
                "SELECT NAME FROM global_db.global_table",
                {"fetch": True}
            )
            self.assertEqual(len(rows), 6)
            self.assertEqual(rows[0][0], "TEST 1")
            self.assertEqual(rows[1][0], "TEST 2")
            self.assertEqual(rows[2][0], "TEST 3")
            self.assertEqual(rows[3][0], "TEST 4")
            self.assertEqual(rows[4][0], "TEST 5")
            self.assertEqual(rows[5][0], "TEST 6")
def setup_group_replication(group_master_id, group_slave_id):
    """Sets up replication between the masters of the two groups and
    updates the references to the groups in each other.

    :param group_master_id: The group whose master will act as the master
                            in the replication setup.
    :param group_slave_id: The group whose master will act as the slave in
                           the replication setup.

    :raises: GroupError if either group is missing, has no master, or a
             master is not running/reachable.
    """
    group_master = Group.fetch(group_master_id)
    group_slave = Group.fetch(group_slave_id)

    # Both groups must exist and each must currently have a master.
    if group_master is None:
        raise _errors.GroupError \
            (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_master_id, ))

    if group_slave is None:
        raise _errors.GroupError \
            (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_slave_id, ))

    if group_master.master is None:
        raise _errors.GroupError \
            (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % "")

    if group_slave.master is None:
        raise _errors.GroupError \
            (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % "")

    # Master is the master of the Global Group. We replicate from here to
    # the masters of all the shard Groups.
    master = MySQLServer.fetch(group_master.master)
    if master is None:
        raise _errors.GroupError \
            (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % \
            (group_master.master, ))

    # Get the master of the shard Group.
    slave = MySQLServer.fetch(group_slave.master)
    if slave is None:
        raise _errors.GroupError \
            (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % \
            (group_slave.master, ))

    if not server_running(master):
        # The server is already down. We cannot connect to it to setup
        # replication.
        raise _errors.GroupError \
            (GROUP_MASTER_NOT_RUNNING % (group_master.group_id, ))

    try:
        master.connect()
    except _errors.DatabaseError as error:
        # Server is not accessible, unable to connect to the server.
        raise _errors.GroupError(
            GROUP_REPLICATION_SERVER_ERROR % (group_slave.master, error)
        )

    if not server_running(slave):
        # The server is already down. We cannot connect to it to setup
        # replication.
        raise _errors.GroupError \
            (GROUP_MASTER_NOT_RUNNING % (group_slave.group_id, ))

    try:
        slave.connect()
    except _errors.DatabaseError as error:
        raise _errors.GroupError(
            GROUP_REPLICATION_SERVER_ERROR % (group_master.master, error)
        )

    # Stop any running slave, clear references to old masters, point the
    # slave at the new master and restart replication.
    _replication.stop_slave(slave, wait=True)

    # clear references to old masters in the slave
    _replication.reset_slave(slave, clean=True)

    _replication.switch_master(slave, master, master.user, master.passwd)
    _replication.start_slave(slave, wait=True)

    try:
        # Cross-register the group ids on both sides.
        group_master.add_slave_group_id(group_slave_id)
        group_slave.add_master_group_id(group_master_id)
    except _errors.DatabaseError:
        # If there is an error while adding a reference to
        # the slave group or a master group, it means that
        # the slave group was already added and the error
        # is happening because the group was already registered.
        # Ignore this error.
        pass
def _check_shard_information(shard_id, destn_group_id, split_value,
                             prune_limit, cmd, update_only):
    """Verify the sharding information before starting a re-sharding
    operation.

    Validates the configured backup/restore tools and credentials,
    computes or validates the split point (for SPLIT), checks that the
    destination group is empty, and then triggers the next step of the
    procedure (backup, or directly the resharding switch when
    update_only is set).

    :param shard_id: The destination shard ID.
    :param destn_group_id: The Destination group ID.
    :param split_value: The point at which the sharding definition
                        should be split.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates if it is a split or a move being executed.
    :param update_only: If the operation is a update only operation.
    """
    # Read credentials and tool paths from the Fabric configuration.
    backup_user = _services_utils.read_config_value(
        _config.global_config,
        'servers',
        'backup_user'
    )
    backup_passwd = _services_utils.read_config_value(
        _config.global_config,
        'servers',
        'backup_password'
    )
    restore_user = _services_utils.read_config_value(
        _config.global_config,
        'servers',
        'restore_user'
    )
    restore_passwd = _services_utils.read_config_value(
        _config.global_config,
        'servers',
        'restore_password'
    )
    mysqldump_binary = _services_utils.read_config_value(
        _config.global_config,
        'sharding',
        'mysqldump_program'
    )
    mysqlclient_binary = _services_utils.read_config_value(
        _config.global_config,
        'sharding',
        'mysqlclient_program'
    )

    # Fail early if the configured dump/restore binaries are unusable.
    if not _services_utils.is_valid_binary(mysqldump_binary):
        raise _errors.ShardingError(
            _services_sharding.MYSQLDUMP_NOT_FOUND % mysqldump_binary)

    if not _services_utils.is_valid_binary(mysqlclient_binary):
        raise _errors.ShardingError(
            _services_sharding.MYSQLCLIENT_NOT_FOUND % mysqlclient_binary)

    if cmd == "SPLIT":
        range_sharding_spec, _, shard_mappings, _ = \
            _services_sharding.verify_and_fetch_shard(shard_id)
        upper_bound = \
            SHARDING_SPECIFICATION_HANDLER[shard_mappings[0].type_name].\
            get_upper_bound(
                range_sharding_spec.lower_bound,
                range_sharding_spec.shard_mapping_id,
                shard_mappings[0].type_name
            )
        # If the underlying sharding scheme is a HASH. When a shard is
        # split, all the tables that are part of the shard, have the same
        # sharding scheme. All the shard mappings associated with this
        # shard_id will be of the same sharding type. Hence it is safe to
        # use one of the shard mappings.
        if shard_mappings[0].type_name == "HASH":
            # HASH sharding computes its own split point; callers must
            # not supply one.
            if split_value is not None:
                raise _errors.ShardingError(
                    _services_sharding.NO_LOWER_BOUND_FOR_HASH_SHARDING
                )
            if upper_bound is None:
                # While splitting a range, retrieve the next upper bound
                # and find the mid-point, in the case where the next
                # upper_bound is unavailable pick the maximum value in
                # the set of values in the shard.
                upper_bound = \
                    HashShardingSpecification.fetch_max_key(shard_id)
            # Calculate the split value.
            split_value = \
                SHARDING_DATATYPE_HANDLER[shard_mappings[0].type_name].\
                split_value(
                    range_sharding_spec.lower_bound,
                    upper_bound
                )
        elif split_value is not None:
            # RANGE sharding: a caller-supplied split point must lie
            # strictly inside the shard's current bounds.
            if not (SHARDING_DATATYPE_HANDLER[shard_mappings[0].type_name].\
                    is_valid_split_value(
                        split_value, range_sharding_spec.lower_bound,
                        upper_bound
                    )
                ):
                raise _errors.ShardingError(
                    _services_sharding.INVALID_LOWER_BOUND_VALUE %
                    (split_value, )
                )
        elif split_value is None:
            raise _errors.ShardingError(
                _services_sharding.SPLIT_VALUE_NOT_DEFINED
            )

    # Ensure that the group does not already contain a shard.
    if Shards.lookup_shard_id(destn_group_id) is not None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_MOVE_DESTINATION_NOT_EMPTY %
            (destn_group_id, )
        )

    # Fetch the group information for the source shard that
    # needs to be moved.
    source_shard = Shards.fetch(shard_id)
    if source_shard is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_NOT_FOUND % (shard_id, ))

    # Fetch the group_id and the group that hosts the source shard.
    source_group_id = source_shard.group_id

    destn_group = Group.fetch(destn_group_id)
    if destn_group is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_NOT_FOUND % (destn_group_id, ))

    if not update_only:
        # Check if the source server has backup privileges.
        source_group = Group.fetch(source_group_id)
        server = _services_utils.fetch_backup_server(source_group)
        server.user = backup_user
        server.passwd = backup_passwd
        _backup.MySQLDump.check_backup_privileges(server)

        # Check if the destination server has restore privileges.
        destination_group = Group.fetch(destn_group_id)
        server = MySQLServer.fetch(destination_group.master)
        server.user = restore_user
        server.passwd = restore_passwd
        _backup.MySQLDump.check_restore_privileges(server)

        _events.trigger_within_procedure(
            BACKUP_SOURCE_SHARD, shard_id, source_group_id, destn_group_id,
            split_value, prune_limit, cmd, update_only
        )
    else:
        # State-store-only update: skip backup/restore and go straight
        # to the resharding switch.
        _events.trigger_within_procedure(
            SETUP_RESHARDING_SWITCH, shard_id, source_group_id,
            destn_group_id, split_value, prune_limit, cmd, update_only
        )
def test_switchover_with_no_master(self):
    """Ensure that a switchover/failover happens when masters in the
    shard and global groups are dead.

    Walks through demote/promote cycles on the global group (GROUPID1)
    and a shard group (GROUPID2), verifying after each step which master
    (if any) the shard masters replicate from.
    """
    # Check that each shard group's master points to the master of the
    # global group.
    global_group = Group.fetch("GROUPID1")
    shard_group = Group.fetch("GROUPID2")
    other_shard_group = Group.fetch("GROUPID3")
    global_master = MySQLServer.fetch(global_group.master)
    global_master.connect()
    shard_master = MySQLServer.fetch(shard_group.master)
    shard_master.connect()
    other_shard_master = MySQLServer.fetch(other_shard_group.master)
    other_shard_master.connect()
    self.assertEqual(
        _replication.slave_has_master(shard_master),
        str(global_group.master)
    )
    self.assertEqual(
        _replication.slave_has_master(other_shard_master),
        str(global_group.master)
    )

    # Demote the master in the global group and check that the
    # shard groups point to None.
    global_group = Group.fetch("GROUPID1")
    self.assertEqual(global_group.master, global_master.uuid)
    self.proxy.group.demote("GROUPID1")
    global_group = Group.fetch("GROUPID1")
    self.assertEqual(global_group.master, None)
    self.assertEqual(_replication.slave_has_master(shard_master), None)
    self.assertEqual(
        _replication.slave_has_master(other_shard_master), None
    )

    # Demote the master in a shard group and promote the master
    # in the global group.
    global_group = Group.fetch("GROUPID1")
    self.assertEqual(global_group.master, None)
    shard_group = Group.fetch("GROUPID2")
    self.assertEqual(shard_group.master, shard_master.uuid)
    self.proxy.group.demote("GROUPID2")
    shard_group = Group.fetch("GROUPID2")
    self.assertEqual(shard_group.master, None)
    self.proxy.group.promote("GROUPID1", str(global_master.uuid))
    global_group = Group.fetch("GROUPID1")
    self.assertEqual(global_group.master, global_master.uuid)
    # The demoted shard group has no master, so nothing replicates
    # there; the other shard group follows the new global master.
    self.assertEqual(_replication.slave_has_master(shard_master), None)
    self.assertEqual(
        _replication.slave_has_master(other_shard_master),
        str(global_group.master)
    )

    # Promote the master in the previous shard group and check that
    # everything is back to normal.
    global_group = Group.fetch("GROUPID1")
    self.assertEqual(global_group.master, global_master.uuid)
    self.assertEqual(_replication.slave_has_master(shard_master), None)
    shard_group = Group.fetch("GROUPID2")
    self.assertEqual(shard_group.master, None)
    self.proxy.group.promote("GROUPID2", str(shard_master.uuid))
    self.assertEqual(
        _replication.slave_has_master(shard_master),
        str(global_group.master)
    )
    self.assertEqual(
        _replication.slave_has_master(other_shard_master),
        str(global_group.master)
    )
    shard_group = Group.fetch("GROUPID2")
    self.assertEqual(shard_group.master, shard_master.uuid)

    # Demote the master in the global group, check that a shard group
    # points to None, promote it again and check that everything is
    # back to normal.
    global_group = Group.fetch("GROUPID1")
    self.assertEqual(global_group.master, global_master.uuid)
    shard_group = Group.fetch("GROUPID2")
    self.assertEqual(shard_group.master, shard_master.uuid)
    self.proxy.group.demote("GROUPID1")
    global_group = Group.fetch("GROUPID1")
    self.assertEqual(global_group.master, None)
    self.assertEqual(_replication.slave_has_master(shard_master), None)
    self.proxy.group.promote("GROUPID1", str(global_master.uuid))
    global_group = Group.fetch("GROUPID1")
    self.assertEqual(global_group.master, global_master.uuid)
    self.assertEqual(
        _replication.slave_has_master(shard_master),
        str(global_group.master)
    )
    self.assertEqual(
        _replication.slave_has_master(other_shard_master),
        str(global_group.master)
    )
def test_properties(self):
    """Test setting MySQLServer's properties.

    Each read/write property (user, passwd, status, mode, weight) is set,
    read back, and -- where persisted -- re-fetched from the state store
    to confirm the stored value matches. Connection-dependent properties
    are checked before and after connect().
    """
    server = self.server

    # Check property user.
    # BUG FIX: the assignment previously set "******" (a redacted
    # placeholder left in the source) while the assertion below expects
    # "user", so the test could never pass. Restored the literal to
    # match the assertion, mirroring the passwd check below.
    self.assertEqual(server.user, tests.utils.MySQLInstances().user)
    server.user = "user"
    self.assertEqual(server.user, "user")
    server.user = tests.utils.MySQLInstances().user

    # Check property passwd.
    self.assertEqual(server.passwd, tests.utils.MySQLInstances().passwd)
    server.passwd = "passwd"
    self.assertEqual(server.passwd, "passwd")
    server.passwd = tests.utils.MySQLInstances().passwd

    # Check property status; changes must persist to the state store.
    self.assertEqual(server.status, MySQLServer.SECONDARY)
    server.status = MySQLServer.FAULTY
    self.assertEqual(server.status, MySQLServer.FAULTY)
    fetched_server = MySQLServer.fetch(server.uuid)
    self.assertEqual(server.status, fetched_server.status)
    server.status = MySQLServer.SECONDARY
    fetched_server = MySQLServer.fetch(server.uuid)
    self.assertEqual(server.status, fetched_server.status)

    # Check property mode; changes must persist to the state store.
    self.assertEqual(server.mode, MySQLServer.READ_ONLY)
    server.mode = MySQLServer.OFFLINE
    self.assertEqual(server.mode, MySQLServer.OFFLINE)
    fetched_server = MySQLServer.fetch(server.uuid)
    self.assertEqual(server.mode, fetched_server.mode)
    server.mode = MySQLServer.READ_ONLY
    fetched_server = MySQLServer.fetch(server.uuid)
    self.assertEqual(server.mode, fetched_server.mode)

    # Check property weight; changes must persist to the state store.
    self.assertEqual(server.weight, MySQLServer.DEFAULT_WEIGHT)
    server.weight = 0.1
    self.assertEqual(server.weight, 0.1)
    fetched_server = MySQLServer.fetch(server.uuid)
    self.assertEqual(server.weight, fetched_server.weight)
    server.weight = MySQLServer.DEFAULT_WEIGHT
    fetched_server = MySQLServer.fetch(server.uuid)
    self.assertEqual(server.weight, fetched_server.weight)

    # Before connecting, server-derived properties are unknown.
    self.assertEqual(server.read_only, None)
    self.assertEqual(server.server_id, None)
    self.assertEqual(server.gtid_enabled, None)
    self.assertEqual(server.binlog_enabled, None)
    self.assertEqual(server.version, None)

    # Bind instance to a server.
    server.connect()
    self.assertNotEqual(server.read_only, None)
    self.assertNotEqual(server.server_id, 0)
    self.assertEqual(server.gtid_enabled, True)
    self.assertEqual(server.binlog_enabled, True)

    # Check read_only property.
    server.read_only = True
    self.assertEqual(server.read_only, True)
    server.read_only = False
    self.assertEqual(server.read_only, False)
def setup_group_replication(group_master_id, group_slave_id):
    """Sets up replication between the masters of the two groups and
    updates the references to the groups in each other.

    NOTE(review): this is a token-for-token duplicate of another
    setup_group_replication defined earlier in this file -- consider
    deduplicating into a single shared definition.

    :param group_master_id: The group whose master will act as the master
                            in the replication setup.
    :param group_slave_id: The group whose master will act as the slave in
                           the replication setup.

    :raises: GroupError if either group is missing, has no master, or a
             master is not running/reachable.
    """
    group_master = Group.fetch(group_master_id)
    group_slave = Group.fetch(group_slave_id)

    # Both groups must exist and each must currently have a master.
    if group_master is None:
        raise _errors.GroupError \
            (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_master_id, ))

    if group_slave is None:
        raise _errors.GroupError \
            (GROUP_REPLICATION_GROUP_NOT_FOUND_ERROR % (group_slave_id, ))

    if group_master.master is None:
        raise _errors.GroupError \
            (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % "")

    if group_slave.master is None:
        raise _errors.GroupError \
            (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % "")

    # Master is the master of the Global Group. We replicate from here to
    # the masters of all the shard Groups.
    master = MySQLServer.fetch(group_master.master)
    if master is None:
        raise _errors.GroupError \
            (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % \
            (group_master.master, ))

    # Get the master of the shard Group.
    slave = MySQLServer.fetch(group_slave.master)
    if slave is None:
        raise _errors.GroupError \
            (GROUP_REPLICATION_GROUP_MASTER_NOT_FOUND_ERROR % \
            (group_slave.master, ))

    if not server_running(master):
        # The server is already down. We cannot connect to it to setup
        # replication.
        raise _errors.GroupError \
            (GROUP_MASTER_NOT_RUNNING % (group_master.group_id, ))

    try:
        master.connect()
    except _errors.DatabaseError as error:
        # Server is not accessible, unable to connect to the server.
        raise _errors.GroupError(GROUP_REPLICATION_SERVER_ERROR %
                                 (group_slave.master, error))

    if not server_running(slave):
        # The server is already down. We cannot connect to it to setup
        # replication.
        raise _errors.GroupError \
            (GROUP_MASTER_NOT_RUNNING % (group_slave.group_id, ))

    try:
        slave.connect()
    except _errors.DatabaseError as error:
        raise _errors.GroupError(GROUP_REPLICATION_SERVER_ERROR %
                                 (group_master.master, error))

    # Stop any running slave, clear references to old masters, point the
    # slave at the new master and restart replication.
    _replication.stop_slave(slave, wait=True)

    # clear references to old masters in the slave
    _replication.reset_slave(slave, clean=True)

    _replication.switch_master(slave, master, master.user, master.passwd)
    _replication.start_slave(slave, wait=True)

    try:
        # Cross-register the group ids on both sides.
        group_master.add_slave_group_id(group_slave_id)
        group_slave.add_master_group_id(group_master_id)
    except _errors.DatabaseError:
        # If there is an error while adding a reference to
        # the slave group or a master group, it means that
        # the slave group was already added and the error
        # is happening because the group was already registered.
        # Ignore this error.
        pass
def test_shard_enable(self):
    """Disable both shards, re-enable them, and verify that global
    updates replicate to the re-enabled shard servers.

    The second lookup with key 1500 exercises the other shard's range;
    both shards must serve the replicated global rows.
    """
    # Disable both shards, then enable them again.
    self.proxy.sharding.disable_shard("1")
    self.proxy.sharding.disable_shard("2")
    sleep(5)
    self.proxy.sharding.enable_shard("1")
    self.proxy.sharding.enable_shard("2")
    sleep(3)

    # Locate the global group's master (entry with the master flag set).
    status = self.proxy.sharding.lookup_servers("1", 500, "GLOBAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            global_master_uuid = obtained_server_list[idx][0]
            break
    global_master = MySQLServer.fetch(global_master_uuid)
    global_master.connect()

    # Re-create the global database and seed it with two rows.
    global_master.exec_stmt("DROP DATABASE IF EXISTS global_db")
    global_master.exec_stmt("CREATE DATABASE global_db")
    global_master.exec_stmt("CREATE TABLE global_db.global_table"
                            "(userID INT, name VARCHAR(30))")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(101, 'TEST 1')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(202, 'TEST 2')")
    sleep(3)

    # The first shard's servers must have received the global rows;
    # a DatabaseError here means the shard was not really enabled.
    status = self.proxy.sharding.lookup_servers("db1.t1", 500, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            shard_uuid = obtained_server_list[idx][0]
            shard_server = MySQLServer.fetch(shard_uuid)
            shard_server.connect()
            try:
                rows = shard_server.exec_stmt(
                    "SELECT NAME FROM global_db.global_table",
                    {"fetch" : True}
                )
            except _errors.DatabaseError:
                raise Exception("Enable Shard failed to enable shard.")
            self.assertEqual(len(rows), 2)
            self.assertEqual(rows[0][0], 'TEST 1')
            self.assertEqual(rows[1][0], 'TEST 2')

    # Same check for the shard serving key 1500.
    status = self.proxy.sharding.lookup_servers("db1.t1", 1500, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            shard_uuid = obtained_server_list[idx][0]
            shard_server = MySQLServer.fetch(shard_uuid)
            shard_server.connect()
            try:
                rows = shard_server.exec_stmt(
                    "SELECT NAME FROM global_db.global_table",
                    {"fetch" : True}
                )
            except _errors.DatabaseError:
                raise Exception("Enable Shard failed to enable shard.")
            self.assertEqual(len(rows), 2)
            self.assertEqual(rows[0][0], 'TEST 1')
            self.assertEqual(rows[1][0], 'TEST 2')
def test_global_update_propogation_switchover(self):
    """Ensure that the global data propogation is not impacted when a
    switchover is triggered. Basically it should ensure that the new
    master is redirected to replicate to all the other shards.
    """
    # Locate the global group's master (entry with the master flag set).
    status = self.proxy.sharding.lookup_servers("1", 500, "GLOBAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            global_master_uuid = obtained_server_list[idx][0]
            break
    global_master = MySQLServer.fetch(global_master_uuid)
    global_master.connect()

    # Re-create the global database and seed it with two rows.
    global_master.exec_stmt("DROP DATABASE IF EXISTS global_db")
    global_master.exec_stmt("CREATE DATABASE global_db")
    global_master.exec_stmt("CREATE TABLE global_db.global_table"
                            "(userID INT, name VARCHAR(30))")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(101, 'TEST 1')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(202, 'TEST 2')")

    # Switch over the global group's master.
    status = self.proxy.group.promote("GROUPID1")
    self.assertStatus(status, _executor.Job.SUCCESS)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Executed action (_change_to_candidate).")
    sleep(5)

    # Find the new global master and insert through it.
    status = self.proxy.sharding.lookup_servers("1", 500, "GLOBAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            global_master_uuid = obtained_server_list[idx][0]
            break
    global_master = MySQLServer.fetch(global_master_uuid)
    global_master.connect()
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(303, 'TEST 3')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(404, 'TEST 4')")

    # Switch over a shard group's master; global propagation must
    # continue regardless.
    status = self.proxy.group.promote("GROUPID2")
    self.assertStatus(status, _executor.Job.SUCCESS)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Executed action (_change_to_candidate).")
    sleep(5)
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(505, 'TEST 5')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(606, 'TEST 6')")

    # Switch over the other shard group's master as well.
    status = self.proxy.group.promote("GROUPID3")
    self.assertStatus(status, _executor.Job.SUCCESS)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Executed action (_change_to_candidate).")
    sleep(5)
    # NOTE(review): userIDs 505/606 are reused from TEST 5/6 above --
    # looks like a copy-paste slip; the IDs are never asserted, only
    # the NAME column, so the test outcome is unaffected.
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(505, 'TEST 7')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(606, 'TEST 8')")
    sleep(5)

    # All eight rows must have reached the shard serving key 500.
    status = self.proxy.sharding.lookup_servers("db1.t1", 500, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            shard_uuid = obtained_server_list[idx][0]
            shard_server = MySQLServer.fetch(shard_uuid)
            shard_server.connect()
            rows = shard_server.exec_stmt(
                "SELECT NAME FROM global_db.global_table",
                {"fetch" : True}
            )
            self.assertEqual(len(rows), 8)
            self.assertEqual(rows[0][0], 'TEST 1')
            self.assertEqual(rows[1][0], 'TEST 2')
            self.assertEqual(rows[2][0], 'TEST 3')
            self.assertEqual(rows[3][0], 'TEST 4')
            self.assertEqual(rows[4][0], 'TEST 5')
            self.assertEqual(rows[5][0], 'TEST 6')
            self.assertEqual(rows[6][0], 'TEST 7')
            self.assertEqual(rows[7][0], 'TEST 8')

    # And likewise the shard serving key 1500.
    status = self.proxy.sharding.lookup_servers("db1.t1", 1500, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            shard_uuid = obtained_server_list[idx][0]
            shard_server = MySQLServer.fetch(shard_uuid)
            shard_server.connect()
            rows = shard_server.exec_stmt(
                "SELECT NAME FROM global_db.global_table",
                {"fetch" : True}
            )
            self.assertEqual(len(rows), 8)
            self.assertEqual(rows[0][0], 'TEST 1')
            self.assertEqual(rows[1][0], 'TEST 2')
            self.assertEqual(rows[2][0], 'TEST 3')
            self.assertEqual(rows[3][0], 'TEST 4')
            self.assertEqual(rows[4][0], 'TEST 5')
            self.assertEqual(rows[5][0], 'TEST 6')
            self.assertEqual(rows[6][0], 'TEST 7')
            self.assertEqual(rows[7][0], 'TEST 8')
def test_shard_server_added_later(self):
    """Verify global data reaches a group whose shard is added after the
    global rows were written.

    The shard is first removed, global rows are inserted, and then
    add_shard is attempted; the servers of the group must still have
    received the global updates via replication.
    """
    # Remove shard 1 so its group temporarily hosts no shard.
    self.proxy.sharding.disable_shard("1")
    self.proxy.sharding.remove_shard("1")
    sleep(3)

    # Locate the global group's master (entry with the master flag set).
    status = self.proxy.sharding.lookup_servers("1", 500, "GLOBAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            global_master_uuid = obtained_server_list[idx][0]
            break
    global_master = MySQLServer.fetch(global_master_uuid)
    global_master.connect()

    # Re-create the global database and insert seven rows while the
    # shard is absent.
    global_master.exec_stmt("DROP DATABASE IF EXISTS global_db")
    global_master.exec_stmt("CREATE DATABASE global_db")
    global_master.exec_stmt("CREATE TABLE global_db.global_table"
                            "(userID INT, name VARCHAR(30))")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(101, 'TEST 1')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(202, 'TEST 2')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(303, 'TEST 3')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(404, 'TEST 4')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(505, 'TEST 5')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(606, 'TEST 6')")
    global_master.exec_stmt("INSERT INTO global_db.global_table "
                            "VALUES(707, 'TEST 7')")
    sleep(5)

    # Adding the shard is expected to fail here (job reports ERROR).
    # NOTE(review): presumably because the lower bound / group state is
    # invalid for add_shard at this point -- confirm against the
    # _add_shard implementation.
    status = self.proxy.sharding.add_shard(1, "GROUPID2/0", "ENABLED")
    self.assertStatus(status, _executor.Job.ERROR)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Tried to execute action (_add_shard).")
    sleep(5)

    # The group's servers must nevertheless hold all seven global rows.
    status = self.proxy.sharding.lookup_servers("db1.t1", 1500, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            shard_uuid = obtained_server_list[idx][0]
            shard_server = MySQLServer.fetch(shard_uuid)
            shard_server.connect()
            try:
                rows = shard_server.exec_stmt(
                    "SELECT NAME FROM global_db.global_table",
                    {"fetch" : True}
                )
            except _errors.DatabaseError:
                raise Exception("Enable Shard failed to enable shard.")
            self.assertEqual(len(rows), 7)
            self.assertEqual(rows[0][0], 'TEST 1')
            self.assertEqual(rows[1][0], 'TEST 2')
            self.assertEqual(rows[2][0], 'TEST 3')
            self.assertEqual(rows[3][0], 'TEST 4')
            self.assertEqual(rows[4][0], 'TEST 5')
            self.assertEqual(rows[5][0], 'TEST 6')
            self.assertEqual(rows[6][0], 'TEST 7')
def _setup_shard_switch_move(shard_id, source_group_id, destination_group_id,
                             update_only):
    """Setup the moved shard to map to the new group.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destination_group_id: The ID of the group to which the shard
                                 needs to be moved.
    :param update_only: Only update the state store and skip provisioning.

    :raises: ShardingError if a group or a group master cannot be found.
    """
    # Fetch the Range sharding specification. When we start implementing
    # heterogenous sharding schemes, we need to find out the type of
    # sharding scheme and we should use that to find out the sharding
    # implementation.
    _, source_shard, _, shard_mapping_defn = \
        _services_sharding.verify_and_fetch_shard(shard_id)

    destination_group = Group.fetch(destination_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destination_group_id, ))
    destn_group_master = MySQLServer.fetch(destination_group.master)
    if destn_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    destn_group_master.connect()

    # Set the destination group master to read_only while the switch is
    # in progress.
    destn_group_master.read_only = True

    # Setup replication between the shard group and the global group.
    # NOTE(review): shard_mapping_defn[2] appears to be the global
    # group's id -- confirm against verify_and_fetch_shard.
    _group_replication.setup_group_replication \
        (shard_mapping_defn[2], destination_group_id)

    # set the shard to point to the new group.
    source_shard.group_id = destination_group_id

    # Stop the replication between the global server and the original
    # group associated with the shard.
    _group_replication.stop_group_slave\
        (shard_mapping_defn[2], source_group_id, True)

    # The sleep ensures that the connector have refreshed their caches
    # with the new shards that have been added as a result of the split.
    time.sleep(_utils.TTL)

    # Reset the read only flag on the source server.
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)

    if not update_only:
        master.connect()
        master.read_only = False
        # Kill all the existing connections on the servers
        source_group.kill_connections_on_servers()
        # allow updates in the destination group master
        destn_group_master.read_only = False
def _setup_move_sync(shard_id, source_group_id, destn_group_id, split_value,
                     cmd):
    """Setup replication between the source and the destination groups and
    ensure that they are in sync.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard
                           needs to be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param cmd: Indicates the type of re-sharding operation.

    :raises ShardingError: If either group or its master cannot be found.
    """
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))

    destination_group = Group.fetch(destn_group_id)
    if destination_group is None:
        #BUG FIX: the original interpolated the undefined name
        #"destination_group_id" here, raising a NameError instead of the
        #intended ShardingError. The parameter is named destn_group_id.
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))

    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    master.connect()

    slave = MySQLServer.fetch(destination_group.master)
    if slave is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    slave.connect()

    #Stop and reset any slave that might be running on the slave server.
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)

    #Change the master to the shard group master.
    #BUG FIX: use the dedicated replication credentials (repl_user /
    #repl_pass), consistent with _setup_replication, instead of the
    #administrative user/passwd the original passed here.
    _replication.switch_master(slave, master, master.repl_user,
                               master.repl_pass)

    #Start the slave so that syncing of the data begins.
    _replication.start_slave(slave, wait=True)

    #Synchronize until the slave catches up with the master.
    _replication.synchronize_with_read_only(slave, master)

    #Reset replication once the syncing is done.
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)

    #Trigger changing the mappings for the shard that was copied.
    _events.trigger_within_procedure(
        SETUP_RESHARDING_SWITCH,
        shard_id,
        source_group_id,
        destn_group_id,
        split_value,
        cmd
    )
def _check_job(self, status, action):
    """Assert that a Fabric job completed successfully executing *action*.

    Factored out of setUp to remove the repeated three-line assertion
    pattern that was copy-pasted for every group/sharding command.
    """
    self.assertStatus(status, _executor.Job.SUCCESS)
    self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
    self.assertEqual(status[1][-1]["description"],
                     "Executed action (%s)." % (action,))

def setUp(self):
    """Creates the following topology for testing,

    GROUPID1 - localhost:13001, localhost:13002 - Global Group
    GROUPID2 - localhost:13003, localhost:13004 - shard 1
    GROUPID3 - localhost:13005, localhost:13006 - shard 2
    """
    self.manager, self.proxy = tests.utils.setup_xmlrpc()

    #Create the three groups and register two servers in each.
    groups = (
        ("GROUPID1", "First description.", (0, 1)),
        ("GROUPID2", "Second description.", (2, 3)),
        ("GROUPID3", "Third description.", (4, 5)),
    )
    for group_id, description, instances in groups:
        status = self.proxy.group.create(group_id, description)
        self._check_job(status, "_create_group")
        for instance in instances:
            status = self.proxy.group.add(
                group_id, MySQLInstances().get_address(instance)
            )
            self._check_job(status, "_add_server")

    #Promote a master in each group.
    for group_id, _, _ in groups:
        status = self.proxy.group.promote(group_id)
        self._check_job(status, "_change_to_candidate")

    #Define a HASH sharding scheme with GROUPID1 as the global group.
    status = self.proxy.sharding.create_definition("HASH", "GROUPID1")
    self._check_job(status, "_define_shard_mapping")
    self.assertEqual(status[2], 1)

    status = self.proxy.sharding.add_table(1, "db1.t1", "userID")
    self._check_job(status, "_add_shard_mapping")

    status = self.proxy.sharding.add_shard(1, "GROUPID2", "ENABLED")
    self._check_job(status, "_add_shard")

    #Locate the shard master and seed it with test data.
    status = self.proxy.sharding.lookup_servers("db1.t1", 500, "LOCAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    obtained_server_list = status[2]
    for idx in range(0, 2):
        if obtained_server_list[idx][2]:
            shard_uuid = obtained_server_list[idx][0]
            shard_server = MySQLServer.fetch(shard_uuid)
            shard_server.connect()
            shard_server.exec_stmt("DROP DATABASE IF EXISTS db1")
            shard_server.exec_stmt("CREATE DATABASE db1")
            shard_server.exec_stmt("CREATE TABLE db1.t1"
                                   "(userID INT, name VARCHAR(30))")
            for i in range(1, 100):
                shard_server.exec_stmt("INSERT INTO db1.t1 "
                                       "VALUES(%s, 'TEST %s')" % (i, i))
def tearDown(self):
    """Tear the sharded topology back down: drop the test databases on
    every looked-up server, remove shards, mappings and groups, and
    release the XML-RPC fixtures.
    """
    def expect_success(status, description):
        #Local helper: assert a job ran to completion with the given
        #final description.
        self.assertStatus(status, _executor.Job.SUCCESS)
        self.assertEqual(status[1][-1]["state"], _executor.Job.COMPLETE)
        self.assertEqual(status[1][-1]["description"], description)

    self.proxy.sharding.enable_shard("2")

    #Drop the test databases on each set of servers returned by the
    #three lookups (global group, then both local shard lookups).
    lookups = (
        ("1", 500, "GLOBAL",
         ("DROP DATABASE IF EXISTS global_db",)),
        ("db1.t1", 500, "LOCAL",
         ("DROP DATABASE IF EXISTS global_db",
          "DROP DATABASE IF EXISTS db1")),
        ("db1.t1", 800, "LOCAL",
         ("DROP DATABASE IF EXISTS global_db",
          "DROP DATABASE IF EXISTS db1")),
    )
    for key, value, hint, statements in lookups:
        status = self.proxy.sharding.lookup_servers(key, value, hint)
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        server_list = status[2]
        for idx in range(0, 2):
            server = MySQLServer.fetch(server_list[idx][0])
            server.connect()
            for statement in statements:
                server.exec_stmt(statement)

    #Disable, then remove, both shards.
    for shard in ("2", "3"):
        expect_success(self.proxy.sharding.disable_shard(shard),
                       "Executed action (_disable_shard).")
    for shard in ("2", "3"):
        expect_success(self.proxy.sharding.remove_shard(shard),
                       "Executed action (_remove_shard).")

    expect_success(self.proxy.sharding.remove_table("db1.t1"),
                   "Executed action (_remove_shard_mapping).")
    expect_success(self.proxy.sharding.remove_definition("1"),
                   "Executed action (_remove_shard_mapping_defn).")

    self.proxy.group.demote("GROUPID1")
    self.proxy.group.demote("GROUPID2")
    self.proxy.group.demote("GROUPID3")

    #Remove both servers from each group and destroy the group.
    for group_id in ("GROUPID1", "GROUPID2", "GROUPID3"):
        status = self.proxy.group.lookup_servers(group_id)
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        server_list = status[2]
        for entry in (server_list[0], server_list[1]):
            expect_success(
                self.proxy.group.remove(group_id, entry["server_uuid"]),
                "Executed action (_remove_server).")
        expect_success(self.proxy.group.destroy(group_id),
                       "Executed action (_destroy_group).")

    tests.utils.cleanup_environment()
    tests.utils.teardown_xmlrpc(self.manager, self.proxy)
def _setup_shard_switch_split(shard_id, source_group_id, destination_group_id,
                              split_value, prune_limit, cmd, update_only):
    """Setup the moved shard to map to the new group.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destination_group_id: The ID of the group to which the shard
                                 needs to be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be done in
                        one batch.
    :param cmd: Indicates the type of re-sharding operation.
    :param update_only: Only update the state store and skip provisioning.
    """
    #Fetch the Range sharding specification.
    range_sharding_spec, source_shard, shard_mappings, shard_mapping_defn = \
        _services_sharding.verify_and_fetch_shard(shard_id)

    #Retire the old shard: disable it, then drop its specification and
    #its registration.
    source_shard.disable()
    range_sharding_spec.remove()
    source_shard.remove()

    destination_group = Group.fetch(destination_group_id)
    if destination_group is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_NOT_FOUND %
            (destination_group_id, ))
    destn_group_master = MySQLServer.fetch(destination_group.master)
    if destn_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    destn_group_master.connect()

    #Make the destination group read only to disable updates until the
    #connectors update their caches, thus avoiding inconsistency.
    destn_group_master.read_only = True

    #Register the two halves of the split, both disabled for now.
    new_shard_1 = Shards.add(source_shard.group_id, "DISABLED")
    new_shard_2 = Shards.add(destination_group_id, "DISABLED")

    #Both of the shard mappings associated with this shard_id should
    #be of the same sharding type. Hence it is safe to use one of the
    #shard mappings.
    if shard_mappings[0].type_name == "HASH":
        #In a HASH split the retained half keeps its lower_bound and the
        #new half gets the computed split_value; both get fresh shard ids.
        HashShardingSpecification.add_hash_split(
            range_sharding_spec.shard_mapping_id,
            new_shard_1.shard_id,
            range_sharding_spec.lower_bound)
        HashShardingSpecification.add_hash_split(
            range_sharding_spec.shard_mapping_id,
            new_shard_2.shard_id,
            split_value)
    else:
        #RANGE split: the retained half keeps its lower_bound, the new
        #half starts at the computed split_value.
        RangeShardingSpecification.add(range_sharding_spec.shard_mapping_id,
                                       range_sharding_spec.lower_bound,
                                       new_shard_1.shard_id)
        RangeShardingSpecification.add(range_sharding_spec.shard_mapping_id,
                                       split_value,
                                       new_shard_2.shard_id)

    #The sleep ensures that the connectors have refreshed their caches with
    #the new shards that have been added as a result of the split.
    time.sleep(_utils.TTL)

    #The source shard group master would have been marked as read only
    #during the sync. Remove the read_only flag.
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_NOT_FOUND % (source_group_id, ))
    source_group_master = MySQLServer.fetch(source_group.master)
    if source_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    source_group_master.connect()

    #Kill all the existing connections on the servers.
    source_group.kill_connections_on_servers()

    #Allow connections on the source group master.
    source_group_master.read_only = False

    #Allow connections on the destination group master.
    destn_group_master.read_only = False

    #Setup replication for the new group from the global server.
    _group_replication.setup_group_replication(
        shard_mapping_defn[2], destination_group_id)

    #Enable the split shards.
    new_shard_1.enable()
    new_shard_2.enable()

    #Trigger changing the mappings for the shard that was copied.
    if not update_only:
        _events.trigger_within_procedure(PRUNE_SHARDS,
                                         new_shard_1.shard_id,
                                         new_shard_2.shard_id,
                                         prune_limit)
def _run(self):
    """Periodically verify the availability of every server in the
    monitored group until the detector is stopped.

    Each pass: skip (and kill connections of) FAULTY servers; for alive
    servers optionally verify the replication link to the master; for
    unreachable servers accumulate failed attempts in a quarantine map
    and, past the detection threshold, mark the server FAULTY and trigger
    the REPORT_FAILURE procedure.
    """
    from mysql.fabric.server import (
        Group,
        MySQLServer,
        ConnectionManager,
    )

    ignored_status = [MySQLServer.FAULTY]
    #Maps server uuid -> consecutive failed detection attempts.
    quarantine = {}
    interval = FailureDetector._DETECTION_INTERVAL
    detections = FailureDetector._DETECTIONS
    detection_timeout = FailureDetector._DETECTION_TIMEOUT
    connection_manager = ConnectionManager()
    slave_deep_checks = FailureDetector._SLAVE_DEEP_CHECKS

    _persistence.init_thread()

    while self.__check:
        try:
            unreachable = set()
            group = Group.fetch(self.__group_id)
            if group is not None:
                for server in group.servers():
                    if server.status in ignored_status:
                        ### Server is FAULTY.
                        connection_manager.kill_connections(server)
                        continue
                    if MySQLServer.is_alive(server, detection_timeout):
                        ### Server is alive; optionally verify its
                        ### replication link to the group master.
                        if slave_deep_checks:
                            is_master = (group.master == server.uuid)
                            if not is_master:
                                #Only meaningful if the master itself is up.
                                master_server = MySQLServer.fetch(group.master)
                                if MySQLServer.is_alive(master_server,
                                                        detection_timeout):
                                    server.connect()
                                    slave_issues, why_slave_issues = \
                                        _replication.check_slave_issues(server)
                                    if slave_issues:
                                        if (why_slave_issues['io_error'] and
                                            why_slave_issues['io_errno'] == 2003):
                                            #2003 = cannot connect: the slave is
                                            #reconnecting; just log it.
                                            _LOGGER.info(why_slave_issues)
                                        else:
                                            #Slave threads not healthy: demote
                                            #the server to SPARE.
                                            server.status = MySQLServer.SPARE
                                    server.disconnect()
                        continue
                    #Server is unreachable: quarantine it and, after enough
                    #consecutive failures, mark it FAULTY.
                    unreachable.add(server.uuid)
                    _LOGGER.warning(
                        "Server (%s) in group (%s) is unreachable.",
                        server.uuid, self.__group_id
                    )
                    unstable = False
                    failed_attempts = 0
                    if server.uuid not in quarantine:
                        quarantine[server.uuid] = failed_attempts = 1
                    else:
                        failed_attempts = quarantine[server.uuid] + 1
                        quarantine[server.uuid] = failed_attempts
                    if failed_attempts >= detections:
                        unstable = True
                    can_set_faulty = group.can_set_server_faulty(
                        server, get_time()
                    )
                    if unstable and can_set_faulty:
                        # We have to make this transactional and make the
                        # failover (i.e. report failure) robust to failures.
                        # Otherwise, a master might be set to faulty and
                        # a new one never promoted.
                        server.status = MySQLServer.FAULTY
                        connection_manager.kill_connections(server)

                        procedures = trigger("REPORT_FAILURE", None,
                                             str(server.uuid),
                                             threading.current_thread().name,
                                             MySQLServer.FAULTY, False
                        )
                        executor = _executor.Executor()
                        for procedure in procedures:
                            executor.wait_for_procedure(procedure)

            #Drop quarantine entries for servers that became reachable.
            #BUG FIX: iterate a snapshot of the keys; deleting from the
            #dict while iterating .keys() only worked by accident on
            #Python 2 and raises RuntimeError on Python 3.
            for uuid in list(quarantine.keys()):
                if uuid not in unreachable:
                    del quarantine[uuid]
        except (_errors.ExecutorError, _errors.DatabaseError):
            pass
        except Exception as error:
            _LOGGER.exception(error)
        time.sleep(interval)

    _persistence.deinit_thread()
def _setup_shard_switch_move(shard_id, source_group_id, destination_group_id,
                             update_only):
    """Setup the moved shard to map to the new group.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destination_group_id: The ID of the group to which the shard
                                 needs to be moved.
    :param update_only: Only update the state store and skip provisioning.
    """
    #Fetch the Range sharding specification. When we start implementing
    #heterogenous sharding schemes, we need to find out the type of
    #sharding scheme and we should use that to find out the sharding
    #implementation.
    _, source_shard, _, shard_mapping_defn = \
        _services_sharding.verify_and_fetch_shard(shard_id)
    global_group_id = shard_mapping_defn[2]

    dest_group = Group.fetch(destination_group_id)
    if dest_group is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_NOT_FOUND %
            (destination_group_id, ))
    dest_master = MySQLServer.fetch(dest_group.master)
    if dest_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    dest_master.connect()

    #Block writes on the destination master until the switch finishes.
    dest_master.read_only = True

    #Replicate from the global group into the destination group.
    _group_replication.setup_group_replication(
        global_group_id, destination_group_id)

    #Re-map the shard onto the destination group.
    source_shard.group_id = destination_group_id

    #Stop the replication between the global server and the original
    #group associated with the shard.
    _group_replication.stop_group_slave(
        global_group_id, source_group_id, True)

    #Give the connectors one TTL to refresh their shard caches.
    time.sleep(_utils.TTL)

    #Reset the read only flag on the source server.
    src_group = Group.fetch(source_group_id)
    if src_group is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_NOT_FOUND % (source_group_id, ))
    src_master = MySQLServer.fetch(src_group.master)
    if src_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)

    if not update_only:
        src_master.connect()
        src_master.read_only = False

    #Kill all the existing connections on the servers.
    #NOTE(review): statement grouping under update_only reconstructed from
    #the collapsed source — confirm against upstream.
    src_group.kill_connections_on_servers()

    #Allow updates in the destination group master.
    dest_master.read_only = False
def _setup_shard_switch_split(shard_id, source_group_id, destination_group_id,
                              split_value, prune_limit, cmd, update_only):
    """Setup the moved shard to map to the new group.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destination_group_id: The ID of the group to which the shard
                                 needs to be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be done in
                        one batch.
    :param cmd: Indicates the type of re-sharding operation.
    :param update_only: Only update the state store and skip provisioning.
    """
    #Fetch the Range sharding specification.
    sharding_spec, old_shard, shard_mappings, shard_mapping_defn = \
        _services_sharding.verify_and_fetch_shard(shard_id)

    #Disable the old shard, then drop its specification and registration.
    old_shard.disable()
    sharding_spec.remove()
    old_shard.remove()

    dest_group = Group.fetch(destination_group_id)
    if dest_group is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_NOT_FOUND %
            (destination_group_id, ))
    dest_master = MySQLServer.fetch(dest_group.master)
    if dest_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    dest_master.connect()

    #Make the destination group read only to disable updates until the
    #connectors update their caches, thus avoiding inconsistency.
    dest_master.read_only = True

    #Add the new shards. Generate new shard IDs for the shard being
    #split and also for the shard that is created as a result of the split.
    retained_half = Shards.add(old_shard.group_id, "DISABLED")
    split_half = Shards.add(destination_group_id, "DISABLED")

    #Both of the shard mappings associated with this shard_id should
    #be of the same sharding type. Hence it is safe to use one of the
    #shard mappings.
    mapping_id = sharding_spec.shard_mapping_id
    if shard_mappings[0].type_name == "HASH":
        #HASH split: the retained half keeps its lower_bound, the new half
        #starts at the computed split_value; both get fresh shard ids.
        HashShardingSpecification.add_hash_split(
            mapping_id, retained_half.shard_id, sharding_spec.lower_bound)
        HashShardingSpecification.add_hash_split(
            mapping_id, split_half.shard_id, split_value)
    else:
        #RANGE split: same lower-bound assignment as above, but as plain
        #range specifications.
        RangeShardingSpecification.add(
            mapping_id, sharding_spec.lower_bound, retained_half.shard_id)
        RangeShardingSpecification.add(
            mapping_id, split_value, split_half.shard_id)

    #Give the connectors one TTL to refresh their caches with the shards
    #added by the split.
    time.sleep(_utils.TTL)

    #The source shard group master would have been marked as read only
    #during the sync. Remove the read_only flag.
    src_group = Group.fetch(source_group_id)
    if src_group is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_NOT_FOUND % (source_group_id, ))
    src_master = MySQLServer.fetch(src_group.master)
    if src_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    src_master.connect()

    #Kill all the existing connections on the servers.
    src_group.kill_connections_on_servers()

    #Allow connections on the source group master.
    src_master.read_only = False

    #Allow connections on the destination group master.
    dest_master.read_only = False

    #Setup replication for the new group from the global server.
    _group_replication.setup_group_replication(
        shard_mapping_defn[2], destination_group_id)

    #Enable the split shards.
    retained_half.enable()
    split_half.enable()

    #Trigger changing the mappings for the shard that was copied.
    if not update_only:
        _events.trigger_within_procedure(
            PRUNE_SHARDS,
            retained_half.shard_id,
            split_half.shard_id,
            prune_limit
        )
def test_properties(self):
    """Test setting MySQLServer's properties.
    """
    server = self.server

    # Check property user.
    self.assertEqual(server.user, tests.utils.MySQLInstances().user)
    #BUG FIX: the original assigned the redaction artifact "******" and
    #then asserted the value equals "user", which can never pass.
    server.user = "user"
    self.assertEqual(server.user, "user")
    server.user = tests.utils.MySQLInstances().user

    # Check property passwd.
    self.assertEqual(server.passwd, tests.utils.MySQLInstances().passwd)
    server.passwd = "passwd"
    self.assertEqual(server.passwd, "passwd")
    server.passwd = tests.utils.MySQLInstances().passwd

    # Check property status and that it round-trips through the state store.
    self.assertEqual(server.status, MySQLServer.SECONDARY)
    server.status = MySQLServer.FAULTY
    self.assertEqual(server.status, MySQLServer.FAULTY)
    fetched_server = MySQLServer.fetch(server.uuid)
    self.assertEqual(server.status, fetched_server.status)
    server.status = MySQLServer.SECONDARY
    fetched_server = MySQLServer.fetch(server.uuid)
    self.assertEqual(server.status, fetched_server.status)

    # Check property mode and that it round-trips through the state store.
    self.assertEqual(server.mode, MySQLServer.READ_ONLY)
    server.mode = MySQLServer.OFFLINE
    self.assertEqual(server.mode, MySQLServer.OFFLINE)
    fetched_server = MySQLServer.fetch(server.uuid)
    self.assertEqual(server.mode, fetched_server.mode)
    server.mode = MySQLServer.READ_ONLY
    fetched_server = MySQLServer.fetch(server.uuid)
    self.assertEqual(server.mode, fetched_server.mode)

    # Check property weight and that it round-trips through the state store.
    self.assertEqual(server.weight, MySQLServer.DEFAULT_WEIGHT)
    server.weight = 0.1
    self.assertEqual(server.weight, 0.1)
    fetched_server = MySQLServer.fetch(server.uuid)
    self.assertEqual(server.weight, fetched_server.weight)
    server.weight = MySQLServer.DEFAULT_WEIGHT
    fetched_server = MySQLServer.fetch(server.uuid)
    self.assertEqual(server.weight, fetched_server.weight)

    # Before connecting, server-side properties are unknown.
    self.assertEqual(server.read_only, None)
    self.assertEqual(server.server_id, None)
    self.assertEqual(server.gtid_enabled, None)
    self.assertEqual(server.binlog_enabled, None)
    self.assertEqual(server.version, None)

    # Bind instance to a server.
    server.connect()
    self.assertNotEqual(server.read_only, None)
    self.assertNotEqual(server.server_id, 0)
    self.assertEqual(server.gtid_enabled, True)
    self.assertEqual(server.binlog_enabled, True)

    # Check read_only property.
    server.read_only = True
    self.assertEqual(server.read_only, True)
    server.read_only = False
    self.assertEqual(server.read_only, False)
def test_global_update_propogation(self):
    """Ensure the global updates are passed to all the shards.
    """
    #Locate the global group's master and run some DDL/DML on it.
    status = self.proxy.sharding.lookup_servers("1", 500, "GLOBAL")
    self.assertEqual(status[0], True)
    self.assertEqual(status[1], "")
    server_list = status[2]
    for idx in range(0, 2):
        if server_list[idx][2]:
            global_master_uuid = server_list[idx][0]
            break

    global_master = MySQLServer.fetch(global_master_uuid)
    global_master.connect()
    for statement in (
        "DROP DATABASE IF EXISTS global_db",
        "CREATE DATABASE global_db",
        "CREATE TABLE global_db.global_table"
        "(userID INT, name VARCHAR(30))",
        "INSERT INTO global_db.global_table "
        "VALUES(101, 'TEST 1')",
        "INSERT INTO global_db.global_table "
        "VALUES(202, 'TEST 2')",
    ):
        global_master.exec_stmt(statement)

    #Give some time for replication to update the shards.
    sleep(3)

    #Both shard lookups (keys 500 and 1500) must see the global data on
    #their masters.
    for key in (500, 1500):
        status = self.proxy.sharding.lookup_servers("db1.t1", key, "LOCAL")
        self.assertEqual(status[0], True)
        self.assertEqual(status[1], "")
        server_list = status[2]
        for idx in range(0, 2):
            if server_list[idx][2]:
                shard_server = MySQLServer.fetch(server_list[idx][0])
                shard_server.connect()
                rows = shard_server.exec_stmt(
                    "SELECT NAME FROM global_db.global_table",
                    {"fetch" : True}
                )
                self.assertEqual(len(rows), 2)
                self.assertEqual(rows[0][0], 'TEST 1')
                self.assertEqual(rows[1][0], 'TEST 2')
def _check_shard_information(shard_id, destn_group_id, split_value,
                             prune_limit, cmd, update_only):
    """Verify the sharding information before starting a re-sharding
    operation.

    Validates the configuration (backup/restore credentials and tool
    paths), the split parameters (for a SPLIT), and the source and
    destination groups, then triggers the next step of the re-sharding
    procedure: BACKUP_SOURCE_SHARD for a real data copy, or
    SETUP_RESHARDING_SWITCH when only the metadata is to be updated.

    :param shard_id: The destination shard ID.
    :param destn_group_id: The Destination group ID.
    :param split_value: The point at which the sharding definition
                        should be split. Recomputed internally for HASH
                        sharding (callers must pass None in that case).
    :param prune_limit: The number of DELETEs that should be done in
                        one batch.
    :param cmd: Indicates if it is a split or a move being executed.
    :param update_only: If the operation is a update only operation.

    :raises ShardingError: if any of the validation steps fails.
    """
    #Read credentials and tool paths up front so that a bad configuration
    #aborts the operation before any re-sharding work starts.
    backup_user = _services_utils.read_config_value(
        _config.global_config, 'servers', 'backup_user'
    )
    backup_passwd = _services_utils.read_config_value(
        _config.global_config, 'servers', 'backup_password'
    )
    restore_user = _services_utils.read_config_value(
        _config.global_config, 'servers', 'restore_user'
    )
    restore_passwd = _services_utils.read_config_value(
        _config.global_config, 'servers', 'restore_password'
    )
    mysqldump_binary = _services_utils.read_config_value(
        _config.global_config, 'sharding', 'mysqldump_program'
    )
    mysqlclient_binary = _services_utils.read_config_value(
        _config.global_config, 'sharding', 'mysqlclient_program'
    )

    #The backup/restore steps shell out to these tools, so their paths
    #must point at real executables.
    if not _services_utils.is_valid_binary(mysqldump_binary):
        raise _errors.ShardingError(
            _services_sharding.MYSQLDUMP_NOT_FOUND % mysqldump_binary)

    if not _services_utils.is_valid_binary(mysqlclient_binary):
        raise _errors.ShardingError(
            _services_sharding.MYSQLCLIENT_NOT_FOUND % mysqlclient_binary)

    #The split-value checks below apply only to a SPLIT; for a MOVE the
    #split_value is passed through to the trigger unchanged.
    if cmd == "SPLIT":
        range_sharding_spec, _, shard_mappings, _ = \
            _services_sharding.verify_and_fetch_shard(shard_id)
        upper_bound = \
            SHARDING_SPECIFICATION_HANDLER[shard_mappings[0].type_name].\
            get_upper_bound(
                range_sharding_spec.lower_bound,
                range_sharding_spec.shard_mapping_id,
                shard_mappings[0].type_name
            )
        #If the underlying sharding scheme is a HASH: when a shard is split,
        #all the tables that are part of the shard have the same sharding
        #scheme. All the shard mappings associated with this shard_id will be
        #of the same sharding type. Hence it is safe to use one of the shard
        #mappings (shard_mappings[0]) for the type checks below.
        if shard_mappings[0].type_name == "HASH":
            #HASH split points are always computed, never user-supplied.
            if split_value is not None:
                raise _errors.ShardingError(
                    _services_sharding.NO_LOWER_BOUND_FOR_HASH_SHARDING
                )
            if upper_bound is None:
                #While splitting a range, retrieve the next upper bound and
                #find the mid-point; in the case where the next upper_bound
                #is unavailable pick the maximum value in the set of values
                #in the shard.
                upper_bound = \
                    HashShardingSpecification.fetch_max_key(shard_id)

            #Calculate the split value (mid-point of the shard's key range).
            split_value = \
                SHARDING_DATATYPE_HANDLER[shard_mappings[0].type_name].\
                split_value(
                    range_sharding_spec.lower_bound,
                    upper_bound
                )
        elif split_value is not None:
            #RANGE-style split: the caller-supplied split point must lie
            #within the shard's (lower_bound, upper_bound) interval.
            if not (SHARDING_DATATYPE_HANDLER[shard_mappings[0].type_name].\
                    is_valid_split_value(
                        split_value, range_sharding_spec.lower_bound,
                        upper_bound
                    )
                ):
                raise _errors.ShardingError(
                    _services_sharding.INVALID_LOWER_BOUND_VALUE %
                    (split_value, )
                )
        elif split_value is None:
            #Non-HASH splits require an explicit split point.
            raise _errors.ShardingError(
                _services_sharding.SPLIT_VALUE_NOT_DEFINED
            )

    #Ensure that the group does not already contain a shard.
    if Shards.lookup_shard_id(destn_group_id) is not None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_MOVE_DESTINATION_NOT_EMPTY %
            (destn_group_id, )
        )

    #Fetch the group information for the source shard that
    #needs to be moved.
    source_shard = Shards.fetch(shard_id)
    if source_shard is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_NOT_FOUND % (shard_id, ))

    #Fetch the group_id and the group that hosts the source shard.
    source_group_id = source_shard.group_id

    destn_group = Group.fetch(destn_group_id)
    if destn_group is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_NOT_FOUND % (destn_group_id, ))

    if not update_only:
        # Check if the source server has backup privileges.
        source_group = Group.fetch(source_group_id)
        server = _services_utils.fetch_backup_server(source_group)
        server.user = backup_user
        server.passwd = backup_passwd
        _backup.MySQLDump.check_backup_privileges(server)

        # Check if the destination server has restore privileges.
        destination_group = Group.fetch(destn_group_id)
        server = MySQLServer.fetch(destination_group.master)
        server.user = restore_user
        server.passwd = restore_passwd
        _backup.MySQLDump.check_restore_privileges(server)

        #Full re-shard: copy the data, starting with a backup of the
        #source shard.
        _events.trigger_within_procedure(
            BACKUP_SOURCE_SHARD, shard_id, source_group_id, destn_group_id,
            split_value, prune_limit, cmd, update_only
        )
    else:
        #Metadata-only operation: skip backup/restore and go straight to
        #switching the sharding definition.
        _events.trigger_within_procedure(
            SETUP_RESHARDING_SWITCH, shard_id, source_group_id,
            destn_group_id, split_value, prune_limit, cmd, update_only
        )