def test_target_slave_failover(self, config, instance_data, expected_data):
    """Trigger a slave failover on the target replica set while a cluster
    instance is mid-resize, then verify the instance recovers to Running
    with the expected post-resize configuration.
    """
    instance = instance_data["modify_cluster_instance"]
    expected_object = baseCheckPoint(
        expected_data[instance["cacheInstanceClass"]],
        instance["instance_password"])
    client, _, instanceId = create_validate_instance(
        config, instance, expected_object)
    shard_num = instance["target_shardNumber"]
    resp = reset_class(config, instanceId,
                       instance["target_cacheInstanceClass"], client,
                       shard_num)
    assertRespNotNone(resp)
    # Checkpoint switches to the target class; side/rs_type reflect the
    # expected state after the resize flips to the "b" replica set.
    expected_object = baseCheckPoint(
        expected_data[instance["target_cacheInstanceClass"]],
        instance["instance_password"])
    expected_object.side = 1
    expected_object.current_rs_type = "b"
    expected_object.next_rs_type = "a"
    # Wait (up to 20 min) for spaceStatus to become DoingCopyfrom.
    for i in range(0, 1200):
        resp_status = get_space_status(instanceId, config)
        if resp_status == "DoingCopyfrom":
            break
        sleep(1)
    # Trigger a slave failover on the next (target) replica set.
    next_rs_type = get_next_rs_type(instanceId, config)
    redisNum = get_redis_num(instanceId, config, next_rs_type)
    redisId = get_shard_id(redisNum, 1)[0]
    replicasetName = instanceId + "-slave-" + next_rs_type
    dockerName = replicasetName + "-" + str(redisId)
    oldRunTime = get_docker_running_time(config, instanceId,
                                         replicasetName, dockerName)
    status = trigger_docker_failover("redis", config, instanceId,
                                     config["region"],
                                     docker_name=dockerName)
    assert status == 200
    assert wait_docker_run_time_change(config, instanceId, oldRunTime,
                                       replicasetName, dockerName)
    # Manually invoke the task-recovery interface.
    # FIX: the printed example JSON was missing the taskId placeholder
    # ('{"taskId": ,"isRollback":false}'); use the same "$taskId"
    # placeholder as the other failover tests in this file.
    print(
        "please run recover task interface.For example:curl http://127.0.0.1:1818/reloadTask -d '{\"taskId\":\"$taskId\",\"isRollback\":false}'")
    sleep(10)
    # Wait (up to 1 h) for the instance to return to Running.
    for i in range(0, 3600):
        if get_space_status(instanceId, config) == "Running":
            break
        sleep(1)
    assert check_admin_proxy_redis_configmap(instanceId, config,
                                             expected_object, shard_num)
def runCommandAndCheckResp(self):
    """Send the configured web command and validate its result.

    Returns the outcome of ``find_resp_error`` when no expected response
    is configured; otherwise returns True iff the parsed command result
    matches ``self.excepted_resp`` ignoring order.
    """
    resp = send_web_command(self.conf, self.instance_id, self.region,
                            self.command, self.client, self.token)
    assertRespNotNone(resp)
    # FIX: compare with None using `is`, not `==` (PEP 8).
    if self.excepted_resp is None:
        return find_resp_error(resp.result["commandResult"])
    result = proc_web_command_result(resp.result["commandResult"])
    # Order-insensitive comparison of the command output.
    return sorted(result) == sorted(self.excepted_resp)
def test_source_master_and_proxy_failover(self, config, instance_data, expected_data):
    """Resize a standard instance, trigger master and proxy failovers while
    the resize is in flight, and verify recovery back to Running."""
    spec = instance_data["modify_standard_instance"]
    checkpoint = baseCheckPoint(expected_data[spec["cacheInstanceClass"]],
                                spec["instance_password"])
    client, _, instance_id = create_validate_instance(config, spec, checkpoint)
    target_shards = spec["target_shardNumber"]
    resp = reset_class(config, instance_id,
                       spec["target_cacheInstanceClass"], client,
                       target_shards)
    assertRespNotNone(resp)
    # After the resize the checkpoint reflects the target class and the
    # flipped replica-set sides.
    checkpoint = baseCheckPoint(
        expected_data[spec["target_cacheInstanceClass"]],
        spec["instance_password"])
    checkpoint.side = 1
    checkpoint.current_rs_type = "b"
    checkpoint.next_rs_type = "a"
    # Wait (up to 10 min) for the resize job to start.
    for _ in range(600):
        if get_job(instance_id, config, str(resp.request_id))["code"] == 0:
            break
        sleep(1)
    # Trigger a master failover on the current (source) replica set.
    rs_type = get_current_rs_type(instance_id, config)
    redis_id = get_shard_id(get_redis_num(instance_id, config, rs_type), 1)[0]
    master_rs = instance_id + "-master-" + rs_type
    master_docker = master_rs + "-" + str(redis_id)
    master_runtime = get_docker_running_time(config, instance_id, master_rs,
                                             master_docker)
    assert trigger_docker_failover("redis", config, instance_id,
                                   config["region"],
                                   docker_name=master_docker) == 200
    # Trigger a proxy failover as well.
    proxy_id = get_shard_id(get_proxy_num(instance_id, config), 1)[0]
    proxy_rs = instance_id + "-proxy"
    proxy_docker = proxy_rs + "-" + str(proxy_id)
    proxy_runtime = get_docker_running_time(config, instance_id, proxy_rs,
                                            proxy_docker)
    assert trigger_docker_failover("proxy", config, instance_id,
                                   config["region"], id=proxy_id) == 200
    # Wait for the redis failover, then the proxy failover, to finish.
    assert wait_docker_run_time_change(config, instance_id, master_runtime,
                                       master_rs, master_docker)
    assert wait_docker_run_time_change(config, instance_id, proxy_runtime,
                                       proxy_rs, proxy_docker)
    # Manually invoke the task-recovery interface.
    print("please run recover task interface.For example:curl http://127.0.0.1:1818/reloadTask -d '{\"taskId\":\"$taskId\",\"isRollback\":false}'")
    sleep(10)
    # Wait (up to 1 h) for the instance to return to Running.
    for _ in range(3600):
        if get_space_status(instance_id, config) == "Running":
            break
        sleep(1)
    assert check_admin_proxy_redis_configmap(instance_id, config, checkpoint,
                                             target_shards)
def test_modify_passwd_and_admin_failover(self, config, instance_data, expected_data):
    """Reset the instance password twice, triggering an admin failover after
    each reset, and verify the new password survives each failover.
    """
    instances = instance_data["create_cluster_specified"]
    expected_object = baseCheckPoint(
        expected_data[instances[0]["cacheInstanceClass"]],
        instances[0]["instance_password"])
    client, _, instanceId = create_validate_instance(
        config, instances[0], expected_object)
    # First password change.
    newPasswd = "2qaz2WSX"
    resp = reset_password(config, instanceId, newPasswd, client)
    assertRespNotNone(resp)
    expected_object.password = get_sha256_pwd(newPasswd)
    replicasetName = instanceId + "-admin"
    dockerName = replicasetName + "-0"
    oldRunTime = get_docker_running_time(config, instanceId,
                                         replicasetName, dockerName)
    status = trigger_docker_failover("admin", config, instanceId,
                                     config["region"])
    assert status == 200
    assert wait_docker_run_time_change(config, instanceId, oldRunTime,
                                       replicasetName, dockerName)
    assert check_admin_proxy_redis_configmap(instanceId, config,
                                             expected_object,
                                             instances[0]["shardNumber"])
    # FIX: was a Python 2 `print` statement; the rest of the file uses the
    # print() function form. Output is identical under py2 and py3.
    print("admin failover success")
    sleep(10)
    # Second password change followed by another admin failover.
    newPasswd = "3qaz2WSX"
    resp = reset_password(config, instanceId, newPasswd, client)
    assertRespNotNone(resp)
    expected_object.password = get_sha256_pwd(newPasswd)
    oldRunTime = get_docker_running_time(config, instanceId,
                                         replicasetName, dockerName)
    status = trigger_docker_failover("admin", config, instanceId,
                                     config["region"])
    assert status == 200
    assert wait_docker_run_time_change(config, instanceId, oldRunTime,
                                       replicasetName, dockerName)
    assert check_admin_proxy_redis_configmap(instanceId, config,
                                             expected_object,
                                             instances[0]["shardNumber"])
    print("admin failover success")
def test_create_cacheAnalysis(self, config, instance_data, expected_data):
    """Kick off a cache key analysis and verify spaceStatus becomes
    KeyAnalysising."""
    spec = instance_data["create_standard_specified"][0]
    checkpoint = baseCheckPoint(expected_data[spec["cacheInstanceClass"]],
                                spec["instance_password"])
    client, _, instance_id = create_validate_instance(config, spec,
                                                      checkpoint)
    resp = create_cache_analysis(config, instance_id, client)
    assertRespNotNone(resp)
    checkpoint.space_status = "KeyAnalysising"
    assert check_admin_proxy_redis_configmap(instance_id, config, checkpoint,
                                             spec["shardNumber"])
def test_all_command(self, config, instance_data, expected_data):
    """Run a long blocking command (blpop) in a background thread while the
    instance is being resized, then verify it returns to Running.
    """
    instance = instance_data["modify_standard_instance"]
    expected_object = baseCheckPoint(
        expected_data[instance["cacheInstanceClass"]],
        instance["instance_password"])
    client, _, instanceId = create_validate_instance(config, instance,
                                                     expected_object)
    # Pre-fill ~0.8 GiB so the resize takes a measurable amount of time.
    write_data(config, instanceId, 1024 * 1024 * 1024 * 0.8,
               instance["instance_password"])
    shard_num = instance["target_shardNumber"]
    resp = reset_class(config, instanceId,
                       instance["target_cacheInstanceClass"], client=None,
                       shardNumber=shard_num)
    assertRespNotNone(resp)
    expected_object = baseCheckPoint(
        expected_data[instance["target_cacheInstanceClass"]],
        instance["instance_password"])
    expected_object.side = 1
    expected_object.current_rs_type = "b"
    expected_object.next_rs_type = "a"
    # Authenticate once to obtain a web-command token.
    resp = send_web_command(config, instanceId, config["region"],
                            "auth " + instance["instance_password"])
    token = resp.result["token"]
    # FIX: the thread was constructed but never started, so the blocking
    # blpop never actually ran during the resize. Start it as a daemon so
    # the 300s blpop cannot keep the process alive. (Also removed the
    # unused WebCommand instance, which shadowed the builtin `object`,
    # and the dead commented-out code.)
    worker = threading.Thread(
        target=send_web_command,
        args=(config, instanceId, config["region"],
              "blpop " + str(uuid.uuid1()) + " 300", None, token))
    worker.daemon = True
    worker.start()
    sleep(10)
    # Wait (up to 1 h) for the instance to return to Running.
    for i in range(0, 3600):
        if get_space_status(instanceId, config) == "Running":
            break
        sleep(1)
    assert check_admin_proxy_redis_configmap(instanceId, config,
                                             expected_object, shard_num)
def test_modify_cache_Analysis_time(self, config, instance_data, expected_data):
    """Modify the cache-analysis schedule window and verify it is stored
    and queryable."""
    spec = instance_data["create_standard_specified"][0]
    checkpoint = baseCheckPoint(expected_data[spec["cacheInstanceClass"]],
                                spec["instance_password"])
    client, _, instance_id = create_validate_instance(config, spec,
                                                      checkpoint)
    # Renamed from `time` to avoid shadowing the stdlib module name.
    analysis_window = "01:00-02:00 +0800"
    resp = modify_cache_analysis_time(config, instance_id, analysis_window,
                                      client)
    assertRespNotNone(resp)
    assert check_admin_proxy_redis_configmap(instance_id, config, checkpoint,
                                             spec["shardNumber"])
    resp = query_cache_analysis_time(config, instance_id)
    assert resp.result["time"] == analysis_window
def test_normal_resize(self, config, instance_data, expected_data):
    """Resize a cluster instance to its target class/shard count and wait
    for it to come back to Running."""
    spec = instance_data["modify_cluster_instance"]
    checkpoint = baseCheckPoint(expected_data[spec["cacheInstanceClass"]],
                                spec["instance_password"])
    client, _, instance_id = create_validate_instance(config, spec,
                                                      checkpoint)
    target_shards = spec["target_shardNumber"]
    resp = reset_class(config, instance_id,
                       spec["target_cacheInstanceClass"], client,
                       target_shards)
    assertRespNotNone(resp)
    # Post-resize expectations: target class, flipped replica-set sides.
    checkpoint = baseCheckPoint(
        expected_data[spec["target_cacheInstanceClass"]],
        spec["instance_password"])
    checkpoint.side = 1
    checkpoint.current_rs_type = "b"
    checkpoint.next_rs_type = "a"
    sleep(60)
    # Wait (up to 10 min) for the resize job to start.
    for _ in range(600):
        if get_job(instance_id, config, str(resp.request_id))["code"] == 0:
            break
        sleep(1)
    sleep(10)
    # Wait (up to 1 h) for the instance to return to Running.
    for _ in range(3600):
        if get_space_status(instance_id, config) == "Running":
            break
        sleep(1)
    assert check_admin_proxy_redis_configmap(instance_id, config, checkpoint,
                                             target_shards)
def runCommand(self):
    """Execute the configured web command and return its parsed result."""
    response = send_web_command(self.conf, self.instance_id, self.region,
                                self.command, self.client, self.token)
    assertRespNotNone(response)
    return proc_web_command_result(response.result["commandResult"])