def get_sql_analyse_dataset_info(host, params):
    """Poll the SQL-analyse result endpoint and return the dataset output fields.

    Kicks off an analyse via get_sql_analyse_statement_id, then polls
    /api/datasets/sql/analyzeresult until the statement leaves the
    waiting/running states (at most 100 polls).

    :param host: service base URL, e.g. 'http://ip:port'
    :param params: payload forwarded to the analyse-init endpoint
    :return: the "content" field of the analyse result on success, else None
    """
    sql_analyse_statement_id = get_sql_analyse_statement_id(host, params)
    # BUG FIX: the URL literal had a leading space; requests happens to
    # lstrip it, but it is removed here for correctness.
    url = '%s/api/datasets/sql/analyzeresult?statementId=%s' % (
        host, sql_analyse_statement_id)
    res = requests.get(url=url, headers=get_headers(host))
    print(res.text)
    count_num = 0
    # Busy-poll while the statement is still queued or executing.
    while 'waiting' in res.text or 'running' in res.text:
        print('再次查询前', res.text)
        res = requests.get(url=url, headers=get_headers(host))
        count_num += 1
        if count_num == 100:  # give up after 100 polls
            return None
        print('再次查询后', res.text)
    # res.text is a JSON string at this point.
    print(res.text)
    if '"statement":"available"' in res.text:
        text_dict = json.loads(res.text)
        return text_dict["content"]
    print('获取数据集输出字段失败')
    return None
def create_flink_exe():
    """Create one rtcflow execution per payload from data_for_exe().

    :return: list of execution ids reported by the server
    """
    created_ids = []
    for payload in data_for_exe():
        result = requests.post(url=host_for_url + "/api/executions/rtcflow",
                               json=payload,
                               headers=get_headers(host_for_url))
        print(host_for_url + "/api/executions/rtcflow",
              get_headers(host_for_url))
        print(result.status_code, result.text)
        created_ids.append(dict_res(result.text)["id"])
    return created_ids
def deal_request_method():
    """Walk the case sheet row by row and dispatch each case by HTTP method.

    Reads the method/url/data/keyword columns for every data row,
    upper-cases the method and routes to the matching
    *_request_result_check helper, which writes results into column 8.
    Saves the workbook once all rows are processed.
    """
    for row_no in range(2, all_rows + 1):
        request_method = case_table_sheet.cell(row=row_no, column=4).value
        request_url = case_table_sheet.cell(row=row_no, column=5).value
        raw_data = case_table_sheet.cell(row=row_no, column=6).value
        request_data = deal_parameters(raw_data)
        key_word = case_table_sheet.cell(row=row_no, column=3).value
        if not request_method:
            print('第 %d 行请求方法为空' % row_no)
            continue
        # Normalise so the sheet may spell the method in any case.
        method = request_method.upper()
        if method == 'POST':
            post_request_result_check(row=row_no, column=8, url=request_url,
                                      headers=get_headers(), data=request_data,
                                      table_sheet_name=case_table_sheet)
        elif method == 'GET':
            get_request_result_check(url=request_url, headers=get_headers(),
                                     data=request_data,
                                     table_sheet_name=case_table_sheet,
                                     row=row_no, column=8)
        elif method == 'PUT':
            # NOTE(review): PUT is the only branch not passed headers= —
            # presumably the helper fetches them itself; confirm.
            put_request_result_check(url=request_url, row=row_no,
                                     data=request_data,
                                     table_sheet_name=case_table_sheet,
                                     column=8)
        elif method == 'DELETE':
            delete_request_result_check(request_url, request_data,
                                        table_sheet_name=case_table_sheet,
                                        row=row_no, column=8,
                                        headers=get_headers())
        else:
            print('请求方法%s不在处理范围内' % request_method)
    # Persist the per-case results after the full pass.
    case_table.save(ab_dir("api_cases.xlsx"))
def test_case01(self):
    """--正常创建DBdataset,选择已存在的schema属性为true--"""
    query = 'select id from merce_schema where name = "gbj_schema"'
    try:
        rows = ms.ExecuQuery(query)
    except Exception as err:
        raise err
    else:
        # Reference the pre-existing schema by the id found in the DB.
        payload = {
            "name": self.dataset_name,
            "expiredPeriod": 0,
            "storage": "JDBC",
            "storageConfigurations": self.storage,
            "schema": {"id": rows[0]["id"]},
            "owner": "2059750c-a300-4b64-84a6-e8b086dbfd42",
            "resource": {"id": "39386f75-9b28-43a6-a6bf-bd5e0e85d437"},
        }
        res = requests.post(url=self.create_dataset_url,
                            headers=get_headers(HOST_189),
                            data=json.dumps(payload))
        self.assertEqual(res.status_code, 201, 'DB-dataset创建失败')
def test_case01(self):
    """查询系统服务状态"""
    response = requests.get(url=query_component_status_url,
                            headers=get_headers(HOST_189))
    print(response.text)
    # A 200 response means the system services are up.
    self.assertEqual(response.status_code, 200,
                     '系统服务状态接口响应状态码不是200,服务异常')
def test_query_rule_buildIn(self):
    """查询内建规则"""
    query_body = {
        "fieldList": [{"fieldName": "buildType",
                       "fieldValue": "Builtin",
                       "comparatorOperator": "EQUAL"}],
        "sortObject": {"field": "lastModifiedTime",
                       "orderDirection": "DESC"},
        "offset": 0,
        "limit": 8,
    }
    response = requests.post(url=self.rule_query_url,
                             headers=get_headers(), json=query_body)
    self.assertEqual(200, response.status_code,
                     '查询内建规则失败,失败原因%s' % response.text)
    body = response.json()
    # Only inspect content when the server reports at least one match.
    if body["totalElements"]:
        self.assertIsNotNone(body["content"], '内建规则查询结果为空')
def test_create_rule_SQL(self):
    """创建Custom - SQL规则"""
    # Random suffix keeps repeated runs from colliding on the name.
    rule_name = "rule_for_SQL_students_copy" + str(
        random.randint(0, 999999999))
    payload = {
        "aggType": "None",
        "buildType": "Custom",
        "customType": "SQL",
        "dataScope": "Field",
        "fieldValueType": "Number",
        "ruleOption": {"paramsMap": {}},
        "ruleClass": "",
        "name": rule_name,
        "customValue": "$grade > 80",
    }
    response = requests.post(url=self.create_rule_url,
                             headers=get_headers(), json=payload)
    self.assertEqual(201, response.status_code,
                     '创建规则接口调用失败,失败原因%s' % response.text)
    self.assertIsNotNone(response.json()["id"], '规则创建成功后未返回规则id')
    # Hand the new rule id back so other cases can chain on it.
    return response.json()["id"]
def test_query_zdaf_by_name(self):
    """按照分析模板名称查询分析任务"""
    keywords = 'api_test_use'
    query_body = {
        "fieldList": [{"fieldName": "name",
                       "fieldValue": "%api_test_use%",
                       "comparatorOperator": "LIKE"}],
        "sortObject": {"field": "lastModifiedTime",
                       "orderDirection": "DESC"},
        "offset": 0,
        "limit": 8,
    }
    response = requests.post(url=self.query_zdaf,
                             headers=get_headers(host), json=query_body)
    self.assertEqual(200, response.status_code, '分析任务查询接口调用失败')
    content = response.json()["content"]
    # Only verify the match when the query returned anything at all.
    if content:
        self.assertIn(keywords, content[0]["name"],
                      '查询得到的分析任务name中没有包含查询关键字keyword')
def test_case01(self):
    """创建schedulers,单次执行"""
    scheduler_name = 'api_auto_create_schedulers_once' + str(
        random.randint(0, 99999))
    flow_table = load_workbook(abs_dir("flow_dataset_info.xlsx"))
    # BUG FIX: get_sheet_by_name() is deprecated in openpyxl; index the
    # workbook directly instead.
    info_sheet = flow_table["flow_info"]
    flow_id = info_sheet.cell(row=2, column=2).value
    flow_name = info_sheet.cell(row=2, column=3).value
    data = {
        "name": scheduler_name,
        "flowId": flow_id,
        "flowName": flow_name,
        "flowType": 'dataflow',
        "schedulerId": "once",  # "once" = run a single time at startTime
        "configurations": {
            "startTime": get_time(),
            "arguments": [],
            "cron": "once",
            "properties": []
        }
    }
    res = requests.post(url=create_scheduler_url, headers=get_headers(),
                        json=data)
    print(res.status_code, res.text)
    self.assertEqual(res.status_code, 201,
                     '创建单次执行的scheduler失败: %s' % res.text)
    # Give the backend a moment to register the scheduler.
    time.sleep(5)
def test_query_rule_KeyWords(self):
    """根据关键字students查询规则"""
    query_body = {
        "fieldList": [{"fieldName": "name",
                       "fieldValue": "%students%",
                       "comparatorOperator": "LIKE"}],
        "sortObject": {"field": "lastModifiedTime",
                       "orderDirection": "DESC"},
        "offset": 0,
        "limit": 8,
    }
    response = requests.post(url=self.rule_query_url,
                             headers=get_headers(), json=query_body)
    self.assertEqual(200, response.status_code,
                     'SQL规则查询失败,失败原因%s' % response.text)
    body = response.json()
    if body["totalElements"]:
        self.assertIsNotNone(body["content"], 'SQL规则查询结果为空')
    # Every returned rule name must contain the query keyword.
    for entry in body["content"]:
        self.assertIn('students', entry["name"],
                      '按照关键字查询规则时,返回的查询结果中,name未包含查询关键字')
def test_case01(self):
    """根据scheduler name模糊查询"""
    keyword = "%student%"
    query_body = {
        "fieldList": [{"fieldName": "name",
                       "fieldValue": keyword,
                       "comparatorOperator": "LIKE"}],
        "sortObject": {"field": "lastModifiedTime",
                       "orderDirection": "DESC"},
        "offset": 0,
        "limit": 8,
    }
    # LIKE value with the surrounding '%' wildcards stripped.
    bare_keyword = query_body["fieldList"][0]["fieldValue"][1:-1]
    res = requests.post(url=self.query_scheduler_url,
                        headers=get_headers(), data=json.dumps(query_body))
    results = dict_res(res.text)
    self.assertEqual(res.status_code, 200, "查询失败")
    # dict-format the first hit, then compare its name with the keyword.
    first_hit = dict_res(results["content"][0])
    self.assertIn(bare_keyword, first_hit["name"],
                  "查询结果中scheduler的name和查询关键词name不一致")
def test_case13(self):
    """创建dataset, storage非法"""
    # (Removed an unused timestamped local name; the payload uses
    # self.dataset_name.)
    schema_info = schema()
    data = {
        "name": self.dataset_name,
        "schema": schema_info,
        "storage": "HDF",  # deliberately invalid storage type
        "expiredPeriod": 0,
        "storageConfigurations": self.storageConfigurations,
        "sliceTime": "",
        "sliceType": "H",
        "owner": owner,
        "resource": dataset_resource
    }
    res = requests.post(url=self.create_dataset_url,
                        headers=get_headers(host), data=json.dumps(data))
    try:
        err = json.loads(res.text)
        err = json.loads(err["err"])
        err_code = int(err["list"][0]["code"])
    except (ValueError, KeyError, IndexError, TypeError):
        # BUG FIX: the original broad except also swallowed the assertion
        # below, so this case could never fail; now a parse problem fails
        # the test explicitly.
        self.fail('测试用例--创建dataset, storage非法--执行失败')
    else:
        self.assertEqual(err_code, 903, '创建dataset, storage非法时err_code不正确')
def test_case03(self):
    """创建schema时name参数的值为空"""
    payload = {
        "name": "",
        "fields": [{"name": "id", "type": "int"}],
        "resource": {"id": "9123ca72-ebd1-422b-b8b0-e150b7c69dc5"},
    }
    res = requests.post(url=self.create_schema_url,
                        headers=get_headers(host), data=json.dumps(payload))
    # The error detail is a JSON string nested inside the "err" field.
    body = json.loads(res.text)
    nested_err = json.loads(body['err'])
    err_code = int(nested_err["list"][0]["code"])
    message = nested_err["list"][0]["message"]
    self.assertEqual(err_code, 902, "错误message为%s" % message)
def test_case08(self):
    """创建dataset, resource参数错误, resource 为dataset的resource"""
    # (Removed an unused timestamped local name; the payload uses
    # self.dataset_name.)
    schema_info = schema()
    data = {
        "name": self.dataset_name,
        "schema": schema_info,
        "storage": "HDFS",
        "expiredPeriod": 0,
        "storageConfigurations": self.storageConfigurations,
        "sliceTime": "",
        "sliceType": "H",
        "owner": owner,
        # Wrong on purpose: a schema resource where a dataset one belongs.
        "resource": schema_resource
    }
    res = requests.post(url=self.create_dataset_url,
                        headers=get_headers(host), data=json.dumps(data))
    err = json.loads(res.text)
    err_message = err["err"].strip()
    self.assertEqual(err_message, 'dataset resource id is wrong',
                     '创建dataset, resource参数错误时err message不正确')
def test_case04(self):
    """--创建HDFS dataset, name参数值为空--"""
    schema_info = schema()
    payload = {
        "name": "",
        "schema": schema_info,
        "storage": "HDFS",
        "expiredPeriod": 0,
        "storageConfigurations": self.storageConfigurations,
        "sliceTime": "",
        "sliceType": "H",
        "owner": "2059750c-a300-4b64-84a6-e8b086dbfd42",
        "resource": {"id": "39386f75-9b28-43a6-a6bf-bd5e0e85d437"},
    }
    res = requests.post(url=self.create_dataset_url,
                        headers=get_headers(host), data=json.dumps(payload))
    # The error code lives in JSON nested inside the "err" field.
    outer = json.loads(res.text)
    nested = json.loads(outer["err"])
    err_code = int(nested["list"][0]["code"])
    self.assertEqual(err_code, 902, '创建HDFS dataset, name参数值为空时err_code错误')
def create_new_scheduler(self):
    """
    批量创建scheduler, 并返回scheduler_id_list, 供get_execution_info(self)调用
    :return: scheduler_id_list
    """
    print("------开始创建任务------")
    from basic_info.url_info import create_scheduler_url
    scheduler_id_list = []
    scheduler_number = 1
    for data in self.data_for_create_scheduler():
        res = requests.post(url=create_scheduler_url,
                            headers=get_headers(host), json=data)
        print('第%d 个scheduler' % scheduler_number)
        scheduler_number += 1
        time.sleep(2)  # pace the requests so the server isn't flooded
        if res.status_code == 201 and res.text:
            scheduler_id_format = dict_res(res.text)
            try:
                scheduler_id = scheduler_id_format["id"]
            except KeyError as e:
                print("scheduler_id_format中存在异常%s" % e)
            else:
                scheduler_id_list.append(scheduler_id)
        else:
            # BUG FIX: data["flowid"] raised KeyError in this failure path
            # when the payload spells the key differently (other payloads
            # in this file use "flowId"); .get() keeps the log line from
            # crashing the whole loop.
            print("flow: %s scheduler创建失败" % data.get("flowid"))
    print("------create_new_scheduler(self)执行结束, 返回scheduler_id_list------\n")
    print('scheduler_id_list', scheduler_id_list)
    return scheduler_id_list
def test_case04(self):
    """根据flowtype-streamflow查询"""
    query_body = {
        "fieldList": [{"fieldName": "flowType",
                       "fieldValue": "streamflow",
                       "comparatorOperator": "LIKE"}],
        "sortObject": {"field": "lastModifiedTime",
                       "orderDirection": "DESC"},
        "offset": 0,
        "limit": 8,
    }
    # The flow type the query filters on (kept for readability).
    flow_type = query_body["fieldList"][0]["fieldValue"]
    res = requests.post(url=self.query_scheduler_url,
                        headers=get_headers(), data=json.dumps(query_body))
    # Parse the body to ensure it is valid JSON before asserting.
    parsed = dict_res(res.text)
    self.assertEqual(res.status_code, 200, "查询失败")
def test_case05(self):
    """flowtype+name组合查询scheduler"""
    query_body = {
        "fieldList": [
            {"fieldName": "name", "fieldValue": "%gbj%",
             "comparatorOperator": "LIKE"},
            {"fieldName": "flowType", "fieldValue": "workflow",
             "comparatorOperator": "EQUAL"},
        ],
        "sortObject": {"field": "lastModifiedTime",
                       "orderDirection": "DESC"},
        "offset": 0,
        "limit": 8,
    }
    # Filter values spelled out for readability.
    name_keyword = query_body["fieldList"][0]["fieldValue"][1:-1]
    flow_type = query_body["fieldList"][1]["fieldValue"]
    res = requests.post(url=self.query_scheduler_url,
                        headers=get_headers(), data=json.dumps(query_body))
    self.assertEqual(200, res.status_code,
                     "flowtype+name组合查询scheduler失败:%s" % res.text)
def test_case04(self):
    """创建flow时, flow type非法"""
    flow_name = time.strftime("%Y%m%d%H%M%S",
                              time.localtime()) + 'streamflow'
    data = {
        "name": flow_name,
        "flowType": "ttttt",  # deliberately invalid flow type
        "resource": {"id": "8cb5f399-ec5d-4236-98d3-88f0d1d19d2b"},
        "steps": [],
        "links": []
    }
    res = requests.post(url=self.create_flow_url, headers=get_headers(host),
                        data=json.dumps(data))
    # The error code is JSON nested inside the "err" field.
    err = json.loads(res.text)
    err_dict = json.loads(err["err"])
    err_code = int(err_dict["list"][0]["code"])
    # BUG FIX: the assertion message previously said "name为空" although
    # this case exercises an illegal flow type.
    self.assertEqual(err_code, 903, '创建flow时, flow type非法时的err_code不正确')
    time.sleep(3)
def test_query_zdaf_all(self):
    """查询所有的分析任务"""
    from basic_info.url_info import query_zdaf
    query_body = {
        "fieldList": [],
        "sortObject": {"field": "lastModifiedTime",
                       "orderDirection": "DESC"},
        "offset": 0,
        "limit": 8,
    }
    response = requests.post(url=query_zdaf, headers=get_headers(host),
                             json=query_body)
    # The 8 task ids the API returned, sorted for a stable comparison.
    content_ids = sorted(entry["id"] for entry in response.json()["content"])
    self.assertEqual(200, response.status_code, '分析任务查询接口调用失败')
    # Latest 8 task ids straight from the database, sorted the same way.
    zdaf_data_limit8 = 'select id from merce_zdaf where flow_status != "PREPARING" order by last_modified_time desc limit 8'
    zdaf8 = ms.ExecuQuery(zdaf_data_limit8)
    zdaf_ids = sorted(row[key] for row in zdaf8 for key in row)
    self.assertEqual(content_ids, zdaf_ids,
                     '分析任务查询接口返回的查询结果和数据库数据不一致')
def test_case03(self):
    """正常创建flow-streamflow"""
    flow_name = time.strftime("%Y%m%d%H%M%S",
                              time.localtime()) + 'streamflow'
    payload = {
        "name": flow_name,
        "flowType": "streamflow",
        "resource": {"id": "8cb5f399-ec5d-4236-98d3-88f0d1d19d2b"},
        "steps": [],
        "links": []
    }
    res = requests.post(url=self.create_flow_url, headers=get_headers(),
                        data=json.dumps(payload))
    response_text = json.loads(res.text)
    # Cross-check the created flow against its database record.
    sql = 'select id, flow_type from merce_flow where name = "%s"' % flow_name
    flow_info = ms.ExecuQuery(sql)
    db_id = flow_info[0]["id"]
    db_flow_type = flow_info[0]["flow_type"]
    # NOTE(review): other creation cases in this suite expect 201 —
    # confirm 200 is really what this endpoint returns on success.
    self.assertEqual(res.status_code, 200, 'flow创建后返回的status_code不正确')
    self.assertEqual(response_text["id"], db_id, 'flow创建后查询ID不相等')
    self.assertEqual(response_text["flowType"], db_flow_type,
                     'flow创建后flow_type不一致')
    time.sleep(3)
def test_case01(self):
    """使用id查询"""
    try:
        sql = 'select id, name from merce_dataset order by create_time desc limit 1'
        newest = ms.ExecuQuery(sql)
        dataset_id = newest[0]["id"]
        dataset_name = newest[0]["name"]
    except Exception as err:
        raise err
    else:
        query_url = '%s/api/datasets/%s?tenant=%s' % (HOST_189, dataset_id,
                                                      tenant_id)
        body = json.loads(requests.get(url=query_url,
                                       headers=get_headers()).text)
        # API result must match what the database says about the dataset.
        self.assertEqual(
            {"id": dataset_id, "name": dataset_name},
            {"id": body["id"], "name": body["name"]},
            '两次查询得到的dataset id和name不一致,查询失败')
def test_case04(self):
    """根据名称查询流程"""
    # The endpoint returns no body, so only the status code is checked.
    res = requests.get(url=query_flowname_url, headers=get_headers())
    self.assertEqual(res.status_code, 204, 'flow根据name查询返回的status_code不正确')
    time.sleep(3)
def test_case06(self):
    """query:根据上次修改时间查询全部的scheduler"""
    window_end = get_time()  # lastModifiedTime window ends now
    # Window starts ten days earlier (epoch-milliseconds arithmetic).
    window_start = get_time() - (10 * 24 * 3600 * 1000)
    query_body = {
        "fieldList": [
            {"fieldName": "lastModifiedTime", "fieldValue": window_start,
             "comparatorOperator": "GREATER_THAN"},
            {"fieldName": "lastModifiedTime", "fieldValue": window_end,
             "comparatorOperator": "LESS_THAN"},
        ],
        "sortObject": {"field": "lastModifiedTime",
                       "orderDirection": "DESC"},
        "offset": 0,
        "limit": 8,
    }
    res = requests.post(url=self.query_scheduler_url,
                        headers=get_headers(), data=json.dumps(query_body))
    results = dict_res(res.text)
    newest_time = results["content"][0]["lastModifiedTime"]
    # The newest hit's lastModifiedTime must fall inside the query window.
    self.assertEqual(window_end > newest_time > window_start, True,
                     "查询结果的lastModifiedTime不包含在起始时间内,查询结果不正确")
def get_step_output_ensure_statementId(HOST, params):
    """Init a dataflow step validation and return its statementId.

    :param HOST: service base URL
    :param params: request body for the validateinit endpoint
    :return: the statementId from the response, or None when it is absent
             or the body cannot be parsed
    """
    url = '%s/api/steps/validateinit/dataflow' % HOST
    res = requests.post(url=url, headers=get_headers(HOST), data=params)
    try:
        # Parse once instead of twice; a missing key or unparseable body
        # falls through to None (the original used a bare except:).
        statement_id = dict_res(res.text)["statementId"]
    except Exception:
        return None
    print(statement_id)
    return statement_id
def test_query_zmod_model_detail(self):
    """查看任务关联模板详情"""
    from basic_info.url_info import query_zmod_model_detail_url
    detail = requests.get(url=query_zmod_model_detail_url,
                          headers=get_headers(host))
    self.assertEqual(200, detail.status_code, '查看任务关联模板详情接口调用失败')
    # The detail id must match the model id the suite recorded earlier.
    self.assertEqual(zmod_id[0], detail.json()["id"], '任务详情查询结果中id不一致')
def preview_result_flow_use(host, datasetId, statementID):
    """Poll the dataset preview-result endpoint and return its content.

    :param host: service base URL
    :param datasetId: dataset whose preview result is fetched
    :param statementID: statement id from the preview init call; must be int
    :return: the preview "content" on success, 0 when the result cannot be
             parsed, None when statementID is not an int
    """
    if not isinstance(statementID, int):
        # BUG FIX: the original printed the literal '%s' because the
        # format argument was missing.
        print('%s数据集返回的statementID为空' % datasetId)
        return None
    url = "%s/api/datasets/%s/previewresult?statementId=%d&clusterId=cluster1" % (
        host, datasetId, statementID)
    res = requests.get(url=url, headers=get_headers(host))
    print(res.url)
    print('%s数据集preview_result:%s' % (datasetId, res.text))
    # Busy-poll while the statement is still queued or executing.
    while 'waiting' in res.text or 'running' in res.text:
        res = requests.get(url=url, headers=get_headers(host))
    try:
        dataset_result = dict_res(res.text)['content']
    except Exception:
        # Unparseable / content-less result: keep the original sentinel.
        return 0
    else:
        print('%s数据集dataset_result: %s ' % (datasetId, dataset_result))
        return dataset_result
def get_sql_analyse_statement_id(host, param):
    """POST the SQL-analyse init endpoint and return its statementId.

    :param host: service base URL
    :param param: request body for the analyse-init endpoint
    :return: statementId string, or None when absent / unparseable
    """
    # BUG FIX: leading space removed from the URL literal (requests only
    # happened to tolerate it).
    url = '%s/api/datasets/sql/analyzeinit' % host
    res = requests.post(url=url, headers=get_headers(host), data=param)
    try:
        return json.loads(res.text)['statementId']
    except (KeyError, ValueError):
        # ValueError also covers json.JSONDecodeError on a non-JSON body,
        # which the original KeyError-only handler let escape.
        return None
def test_case02(self):
    """停用计划"""
    payload = [scheduler_id]
    res = requests.post(url=disable_scheduler_url, headers=get_headers(),
                        data=json.dumps(payload))
    # 204 No Content signals the scheduler was disabled.
    self.assertEqual(res.status_code, 204, msg="停用计划接口调用失败")
def statementId_no_dataset(host, param):
    """Init a dataset preview (no dataset id) and return its statementId.

    :param host: service base URL
    :param param: JSON body for the preview-init endpoint
    :return: statementId string, or None when absent / unparseable
    """
    url = '%s/api/datasets/new/previewinit?tenant=%s' % (host, tenant_id_189)
    res = requests.post(url=url, headers=get_headers(host), json=param)
    try:
        return json.loads(res.text)['statementId']
    except (KeyError, ValueError):
        # ValueError also covers json.JSONDecodeError on a non-JSON body,
        # which the original KeyError-only handler let escape.
        return None