class TokenCheck():
    """Detects expired-token responses and triggers a token refresh."""

    TOKEN_NOT_OK_VALUE = "Token parsing exception"
    TOKEN_NOT_OK_VALUE2 = "请求token失效"
    COOKIE_NOT_OK_VALUE = ""
    SUCCESS_LOGIN_CASE_ID = "login_01"

    def __init__(self):
        self.depent_data = DependentData()
        self.comtool = CommonUtil()

    def check_token_exception(self, response_body):
        """If the response body contains an expired-token marker, re-acquire
        the token. Returns False when no marker is found, True when the
        refresh succeeded, and (as in the original) implicitly None when the
        refresh failed."""
        body_text = str(response_body)
        markers = (TokenCheck.TOKEN_NOT_OK_VALUE, TokenCheck.TOKEN_NOT_OK_VALUE2)
        expired = any(self.comtool.is_contain(m, body_text) for m in markers)
        if not expired:
            return False
        print("token失效了,重新获取")
        if self.__reget_token():
            print("重新获取token成功")
            return True
        # NOTE: deliberate fall-through (returns None), matching the original.
        return None

    def __reget_token(self, loginid=None):
        """Re-run the login case to fetch and persist a fresh token."""
        case_id = loginid or TokenCheck.SUCCESS_LOGIN_CASE_ID
        return self.depent_data.force_runcase_by_caseid(case_id, token=True)
class OpValue():
    """Substitutes config-file variable values into request-data templates."""

    def __init__(self):
        self.getcfg = GetConf()
        self.comtool = CommonUtil()

    def replace_value(self, data):
        """Replace every variable placeholder found in *data* with its value
        from the excel-facing config file; placeholders whose lookup fails
        are left untouched. Returns the result as a string with single
        quotes normalized to double quotes (JSON-friendly)."""
        result = data
        for var_name in self.comtool.res_find(str(data)):  # all placeholders
            try:
                conf_value = self.getcfg.read_conf_value_toexcel(var_name)
                result = self.comtool.res_value_replace(var_name, conf_value, str(result))
            except Exception as e:
                # Unknown variable: report and keep going with the rest.
                print("替换excel的变量失败。 确认%s变量存在。跳过替换。" % var_name, e)
        return str(result).replace("'", '"')

    def save_value(self):
        # Placeholder, intentionally empty.
        pass
def __init__(self):
    # Collaborators: request runner, excel test-data reader, assertion
    # helper, mailer, and cookie/token handlers.
    self.run_method = RunMethod()
    self.get_data = GetData()
    self.com_util = CommonUtil()
    self.send_mail = SendMail()
    self.op_cookie = OperationCookie()
    self.op_token = OperationToken()
    self.token_check = TokenCheck()
    self.save_body_values = SaveBodyValue()
    self.fail_count = []   # row numbers of failed cases
    self.break_count = []  # row numbers that errored before execution
    self.pass_count = []   # row numbers of passed cases
class RunTest:
    """Excel-driven test runner: executes each flagged case row and writes
    pass/fail back into the sheet."""

    def __init__(self):
        self.run_method = RunMethod()
        self.data = GetSheetData()
        self.common_util = CommonUtil()

    def go_run(self):
        """Run every case row marked runnable.

        Returns:
            The last HTTP response obtained, or None when no row ran.
            (Fix: `res` was previously unbound when no case executed,
            raising UnboundLocalError at `return res`.)
        """
        res = None
        rows_count = self.data.get_case_lines()
        for i in range(1, rows_count):
            is_run = self.data.is_run(i)
            case_id = self.data.get_id(i)  # renamed: avoid shadowing builtin id()
            url = self.data.get_url(i)
            method = self.data.get_method(i)
            header = self.data.get_header(i)
            depend_id = self.data.get_depend_id(i)      # read but currently unused
            depend_data = self.data.get_depend_data(i)  # read but currently unused
            depend_key = self.data.get_depend_key(i)    # read but currently unused
            data = self.data.get_request_data(i)
            expect = self.data.get_expect(i)
            if is_run:
                res = self.run_method.main(method, url, header, data)
                # Pass when the expected substring appears in the response.
                if self.common_util.is_contain(expect, res):
                    self.data.write_result(i, 'pass')
                    print(u"测试通过")
                else:
                    self.data.write_result(i, res)
                    print(u"测试失败")
        return res
class RunTest(object):
    """Excel-driven API test runner with cookie handling and case dependency."""

    def __init__(self):
        self.runmethod = RunMethod()
        self.getdata = GetData()
        self.commonutil = CommonUtil()
        self.sendmail = SendMail()

    def go_on_run(self):
        """Execute every runnable case row: resolve dependent data, handle
        cookie read/write, run the request and record pass/fail.

        Fix: the original ended the loop with `else: return None`, which
        aborted the WHOLE run at the first row whose is_run flag was off.
        Non-runnable rows are now skipped with `continue`.
        """
        pass_count = []
        fail_count = []
        cookies = None
        rows = self.getdata.get_case_lines()
        for i in range(1, rows):
            is_run = self.getdata.get_is_run(i)
            if not is_run:
                continue  # skip this row instead of aborting the run
            url = self.getdata.get_url(i)
            is_depend = self.getdata.is_depend(i)
            request_method = self.getdata.get_request_method(i)
            expect = self.getdata.get_expect(i)
            is_cookie = self.getdata.is_cookie(i)
            is_header = self.getdata.is_header(i)
            data = self.getdata.get_data_for_json(i)
            print(data)
            depend_case = self.getdata.is_depend(i)
            if is_depend:
                # Run the dependency case and splice its value into our data.
                self.depend_data = DependentData(depend_case)
                field_depend = self.getdata.get_field_depend(i)
                data_depend = self.depend_data.get_data_for_key(i)
                data[field_depend] = data_depend
            if is_cookie == 'write':
                # First run just to capture and persist the cookie.
                res = self.runmethod.run_main(url, request_method, data)
                op_cookie = OperationCookie(json.loads(res))
                op_cookie.write_cookie()
            if is_cookie == 'yes':
                # Load a previously stored cookie and send it with the request.
                op_json = OperationJson('../dataconfig/cookie.json')
                cookie = op_json.get_data("apsid")
                cookies = {"apsid": cookie}
            res = self.runmethod.run_main(url, request_method, data, is_header, cookies)
            if self.commonutil.iscontain(expect, res):
                print("测试通过")
                self.getdata.write_result(i, "测试通过")
                pass_count.append(i)
            else:
                print(expect)
                print(res)
                print("测试失败")
                self.getdata.write_result(i, res)
                fail_count.append(i)
def run_gene_prioritization_pipeline(self): """ Runs data cleaning for gene_prioritization_pipeline. Args: NA. Returns: validation_flag: Boolean type value indicating if input data is valid or not. message: A message indicates the status of current check. """ # Checks user spreadsheet data and phenotype data if self.user_spreadsheet_df is None or self.phenotype_df is None: return False, logger.logging # Imputes na value on user spreadsheet data user_spreadsheet_df_imputed = SpreadSheet.impute_na(self.user_spreadsheet_df, option=self.run_parameters['impute']) if user_spreadsheet_df_imputed is None: return False, logger.logging # Checks if value of inputs satisfy certain criteria: see details in function validate_inputs_for_gp_fp user_spreadsheet_val_chked, phenotype_val_checked = CommonUtil.validate_inputs_for_gp_fp( user_spreadsheet_df_imputed, self.phenotype_df, self.run_parameters['correlation_measure']) if user_spreadsheet_val_chked is None or phenotype_val_checked is None: return False, logger.logging # Removes NA value and duplication on column and row name user_spreadsheet_df_checked = SpreadSheet.remove_dataframe_indexer_duplication(user_spreadsheet_val_chked) # Checks the validity of gene name to see if it can be ensemble or not user_spreadsheet_df_cleaned, map_filtered_dedup, mapping = SpreadSheet.map_ensemble_gene_name( user_spreadsheet_df_checked, self.run_parameters) if user_spreadsheet_df_cleaned is None or phenotype_val_checked is None: return False, logger.logging # Stores cleaned phenotype data (transposed) to a file, dimension: phenotype x sample IOUtil.write_to_file(phenotype_val_checked, self.run_parameters['phenotype_name_full_path'], self.run_parameters['results_directory'], '_ETL.tsv') IOUtil.write_to_file(user_spreadsheet_df_cleaned, self.run_parameters['spreadsheet_name_full_path'], self.run_parameters['results_directory'], '_ETL.tsv') # writes dedupped mapping between user_supplied_gene_name and ensemble name to a file 
IOUtil.write_to_file(map_filtered_dedup, self.run_parameters['spreadsheet_name_full_path'], self.run_parameters['results_directory'], '_MAP.tsv', use_index=True, use_header=False) # writes user supplied gene name along with its mapping status to a file IOUtil.write_to_file(mapping, self.run_parameters['spreadsheet_name_full_path'], self.run_parameters['results_directory'], '_User_To_Ensembl.tsv', use_index=False, use_header=True) logger.logging.append( 'INFO: Cleaned user spreadsheet has {} row(s), {} column(s).'.format( user_spreadsheet_df_cleaned.shape[0], user_spreadsheet_df_cleaned.shape[1])) logger.logging.append( 'INFO: Cleaned phenotype data has {} row(s), {} column(s).'.format(phenotype_val_checked.shape[0], phenotype_val_checked.shape[1])) return True, logger.logging
def run_general_clustering_pipeline(self):
    """
    Runs data cleaning for general_clustering_pipeline.

    Args:
        NA.

    Returns:
        validation_flag: Boolean type value indicating if input data is valid or not.
        message: A message indicates the status of current check.
    """
    if self.user_spreadsheet_df is None:
        return False, logger.logging

    # Checks intersection between user spreadsheet data and phenotype data
    # (phenotype is optional for this pipeline).
    phenotype_df_cleaned = None
    if self.phenotype_df is not None:
        phenotype_df_cleaned = CommonUtil.check_phenotype_intersection(self.phenotype_df,
                                                                       self.user_spreadsheet_df.columns.values)
        if phenotype_df_cleaned is None:
            logger.logging.append('ERROR: Phenotype is emtpy. Please provide a valid phenotype data.')
            return False, logger.logging

    logger.logging.append('INFO: Start to process user spreadsheet data.')

    # Checks if user spreadsheet contains na value and only real number
    user_spreadsheet_df_val_check = SpreadSheet.check_user_spreadsheet_data(self.user_spreadsheet_df,
                                                                            dropna_colwise=True,
                                                                            check_real_number=True,
                                                                            check_positive_number=True)
    if user_spreadsheet_df_val_check is None:
        return False, logger.logging

    # Drops rows whose header (index) is NA.
    user_spreadsheet_df_rm_na_header = SpreadSheet.remove_na_header(user_spreadsheet_df_val_check)
    if user_spreadsheet_df_rm_na_header is None:
        return False, logger.logging

    # Removes NA value and duplication on column and row name
    user_spreadsheet_df_cleaned = SpreadSheet.remove_dataframe_indexer_duplication(user_spreadsheet_df_rm_na_header)
    if user_spreadsheet_df_cleaned is None:
        return False, logger.logging

    IOUtil.write_to_file(user_spreadsheet_df_cleaned, self.run_parameters['spreadsheet_name_full_path'],
                         self.run_parameters['results_directory'], '_ETL.tsv')
    logger.logging.append(
        'INFO: Cleaned user spreadsheet has {} row(s), {} column(s).'.format(
            user_spreadsheet_df_cleaned.shape[0], user_spreadsheet_df_cleaned.shape[1]))

    # Only write phenotype output when phenotype data was supplied and valid.
    if phenotype_df_cleaned is not None:
        IOUtil.write_to_file(phenotype_df_cleaned, self.run_parameters['phenotype_name_full_path'],
                             self.run_parameters['results_directory'], '_ETL.tsv')
        logger.logging.append(
            'INFO: Cleaned phenotype data has {} row(s), {} column(s).'.format(phenotype_df_cleaned.shape[0],
                                                                               phenotype_df_cleaned.shape[1]))
    return True, logger.logging
class RunTest(object):
    """Excel-driven runner variant that sends JSON requests with a fixed header."""

    def __init__(self):
        self.runmain = RunMain()
        self.data = getData()
        self.com_util = CommonUtil()

    def run(self):
        # Returns the last response obtained (None when no case ran).
        res = None
        row_counts = self.data.get_case_lines()  # number of rows in the excel sheet
        # print(row_counts) 5
        print(row_counts)
        for row_count in range(1, row_counts):
            print(
                "=============================================================================="
            )
            print(row_count)
            url = self.data.get_request_url(row_count)  # request url for this row
            method = self.data.get_request_method(row_count)  # request method for this row
            is_run = self.data.get_is_run(row_count)  # whether this row should run
            data = self.data.get_request_data(
                row_count
            )  # request data; internally chains get_data_for_json, get_key_words, get_request_data
            # header = self.data.get_is_header
            # NOTE(review): eval() on spreadsheet-supplied text executes
            # arbitrary code — consider ast.literal_eval for literal data.
            print(eval(data))
            data = json.dumps(eval(data))
            # Fixed request header incl. a hard-coded Authorization value.
            header = {
                'Content-Type':
                'application/json',
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
                'Authorization':
                'access_key=&^Rwqq%QSSx!HTnFm9XB@pFzL&8im$;app_id=2LaqRXRQvNrVN0Nl'
            }
            expect = int(self.data.get_expect_data(row_count))
            print('url:', url)
            print('method:', method)
            print('is_run:', is_run)
            print('data:', data)
            print("expect", expect)
            print(type(data))
            # print('header:', header)
            if is_run:
                res = self.runmain.run_main(url, method, data, header)
                # Expected value is compared against the response content.
                if self.com_util.is_contains(expect, res):
                    print("测试通过")
                    self.data.write_reality_data(row_count, "pass")
                else:
                    print("测试失败")
                    self.data.write_reality_data(row_count, "fail")
            print("*" * 60 + "分割线" + "*" * 60)
        return res
def run_feature_prioritization_pipeline(self):
    """
    Run data cleaning for feature prioritization pipeline.

    Args:
        NA.

    Returns:
        validation_flag: Boolean type value indicating if input data is valid or not.
        message: A message indicates the status of current check.
    """
    # Local import to avoid a hard module-level dependency on knpackage.
    from knpackage.toolbox import get_spreadsheet_df

    if self.user_spreadsheet_df is None or self.phenotype_df is None:
        return False, logger.logging

    # Imputes na value on user spreadsheet data
    user_spreadsheet_df_imputed = SpreadSheet.impute_na(self.user_spreadsheet_df,
                                                        option=self.run_parameters['impute'])
    if user_spreadsheet_df_imputed is None:
        return False, logger.logging

    # Checks if value of inputs satisfy certain criteria
    user_spreadsheet_val_chked, phenotype_val_chked = CommonUtil.validate_inputs_for_gp_fp(
        user_spreadsheet_df_imputed, self.phenotype_df, self.run_parameters[
            'correlation_measure'])
    if user_spreadsheet_val_chked is None or phenotype_val_chked is None:
        return False, logger.logging

    IOUtil.write_to_file(user_spreadsheet_val_chked, self.run_parameters['spreadsheet_name_full_path'],
                         self.run_parameters['results_directory'], '_ETL.tsv')
    logger.logging.append(
        'INFO: Cleaned user spreadsheet has {} row(s), {} column(s).'.format(
            user_spreadsheet_val_chked.shape[0], user_spreadsheet_val_chked.shape[1]))

    # For t_test the phenotype is re-read from disk and expanded; otherwise
    # the already-validated phenotype is written out as-is.
    if self.run_parameters['correlation_measure'] == 't_test':
        phenotype_df = get_spreadsheet_df(self.run_parameters['phenotype_name_full_path'])
        phenotype_output = TransformationUtil.phenotype_expander(phenotype_df)
    else:
        phenotype_output = phenotype_val_chked

    IOUtil.write_to_file(phenotype_output, self.run_parameters['phenotype_name_full_path'],
                         self.run_parameters['results_directory'], '_ETL.tsv')
    logger.logging.append(
        'INFO: Cleaned phenotypic data has {} row(s), {} column(s).'.format(phenotype_val_chked.shape[0],
                                                                            phenotype_val_chked.shape[1]))
    return True, logger.logging
class SaveBodyValue():
    """Extracts values from a response body and persists them to conf.ini."""

    KEY_ERROR_RET = "NOF_FOUND_KEY"

    def __init__(self):
        self.getconf = GetConf()
        self.comtool = CommonUtil()

    def save_value_to_conf(self, get_body_value, current_result):
        """Extract values from *current_result* via jmespath expressions and
        write them to the config file.

        :param get_body_value: dict (or JSON string) mapping save-key to a
            jmespath expression, e.g. {"user1": "data.records[0].name"}
        :param current_result: response body (dict or JSON string) to search
        :return: True when every value was written, False when a config
            write failed, None when either argument cannot be parsed as
            JSON.  (Fix: the old docstring wrongly claimed a dict return.)
        """
        if not isinstance(current_result, dict):
            try:
                current_result = json.loads(current_result)
            except Exception:
                traceback.print_exc()
                return None
        if not isinstance(get_body_value, dict):
            try:
                get_body_value = json.loads(get_body_value)
            except Exception:
                traceback.print_exc()
                return None
        # Iterate key -> jmespath expression pairs directly (the original
        # built a throwaway dict copy and a dead `dictvalue` accumulator).
        for save_key, value_path in get_body_value.items():
            save_value = self.comtool.json_search(value_path, current_result)
            print("保存的k,v:", save_key, save_value)
            if not self.getconf.write_conf_value(save_key, save_value):
                return False
        return True
class RunTest:
    """Excel-driven runner: execute each case row, resolving dependent-case
    data first, then write pass/fail back into the sheet."""

    def __init__(self):
        self.run_method = RunMethod()
        self.data = GetData()
        self.com_util = CommonUtil()

    def go_on_run(self):
        """Run every flagged case; dependent cases are executed first and
        their response values injected into the request data."""
        # Number of case rows in the sheet.
        rows_count = self.data.get_case_lines()
        print(rows_count)
        for i in range(1, rows_count):
            is_run = self.data.get_is_run(i)
            if is_run:
                url = self.data.get_request_url(i)
                method = self.data.get_request_method(i)
                request_data = self.data.get_data_for_json(i)
                expect = self.data.get_expcet_data(i)
                header = self.data.is_header(i)
                depend_case = self.data.is_depend(i)
                if depend_case is not None:  # idiom fix: was `!= None`
                    self.depend_data = DependdentData(depend_case)
                    # Value pulled from the dependency case's response.
                    depend_response_data = self.depend_data.get_data_for_key(i)
                    # Field in our request data to overwrite with it.
                    depend_key = self.data.get_depend_field(i)
                    request_data[depend_key] = depend_response_data
                res = self.run_method.run_main(method, url, request_data, header)
                print(res)
                if self.com_util.is_contain(expect, res):
                    print("测试通过")
                    self.data.write_result(i, "pass")
                else:
                    print("测试失败")
                    self.data.write_result(i, res)
def run_samples_clustering_pipeline(self):
    """
    Runs data cleaning for samples_clustering_pipeline.

    Args:
        NA

    Returns:
        validation_flag: Boolean type value indicating if input data is valid or not.
        message: A message indicates the status of current check.
    """
    if self.user_spreadsheet_df is None:
        return False, logger.logging

    logger.logging.append('INFO: Start to process user spreadsheet data.')

    # Checks if only non-negative real number appears in user spreadsheet and drop na column wise
    user_spreadsheet_val_chked = SpreadSheet.check_user_spreadsheet_data(self.user_spreadsheet_df,
                                                                         dropna_colwise=True,
                                                                         check_real_number=True,
                                                                         check_positive_number=True)
    if user_spreadsheet_val_chked is None:
        return False, logger.logging

    # Removes NA value and duplication on column and row name
    user_spreadsheet_df_checked = SpreadSheet.remove_dataframe_indexer_duplication(user_spreadsheet_val_chked)

    # Checks the validity of gene name to see if it can be ensemble or not
    user_spreadsheet_df_cleaned, map_filtered_dedup, mapping = SpreadSheet.map_ensemble_gene_name(
        user_spreadsheet_df_checked, self.run_parameters)

    # When a gene-gene network is configured, its genes must intersect ours.
    if 'gg_network_name_full_path' in self.run_parameters.keys() and \
            not CommonUtil.check_network_data_intersection(user_spreadsheet_df_cleaned.index,
                                                          self.run_parameters):
        return False, logger.logging

    # The logic here ensures that even if phenotype data doesn't fits requirement, the rest pipelines can still run.
    if user_spreadsheet_df_cleaned is None:
        return False, logger.logging
    else:
        IOUtil.write_to_file(user_spreadsheet_df_cleaned, self.run_parameters['spreadsheet_name_full_path'],
                             self.run_parameters['results_directory'], '_ETL.tsv')
        # writes dedupped mapping between user_supplied_gene_name and ensemble name to a file
        IOUtil.write_to_file(map_filtered_dedup, self.run_parameters['spreadsheet_name_full_path'],
                             self.run_parameters['results_directory'], '_MAP.tsv', use_index=True,
                             use_header=False)
        # writes user supplied gene name along with its mapping status to a file
        IOUtil.write_to_file(mapping, self.run_parameters['spreadsheet_name_full_path'],
                             self.run_parameters['results_directory'], '_User_To_Ensembl.tsv', use_index=False,
                             use_header=True)
        logger.logging.append(
            'INFO: Cleaned user spreadsheet has {} row(s), {} column(s).'.format(
                user_spreadsheet_df_cleaned.shape[0], user_spreadsheet_df_cleaned.shape[1]))

    # Phenotype data is optional; validate and write it only when supplied.
    if self.phenotype_df is not None:
        logger.logging.append('INFO: Start to process phenotype data.')
        phenotype_df_cleaned = CommonUtil.check_phenotype_intersection(self.phenotype_df,
                                                                       self.user_spreadsheet_df.columns.values)
        if phenotype_df_cleaned is None:
            logger.logging.append('ERROR: Phenotype is emtpy. Please provide a valid phenotype data.')
            return False, logger.logging
        else:
            IOUtil.write_to_file(phenotype_df_cleaned, self.run_parameters['phenotype_name_full_path'],
                                 self.run_parameters['results_directory'], '_ETL.tsv')
            logger.logging.append('INFO: Cleaned phenotype data has {} row(s), {} '
                                  'column(s).'.format(phenotype_df_cleaned.shape[0],
                                                      phenotype_df_cleaned.shape[1]))
    return True, logger.logging
def run_signature_analysis_pipeline(self):
    """
    Runs data cleaning for signature_analysis_pipeline.

    Args:
        NA.

    Returns:
        validation_flag: Boolean type value indicating if input data is valid or not.
        message: A message indicates the status of current check.
    """
    if self.signature_df is None or self.user_spreadsheet_df is None:
        return False, logger.logging

    # Removes NA index for both signature data and user spreadsheet data
    signature_df = SpreadSheet.remove_na_index(self.signature_df)
    user_spreadsheet_df = SpreadSheet.remove_na_index(self.user_spreadsheet_df)

    # Checks if only real number and non-NA value appear in user spreadsheet
    if SpreadSheet.check_user_spreadsheet_data(user_spreadsheet_df, check_na=True, check_real_number=True,
                                               check_positive_number=False) is None:
        return False, logger.logging

    # Checks duplicate columns and rows in user spreadsheet data
    if CheckUtil.check_duplicates(user_spreadsheet_df, check_column=True, check_row=True):
        logger.logging.append('ERROR: Found duplicates on user spreadsheet data. Rejecting...')
        return False, logger.logging

    # Checks intersection of genes between signature data and user spreadsheet data
    intersection = CheckUtil.find_intersection(signature_df.index, user_spreadsheet_df.index)
    if intersection is None:
        logger.logging.append('ERROR: Cannot find intersection between spreadsheet genes and signature genes.')
        return False, logger.logging
    logger.logging.append(
        'INFO: Found {} intersected gene(s) between phenotype and spreadsheet data.'.format(len(intersection)))

    # Checks number of unique value in userspread sheet equals to 2
    if not SpreadSheet.check_unique_values(user_spreadsheet_df, cnt=2):
        logger.logging.append(
            'ERROR: user spreadsheet data does not meet the requirment of having at least two unique values.')
        return False, logger.logging

    # Checks intersection among network data, signature data and user spreadsheet data
    if 'gg_network_name_full_path' in self.run_parameters.keys() and \
            not CommonUtil.check_network_data_intersection(intersection, self.run_parameters):
        return False, logger.logging

    # The logic here ensures that even if phenotype data doesn't fits requirement, the rest pipelines can still run.
    # NOTE(review): user_spreadsheet_df cannot be None at this point, so this
    # branch appears to be defensive copy-paste from the sibling pipelines.
    if user_spreadsheet_df is None:
        return False, logger.logging
    else:
        IOUtil.write_to_file(user_spreadsheet_df, self.run_parameters['spreadsheet_name_full_path'],
                             self.run_parameters['results_directory'], '_ETL.tsv')
        logger.logging.append(
            'INFO: Cleaned user spreadsheet has {} row(s), {} column(s).'.format(
                user_spreadsheet_df.shape[0], user_spreadsheet_df.shape[1]))

    if signature_df is not None:
        IOUtil.write_to_file(signature_df, self.run_parameters['signature_name_full_path'],
                             self.run_parameters['results_directory'], '_ETL.tsv')
        logger.logging.append(
            'INFO: Cleaned phenotype data has {} row(s), {} column(s).'.format(signature_df.shape[0],
                                                                               signature_df.shape[1]))
    return True, logger.logging
class RunTest():
    """Main excel-driven test loop: runs each case row with cookie/token
    handling, dependency resolution, response/code assertions, value
    persistence and a summary mail."""

    def __init__(self):
        self.run_method = RunMethod()
        self.get_data = GetData()
        self.com_util = CommonUtil()
        self.send_mail = SendMail()
        self.op_cookie = OperationCookie()
        self.op_token = OperationToken()
        self.token_check = TokenCheck()
        self.save_body_values = SaveBodyValue()
        self.fail_count = []   # row numbers of failed cases
        self.break_count = []  # row numbers that errored before execution
        self.pass_count = []   # row numbers of passed cases

    def _result_handler(self, row, save_body_value, expect_result, current_result,
                        expect_code, current_code):
        # NOTE(review): on the pass path this method returns None (not True),
        # and it appears unused by get_on_run below — likely a superseded draft.
        if self.com_util.is_contain(expect_result, current_result) \
                and str(expect_code) == str(current_code):
            # log.info('----pass')
            # self.pass_count.append(row)
            # self.get_data.write_test_result(row, 'pass')
            # On success, persist any values flagged for saving (json paths).
            if save_body_value:
                log.debug("##get 需要保存值:" + str(save_body_value))
                if not self.save_body_values.save_value_to_conf(
                        save_body_value, current_result):
                    log.error("保存 响应值的保存记录失败")
        # Case failed.
        else:
            return False
        # print('----fail')
        # self.fail_count.append(row)
        # self.get_data.write_test_result(row, 'fail')

    # Main execution loop.
    def get_on_run(self):
        log.info('执行Sheet : [' + self.get_data.get_current_sheet_name() + ']')
        rows_count = self.get_data.get_case_lines()
        for row_num in range(2, rows_count + 1):  # openpyxl rows are 1-based; row 1 is the header
            # Read the run flag for this row.
            is_run = self.get_data.get_is_run(row_num)
            if not is_run:  # not runnable: record a skip and move on
                # print('Skip case : ', self.get_data.get_case_id_name(row_num))
                # self.get_data.write_test_result(row_num, 'Not run,skip case')
                continue
            log.info('Start case : ' + self.get_data.get_case_id_name(row_num))
            # Gather all case inputs for this row.
            try:
                method = self.get_data.get_request_method(row_num)
                url = self.get_data.get_url_final(row_num)
                data = self.get_data.get_request_data_final(row_num)
                header = self.get_data.get_header_info(row_num)  # header values
                is_cookie = self.get_data.get_is_cookie(
                    row_num)  # cookie/token mode flag for this case
                expect_result = self.get_data.get_expect_result_final(
                    row_num)  # expected substrings in the response
                expect_code = self.get_data.get_expect_code(
                    row_num)  # expected HTTP status code
                # save_body_value looks like
                # -- {"user1":"data.records[0].name","user2":"data.records[1].name"}
                save_body_value = self.get_data.get_save_value(
                    row_num
                )  # keys/values to persist into the valueauto section of conf.ini
                # Dependency: when set, run the dependent case first.
                # dp_caseid = self.get_data.get_dependent_caseid(row_num)
                is_dependent = self.get_data.get_is_dependent(row_num)
            except Exception as e:
                # NOTE(review): traceback.print_exc() returns None, so this
                # string concatenation raises TypeError — should be
                # traceback.format_exc().
                log.error("----Has some error : " + traceback.print_exc())
                self.break_count.append(row_num)
                self.get_data.write_test_result(row_num, 'break' + str(e))
                log.error('----break case')
                continue
            if is_dependent:
                op = DependentData()
                dependent_response_data = op.get_dependent_data_for_key(
                    row_num)
                if not dependent_response_data:
                    # Dependent case failed, so this case cannot run either.
                    log.error('----fail , dependent return case fail')
                    self.get_data.write_test_result(
                        row_num, 'fail, dependent case return fail')
                    self.fail_count.append(row_num)
                    continue
                # log.error('run dp data get : ' + rs)
                # Field in this case's data to overwrite with the dependent value.
                filed_data = self.get_data.get_dependent_filed(row_num)
                data[filed_data] = dependent_response_data
            # Cookie handling: wc=write cookies, yc=send cookies, nc=no cookies.
            if is_cookie == "wc":  # refresh cookies from the response
                run_response_data = self.run_method.run_main(method, url,
                                                             data=data,
                                                             header=header)
                op_cookie = OperationCookie()
                cookie_value = op_cookie.trans_response_cookie_value(
                    run_response_data[2])
                op_cookie.write_cookie(cookie_value)
            elif is_cookie == "yc":
                # self.op_cookie.get_cookie('loongaio')
                cookie = self.op_cookie.get_cookie_file_data()
                cookie = json.loads(cookie)
                run_response_data = self.run_method.run_main(method, url,
                                                             data=data,
                                                             cookie=cookie,
                                                             header=header)
            elif is_cookie == "nc":
                run_response_data = self.run_method.run_main(method, url,
                                                             data=data,
                                                             header=header)
            # Token handling: wt=write token, yt=send token.
            elif is_cookie == "wt":  # refresh the token from the response body
                run_response_data = self.run_method.run_main(method, url,
                                                             data=data,
                                                             header=header)
                # log.debug("#$$# :"+str(run_response_data))
                op_token = OperationToken()
                token_value = op_token.trans_response_token_value_by_body(
                    run_response_data[2])
                op_token.write_token(token_value)
            elif is_cookie == "yt":
                token = self.op_token.get_token()
                log.debug("#携带的token#:" + token)
                header["token"] = token  # adjust per target system: token goes in the header here
                run_response_data = self.run_method.run_main(method, url,
                                                             data=data,
                                                             header=header)
                # Token may have expired mid-run: refresh once and retry.
                if self.token_check.check_token_exception(run_response_data):
                    token = self.op_token.get_token()
                    header["token"] = token  # adjust per target system: token goes in the header here
                    run_response_data = self.run_method.run_main(method, url,
                                                                 data=data,
                                                                 header=header)
            else:  # any other value: no token and no cookies
                run_response_data = self.run_method.run_main(method, url,
                                                             data=data,
                                                             header=header)
            # run_main result: [0]=status code, [1]=body text, [2]=response
            # object. Write the actual outcome back to the sheet.
            current_code = str(run_response_data[0])
            current_result = str(run_response_data[1])
            self.get_data.write_current_result(row_num, current_result)
            self.get_data.write_current_code(row_num, current_code)
            try:
                # Case passes when every expected substring matches AND the
                # status code matches.
                result_check_fail_flag = False
                # expect_result=list(expect_result)
                for exp_res in expect_result:
                    log.debug("#预期检查值#:" + exp_res)
                    if not self.com_util.is_contain(exp_res, current_result):
                        log.info('----fail')
                        self.fail_count.append(row_num)
                        self.get_data.write_test_result(row_num, 'fail')
                        result_check_fail_flag = True
                        log.error("响应断言不匹配 %s" % exp_res)
                        break
                if not result_check_fail_flag:
                    # Body assertions passed; now check the status code.
                    # NOTE(review): this assignment looks like dead leftover code.
                    expect_result = expect_result[0]
                    if int(expect_code) == int(current_code):
                        log.info('----pass')
                        self.pass_count.append(row_num)
                        self.get_data.write_test_result(row_num, 'pass')
                        # On success, persist flagged response values (json paths).
                        if save_body_value:
                            log.debug("##get 需要保存值" + str(save_body_value))
                            if not self.save_body_values.save_value_to_conf(
                                    save_body_value, current_result):
                                log.error("保存 响应值的保存记录失败")
                    # Case failed on status code.
                    else:
                        log.error("code不一致%s %s" % (expect_code, current_code))
                        log.info('----fail')
                        self.fail_count.append(row_num)
                        self.get_data.write_test_result(row_num, 'fail')
            except Exception as e:
                # NOTE(review): print_exc() returns None; log.error(None)
                # loses the traceback text — format_exc() was likely intended.
                log.error(traceback.print_exc())
        self._send_test_mail()  # report results and send the summary mail

    # Build one human-readable line per failed case for the mail body.
    def _fail_row_info(self):
        f_arr = []
        for i in self.fail_count:
            r_info = '[' + self.get_data.get_current_sheet_name() + '] ' + 'Row: ' + str(i) + ', caseid : ' + \
                self.get_data.get_case_id_name(i) + ', Url : ' + self.get_data.get_url_final(i) + \
                ", Comment : " + self.get_data.get_comment_info(i)
            f_arr.append(r_info)
        return f_arr

    # Compose the summary mail from the pass/fail/break counters.
    def _send_test_mail(self):
        passnum = len(self.pass_count)
        failnum = len(self.fail_count)
        breaknum = len(self.break_count)
        totalnum = passnum + failnum + breaknum
        # NOTE(review): ZeroDivisionError when no case ran (totalnum == 0).
        result = "%.2f%%" % (passnum / totalnum * 100)
        fm = [i for i in self._fail_row_info()]
        if fm:  # there are failures: include their details
            content = "这次接口运行情况如下:\n 总计运行接口个数: %s 。通过: %s , 失败: %s , 中断: %s \n 通过百分比:%s " \
                      "\n 失败接口如下:\n %s" % \
                      (totalnum, passnum, failnum, breaknum, result, '\n '.join(fm))
        else:  # everything passed
            content = "这次接口运行情况如下:\n 总计运行接口个数: %s 。通过: %s , 失败: %s , 中断: %s \n 通过百分比:%s " % \
                      (totalnum, passnum, failnum, breaknum, result)
        log.info(content)
        sub = "自动化测试邮件-api"
        # NOTE(review): sub/sm are prepared but no send call is visible here —
        # possibly truncated in this chunk; confirm against the full file.
        sm = SendMail()
def __init__(self):
    # Dependency-case runner and generic comparison/search helper.
    self.depent_data = DependentData()
    self.comtool = CommonUtil()
def __init__(self):
    # self.case_id = case_id
    # Excel readers plus the request runner used to execute dependent cases.
    self.get_data = GetData()
    self.run_method = RunMethod()
    self.com_util = CommonUtil()
    self.opera_excel = OperationExcel()
class DependentData():
    """Runs a case that another case depends on and extracts values from its
    response via jsonpath."""

    def __init__(self):
        # self.case_id = case_id
        self.get_data = GetData()
        self.run_method = RunMethod()
        self.com_util = CommonUtil()
        self.opera_excel = OperationExcel()

    def _get_case_row_num_by_caseid(self):
        # NOTE(review): relies on self.case_id having been set by a public
        # entry point (get_dependent_data_for_key / force_runcase_by_caseid)
        # before this is called.
        rownum = self.get_data.get_case_row_by_idname(self.case_id)
        return rownum
        # rownum = self.get_data.get_case_row_by_idname(self.case_id)

    # token=True / cookies=True are used for token refresh: run one case alone.
    def _run_dependent_case(self, token=None, cookies=None):
        # The is_run flag is intentionally ignored for dependent cases.
        # i = self.get_data.get_case_row_by_idname(self.case_id)  # row of the case id
        i = self._get_case_row_num_by_caseid()
        method = self.get_data.get_request_method(i)
        url = self.get_data.get_url_final(i)
        data = self.get_data.get_request_data_final(i)
        header = self.get_data.get_header_info(i)
        expect_result = self.get_data.get_expect_result_final(i)
        # run_main result: [0]=status code, [1]=body text, [2]=response object.
        if token:
            run_response_data = self.run_method.run_main(method, url, data=data, header=header)
            # print("#$$# :",str(run_response_data))
            op_token = OperationToken()
            token_value = op_token.trans_response_token_value_by_body(run_response_data[2])
            op_token.write_token(token_value)
            return token_value
        elif cookies:
            run_response_data = self.run_method.run_main(method, url, data=data, header=header)
            op_cookie = OperationCookie()
            cookie_value = op_cookie.trans_response_cookie_value(run_response_data[2])
            op_cookie.write_cookie(cookie_value)
            return cookie_value
        else:
            # This dependent case may itself depend on another case: recurse.
            is_dependent = self.get_data.get_is_dependent(i)
            if is_dependent:
                op = DependentData()
                rs = op.get_dependent_data_for_key(i)
                # print('run dp data get : ' + rs)
                if not rs:
                    # Nested dependency failed, so this case cannot run.
                    print('fail , dependent return case fail')
                    self.get_data.write_test_result(i, 'fail, dependent case return fail')
                    return False
                # print('run dp data get : ' + rs)
                filed_data = self.get_data.get_dependent_filed(i)
                data[filed_data] = rs
            run_response_data = self.run_method.run_main(method, url, data, header)
            try:
                if self.com_util.is_contain(expect_result,
                                            run_response_data[1]):
                    return json.loads(run_response_data[1])
                else:
                    return False
            except Exception as e:
                # NOTE(review): swallows the error and implicitly returns None.
                print(e)

    # Resolve the real value behind the dependent key for the given row.
    def get_dependent_data_for_key(self, row):
        self.case_id = self.get_data.get_dependent_caseid(row)  # id of the case we depend on
        denpendet_data = self.get_data.get_dependent_data(row)  # jsonpath into its response
        print('dp data:' + denpendet_data)
        response_data = self._run_dependent_case()  # execute the dependency
        print('rs data : ', response_data)
        if not response_data:
            return False
        json_exe = parse(denpendet_data)
        madle = json_exe.find(response_data)
        # NOTE(review): the loop variable shadows the `math` module, and [0]
        # raises IndexError when the jsonpath matches nothing.
        arr0 = [math.value for math in madle][0]
        return arr0  # the dependent value

    # Force a token refresh by re-running a login case, e.g. login_01 with token=True.
    def force_runcase_by_caseid(self, caseid, token=None, cookies=None):
        self.case_id = caseid
        if token:
            return self._run_dependent_case(token=True)
        # TODO
        if cookies:
            return True
        return self._run_dependent_case()
def __init__(self):
    # Request runner, excel data reader and comparison helper.
    self.runmain = RunMain()
    self.data = getData()
    self.com_util = CommonUtil()
def __init__(self):
    # Request runner, excel data reader and comparison helper.
    self.run_method = RunMethod()
    self.data = GetData()
    self.com_util = CommonUtil()
def __init__(self):
    # Config-file accessor and generic helper.
    self.getconf = GetConf()
    self.comtool = CommonUtil()
class GetData():
    """Read/write accessors for one test-case sheet of the excel workbook.

    Each get_* method takes a sheet row number and returns the normalized
    value of one column (column positions come from data_config); the
    write_* methods record run results back into the sheet.
    """

    def __init__(self, filename=None, sheet_id=None):
        self.opera_excel = OperationExcel(filename, sheet_id)
        self.com_util = CommonUtil()
        self.op_excel_value = op_excel_value.OpValue()
        self.get_conf = GetConf()

    def trans_value(self, v):
        # Normalize a raw cell value (coercion rules live in CommonUtil).
        return self.com_util.value_trans(v)

    # Name of the current sheet.
    def get_current_sheet_name(self):
        return self.trans_value(self.opera_excel.get_sheet_name())

    # Number of rows in the sheet.
    def get_case_lines(self):
        return self.trans_value(self.opera_excel.get_sheet_rows_num())

    # Row number (1-based) of the case whose id column equals `idname`;
    # False when no such case exists.
    def get_case_row_by_idname(self, idname):
        row_num = 1
        for cell in self.opera_excel.get_col_value(1):
            if idname == cell:
                return row_num
            row_num += 1
        return False

    # Case id; "" when the cell is empty.
    def get_case_id_name(self, row):
        col = data_config.get_id_col()
        case_id = self.opera_excel.get_cell_value(row, col)
        if case_id:
            return self.trans_value(case_id)
        return ""

    # Module name.
    def get_mod_name(self, row):
        col = data_config.get_modname_col()
        modname = self.opera_excel.get_cell_value(row, col)
        return self.trans_value(modname)

    # API (interface) name.
    def get_apiname(self, row):
        col = data_config.get_apiname_col()
        apiname = self.opera_excel.get_cell_value(row, col)
        return self.trans_value(apiname)

    # Request url path from the sheet; "" when the cell is empty.
    def get_url(self, row):
        col = data_config.get_url_col()
        url = self.opera_excel.get_cell_value(row, col)
        if url:
            return self.trans_value(url)
        return ""

    # Full url: configured "urlprefix" (quotes stripped) + the sheet path.
    def get_url_final(self, row):
        url = self.get_url(row)
        if url:
            url = str(
                self.get_conf.read_conf_value_toexcel("urlprefix")).replace(
                    '"', '') + str(url)
        return url

    # Whether this row should be executed: "yes"/"y" (case-insensitive).
    def get_is_run(self, row):
        col = data_config.get_run_col()
        is_run = self.opera_excel.get_cell_value(row, col)
        flag = str(is_run).lower() in ("yes", "y")
        return self.trans_value(flag)

    # HTTP request method.
    def get_request_method(self, row):
        col = data_config.get_request_method_col()
        method = self.opera_excel.get_cell_value(row, col)
        return self.trans_value(method)

    # Cookie column value.
    def get_is_cookie(self, row):
        col = data_config.get_cookie_col()
        is_cookie = self.opera_excel.get_cell_value(row, col)
        return self.trans_value(is_cookie)

    # Header cell parsed as JSON; {} when the cell is empty.
    def get_header_info(self, row):
        col = data_config.get_header_col()
        headerinfo = self.opera_excel.get_cell_value(row, col)
        if headerinfo is None or str(headerinfo) == "":
            return {}
        return json.loads(headerinfo)

    # Id of the case this row depends on; None when there is no dependency.
    def get_dependent_caseid(self, row):
        col = data_config.get_dependent_caseid_col()
        caseid = self.opera_excel.get_cell_value(row, col)
        if caseid == "":
            return None
        return caseid

    # Key (jsonpath) used to extract data from the dependency's response.
    def get_dependent_data(self, row):
        col = data_config.get_dependent_data_col()
        return self.opera_excel.get_cell_value(row, col)

    # Request field that receives the dependency's extracted value.
    def get_dependent_filed(self, row):
        col = data_config.get_dependent_filed_col()
        return self.opera_excel.get_cell_value(row, col)

    # Raw request-data cell; None when empty.
    def _get_request_data(self, row):
        col = data_config.get_request_data_col()
        data = self.opera_excel.get_cell_value(row, col)
        if data == "":
            return None
        return self.trans_value(data)

    # Resolve the request body: "json_" keys are loaded from the json store,
    # anything else has its excel variables substituted; None when empty.
    def get_request_data_final(self, row):
        request_data = self._get_request_data(row)
        if request_data is None or str(request_data) == "":
            # BUGFIX: empty cells (None) previously fell into the
            # variable-replacement branch (str(None) != "") and came back
            # as the string "None"; return None as documented.
            return None
        if str(request_data).startswith("json_"):
            opera_json = OperationJson()
            request_data = opera_json.get_value(request_data)
            return self.trans_value(request_data)
        request_data = self.op_excel_value.replace_value(
            request_data)  # substitute excel variables in the request
        return self.trans_value(request_data)

    # Value to save from the response (jsonpath spec); None when empty.
    def get_save_value(self, row):
        col = data_config.get_save_value_col()
        savevalue = self.opera_excel.get_cell_value(row, col)
        if savevalue is None or str(savevalue) == "":
            return None
        return str(savevalue)

    # Raw expected-result cell; None when empty.
    def _get_expect_result(self, row):
        col = data_config.get_expect_result_col()
        expect_value = self.opera_excel.get_cell_value(row, col)
        if expect_value == "":
            return None
        return self.trans_value(expect_value)

    def get_expect_result_final(self, row):
        '''
        Resolve the expected result for a row.
        :param row: sheet row number
        :return: type:list -- comma-split expectations, [] when none configured
        '''
        expect_result = self._get_expect_result(row)
        if expect_result is None or str(expect_result) == "":
            # BUGFIX: None previously reached the variable-replacement branch
            # and produced ["None"]; no expectation means an empty list.
            return []
        if str(expect_result).startswith("json_"):
            opera_json = OperationJson()
            expect_data = opera_json.get_value(expect_result)
            print('通过json配置的预期结果断言###值为:', expect_data)
            return list(str(self.trans_value(expect_data)).split(','))
        expect_data = self.op_excel_value.replace_value(
            expect_result)  # substitute excel variables in the expectation
        return list(str(self.trans_value(expect_data)).split(','))

    # Expected response code; defaults to 200 when the cell is empty.
    def get_expect_code(self, row):
        col = data_config.get_except_code_col()
        expect_code = self.opera_excel.get_cell_value(row, col)
        if expect_code == "":
            return 200
        return self.trans_value(expect_code)

    # Database-check SQL statement; None when the cell is empty.
    def get_dbcheck_sql(self, row):
        col = data_config.get_dbcheck_col()
        dbchecksql = self.opera_excel.get_cell_value(row, col)
        # BUGFIX: the original `!= "" or != None` test was always true,
        # so empty cells were returned as "" / "None" instead of None.
        if dbchecksql is None or str(dbchecksql) == "":
            return None
        return str(dbchecksql)

    # Free-form comment column; "" when the cell is empty.
    def get_comment_info(self, row):
        col = data_config.get_comment_col()
        commentinfo = self.opera_excel.get_cell_value(row, col)
        # BUGFIX: same always-true `or` condition as get_dbcheck_sql; an
        # empty/None cell now yields "" rather than the string "None".
        if commentinfo is None or str(commentinfo) == "":
            return ""
        return str(commentinfo)

    # Whether this row declares a dependency on another case.
    def get_is_dependent(self, row):
        return bool(self.get_dependent_caseid(row))

    # Write the current (actual) result into the sheet; True on success.
    def write_current_result(self, row, value):
        col = data_config.get_current_result_col()
        writevalue = self.opera_excel.write_cell_value(row, col, value)
        return bool(writevalue)

    # Write the current response code into the sheet; True on success.
    def write_current_code(self, row, value):
        col = data_config.get_current_code_col()
        writevalue = self.opera_excel.write_cell_value(row, col, value)
        return bool(writevalue)

    # Write the pass/fail verdict into the sheet; True on success.
    def write_test_result(self, row, value):
        col = data_config.get_test_result_col()
        # `value` doubles as the result flag so the writer can colour
        # pass/fail/other cells differently.
        writevalue = self.opera_excel.write_cell_value(row,
                                                       col,
                                                       value,
                                                       result=value)
        return bool(writevalue)
def __init__(self):
    """Build the helpers needed to run cases and report results."""
    self.sendmail = SendMail()
    self.commonutil = CommonUtil()
    self.getdata = GetData()
    self.runmethod = RunMethod()
def __init__(self, filename=None, sheet_id=None):
    """Bind excel/config helpers; filename and sheet_id pick the workbook sheet."""
    self.get_conf = GetConf()
    self.com_util = CommonUtil()
    self.op_excel_value = op_excel_value.OpValue()
    self.opera_excel = OperationExcel(filename, sheet_id)