def query_result_equals(expect_resp, actual_resp, compare_level="data_set"):
    expect_column_types = [x['columnTypeName'] for x in expect_resp['columnMetas']]
    expect_column_numbers = [x['columnType'] for x in expect_resp['columnMetas']]
    expect_result = [[y.strip() if y else y for y in x] for x in expect_resp['results']]
    actual_column_types = [x['columnTypeName'] for x in actual_resp['columnMetas']]
    actual_column_numbers = [x['columnType'] for x in actual_resp['columnMetas']]
    actual_result = [[y.strip() if y else y for y in x] for x in actual_resp['results']]
    if len(expect_column_types) != len(actual_column_types):
        Messages.write_message('column count assert failed [{0},{1}]'.format(
            len(expect_column_types), len(actual_column_types)))
        logging.error('column count assert failed [%s,%s]',
                      len(expect_column_types), len(actual_column_types))
        return False
    if compare_level == "data_set":
        return dataset_equals(expect_result, actual_result, expect_column_types,
                              actual_column_types, expect_column_numbers,
                              actual_column_numbers)
    if compare_level == "row_count":
        return row_count_equals(expect_result, actual_result)
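# A minimal usage sketch for query_result_equals. Assumptions (not from the
# original module): _demo_query_result_equals and the sample response below are
# hypothetical; the dict only mimics the Kylin-style shape used above, i.e.
# 'columnMetas' entries carrying 'columnTypeName'/'columnType' plus 'results' rows.
def _demo_query_result_equals():
    sample_resp = {
        'columnMetas': [{'columnTypeName': 'VARCHAR', 'columnType': 12}],
        'results': [['a '], ['b']],
    }
    # Comparing a response against itself should pass the row-count check.
    assert query_result_equals(sample_resp, sample_resp, compare_level="row_count")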
def _log_request(self, req):
    Messages.write_message('\n<b>{} {}</b>'.format(req.method, req.url))
    Messages.write_message('\n<i>Request Headers:</i>')
    Messages.write_message('\n'.join('{}: {}'.format(k, v)
                                     for k, v in req.headers.items()))
    Messages.write_message('\n<i>Request Body:</i>')
    Messages.write_message('{}'.format(req.body))
def dataset_equals(expect, actual, expect_col_types=None, actual_col_types=None,
                   expect_col_nums=None, actual_col_nums=None):
    if len(expect) != len(actual):
        Messages.write_message('row count assert failed [{0},{1}]'.format(
            len(expect), len(actual)))
        logging.error('row count assert failed [%s,%s]', len(expect), len(actual))
        return False
    if expect_col_types is None:
        expect_col_types = ['VARCHAR'] * len(expect[0])
    expect_set = set()
    for values in expect:
        expect_set.add(_Row(values, expect_col_types, expect_col_nums))
    if actual_col_types is None:
        actual_col_types = expect_col_types if expect_col_types else ['VARCHAR'] * len(actual[0])
    actual_set = set()
    for values in actual:
        actual_set.add(_Row(values, actual_col_types, actual_col_nums))
    assert_result = expect_set ^ actual_set
    if assert_result:
        logging.error('diff[%s]', len(assert_result))
        print(assert_result)
        Messages.write_message("\nDiff {0}".format(assert_result))
        return False
    return True
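# dataset_equals relies on a _Row helper that is defined elsewhere in this
# module and is not shown in this extract. The class below is a rough,
# assumption-based sketch only (named _RowSketch to avoid clashing with the
# real _Row): for the set symmetric difference above to work, such a wrapper
# mainly needs consistent __eq__ and __hash__ over a normalized view of the
# row values; the real _Row may additionally normalize per column type/number.
class _RowSketch:
    def __init__(self, values, col_types, col_nums=None):
        # Keep the column metadata for reference and key on a tuple of the
        # stringified cell values so rows can live in a set.
        self._col_types = tuple(col_types) if col_types else ()
        self._col_nums = tuple(col_nums) if col_nums else ()
        self._key = tuple(None if v is None else str(v) for v in values)

    def __eq__(self, other):
        return isinstance(other, _RowSketch) and self._key == other._key

    def __hash__(self):
        return hash(self._key)

    def __repr__(self):
        return 'Row{}'.format(self._key)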
def test_clear(self):
    messages = ['HAHAHAH', 'HAHAHAH1', 'HAHAHAH2', 'HAHAHAH3']
    for message in messages:
        Messages.write_message(message)
    _MessagesStore.clear()
    pending_messages = _MessagesStore.pending_messages()
    self.assertEqual([], pending_messages)
def test_clear(self):
    messages = ['HAHAHAH', 'HAHAHAH1', 'HAHAHAH2', 'HAHAHAH3']
    for message in messages:
        Messages.write_message(message)
    MessagesStore.clear()
    pending_messages = MessagesStore.pending_messages()
    self.assertEqual([], pending_messages)
def init_env(requestPath):
    config = read_config()
    request = read_json_file(
        os.path.join(data_store.spec['specificationPath'], requestPath))
    matchResult = do_match(config, request)
    if matchResult is None:
        return
    Messages.write_message(matchResult)
    fill_repository(config, matchResult)
def is_smaller(a, b):
    try:
        variableA = _get_variable(a)
        variableB = _get_variable(b)
        Messages.write_message("%s should be smaller than %s" % (str(variableA), str(variableB)))
        assert variableA < variableB
    except Exception:
        assert False
def row_count_equals(expect_result, actual_result):
    if len(expect_result) != len(actual_result):
        Messages.write_message('row count assert failed [{0},{1}]'.format(
            len(expect_result), len(actual_result)))
        logging.error('row count assert failed [%s,%s]', len(expect_result),
                      len(actual_result))
        return False
    return True
def override_on_cube(cube_override_conf):
    sh_config(set_auto="True",
              executor_instance="5",
              instance_strategy="100,2,500,3,1000,4")
    time.sleep(60)
    with open(
            os.path.join('meta_data/auto_config',
                         'auto_config_override_conf.json'), 'r') as f:
        cube_desc_data = json.load(f)['cube_desc_data']
    client.create_cube('learn_kylin',
                       cube_override_conf,
                       cube_desc_data=cube_desc_data)
    resp = client.build_segment(start_time="1325376000000",
                                end_time="1388534400000",
                                cube_name=cube_override_conf)
    job_id = resp['uuid']
    step_id = job_id + "-01"
    client.await_job(job_id)
    resp = client.get_step_output(job_id=job_id, step_id=step_id)
    output = resp.get('cmd_output')
    memory = re.findall(
        "Override user-defined spark conf, set spark.executor.memory.*", output)
    cores = re.findall(
        "Override user-defined spark conf, set spark.executor.cores.*", output)
    memoryOverhead = re.findall(
        "Override user-defined spark conf, set spark.executor.memoryOverhead.*",
        output)
    instances = re.findall(
        "Override user-defined spark conf, set spark.executor.instances.*",
        output)
    partitions = re.findall(
        "Override user-defined spark conf, set spark.sql.shuffle.partitions.*",
        output)
    assert memory[0] == "Override user-defined spark conf, set spark.executor.memory=2G.", \
        Messages.write_message("expected Override user-defined spark conf, set spark.executor.memory=2G; actually " + memory[0])
    assert cores[0] == "Override user-defined spark conf, set spark.executor.cores=2.", \
        Messages.write_message("expected Override user-defined spark conf, set spark.executor.cores=2; actually " + cores[0])
    assert memoryOverhead[0] == "Override user-defined spark conf, set spark.executor.memoryOverhead=256M.", \
        Messages.write_message("expected Override user-defined spark conf, set spark.executor.memoryOverhead=256M; actually " + memoryOverhead[0])
    assert instances[0] == "Override user-defined spark conf, set spark.executor.instances=3.", \
        Messages.write_message("expected Override user-defined spark conf, set spark.executor.instances=3; actually " + instances[0])
    assert partitions[0] == "Override user-defined spark conf, set spark.sql.shuffle.partitions=3.", \
        Messages.write_message("expected Override user-defined spark conf, set spark.sql.shuffle.partitions=3; actually " + partitions[0])
    client.disable_cube(cube_override_conf)
    time.sleep(30)
    client.delete_cube(cube_override_conf)
    time.sleep(30)
def test_pending_messages_gives_only_those_messages_which_are_not_reported(self):
    messages = ['HAHAHAH', 'HAHAHAH1', 'HAHAHAH2', 'HAHAHAH3']
    for message in messages:
        Messages.write_message(message)
    pending_messages = _MessagesStore.pending_messages()
    self.assertEqual(messages, pending_messages)
    pending_messages = _MessagesStore.pending_messages()
    self.assertEqual([], pending_messages)
    messages = ['HAHAHAH', 'HAHAHAH1']
    for message in messages:
        Messages.write_message(message)
    pending_messages = _MessagesStore.pending_messages()
    self.assertEqual(messages, pending_messages)
def test_pending_messages_gives_only_those_messages_which_are_not_reported(self):
    messages = ['HAHAHAH', 'HAHAHAH1', 'HAHAHAH2', 'HAHAHAH3']
    for message in messages:
        Messages.write_message(message)
    pending_messages = MessagesStore.pending_messages()
    self.assertEqual(messages, pending_messages)
    pending_messages = MessagesStore.pending_messages()
    self.assertEqual([], pending_messages)
    messages = ['HAHAHAH', 'HAHAHAH1']
    for message in messages:
        Messages.write_message(message)
    pending_messages = MessagesStore.pending_messages()
    self.assertEqual(messages, pending_messages)
def sign_test():
    c, _, _ = get_test_client()
    _, status = c.auto_sign()
    if status == 0:
        Messages.write_message("Sign-in looks successful; now let's see whether we can sign in again.")
        _, (e, _) = c.auto_sign()
        assert e == 3
        Messages.write_message("We are done signing in.")
    else:
        status, _ = status
        if status == 1:
            Messages.write_message("It is not yet time to sign in.")
            return
        if status == 3:
            Messages.write_message("Already signed in.")
            return
        Messages.write_message("Strange, something went wrong here.")
        assert False
def check_log(output, memorySize, coresSize, memoryOverheadSize, instancesSize,
              partitionsSize):
    memory = re.findall("Auto set spark conf: spark.executor.memory = .*", output)
    cores = re.findall("Auto set spark conf: spark.executor.cores = .*", output)
    memoryOverhead = re.findall(
        "Auto set spark conf: spark.executor.memoryOverhead = .*", output)
    instances = re.findall(
        "Auto set spark conf: spark.executor.instances = .*", output)
    partitions = re.findall(
        "Auto set spark conf: spark.sql.shuffle.partitions = .*", output)
    assert memory[0] == "Auto set spark conf: spark.executor.memory = {memory}.".format(memory=memorySize), \
        Messages.write_message("expected " + "Auto set spark conf: spark.executor.memory = {memory}.".format(memory=memorySize) + ", actually " + memory[0])
    assert cores[0] == "Auto set spark conf: spark.executor.cores = {cores}.".format(cores=coresSize), \
        Messages.write_message("expected " + "Auto set spark conf: spark.executor.cores = {cores}.".format(cores=coresSize) + ", actually " + cores[0])
    assert memoryOverhead[0] == "Auto set spark conf: spark.executor.memoryOverhead = {memoryOverhead}.".format(memoryOverhead=memoryOverheadSize), \
        Messages.write_message("expected " + "Auto set spark conf: spark.executor.memoryOverhead = {memoryOverhead}.".format(memoryOverhead=memoryOverheadSize) + ", actually " + memoryOverhead[0])
    assert instances[0] == "Auto set spark conf: spark.executor.instances = {instances}.".format(instances=instancesSize), \
        Messages.write_message("expected " + "Auto set spark conf: spark.executor.instances = {instances}.".format(instances=instancesSize) + ", actually " + instances[0])
    assert partitions[0] == "Auto set spark conf: spark.sql.shuffle.partitions = {partitions}.".format(partitions=partitionsSize), \
        Messages.write_message("expected " + "Auto set spark conf: spark.sql.shuffle.partitions = {partitions}.".format(partitions=partitionsSize) + ", actually " + partitions[0])
def hello_word():
    Messages.write_message('hello world')
    assert True
def get_token():
    Messages.write_message("Given the risk of running this test frequently, we are putting it on hold for now.")
    """
def simple_step():
    Messages.write_message("Hello from Python")
def make_sure_it_continues():
    Messages.write_message("continuing...")
def navigate_to_orders_page(driver):
    # Messages.write_message("Getting URL: {0}".format(self.URL))
    MAGENTO_URL = '{}/admin/sales/order'.format(os.getenv('MAGENTO_URL'))
    Messages.write_message('Navigation URL: {}'.format(MAGENTO_URL))
    driver.get(MAGENTO_URL)
def compare_sql_result(sql, project, kylin_client, compare_level="data_set",
                       cube=None, expected_result=None):
    pushdown_project = kylin_client.pushdown_project
    if not util.if_project_exists(kylin_client=kylin_client,
                                  project=pushdown_project):
        kylin_client.create_project(project_name=pushdown_project)
    hive_tables = kylin_client.list_hive_tables(project_name=project)
    if hive_tables is not None:
        for table_info in hive_tables:
            if table_info.get('source_type') == 0:
                kylin_client.load_table(
                    project_name=pushdown_project,
                    tables='{database}.{table}'.format(
                        database=table_info.get('database'),
                        table=table_info.get('name')))
    kylin_resp = kylin_client.execute_query(cube_name=cube,
                                            project_name=project,
                                            sql=sql)
    assert kylin_resp.get('isException') is False, \
        'Exception thrown when executing ' + sql
    pushdown_resp = kylin_client.execute_query(project_name=pushdown_project,
                                               sql=sql)
    assert pushdown_resp.get('isException') is False
    assert query_result_equals(
        pushdown_resp, kylin_resp, compare_level=compare_level
    ), Messages.write_message(
        "Query result differs from pushdown query result for {0}, \n------------------------------------\n Actual result is {1} \n\n Expected result is {2}"
        .format(sql, kylin_resp.get('results'), pushdown_resp.get('results')))
    if expected_result is not None:
        assert expected_result.get("cube") == kylin_resp.get("cube"), \
            Messages.write_message(
                "Sql {0} \n------------------------------------\n Query cube differs from the json file, actual cube is {1}, expected cube is {2}"
                .format(sql, kylin_resp.get("cube"), expected_result.get("cube")))
        if kylin_resp.get("cuboidIds") is not None:
            assert expected_result.get("cuboidIds") == kylin_resp.get("cuboidIds"), \
                Messages.write_message(
                    "Sql {0} \n------------------------------------\n Query cuboidIds differ from the json file, actual cuboidIds is {1}, expected cuboidIds is {2}"
                    .format(sql, kylin_resp.get("cuboidIds"),
                            expected_result.get("cuboidIds")))
        assert expected_result.get("totalScanCount") == kylin_resp.get("totalScanCount"), \
            Messages.write_message(
                "Sql {0} \n------------------------------------\n Query totalScanCount differs from the json file, actual totalScanCount is {1}, expected totalScanCount is {2}"
                .format(sql, kylin_resp.get("totalScanCount"),
                        expected_result.get("totalScanCount")))
        assert expected_result.get("totalScanFiles") == kylin_resp.get("totalScanFiles"), \
            Messages.write_message(
                "Sql {0} \n------------------------------------\n Query totalScanFiles differs from the json file, actual totalScanFiles is {1}, expected totalScanFiles is {2}"
                .format(sql, kylin_resp.get("totalScanFiles"),
                        expected_result.get("totalScanFiles")))
        assert expected_result.get("pushDown") == kylin_resp.get("pushDown"), \
            Messages.write_message(
                "Sql {0} \n------------------------------------\n Query pushDown differs from the json file, actual pushDown is {1}, expected pushDown is {2}"
                .format(sql, kylin_resp.get("pushDown"),
                        expected_result.get("pushDown")))
def _navigate_to_signup_page(driver):
    # Messages.write_message("Getting URL: {0}".format(self.URL))
    MAGENTO_URL = '{}/customer/account/create/'.format(os.getenv('MAGENTO_URL'))
    Messages.write_message('Navigation URL: {}'.format(MAGENTO_URL))
    driver.get(MAGENTO_URL)
def step_multiple_args(s, i):
    Messages.write_message("Values are {} and {}".format(s, i))
def assert_flight_combination(case, combination, booked_seat, NREA, extra, status):
    Messages.write_message(u"case={0},{1},{2},{3},{4}".format(
        case, combination, booked_seat, NREA, extra))
    assert status == "OK"
    assert NREA == FlightScheduler(case, combination, booked_seat, extra).process()
def assert_flight_combination(table):
    for case in table.get_column_values_with_name("case"):
        Messages.write_message(u"case={0}".format(case))
        assert 1 == 1
def assert_default_vowels(given_vowels):
    Messages.write_message("Given vowels are {0}".format(given_vowels))
    assert given_vowels == "".join(vowels)
def verifier_site_projet_github():
    titre = Driver.driver.title
    logging.info("The page title is: " + titre)
    Messages.write_message("The page title is: " + titre)
    Screenshots.capture_screenshot()
    assert titre == "GitHub - getgauge/gauge-python: Python language runner for Gauge", "Wrong page"
def navigate_to_orders_page_with_id(driver, id):
    # Messages.write_message("Getting URL: {0}".format(self.URL))
    MAGENTO_URL = '{}/admin/sales/order/view/order_id/{}/'.format(
        os.getenv('MAGENTO_URL'), id)
    Messages.write_message('Navigation URL: {}'.format(MAGENTO_URL))
    driver.get(MAGENTO_URL)
def _log_response(self, res):
    Messages.write_message('\n---Response---')
    Messages.write_message('<i>Status code:</i> <b>{}</b>'.format(res.status_code))
    Messages.write_message('\n<i>Response Headers:</i>')
    Messages.write_message('\n'.join('{}: {}'.format(k, v)
                                     for k, v in res.headers.items()))
    if res.status_code != requests.codes.no_content:
        Messages.write_message('\n<i>Response Body:</i>')
        Messages.write_message('{}'.format(res.json()))
def get_ping_delay(deviceId, v_delay_name):
    device = data_store.spec['deviceRepository'][deviceId]
    if device is None:
        assert False, "device is not initialized"
    data_store.scenario[v_delay_name] = device.get_ping_delay()
    Messages.write_message('delay: %s' % data_store.scenario[v_delay_name])
def should_continue(table):
    for num in table:
        failassert(num)
        Messages.write_message(f"continuing inside the step {num}")
    assert_expectations()
def assert_title(self, taken_title):
    Messages.write_message("Verifying correct title")
    assert str(taken_title) == str(self.app_title)