def __get_result_ids(config_run):
    """Collect the TestResult ids to fetch for a config run.

    Starts with the run-wide metrics (live feedback plus the world-zone
    active-users, requests-per-second and user-load-time series) and then
    appends, for every scenario in the run's config, that scenario's
    app-page, core-action and correctness metric ids.
    """
    zone = LI_WORLD_REGION_ID
    result_ids = [
        TestResult.result_id_from_name(TestResult.LIVE_FEEDBACK),
        TestResult.result_id_from_name(TestResult.ACTIVE_USERS, load_zone_id=zone),
        TestResult.result_id_from_name(TestResult.REQUESTS_PER_SECOND, load_zone_id=zone),
        TestResult.result_id_from_name(TestResult.USER_LOAD_TIME, load_zone_id=zone),
    ]
    # Scenario-specific metrics: one app-page, one core-actions and one
    # correctness id per configured scenario.
    for name, cfg in config_run["config_params"]["scenarios"].items():
        sid = scenario.get(name)["id"]
        result_ids.extend([
            __scenario_app_page_metric_id(sid, cfg),
            __core_actions_metric_id(sid),
            __correctness_metric_id(sid, name),
        ])
    return result_ids
def __scenario_app_page_metrics(scenario_name, metrics, scenario_config):
    """Return the app-page metric series for *scenario_name*.

    Legacy configs that set "use-scenario-load-time" predate wrapping the
    whole test in an "app" page and had a single scenario per test config
    (e.g. has-session: one login followed by 100 has-session calls); for
    those, the whole-test user load time is converted into an
    actions-per-second series instead.
    """
    if "use-scenario-load-time" in scenario_config:
        return __get_metric_as_actions_per_sec(USER_LOAD_TIME_KEY, metrics, 1000.0)
    sid = scenario.get(scenario_name)["id"]
    return metrics[__scenario_app_page_metric_id(sid, scenario_config)]
def __scenario_app_page_metrics(scenario_name, metrics, scenario_config):
    """Fetch the app-page metrics recorded for the given scenario.

    For legacy tests the "app" page does not wrap the whole test and there
    was only one scenario per test config (an example is has-session, which
    does one login and 100 has-session calls); those opt in via the
    "use-scenario-load-time" flag and get the whole-test load time expressed
    as actions per second instead of a per-scenario page metric.
    """
    use_legacy_load_time = "use-scenario-load-time" in scenario_config
    if use_legacy_load_time:
        return __get_metric_as_actions_per_sec(USER_LOAD_TIME_KEY, metrics, 1000.0)
    scenario_id = scenario.get(scenario_name)["id"]
    metric_id = __scenario_app_page_metric_id(scenario_id, scenario_config)
    return metrics[metric_id]
def scenariocmd(action, name):
    """Dispatch the `oimp scenario` sub-command.

    Supports "validate" (a single named scenario, or list them all when no
    name is given) and "update" (all scenarios, or one by name); any other
    action prints usage. Bails out silently when no project environment is
    defined, since scenario operations require one.
    """
    if not paths.suite_defined():
        return  # cannot run without project env defined
    import scenario
    import code

    if action == "validate":
        if name:
            code.validate(name, scenario.get(name)['id'])
        else:
            scenario.show_scenarios()
    elif action == "update":
        scenario.update(name)
    else:
        print("""To validate scenario: oimp scenario validate [scenario_name]
To update all scenarios: oimp scenario update
To update a scenario: oimp scenario update [name]""")
def __get_result_ids(config_run):
    """Build the list of TestResult ids to retrieve for this config run.

    The list always contains the live-feedback metric and the world-zone
    active-users, requests-per-second and user-load-time metrics; each
    scenario named in the run's config additionally contributes its
    app-page, core-actions and correctness metric ids.
    """
    world_id = LI_WORLD_REGION_ID
    ids = [TestResult.result_id_from_name(TestResult.LIVE_FEEDBACK)]
    for metric_name in (TestResult.ACTIVE_USERS,
                        TestResult.REQUESTS_PER_SECOND,
                        TestResult.USER_LOAD_TIME):
        ids.append(TestResult.result_id_from_name(metric_name, load_zone_id=world_id))

    # Add the per-scenario metric ids.
    scenario_configs = config_run["config_params"]["scenarios"]
    for scenario_name in scenario_configs:
        scenario_id = scenario.get(scenario_name)["id"]
        # app page metric for this scenario
        ids.append(__scenario_app_page_metric_id(scenario_id, scenario_configs[scenario_name]))
        # core action and correctness metrics for this scenario
        ids.append(__core_actions_metric_id(scenario_id))
        ids.append(__correctness_metric_id(scenario_id, scenario_name))
    return ids
def scenariocmd(action, name):
    """Handle the `oimp scenario <action> [name]` command line entry point.

    "validate" checks one scenario by name (or lists scenarios when *name*
    is falsy); "update" refreshes one or all scenarios; anything else
    prints the usage text. Does nothing when no project environment is
    configured.
    """
    # A suite/project environment is required for any scenario operation.
    if not paths.suite_defined():
        return
    import scenario
    import code

    if action == "update":
        scenario.update(name)
    elif action == "validate":
        if not name:
            scenario.show_scenarios()
        else:
            scenario_cfg = scenario.get(name)
            code.validate(name, scenario_cfg['id'])
    else:
        print("""To validate scenario: oimp scenario validate [scenario_name]
To update all scenarios: oimp scenario update
To update a scenario: oimp scenario update [name]""")
def __correctness_metrics(scenario_name, metrics, scenario_config):
    """Return the correctness sub-metrics recorded for *scenario_name*.

    *scenario_config* is accepted for signature parity with the other
    per-scenario metric accessors but is not used here.
    """
    sid = scenario.get(scenario_name)["id"]
    return metrics[__correctness_metric_id(sid, scenario_name)]
def __scenario_core_action_metrics(scenario_name, metrics, scenario_config):
    """Return the core-action sub-metrics for *scenario_name*.

    *scenario_config* is unused; it is kept so all per-scenario metric
    accessors share the same signature.
    """
    sid = scenario.get(scenario_name)["id"]
    return metrics[__core_actions_metric_id(sid)]
def __correctness_metrics(scenario_name, metrics, scenario_config):
    """Look up the correctness metrics stored for the given scenario.

    The scenario's id is resolved through the scenario registry and used to
    derive the metric key; *scenario_config* is unused but kept for
    signature parity with the sibling accessors.
    """
    scenario_id = scenario.get(scenario_name)["id"]
    metric_id = __correctness_metric_id(scenario_id, scenario_name)
    sub_metrics = metrics[metric_id]
    return sub_metrics
def __scenario_core_action_metrics(scenario_name, metrics, scenario_config):
    """Look up the core-action metrics stored for the given scenario.

    Resolves the scenario id from the registry, derives the core-actions
    metric key and indexes *metrics* with it; *scenario_config* is unused
    but kept so the accessor signatures match.
    """
    scenario_id = scenario.get(scenario_name)["id"]
    metric_id = __core_actions_metric_id(scenario_id)
    sub_metrics = metrics[metric_id]
    return sub_metrics