def get_num_workers(env_var: str) -> int:
    """Return the number of parallel workers to use.

    If the environment variable `env_var` is set to a non-empty value, it
    must parse as a positive integer, and that value is returned. Otherwise
    the machine's CPU count is used.

    Exits the process (via error_s) when the variable is set but is not a
    positive integer.
    """
    env_num_workers = os.environ.get(env_var, "")
    if env_num_workers:  # non-empty string => the user set an explicit count
        try:
            env_num_workers_number = int(env_num_workers)
        except ValueError:
            # Fix: "vairable" -> "variable" (typo in the error message).
            sys.exit(error_s("env variable '%s' is not an integer" % env_var))
        if env_num_workers_number <= 0:
            sys.exit(error_s("env variable '%s' is not positive" % env_var))
        return env_num_workers_number
    return multiprocessing.cpu_count()
def generate_web_view(
    *,
    test_title: str,
    master_log: Path,
    test_exec_path: Path,
    timer_path: Optional[Path],
    additional_info: Optional[List[str]],
    generate_to_dir: Path,
) -> Path:
    """
    Params:
    * test_title: The title you want to display on the page's tab and head.
    * master_log: Path to the JSON log produced by the test runner.
    * test_exec_path: The working directory at which all test commands were run.
    * timer_path: Path to the timer program used by the test runner, if one
        is used.
    * additional_info: Information you want to display additionally (list of
        lines). If None, the info area isn't shown (different from an empty
        list).
    * generate_to_dir: Where to write the files produced by this generator.
    Returns:
    The path to the index.html file. Point the browser to this URL to view it.
    """
    # NOTE: the old docstring documented a `num_jobs` parameter that does not
    # exist in the signature; it has been removed.
    if generate_to_dir.exists():
        # Remove the existing directory entirely, so that files from a previous
        # run won't affect the UI.
        shutil.rmtree(generate_to_dir)
    os.makedirs(generate_to_dir)
    with open(master_log, 'r') as f:
        try:
            task_result_list: List[dict] = json.load(f)
            if not isinstance(task_result_list, list):
                raise TypeError("log data ought to be a list, but found %s" %
                                type(task_result_list).__name__)

            # In the log, the "id" key is shared by repeated tasks that
            # correspond to the same test. Sorting ensures:
            # 1. Tasks are sorted in ascending order alphabetically according
            #    to the "id" key.
            # 2. Tasks that correspond to the same test are put together, in
            #    ascending order according to the repeat count 1...k.
            # PEP 8 (E731): use a def rather than assigning a lambda.
            def compute_sort_key(e):
                return (TaskResGetter.test_id(e), TaskResGetter.repeat_count(e))

            sorted_task_results = sorted(task_result_list, key=compute_sort_key)
        except (json.JSONDecodeError, KeyError, TypeError) as e:
            # Fix: "currupted" -> "corrupted" (typo in the error message).
            sys.exit(
                score_utils.error_s("corrupted log file %s: %s" %
                                    (master_log, e)))
    return _generate_web_view_impl(
        sorted_task_results=sorted_task_results,
        test_title=test_title,
        master_log=master_log,
        test_exec_path=test_exec_path,
        timer_path=timer_path,
        additional_info=additional_info,
        generate_to_dir=generate_to_dir,
    )
def main():
    """CLI entry point: parse arguments and generate the static site.

    Returns 0 on success; exits early with an error when --log does not
    point to an existing file.
    """
    parser = argparse.ArgumentParser(
        description="Static site generator for test results",
        epilog=
        "For requirements of the timer and log file: see score_run.py --docs")
    parser.add_argument("--title",
                        type=str,
                        default="Tests",
                        help="title of tests, default: 'Tests'")
    parser.add_argument("--log",
                        metavar="LOG",
                        type=str,
                        required=True,
                        help="path to the master log, written by score_run.py")
    parser.add_argument("--timer",
                        metavar="PROG",
                        type=str,
                        default=None,
                        help="path to the timer program used to run the tests")
    parser.add_argument("--test-exec-path",
                        metavar="PATH",
                        type=str,
                        default=Path.cwd(),
                        help="working directory the tests ran at")
    # Fix: "already exits" -> "already exists" (typo in user-facing help).
    parser.add_argument("--to-dir",
                        metavar="NEW_PATH",
                        type=str,
                        default="html",
                        help="directory to write results (if the directory "
                        "already exists, it will be replaced), default: ./html")
    args = parser.parse_args()
    if not Path(args.log).is_file():
        sys.exit(score_utils.error_s("file not found: %s" % args.log))
    test_exec_path = score_utils.maybe_start_with_home_prefix(
        Path(args.test_exec_path))
    # The returned index.html path is not used here; the generator writes
    # everything under --to-dir (previously bound to an unused local).
    generate_web_view(
        test_title=args.title,
        master_log=Path(args.log),
        test_exec_path=test_exec_path,
        timer_path=Path(args.timer) if args.timer else None,
        additional_info=None,
        generate_to_dir=Path(args.to_dir),
    )
    return 0
def maybe_parse_flakiness_decls_from_dir(
        dirpath: Optional[Path]) -> Dict[str, List[str]]:
    """Parse flaky-test declaration files found directly under `dirpath`.

    Only regular files whose names end with FLAKY_TEST_RECORD_FILE_SUFFIX
    are parsed (via _parse_file, which accumulates into the returned dict).

    Returns an empty dict when dirpath is None or is not a directory.
    Exits the process when any parsed file reports errors; prints a warning
    to stderr when no matching file is found.
    """
    # Fix: compare to None with `is` (identity), not `==` (PEP 8).
    if dirpath is None or not dirpath.is_dir():
        return {}
    file_count = 0
    flaky_tests_decls: FlakeDecls = {}
    errors: List[str] = []
    # Filter inline instead of materializing an intermediate list first.
    for item in os.listdir(dirpath):
        if not item.endswith(FLAKY_TEST_RECORD_FILE_SUFFIX):
            continue
        path = Path(dirpath, item)
        if not path.is_file():
            continue  # skip directories/specials that happen to match
        file_count += 1
        with open(path, 'r') as f:
            _parse_file(f, path, flaky_tests_decls, errors)
    if errors:
        err_exit(error_s('\n'.join(errors)))
    if file_count == 0:
        sys.stderr.write("[Warning] no file is named *%s under %s\n" %
                         (FLAKY_TEST_RECORD_FILE_SUFFIX, dirpath))
    return flaky_tests_decls
) from pylibs.runner_common import ( Args, TaskMetadata, TaskResult, TaskExceptions, GOLDEN_NOT_WRITTEN_PREFIX, LOG_FILE_BASE, ) IS_ATTY = sys.stdin.isatty() and sys.stdout.isatty() TERMINAL_COLS = int(os.popen('stty size', 'r').read().split()[1]) if IS_ATTY else 70 if TERMINAL_COLS <= 25: score_utils.err_exit( score_utils.error_s("terminal width (%d) is rediculously small" % TERMINAL_COLS)) def cap_width(s: str, width: int = TERMINAL_COLS) -> str: extra_space = width - len(s) return s if extra_space >= 0 else (s[:12] + "..." + s[len(s) - width - 15:]) def get_error_summary(task_result: TaskResult) -> Dict[str, Optional[str]]: if task_result["exit"]["ok"] == False: # do not use str(dir(..)): have ugly 'u' prefixes for Unicode strings real_exit = task_result["exit"]["real"] exit_error = "{ type: %s, repr: %s }" % (real_exit["type"], real_exit["repr"]) else: