def read_build_history(job_url):
    """Read the total number of remembered builds and the number of succeeded builds."""
    log.info("Read build history")
    with log.indent():
        api_query = jenkins_api_query_build_statuses(job_url)
        log.info(api_query)
        response = requests.get(api_query)
        builds = response.json()["builds"]
        last_build_info = builds[0]
        last_build = int(last_build_info["number"])
        # normalize the last build status to None, True, or False
        last_build_status = last_build_info["result"]
        if last_build_status is not None:
            last_build_status = last_build_status == "SUCCESS"
        total_builds = get_total_builds(builds)
        success_builds = get_success_builds(builds)
        total_builds_cnt = len(total_builds)
        success_builds_cnt = len(success_builds)
        log_builds(last_build, total_builds_cnt, success_builds_cnt)
    log.success("Done")
    return last_build, last_build_status, total_builds_cnt, success_builds_cnt
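# A minimal sketch of the helpers used above; none of them appear in this
# listing, so the bodies are illustrative only. The API query assumes the
# standard Jenkins JSON API, and the two filters mirror the inline logic
# shown later in production_smoketests_status().
def jenkins_api_query_build_statuses(job_url):
    """Construct a Jenkins API query returning build numbers and results."""
    # assumption: the job is queried via the standard /api/json endpoint
    return "{url}/api/json?tree=builds[number,result]".format(url=job_url)


def get_total_builds(builds):
    """Select all builds that have already finished (the result is known)."""
    return [b for b in builds if b["result"] is not None]


def get_success_builds(builds):
    """Select all builds that finished with the SUCCESS status."""
    return [b for b in builds if b["result"] == "SUCCESS"]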
def perform_test(url, http_method, dry_run, payload, cfg,
                 expected_status, test, results):
    """Call the selected REST API with the autogenerated payload."""
    # pprint(payload)
    if dry_run:
        log.info("(dry run)")
        results.add_test_result(test, url, TestResult.DRY_RUN)
    else:
        if http_method == "POST":
            log.info("POSTing data")
            response = send_payload(url, payload, cfg["access_token"])
            status_code = response.status_code
            log.info("HTTP status code {code}".format(code=status_code))
            if status_code == expected_status:
                log.success("Success")
                results.add_test_result(test, url, TestResult.SUCCESS,
                                        status_code=status_code, payload=payload)
            else:
                log.error("Fail")
                results.add_test_result(test, url, TestResult.FAILURE,
                                        status_code=status_code, payload=payload)
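# A minimal sketch of send_payload(), which is not part of this listing.
# Assumption: the payload is POSTed as JSON with a bearer token taken from
# the configuration; the header layout is illustrative, not confirmed.
import requests


def send_payload(url, payload, access_token):
    """POST the JSON payload to the given URL, authorized by the access token."""
    headers = {"Authorization": "Bearer {token}".format(token=access_token)}
    return requests.post(url, json=payload, headers=headers)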
def main():
    """Entry point to the QA Dashboard."""
    log.setLevel(log.INFO)
    log.info("Started")
    with log.indent():
        log.info("Setup")
        config = Config()
        jenkins_url = config.get_jenkins_url()
        master_build_url = jenkins_url + config.get_master_build_job()
        log.success("Setup done")

    last_processed_build = read_last_processed()
    log.info("Last processed build: {n}".format(n=last_processed_build))

    last_build, last_build_status, total_builds_cnt, success_builds_cnt = \
        read_build_history(master_build_url)

    if last_build > last_processed_build:
        log.info("New build(s) detected!")
        with log.indent():
            process_new_build(config, last_build, last_build_status,
                              jenkins_url, master_build_url)
            write_last_processed(last_build)
    else:
        log.info("No new build(s) detected...")
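# A possible implementation of the two checkpoint helpers used in main();
# they are not shown in this listing. Assumptions: the last processed build
# number is kept in a plain text file, and the name LAST_PROCESSED_FILE is
# hypothetical.
LAST_PROCESSED_FILE = "last_processed.txt"


def read_last_processed():
    """Read the number of the last processed build, 0 if none was recorded."""
    try:
        with open(LAST_PROCESSED_FILE) as fin:
            return int(fin.read().strip())
    except (FileNotFoundError, ValueError):
        return 0


def write_last_processed(build_number):
    """Remember the number of the last processed build."""
    with open(LAST_PROCESSED_FILE, "w") as fout:
        fout.write(str(build_number))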
def __init__(self):
    """Read and parse the configuration file."""
    self.config = configparser.ConfigParser()
    with log.indent():
        log.info("Reading config file")
        self.config.read(Config.CONFIG_FILE_NAME)
        log.success("Done")
def prepare_data_for_repositories(repositories, results, ci_jobs, job_statuses,
                                  clone_repositories_enabled, cleanup_repositories_enabled,
                                  code_quality_table_enabled, ci_jobs_table_enabled,
                                  code_coverage_threshold):
    """Clone or fetch the repositories, run the code quality checkers, and accumulate results."""
    log.info("Preparing data for QA Dashboard")
    with log.indent():
        for repository in repositories:
            log.info("Repository " + repository)

            # clone or fetch the repository, but only if the cloning/fetching
            # is not disabled via CLI arguments
            if clone_repositories_enabled:
                clone_or_fetch_repository(repository)

            if code_quality_table_enabled:
                run_pylint(repository)
                run_docstyle_check(repository)
                run_cyclomatic_complexity_tool(repository)
                run_maintainability_index(repository)
                run_dead_code_detector(repository)
                run_common_errors_detector(repository)

                results.source_files[repository] = get_source_files(repository)
                results.repo_linter_checks[repository] = parse_pylint_results(repository)
                results.repo_docstyle_checks[repository] = parse_docstyle_results(repository)
                results.repo_cyclomatic_complexity[repository] = \
                    parse_cyclomatic_complexity(repository)
                results.repo_maintainability_index[repository] = \
                    parse_maintainability_index(repository)
                results.dead_code[repository] = parse_dead_code(repository)
                results.common_errors[repository] = parse_common_errors(repository)

            # delete_work_files(repository)

            if cleanup_repositories_enabled:
                cleanup_repository(repository)

            if ci_jobs_table_enabled:
                for job_type in ci_job_types:
                    url = ci_jobs.get_job_url(repository, job_type)
                    name = ci_jobs.get_job_name(repository, job_type)
                    badge = ci_jobs.get_job_badge(repository, job_type)
                    job_status = job_statuses.get(name)
                    results.ci_jobs_links[repository][job_type] = url
                    results.ci_jobs_badges[repository][job_type] = badge
                    results.ci_jobs_statuses[repository][job_type] = job_status
                results.unit_test_coverage[repository] = \
                    read_unit_test_coverage(ci_jobs, repository)

            if code_quality_table_enabled:
                update_overall_status(results, repository, code_coverage_threshold)

    log.success("Data prepared")
def generate_quality_labels(results):
    """Generate quality labels for all repositories."""
    with log.indent():
        log.info("Generate quality labels")
        for repository in results.repositories:
            log.info(repository)
            generate_quality_label_for_repository(repository, results)
        log.success("Quality labels generated")
def run_common_errors_detector(repository):
    """Run common issues detector tool against the selected repository."""
    with log.indent():
        log.info("Running common issues detector for the repository " + repository)
        command = ("pushd repositories/{repo} >> /dev/null;" +
                   "./detect-common-errors.sh > ../../{repo}.common_errors.txt;" +
                   "popd >> /dev/null").format(repo=repository)
        os.system(command)
        log.success("Done")
def read_job_statuses(ci_jobs, ci_jobs_table_enabled, liveness_table_enabled):
    """Read job statuses from the CI, but only if it is necessary."""
    log.info("Read job statuses")
    if ci_jobs_table_enabled or liveness_table_enabled:
        log.success("Done")
        return read_ci_jobs_statuses(JENKINS_URL)
    else:
        log.warning("Disabled")
        return None
def check_api_endpoint(api):
    """Check that the core API endpoint is callable."""
    log.info("Checking: core API endpoint")
    with log.indent():
        if not api.is_api_running():
            log.error("Fatal: tested system is not available")
            sys.exit(1)
        else:
            log.success("ok")
def check_auth_token(api):
    """Check the authorization token for the core API."""
    log.info("Checking: authorization token for the core API")
    with log.indent():
        if api.check_auth_token_validity():
            log.success("ok")
        else:
            log.error("Fatal: wrong token(?)")
            sys.exit(1)
def run_pylint(repository):
    """Run Pylint checker against the selected repository."""
    with log.indent():
        log.info("Running Pylint for the repository " + repository)
        command = ("pushd repositories/{repo} >> /dev/null;" +
                   "./run-linter.sh > ../../{repo}.linter.txt;" +
                   "popd >> /dev/null").format(repo=repository)
        os.system(command)
        log.success("Done")
def run_dead_code_detector(repository):
    """Run dead code detector tool against the selected repository."""
    with log.indent():
        log.info("Running dead code detector for the repository " + repository)
        command = ("pushd repositories/{repo} >> /dev/null;" +
                   "./detect-dead-code.sh > ../../{repo}.dead_code.txt;" +
                   "popd >> /dev/null").format(repo=repository)
        os.system(command)
        log.success("Done")
def check_environment_variable(env_var_name):
    """Check if the given environment variable exists."""
    log.info("Checking: {e} environment variable existence".format(e=env_var_name))
    if env_var_name not in os.environ:
        # log.error is used for the fatal path, consistently with the other checks
        log.error("Fatal: {e} environment variable has to be specified".format(e=env_var_name))
        sys.exit(1)
    else:
        log.success("ok")
def run_docstyle_check(repository):
    """Run the pydocstyle checker against the selected repository."""
    with log.indent():
        log.info("Running docstyle checker for the repository " + repository)
        command = ("pushd repositories/{repo} >> /dev/null;" +
                   "./check-docstyle.sh > ../../{repo}.pydocstyle.txt;" +
                   "popd >> /dev/null").format(repo=repository)
        os.system(command)
        log.success("Done")
def read_fuzzer_settings(filename):
    """Read fuzzer settings from the CSV file."""
    log.info("Read fuzzer settings")
    with log.indent():
        fuzzer_settings = read_csv_as_dicts(filename)
        if len(fuzzer_settings) == 1:
            log.success("Loaded 1 setting")
        else:
            log.success("Loaded {n} settings".format(n=len(fuzzer_settings)))
        return fuzzer_settings
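# A minimal sketch of read_csv_as_dicts(), which is not shown in this listing.
# Assumption: the CSV file has a header row and each data row is returned as
# a dictionary keyed by the header columns.
import csv


def read_csv_as_dicts(filename):
    """Read the given CSV file and return its rows as a list of dictionaries."""
    with open(filename, newline="") as fin:
        return [dict(row) for row in csv.DictReader(fin)]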
def missing_api_token_warning(env_var_name):
    """Print a warning or basic info about a missing API token."""
    if os.environ.get(env_var_name):
        log.success("OK: {name} environment variable is set and will be used "
                    "as the authorization token".format(name=env_var_name))
    else:
        log.warning("Warning: the {name} environment variable is not set.\n"
                    "Most tests that require authorization will probably fail".format(
                        name=env_var_name))
def generate_dashboard(results, ignored_files_for_pylint, ignored_files_for_pydocstyle):
    """Generate all pages with the dashboard and detailed information as well."""
    log.info("Generating output")

    with log.indent():
        log.info("Index page")
        generate_index_page(results)
        log.success("Index page generated")

    with log.indent():
        log.info("Metrics page")
        generate_metrics_page(results)
        log.success("Metrics page generated")

    with log.indent():
        log.info("Details about repository")
        if results.code_quality_table_enabled:
            for repository in results.repositories:
                log.info(repository)
                generate_details_page_for_repository(
                    repository, results,
                    ignored_files_for_pylint.get(repository, []),
                    ignored_files_for_pydocstyle.get(repository, []))
                generate_charts_page_for_repository(repository, results)
        log.success("Details generated")

    log.success("Output generated")
def get_access_token(dry_run, refresh_token, license_service_url):
    """Get the access token if possible."""
    if not dry_run:
        log.info("Auth. token generation")
        with log.indent():
            # we can retrieve the access token by using the refresh/offline token
            access_token = retrieve_access_token(refresh_token, license_service_url)
            if access_token is None:
                sys.exit(-1)
            log.success("Success")
    else:
        access_token = None
    return access_token
def get_source_files(repository):
    """Find all source files in the selected repository."""
    log.info("Getting source files")
    command = ("pushd repositories/{repo} > /dev/null; " +
               r"wc -l `find . -path ./venv -prune -o \( -name '*.py' -o -name '*.java' -o " +
               r"-name '*.ts' \) " +
               "-print` | head -n -1 > ../../{repo}.count; " +
               "popd > /dev/null").format(repo=repository)
    os.system(command)

    filenames = []
    line_counts = {}
    total_lines = 0
    count = 0
    extensions = set()
    files_per_extension = {}

    with log.indent():
        with open("{repo}.count".format(repo=repository)) as fin:
            for line in fin:
                with log.indent():
                    log.debug(line)
                count += 1
                line_count, filename = parse_line_count(line)
                extension = get_file_extension(filename)

                # register possibly new extension
                extensions.add(extension)

                # update file count for such extension
                files_per_extension[extension] = files_per_extension.get(extension, 0) + 1

                # register file name + line count
                filenames.append(filename)
                line_counts[filename] = line_count
                total_lines += line_count

        log.debug("Files: {files}".format(files=count))
        log.debug("Lines: {lines}".format(lines=total_lines))

    log.success("Done")

    return {"count": count,
            "filenames": filenames,
            "extensions": extensions,
            "files_per_extension": files_per_extension,
            "line_counts": line_counts,
            "total_lines": total_lines}
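# Possible implementations of the two parsing helpers used above; neither is
# shown in this listing, so the bodies are illustrative. Assumption: each line
# of the .count file has the `wc -l` output format "  123 ./path/to/file.py".
import os.path


def parse_line_count(line):
    """Split one line of `wc -l` output into (line count, file name)."""
    line_count, filename = line.strip().split(maxsplit=1)
    return int(line_count), filename


def get_file_extension(filename):
    """Return the extension of the given file name, without the leading dot."""
    return os.path.splitext(filename)[1].lstrip(".")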
def setup(cli_arguments):
    """Perform BAF setup."""
    log.info("Setup")
    refresh_token = None
    api_token = None
    # user_key must be initialized here as well, otherwise the dry run branch
    # would fail with UnboundLocalError when the user key is logged below
    user_key = None

    with log.indent():
        input_file = cli_arguments.input or DEFAULT_INPUT_FILE
        dry_run = cli_arguments.dry
        tags = parse_tags(cli_arguments.tags)

        if not dry_run:
            check_api_tokens_presence()
            license_service_url = read_url_from_env_var("OSIO_AUTH_SERVICE")
            refresh_token = os.environ.get("RECOMMENDER_REFRESH_TOKEN")
            api_token = os.environ.get("RECOMMENDER_API_TOKEN")
            user_key = os.environ.get("USER_KEY")
            if user_key is None and not license_service_url:
                log.error("OSIO_AUTH_SERVICE is not set")
                sys.exit(-1)
        else:
            license_service_url = "N/A"

        log.info("Dry run: " + enabled_disabled(dry_run))
        log.info("Input file: " + input_file)
        log.info("Auth service URL: " + license_service_url)
        log.info("Run tests: " + tags_as_str(tags))
        log.info("Refresh token: " + refresh_token_as_str(refresh_token))
        log.info("User key: " + user_key_as_str(user_key))
        log.success("Success")

    if api_token is not None:
        access_token = api_token
    elif user_key is None:
        access_token = get_access_token(dry_run, refresh_token, license_service_url)
    else:
        access_token = None

    return {"access_token": access_token,
            "user_key": user_key,
            "tags": tags,
            "dry_run": dry_run,
            "input_file": input_file}
def start_tests(cfg, tests, tags, component_analysis, stack_analysis):
    """Start all tests using the already loaded configuration."""
    log.info("Run tests")
    with log.indent():
        if no_tests(tests):
            log.error("No tests loaded!")
            sys.exit(-1)

        if len(tests) == 1:
            log.success("Loaded 1 test")
        else:
            log.success("Loaded {n} tests".format(n=len(tests)))

        if not tags:
            run_all_loaded_tests(cfg, tests, component_analysis, stack_analysis)
        else:
            run_tests_with_tags(cfg, tests, tags, component_analysis, stack_analysis)
def run_maintainability_index(repository):
    """Run Maintainability Index tool against the selected repository."""
    with log.indent():
        log.info("Running maintainability index checker for the repository " + repository)

        # generate one HTML report per maintainability rank A..C
        for i in range(ord('A'), 1 + ord('C')):
            rank = chr(i)
            command = ("pushd repositories/{repo} >> /dev/null;" +
                       "radon mi -s -n {rank} -i venv . | ansi2html.py " +
                       "> ../../{repo}.mi.{rank}.html;" +
                       "popd >> /dev/null").format(repo=repository, rank=rank)
            os.system(command)

        # generate one machine-readable JSON report as well
        command = ("pushd repositories/{repo} >> /dev/null;" +
                   "radon mi -s -j -i venv . > ../../{repo}.mi.json;" +
                   "popd >> /dev/null").format(repo=repository)
        os.system(command)
        log.success("Done")
def run_cyclomatic_complexity_tool(repository):
    """Run Cyclomatic Complexity tool against the selected repository."""
    with log.indent():
        log.info("Running cyclomatic complexity checker for the repository " + repository)

        # generate one HTML report per complexity rank A..F
        for i in range(ord('A'), 1 + ord('F')):
            rank = chr(i)
            command = ("pushd repositories/{repo} >> /dev/null;" +
                       "radon cc -a -s -n {rank} -i venv . | ansi2html.py > " +
                       "../../{repo}.cc.{rank}.html;" +
                       "popd >> /dev/null").format(repo=repository, rank=rank)
            os.system(command)

        # generate one machine-readable JSON report as well
        command = ("pushd repositories/{repo} >> /dev/null;" +
                   "radon cc -s -j -i venv . > ../../{repo}.cc.json;" +
                   "popd >> /dev/null").format(repo=repository)
        os.system(command)
        log.success("Done")
def production_smoketests_status(ci_jobs):
    """Read the total and succeeded build counts for the production smoketests job."""
    log.info("Read smoketests status")
    job_url = ci_jobs.get_job_url("production", "smoketests")
    api_query = jenkins_api_query_build_statuses(job_url)
    response = requests.get(api_query)
    builds = response.json()["builds"]
    total_builds = [b for b in builds if b["result"] is not None]
    success_builds = [b for b in builds if b["result"] == "SUCCESS"]
    total_builds_cnt = len(total_builds)
    success_builds_cnt = len(success_builds)
    with log.indent():
        log.info("Total builds: {n}".format(n=total_builds_cnt))
        log.info("Success builds: {n}".format(n=success_builds_cnt))
    log.success("Done")
    return total_builds_cnt, success_builds_cnt
def main():
    """Entry point to the job checker."""
    log.setLevel(log.INFO)
    log.info("Setup")
    with log.indent():
        config = Config()
        repositories = Repositories(config)
        log.success("Setup done")

    ci_jobs = CIJobs()
    job_statuses = read_job_statuses(ci_jobs)

    with log.indent():
        for repository in repositories.repolist:
            run_checker_for_repository(ci_jobs, config, job_statuses, repository)

    log.success("Data prepared")
def main():
    """Entry point to the QA Dashboard."""
    log.setLevel(log.INFO)
    log.info("Setup")
    with log.indent():
        config = Config()
        repositories = Repositories(config)
        log.success("Setup done")

    ci_jobs = CIJobs()
    job_statuses = read_job_statuses(ci_jobs)

    with log.indent():
        for repository in repositories.repolist:
            for job_type in ci_job_types:
                with log.indent():
                    url = ci_jobs.get_job_url(repository, job_type)
                    name = ci_jobs.get_job_name(repository, job_type)
                    badge = ci_jobs.get_job_badge(repository, job_type)
                    job_status = job_statuses.get(name)
                    if url is not None:
                        api_query = jenkins_api_query_build_statuses(url)
                        response = requests.get(api_query)
                        builds = response.json()["builds"]

                        # count consecutive failures, starting from the latest build
                        failures = 0
                        for b in builds:
                            if b["result"] != "SUCCESS":
                                failures += 1
                            else:
                                break

                        if failures >= FAILURE_THRESHOLD:
                            print("Repository: {}".format(repository))
                            print("URL to job: {}".format(url))
                            print("Failures: {}".format(failures))
                            print()

    log.success("Data prepared")
def get_source_files(repository):
    """Find all source files in the selected repository."""
    log.info("Getting source files")
    command = ("pushd repositories/{repo} > /dev/null; " +
               r"wc -l `find . -path ./venv -prune -o \( -name '*.py' -o -name '*.java' \) " +
               "-print` | head -n -1 > ../../{repo}.count; " +
               "popd > /dev/null").format(repo=repository)
    os.system(command)

    filenames = []
    line_counts = {}
    total_lines = 0
    count = 0

    with log.indent():
        with open("{repo}.count".format(repo=repository)) as fin:
            for line in fin:
                with log.indent():
                    log.debug(line)
                count += 1
                line_count, filename = parse_line_count(line)
                filenames.append(filename)
                line_counts[filename] = line_count
                total_lines += line_count

        log.debug("Files: {files}".format(files=count))
        log.debug("Lines: {lines}".format(lines=total_lines))

    log.success("Done")

    return {"count": count,
            "filenames": filenames,
            "line_counts": line_counts,
            "total_lines": total_lines}
def main():
    """Entry point to the CC reporter."""
    log.setLevel(log.INFO)
    log.info("Config")
    with log.indent():
        config = Config()
        results = Results()
        repositories = Repositories(config)
        log.success("Done")

    log.info("Prepare data for repositories")
    with log.indent():
        prepare_data_for_repositories(repositories.repolist, results, config)
        log.success("Done")

    log.info("Generate coverage pages")
    with log.indent():
        generate_coverage_pages(results)
        log.success("Done")
def generate_dashboard(results, ignored_files_for_pylint, ignored_files_for_pydocstyle):
    """Generate all pages with the dashboard and detailed information as well."""
    log.info("Generating output")

    with log.indent():
        log.info("Index page")
        generate_index_page(results)
        log.success("Index page generated")

    with log.indent():
        log.info("Details about repository")
        if results.code_quality_table_enabled:
            for repository in results.repositories:
                log.info(repository)
                generate_details_page_for_repository(
                    repository, results,
                    ignored_files_for_pylint.get(repository, []),
                    ignored_files_for_pydocstyle.get(repository, []))
                generate_charts_page_for_repository(repository, results)
        log.success("Details generated")

    log.success("Output generated")
def wait_for_all_threads(threads):
    """Wait for all threads to finish."""
    log.info("Waiting for all threads to finish")
    for t in threads:
        t.join()
    log.success("Done")
# unpack the pakfile lump from the BSP
zip_header, zip_length = struct.unpack(
    BSP_LUMP_DESC,
    file_bytes[BSP_OFFSET_TO_PAKFILE_LUMP:BSP_OFFSET_TO_PAKFILE_LUMP + 8],
)
log.info("pakfile offset: 0x%04X", zip_header)
log.info("pakfile length: 0x%04X", zip_length)

# extract the pack zip to a temporary zip file
log.info("extracting zip...")
temp_zip_path = os.path.join(abs_path, "temp.zip")
temp_zip = open(temp_zip_path, "wb+")
temp_zip.write(file_bytes[zip_header:])
temp_zip.seek(0, os.SEEK_SET)
log.success("extracted to temp.zip")

# parse and print out the contents of the zip as it exists
log.info("printout of current zip contents: ")
zip_file = zipfile.ZipFile(temp_zip, mode="a")
zip_file.printdir()

# create a zipinfo structure for our evil file
template_zinfo = zip_file.infolist()[0]
zinfo = zipfile.ZipInfo(args.file_name, date_time=template_zinfo.date_time)
zinfo.compress_type = template_zinfo.compress_type
zinfo._compresslevel = template_zinfo._compresslevel
zinfo.external_attr = template_zinfo.external_attr
# filled in automatically
zinfo.file_size = 0
zinfo.compress_size = 0
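# The two BSP constants referenced above are not defined in this fragment.
# A plausible definition, assuming the Source engine BSP layout: an 8-byte
# header preamble (ident + version) followed by 64 lump descriptors of
# 16 bytes each, with the pakfile stored as lump 40; the descriptor starts
# with two little-endian uint32 fields (file offset, file length). Treat
# both values below as assumptions, not confirmed by this fragment.
BSP_LUMP_DESC = "<II"                      # struct format: (offset, length)
BSP_OFFSET_TO_PAKFILE_LUMP = 8 + 40 * 16   # preamble + 40 preceding lump descriptors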