def check_system(api):
    """Check if all system endpoints are available and that tokens are valid."""
    log.info("System check")
    with log.indent():
        # verify the core API is reachable, then that the auth token is valid;
        # each of these checks exits the process on failure
        check_api_endpoint(api)
        check_auth_token(api)
# Example #2
def component_analysis_benchmark(queue, threads, component_analysis,
                                 thread_count, python_payload, maven_payload,
                                 npm_payload):
    """Component analysis benchmark.

    Starts `thread_count` worker threads, each performing a component analysis
    for an E/P/V triple drawn from a randomly chosen ecosystem generator.
    Started threads are appended to `threads`; workers report via `queue`.
    """
    g_python = ComponentGenerator().generator_for_ecosystem("pypi")
    g_maven = ComponentGenerator().generator_for_ecosystem("maven")
    g_npm = ComponentGenerator().generator_for_ecosystem("npm")
    generators = []

    if python_payload:
        generators.append(g_python)
    if maven_payload:
        generators.append(g_maven)
    if npm_payload:
        generators.append(g_npm)

    # don't start the generators from the 1st item
    for _ in range(randint(10, 100)):
        for g in generators:
            next(g)

    for thread_id in range(thread_count):
        g = generators[randint(0, len(generators) - 1)]
        ecosystem, component, version = next(g)
        with log.indent():
            log.info("Component analysis for E/P/V {} {} {}".format(
                ecosystem, component, version))
        # fix: the original reused the name `t` for both the loop index and
        # the Thread object; the distinct `thread_id` removes that shadowing
        t = Thread(target=component_analysis.start,
                   args=(thread_id, ecosystem, component, version, queue))
        t.start()
        threads.append(t)
        # skip some items
        for _ in range(randint(5, 25)):
            next(g)
def main():
    """Entry point to the QA Dashboard."""
    log.setLevel(log.INFO)

    log.info("Started")

    with log.indent():
        log.info("Setup")
        config = Config()
        jenkins_url = config.get_jenkins_url()
        master_build_url = jenkins_url + config.get_master_build_job()
        log.success("Setup done")

    last_processed_build = read_last_processed()
    log.info("Last processed build: {n}".format(n=last_processed_build))

    # history comes back as (last build number, its status, total count, success count)
    last_build, last_build_status, total_builds_cnt, success_builds_cnt = \
        read_build_history(master_build_url)

    if last_build <= last_processed_build:
        log.info("No new build(s) detected...")
        return

    log.info("New build(s) detected!")
    with log.indent():
        process_new_build(config, last_build, last_build_status,
                          jenkins_url, master_build_url)

    # remember the newest build so it is not processed again next time
    write_last_processed(last_build)
def read_build_history(job_url):
    """Read total number of remembered builds and succeeded builds as well."""
    log.info("Read build history")
    with log.indent():
        api_query = jenkins_api_query_build_statuses(job_url)
        log.info(api_query)
        builds = requests.get(api_query).json()["builds"]

        newest = builds[0]
        last_build = int(newest["number"])
        last_build_status = newest["result"]

        # None, True, False
        # (None means the build is still running; otherwise map the result
        # string onto a boolean: True iff it equals "SUCCESS")
        if last_build_status is not None:
            last_build_status = last_build_status == "SUCCESS"

        total_builds_cnt = len(get_total_builds(builds))
        success_builds_cnt = len(get_success_builds(builds))

        log_builds(last_build, total_builds_cnt, success_builds_cnt)

        log.success("Done")
    return last_build, last_build_status, total_builds_cnt, success_builds_cnt
 def __init__(self):
     """Read and parse the configuration file."""
     # parser holding the configuration values read below
     self.config = configparser.ConfigParser()
     with log.indent():
         log.info("Reading config file")
         # CONFIG_FILE_NAME is a constant declared on the enclosing class
         self.config.read(Config.CONFIG_FILE_NAME)
         log.success("Done")
def log_improvements(repositories, results):
    """Log improvements in repositories."""
    template = "{repository} : {improvement}"
    with log.indent():
        for repo in repositories:
            log.info(template.format(repository=repo,
                                     improvement=results.improvement[repo]))
# Example #7
def generate_dashboard(results, ignored_files_for_pylint,
                       ignored_files_for_pydocstyle):
    """Generate all pages with the dashboard and detailed information as well."""
    log.info("Generating output")

    with log.indent():
        log.info("Index page")
        generate_index_page(results)
        log.success("Index page generated")

    with log.indent():
        log.info("Metrics page")
        generate_metrics_page(results)
        log.success("Metrics page generated")

    with log.indent():
        log.info("Details about repository")
        # per-repository detail pages only when the quality table is enabled
        if results.code_quality_table_enabled:
            for repo in results.repositories:
                log.info(repo)
                pylint_ignores = ignored_files_for_pylint.get(repo, [])
                pydocstyle_ignores = ignored_files_for_pydocstyle.get(repo, [])
                generate_details_page_for_repository(repo, results,
                                                     pylint_ignores,
                                                     pydocstyle_ignores)
                generate_charts_page_for_repository(repo, results)
        log.success("Details generated")
    log.success("Output generated")
# Example #8
def retrieve_access_token(refresh_token, auth_service_url):
    """Retrieve temporary access token by using refresh/offline token.

    Returns the access token string, or None when the refresh token or
    the auth service URL is missing. Raises AssertionError when the AUTH
    service cannot be reached or replies with an error status.
    """
    log.info("Trying to retrieve access token")
    if refresh_token is None:
        log.error(
            "aborting: RECOMMENDER_REFRESH_TOKEN environment variable is not set"
        )
        return None
    if auth_service_url is None:
        log.error(
            "aborting: OSIO_AUTH_SERVICE environment variable is not set")
        return None

    payload = {'refresh_token': refresh_token}
    url = urljoin(auth_service_url, _AUTH_ENDPOINT)
    response = requests.post(url, json=payload)

    # fix: explicit check instead of a bare `assert` — asserts are stripped
    # when Python runs with -O, which would silently skip this validation
    if response is None or not response.ok:
        raise AssertionError("Error communicating with the OSIO AUTH service")
    data = response.json()

    # check the basic structure of the response
    token_structure = get_and_check_token_structure(data)

    log.info("Token seems to be correct")

    # seems like everything's ok, let's read the temporary access token
    return token_structure["access_token"]
def prepare_data_for_repositories(repositories, results, ci_jobs, job_statuses,
                                  clone_repositories_enabled,
                                  cleanup_repositories_enabled,
                                  code_quality_table_enabled,
                                  ci_jobs_table_enabled,
                                  code_coverage_threshold):
    """Perform clone/fetch repositories + run pylint + run docstyle script + accumulate results."""
    log.info("Preparing data for QA Dashboard")
    with log.indent():
        for repository in repositories:
            log.info("Repository " + repository)

            # clone or fetch the repository, but only if the cloning/fetching
            # is not disabled via CLI arguments
            if clone_repositories_enabled:
                clone_or_fetch_repository(repository)

            if code_quality_table_enabled:
                # run every code-quality tool; each one writes its report into
                # a per-repository text file that is parsed right below
                run_pylint(repository)
                run_docstyle_check(repository)
                run_cyclomatic_complexity_tool(repository)
                run_maintainability_index(repository)
                run_dead_code_detector(repository)
                run_common_errors_detector(repository)

                # accumulate the parsed reports into the shared results object
                results.source_files[repository] = get_source_files(repository)
                results.repo_linter_checks[repository] = parse_pylint_results(
                    repository)
                results.repo_docstyle_checks[
                    repository] = parse_docstyle_results(repository)
                results.repo_cyclomatic_complexity[repository] = \
                    parse_cyclomatic_complexity(repository)
                results.repo_maintainability_index[repository] = \
                    parse_maintainability_index(repository)
                results.dead_code[repository] = parse_dead_code(repository)
                results.common_errors[repository] = parse_common_errors(
                    repository)

            # delete_work_files(repository)

            if cleanup_repositories_enabled:
                cleanup_repository(repository)

            if ci_jobs_table_enabled:
                # collect CI job link/badge/status for every known job type
                for job_type in ci_job_types:
                    url = ci_jobs.get_job_url(repository, job_type)
                    name = ci_jobs.get_job_name(repository, job_type)
                    badge = ci_jobs.get_job_badge(repository, job_type)
                    job_status = job_statuses.get(name)
                    results.ci_jobs_links[repository][job_type] = url
                    results.ci_jobs_badges[repository][job_type] = badge
                    results.ci_jobs_statuses[repository][job_type] = job_status
                results.unit_test_coverage[
                    repository] = read_unit_test_coverage(ci_jobs, repository)
            if code_quality_table_enabled:
                # overall status combines the quality checks with the coverage
                update_overall_status(results, repository,
                                      code_coverage_threshold)

    log.success("Data prepared")
# Example #10
def generate_quality_labels(results):
    """Generate quality labels for all repositories."""
    with log.indent():
        log.info("Generate quality labels")
        # one label per repository known to the results object
        for repo in results.repositories:
            log.info(repo)
            generate_quality_label_for_repository(repo, results)
        log.success("Quality labels generated")
# Example #11
def generate_details_pages(results, ignored_files_for_pylint,
                           ignored_files_for_pydocstyle):
    """Generate all details pages."""
    for repo in results.repositories:
        log.info(repo)
        # files to be skipped by the respective checkers; default to none
        pylint_ignores = ignored_files_for_pylint.get(repo, [])
        pydocstyle_ignores = ignored_files_for_pydocstyle.get(repo, [])
        generate_details_page_for_repository(repo, results, pylint_ignores,
                                             pydocstyle_ignores)
        generate_charts_page_for_repository(repo, results)
# Example #12
def run_common_errors_detector(repository):
    """Run common issues detector tool against the selected repository."""
    with log.indent():
        log.info("Running common issues detector for the repository " + repository)
        command_template = ("pushd repositories/{repo} >> /dev/null;" +
                            "./detect-common-errors.sh > ../../{repo}.common_errors.txt;" +
                            "popd >> /dev/null")
        # the detector's output lands in <repo>.common_errors.txt for later parsing
        os.system(command_template.format(repo=repository))
        log.success("Done")
def check_auth_token(api):
    """Check the authorization token for the core API."""
    log.info("Checking: authorization token for the core API")
    with log.indent():
        # an invalid token is fatal — terminate the whole process
        if not api.check_auth_token_validity():
            log.error("Fatal: wrong token(?)")
            sys.exit(1)
        log.success("ok")
# Example #14
def run_pylint(repository):
    """Run Pylint checker against the selected repository."""
    with log.indent():
        log.info("Running Pylint for the repository " + repository)
        command_template = ("pushd repositories/{repo} >> /dev/null;" +
                            "./run-linter.sh > ../../{repo}.linter.txt;" +
                            "popd >> /dev/null")
        # linter output is redirected into <repo>.linter.txt for later parsing
        os.system(command_template.format(repo=repository))
        log.success("Done")
def check_api_endpoint(api):
    """Check that some API endpoint is callable."""
    log.info("Checking: core API endpoint")
    with log.indent():
        if api.is_api_running():
            log.success("ok")
        else:
            # an unreachable system is fatal — terminate the whole process
            log.error("Fatal: tested system is not available")
            sys.exit(1)
def run_all_loaded_tests(cfg, fuzzer_settings, tests, results):
    """Run all tests read from CSV file.

    Each test dict is expected to carry at least a "Name" key; tests are
    numbered from 1 in the log output.
    """
    # idiom fix: enumerate() replaces the hand-maintained counter variable
    for i, test in enumerate(tests, start=1):
        log.info("Starting test #{n} with name '{desc}'".format(
            n=i, desc=test["Name"]))
        with log.indent():
            run_test(cfg, fuzzer_settings, test, results)
def read_job_statuses(ci_jobs, ci_jobs_table_enabled, liveness_table_enabled):
    """Read job statuses from the CI, but only if its necessary."""
    log.info("Read job statuses")
    # skip the CI round-trip entirely when neither consumer table is enabled
    if not (ci_jobs_table_enabled or liveness_table_enabled):
        log.warning("Disabled")
        return None
    log.success("Done")
    return read_ci_jobs_statuses(JENKINS_URL)
# Example #18
def run_dead_code_detector(repository):
    """Run dead code detector tool against the selected repository."""
    with log.indent():
        log.info("Running dead code detector for the repository " + repository)
        command_template = ("pushd repositories/{repo} >> /dev/null;" +
                            "./detect-dead-code.sh > ../../{repo}.dead_code.txt;" +
                            "popd >> /dev/null")
        # detector output lands in <repo>.dead_code.txt for later parsing
        os.system(command_template.format(repo=repository))
        log.success("Done")
def generate_reports(tests, results, cfg, total_time):
    """Generate reports with all BAF tests."""
    log.info("Generate reports")
    with log.indent():
        # cfg contains information whether to generate HTML, CSV, TSV etc. outputs
        # each *_if_enabled helper decides on its own whether its format is on
        generate_text_report_if_enabled(cfg, results)
        generate_html_report_if_enabled(cfg, results, tests, total_time)
        generate_csv_report_if_enabled(cfg, results)
        generate_tsv_report_if_enabled(cfg, results)
        generate_xml_report_if_enabled(cfg, results)
def check_environment_variable(env_var_name):
    """Check if the given environment variable exists."""
    log.info("Checking: {e} environment variable existence".format(
        e=env_var_name))
    if env_var_name in os.environ:
        log.success("ok")
    else:
        # a missing variable is fatal — terminate the whole process
        log.failure("Fatal: {e} environment variable has to be specified"
                    .format(e=env_var_name))
        sys.exit(1)
# Example #21
def run_docstyle_check(repository):
    """Run PyDocsStyle checker against the selected repository."""
    with log.indent():
        log.info("Running DocStyle checker for the repository " + repository)
        command_template = ("pushd repositories/{repo} >> /dev/null;" +
                            "./check-docstyle.sh > ../../{repo}.pydocstyle.txt;" +
                            "popd >> /dev/null")
        # checker output lands in <repo>.pydocstyle.txt for later parsing
        os.system(command_template.format(repo=repository))
        log.success("Done")
def read_fuzzer_settings(filename):
    """Read fuzzer settings from the CSV file."""
    log.info("Read fuzzer settings")
    with log.indent():
        fuzzer_settings = read_csv_as_dicts(filename)
        count = len(fuzzer_settings)
        # singular/plural wording in the log message
        if count == 1:
            log.success("Loaded 1 setting")
        else:
            log.success("Loaded {n} settings".format(n=count))
    return fuzzer_settings
def start_tests(cfg, fuzzer_settings, tests, results, tags):
    """Start all tests using the already loaded configuration and fuzzer settings.

    Exits the process when no tests were loaded. When `tags` is empty or
    None all tests run; otherwise only the tagged subset runs.
    """
    log.info("Run tests")
    with log.indent():
        # idiom fix: `not tests` already covers both None and an empty
        # collection, so the extra `len(tests) == 0` test was redundant
        if not tests:
            log.error("No tests loaded!")
            sys.exit(-1)
        log_tests_loaded(tests)
        if not tags:
            run_all_loaded_tests(cfg, fuzzer_settings, tests, results)
        else:
            run_tests_with_tags(cfg, fuzzer_settings, tests, results, tags)
# Example #24
def run_tests_with_tags(cfg, tests, tags, component_analysis, stack_analysis):
    """Run tests read from CSV file that are marked by any of the tags provided in tags parameter."""
    counter = 1
    for test in tests:
        test_tags = parse_tags(test["Tags"])
        test_name = test["Name"]
        # run the test only when `tags` is a subset of the test's own tags;
        # the counter advances only for tests that actually run
        if not (tags <= test_tags):
            log.info("Skipping test #{n} with name '{desc}'".format(
                n=counter, desc=test_name))
        else:
            run_test(cfg, test, counter, component_analysis, stack_analysis)
            counter += 1
# Example #25
def time_snap(text=None):
    """
    Log the time since the last call to this function in seconds.

    It is possible to supply a message to print along with the time.
    Uses the module-level `_last_time` timestamp, which is updated on
    every call.

    Arguments:
        text {str} (optional) -- Message to print with the time.
    """
    global _last_time
    current_time = time()
    # TODO: This should probably be log.debug() instead.
    log.info(f"{current_time - _last_time} seconds - {text}")
    _last_time = current_time
def get_source_files(repository):
    """Find all source files in the selected repository.

    Shells out to `wc -l` over all *.py, *.java and *.ts files (pruning the
    venv directory), writes the per-file counts into <repo>.count, then
    parses that file.

    Returns a dict with keys: count, filenames, extensions,
    files_per_extension, line_counts, total_lines.
    """
    log.info("Getting source files")
    # `head -n -1` drops the trailing "total" summary line emitted by wc
    command = (
        "pushd repositories/{repo} > /dev/null; " +
        r"wc -l `find . -path ./venv -prune -o \( -name '*.py' -o -name '*.java' -o "
        + r"-name '*.ts' \) " + "-print` | head -n -1 > ../../{repo}.count; " +
        "popd > /dev/null").format(repo=repository)
    os.system(command)
    filenames = []
    line_counts = {}
    total_lines = 0
    count = 0
    extensions = set()
    files_per_extension = {}

    with log.indent():
        # every line of the count file is "<line count> <filename>"
        with open("{repo}.count".format(repo=repository)) as fin:
            for line in fin:
                with log.indent():
                    log.debug(line)
                count += 1
                line_count, filename = parse_line_count(line)
                extension = get_file_extension(filename)

                # register possibly new extension
                extensions.add(extension)

                # update file count for such extension
                files_per_extension[extension] = files_per_extension.get(
                    extension, 0) + 1

                # register file name + line count
                filenames.append(filename)
                line_counts[filename] = line_count
                total_lines += line_count

        log.debug("Files: {files}".format(files=count))
        log.debug("Lines: {lines}".format(lines=total_lines))

    log.success("Done")

    return {
        "count": count,
        "filenames": filenames,
        "extensions": extensions,
        "files_per_extension": files_per_extension,
        "line_counts": line_counts,
        "total_lines": total_lines
    }
def run_tests_with_changed_items(url, http_method, dry_run, original_payload,
                                 cfg, expected_status, test, results):
    """Run tests with items changed from the original payload."""
    with log.indent():
        iteration = 1
        # try changing 1 item, then 2, ... up to the full payload size
        for how_many in range(1, 1 + len(original_payload)):
            # TODO: make it configurable
            for _ in range(1, 5):
                with log.indent():
                    log.info("Iteration #{n}".format(n=iteration))
                    run_tests_with_changed_items_one_iteration(
                        url, http_method, dry_run, original_payload, cfg,
                        expected_status, how_many, test, results)
                    iteration += 1
def run_all_setup_tests(remove_items, add_items, change_types, mutate_payload,
                        url, http_method, dry_run, original_payload, cfg,
                        expected_status, test, results):
    """Run all tests that has been setup."""
    # every runner takes the same positional arguments
    common_args = (url, http_method, dry_run, original_payload, cfg,
                   expected_status, test, results)

    # (enabled flag, log message, test runner) — order matches the original
    selected = (
        (remove_items, "Run tests with items removed from original payload",
         run_tests_with_removed_items),
        (add_items, "Run tests with items added into the original payload",
         run_tests_with_added_items),
        (change_types, "Run tests with items changed from original payload",
         run_tests_with_changed_items),
        (mutate_payload, "Run tests with items mutated",
         run_tests_with_mutated_items),
    )
    for enabled, message, runner in selected:
        if enabled:
            log.info(message)
            runner(*common_args)
    def wait_for_stack_analysis(self, ecosystem, manifest, job_id, thread_id=""):
        """Wait for the stack analysis to finish.

        Polls the analysis endpoint every DEFAULT_SLEEP_AMOUNT seconds for
        up to DEFAULT_TIMEOUT seconds. Returns the HTTP response on 200
        (finished) or 401 (auth failure). Raises on repeated 429 responses,
        on unexpected status codes, and on overall timeout.
        """
        endpoint = self.analysis_url() + "/" + job_id

        timeout = DEFAULT_TIMEOUT
        sleep_amount = DEFAULT_SLEEP_AMOUNT
        # how many times the service replied 429 so far
        too_many_requests_cnt = 0

        for _ in range(timeout // sleep_amount):
            response = self.perform_get_request(endpoint)
            status_code = response.status_code
            log.info("thread# {t}  job# {j}  status code: {s}".format(
                t=thread_id, j=job_id, s=status_code))

            if status_code == 200:
                # analysis finished; optionally dump the response to disk
                self.dump_response_if_enabled(ecosystem, manifest, response)
                return response
            # 401 code should be checked later
            elif status_code == 401:
                log.info("WARNING: got 401")
                return response
            elif status_code == 500 or status_code == 504:
                # server-side error: log it and keep polling
                log.info("WARNING: got {c}".format(c=status_code))
            elif status_code == 429:
                # throttled: one extra sleep here, plus the regular sleep
                # below; give up after more than 10 throttled responses
                too_many_requests_cnt += 1
                log.info("Additional sleep...")
                sleep(sleep_amount)
                if too_many_requests_cnt > 10:
                    raise Exception('429 Too Many Requests')
            elif status_code != 202:
                # print("warning, got wrong status code {c}".format(c=status_code))
                # 202 means "still in progress"; anything else is unexpected
                raise Exception('Bad HTTP status code {c}'.format(c=status_code))
            sleep(sleep_amount)
        else:
            # loop exhausted without an early return -> overall timeout
            raise Exception('Timeout waiting for the stack analysis results')
def get_access_token(dry_run, refresh_token, license_service_url):
    """Get the access token if possible."""
    # in dry-run mode no token is generated at all
    if dry_run:
        return None
    log.info("Auth. token generation")
    with log.indent():
        # we can retrieve access token by using refresh/offline token
        access_token = retrieve_access_token(refresh_token,
                                             license_service_url)
        if access_token is None:
            sys.exit(-1)
        log.success("Success")
    return access_token
def read_unit_test_coverage(ci_jobs, repository):
    """Read and process unit test coverage.

    Fetches the CI console output for the repository and scans it line by
    line for either a pycov or a JaCoCo coverage report. Returns the parsed
    coverage statistic, or None when no report is found.
    """
    log.info("Reading unit test coverage")
    url = ci_jobs.get_console_output_url(repository)
    # which report format has been detected so far: None / "pycov" / "jacoco"
    report_type = None
    if url is not None:
        response = requests.get(url)
        if response.status_code == 200:
            content = response.text.split("\n")
            # lines belonging to the detected coverage report
            unit_test_output = []
            for line in content:
                line = line.strip()
                # check where the test coverage begins
                if line_with_unit_test_header(line):
                    log_report_type("pycov")
                    report_type = "pycov"
                    unit_test_output.append(line)
                elif line_with_jacoco_test_header(line):
                    log_report_type("jacoco")
                    report_type = "jacoco"
                    # not needed to write the header
                    # unit_test_output.append(line)
                # check where the test coverage ends
                elif line_with_unit_test_summary(line, report_type):
                    unit_test_output.append(line)
                    write_unit_test_coverage(unit_test_output, repository)
                    return parse_unit_test_statistic(line)
                # check where the test coverage ends
                elif line_with_jacoco_test_footer(line, report_type):
                    # not needed to write the footer
                    # unit_test_output.append(line)
                    write_unit_test_coverage_as_csv(unit_test_output, repository)
                    p = ProjectCoverageReport(repository + ".coverage.csv")
                    p.convert_code_coverage_report(repository + ".coverage.txt")
                    return compute_jacoco_test_statistic(p)
                # now we know we have something to report
                elif report_type:
                    unit_test_output.append(line)
    log.warning("No coverage report found")
    return None
def generate_dashboard(results, ignored_files_for_pylint, ignored_files_for_pydocstyle):
    """Generate all pages with the dashboard and detailed information as well."""
    log.info("Generating output")

    with log.indent():
        log.info("Index page")
        generate_index_page(results)
        log.success("Index page generated")

    with log.indent():
        log.info("Details about repository")
        # per-repository detail pages only when the quality table is enabled
        if results.code_quality_table_enabled:
            for repo in results.repositories:
                log.info(repo)
                pylint_ignores = ignored_files_for_pylint.get(repo, [])
                pydocstyle_ignores = ignored_files_for_pydocstyle.get(repo, [])
                generate_details_page_for_repository(repo, results,
                                                     pylint_ignores,
                                                     pydocstyle_ignores)
                generate_charts_page_for_repository(repo, results)
        log.success("Details generated")
    log.success("Output generated")
def log_report_type(report_type):
    """Display info which unit test report type has been detected."""
    message = "{report_type} report detected".format(report_type=report_type)
    with log.indent():
        log.info(message)
def log_coverage(statements, missed, coverage):
    """Log info about the coverage read from reports."""
    # pre-render all three lines, then emit them under one indent level
    lines = ("statements {s}".format(s=statements),
             "missed     {m}".format(m=missed),
             "coverage   {c}".format(c=coverage))
    with log.indent():
        for line in lines:
            log.info(line)