def main():
    """Entry point to the QA Dashboard."""
    log.setLevel(log.INFO)

    log.info("Started")

    with log.indent():
        log.info("Setup")
        config = Config()
        jenkins_url = config.get_jenkins_url()
        master_build_url = jenkins_url + config.get_master_build_job()
        log.success("Setup done")

    last_processed_build = read_last_processed()

    log.info("Last processed build: {n}".format(n=last_processed_build))

    last_build, last_build_status, total_builds_cnt, success_builds_cnt = \
        read_build_history(master_build_url)

    if last_build > last_processed_build:
        log.info("New build(s) detected!")
        with log.indent():
            process_new_build(config, last_build, last_build_status,
                              jenkins_url, master_build_url)

        write_last_processed(last_build)
    else:
        log.info("No new build(s) detected...")
def generate_dashboard(results, ignored_files_for_pylint,
                       ignored_files_for_pydocstyle):
    """Generate all pages with the dashboard and detailed information as well."""
    log.info("Generating output")

    with log.indent():
        log.info("Index page")
        generate_index_page(results)
        log.success("Index page generated")

    with log.indent():
        log.info("Metrics page")
        generate_metrics_page(results)
        log.success("Metrics page generated")

    with log.indent():
        log.info("Details about repository")
        if results.code_quality_table_enabled:
            for repository in results.repositories:
                log.info(repository)
                generate_details_page_for_repository(
                    repository, results,
                    ignored_files_for_pylint.get(repository, []),
                    ignored_files_for_pydocstyle.get(repository, []))
                generate_charts_page_for_repository(repository, results)
        log.success("Details generated")
    log.success("Output generated")
def run_tests_with_changed_items(url, http_method, dry_run, original_payload,
                                 cfg, expected_status, test, results):
    """Run tests with items changed from the original payload."""
    with log.indent():
        iteration = 1
        for how_many in range(1, 1 + len(original_payload)):
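            # each choice of `how_many` changed items is exercised in several
            # independent fuzzing iterations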
            # TODO: make it configurable
            for i in range(1, 5):
                with log.indent():
                    log.info("Iteration #{n}".format(n=iteration))
                    run_tests_with_changed_items_one_iteration(
                        url, http_method, dry_run, original_payload, cfg,
                        expected_status, how_many, test, results)
                    iteration += 1
def get_source_files(repository):
    """Find all source files in the selected repository."""
    log.info("Getting source files")
    command = (
        "pushd repositories/{repo} > /dev/null; " +
        r"wc -l `find . -path ./venv -prune -o \( -name '*.py' -o -name '*.java' -o "
        + r"-name '*.ts' \) " + "-print` | head -n -1 > ../../{repo}.count; " +
        "popd > /dev/null").format(repo=repository)
    os.system(command)
    filenames = []
    line_counts = {}
    total_lines = 0
    count = 0
    extensions = set()
    files_per_extension = {}

    with log.indent():
        with open("{repo}.count".format(repo=repository)) as fin:
            for line in fin:
                with log.indent():
                    log.debug(line)
                count += 1
                line_count, filename = parse_line_count(line)
                extension = get_file_extension(filename)

                # register possibly new extension
                extensions.add(extension)

                # update file count for such extension
                files_per_extension[extension] = files_per_extension.get(
                    extension, 0) + 1

                # register file name + line count
                filenames.append(filename)
                line_counts[filename] = line_count
                total_lines += line_count

        log.debug("Files: {files}".format(files=count))
        log.debug("Lines: {lines}".format(lines=total_lines))

    log.success("Done")

    return {
        "count": count,
        "filenames": filenames,
        "extensions": extensions,
        "files_per_extension": files_per_extension,
        "line_counts": line_counts,
        "total_lines": total_lines
    }
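
# parse_line_count() and get_file_extension() are not shown in this listing; a
# minimal sketch of the former, assuming plain `wc -l` output lines such as
# "  120 ./dashboard.py", might look like this (illustrative only):
def parse_line_count(line):
    """Split one `wc -l` output line into (line_count, filename)."""
    count, filename = line.strip().split(maxsplit=1)
    return int(count), filename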
def component_analysis_benchmark(queue, threads, component_analysis,
                                 thread_count, python_payload, maven_payload,
                                 npm_payload):
    """Component analysis benchmark."""
    g_python = ComponentGenerator().generator_for_ecosystem("pypi")
    g_maven = ComponentGenerator().generator_for_ecosystem("maven")
    g_npm = ComponentGenerator().generator_for_ecosystem("npm")
    generators = []

    if python_payload:
        generators.append(g_python)
    if maven_payload:
        generators.append(g_maven)
    if npm_payload:
        generators.append(g_npm)
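    # NOTE: `generators` is empty when none of the payload flags is set; the
    # random selection below assumes at least one ecosystem was enabled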

    # don't start the generators from the 1st item
    for i in range(randint(10, 100)):
        for g in generators:
            next(g)

    for thread_id in range(thread_count):
        g = generators[randint(0, len(generators) - 1)]
        ecosystem, component, version = next(g)
        with log.indent():
            log.info("Component analysis for E/P/V {} {} {}".format(
                ecosystem, component, version))
        t = Thread(target=component_analysis.start,
                   args=(thread_id, ecosystem, component, version, queue))
        t.start()
        threads.append(t)
        # skip some items
        for i in range(randint(5, 25)):
            next(g)
def read_build_history(job_url):
    """Read total number of remembered builds and succeeded builds as well."""
    log.info("Read build history")
    with log.indent():
        api_query = jenkins_api_query_build_statuses(job_url)
        log.info(api_query)
        response = requests.get(api_query)
        builds = response.json()["builds"]

        last_build_info = builds[0]
        last_build = int(last_build_info["number"])
        last_build_status = last_build_info["result"]

        # the result is None while a build is still in progress; otherwise map
        # "SUCCESS"/"FAILURE" onto True/False
        if last_build_status is not None:
            last_build_status = last_build_status == "SUCCESS"

        total_builds = get_total_builds(builds)
        success_builds = get_success_builds(builds)
        total_builds_cnt = len(total_builds)
        success_builds_cnt = len(success_builds)

        log_builds(last_build, total_builds_cnt, success_builds_cnt)

        log.success("Done")
    return last_build, last_build_status, total_builds_cnt, success_builds_cnt
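
# For reference: the parsing above expects the usual Jenkins JSON API shape
# (jenkins_api_query_build_statuses() is not shown; it presumably builds a
# query such as {job_url}api/json?tree=builds[number,result]), roughly:
#
#     {"builds": [{"number": 42, "result": None},
#                 {"number": 41, "result": "SUCCESS"},
#                 {"number": 40, "result": "FAILURE"}]}
#
# with builds listed newest first and "result" still null/None for a build
# that is in progress.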
def log_fuzzer_setting(fuzzer_setting):
    """Display basic information about fuzzer settings."""
    log.info("Fuzzer setting:")
    with log.indent():
        log.info("Iteration deep: " + fuzzer_setting["Iteration deep"])
        log.info("List length in range from {n} to {m}".format(
            n=fuzzer_setting["List min length"],
            m=fuzzer_setting["List max length"]))
        log.info("Dict length in range from {n} to {m}".format(
            n=fuzzer_setting["Dictionary min length"],
            m=fuzzer_setting["Dictionary max length"]))
        log.info("Dict keys length in range from {n} to {m}".format(
            n=fuzzer_setting["Min dictionary key length"],
            m=fuzzer_setting["Max dictionary key length"]))
        log.info("Strings length in range from {n} to {m}".format(
            n=fuzzer_setting["Min string length"],
            m=fuzzer_setting["Max string length"]))
        log.info("Dictionary characters:                  " +
                 fuzzer_setting["Dictionary characters"])
        log.info("String characters:                      " +
                 fuzzer_setting["String characters"])
        log.info("Allow NaN in floats:                    " +
                 fuzzer_setting["Allow NaN"])
        log.info("Allow Inf in floats:                    " +
                 fuzzer_setting["Allow Inf"])
        log.info("Generate strings for SQL injection:     " +
                 fuzzer_setting["SQL injection strings"])
        log.info("Generate strings for Gremlin injection: " +
                 fuzzer_setting["Gremlin injection strings"])
def check_system(api):
    """Check if all system endpoints are available and that tokens are valid."""
    # try to access system endpoints
    log.info("System check")
    with log.indent():
        check_api_endpoint(api)
        check_auth_token(api)
def __init__(self):
    """Read and parse the configuration file."""
    self.config = configparser.ConfigParser()
    with log.indent():
        log.info("Reading config file")
        self.config.read(Config.CONFIG_FILE_NAME)
        log.success("Done")
def log_improvements(repositories, results):
    """Log improvements in repositories."""
    with log.indent():
        for repository in repositories:
            log.info("{repository} : {improvement}".format(
                repository=repository,
                improvement=results.improvement[repository]))
def prepare_data_for_repositories(repositories, results, ci_jobs, job_statuses,
                                  clone_repositories_enabled,
                                  cleanup_repositories_enabled,
                                  code_quality_table_enabled,
                                  ci_jobs_table_enabled,
                                  code_coverage_threshold):
    """Perform clone/fetch repositories + run pylint + run docstyle script + accumulate results."""
    log.info("Preparing data for QA Dashboard")
    with log.indent():
        for repository in repositories:
            log.info("Repository " + repository)

            # clone or fetch the repository, but only if the cloning/fetching
            # is not disabled via CLI arguments
            if clone_repositories_enabled:
                clone_or_fetch_repository(repository)

            if code_quality_table_enabled:
                run_pylint(repository)
                run_docstyle_check(repository)
                run_cyclomatic_complexity_tool(repository)
                run_maintainability_index(repository)
                run_dead_code_detector(repository)
                run_common_errors_detector(repository)

                results.source_files[repository] = get_source_files(repository)
                results.repo_linter_checks[repository] = parse_pylint_results(
                    repository)
                results.repo_docstyle_checks[
                    repository] = parse_docstyle_results(repository)
                results.repo_cyclomatic_complexity[repository] = \
                    parse_cyclomatic_complexity(repository)
                results.repo_maintainability_index[repository] = \
                    parse_maintainability_index(repository)
                results.dead_code[repository] = parse_dead_code(repository)
                results.common_errors[repository] = parse_common_errors(
                    repository)

            # delete_work_files(repository)

            if cleanup_repositories_enabled:
                cleanup_repository(repository)

            if ci_jobs_table_enabled:
                for job_type in ci_job_types:
                    url = ci_jobs.get_job_url(repository, job_type)
                    name = ci_jobs.get_job_name(repository, job_type)
                    badge = ci_jobs.get_job_badge(repository, job_type)
                    job_status = job_statuses.get(name)
                    results.ci_jobs_links[repository][job_type] = url
                    results.ci_jobs_badges[repository][job_type] = badge
                    results.ci_jobs_statuses[repository][job_type] = job_status
                results.unit_test_coverage[
                    repository] = read_unit_test_coverage(ci_jobs, repository)
            if code_quality_table_enabled:
                update_overall_status(results, repository,
                                      code_coverage_threshold)

    log.success("Data prepared")
def generate_quality_labels(results):
    """Generate quality labels for all repositories."""
    with log.indent():
        log.info("Generate quality labels")
        for repository in results.repositories:
            log.info(repository)
            generate_quality_label_for_repository(repository, results)
        log.success("Quality labels generated")
def run_pylint(repository):
    """Run Pylint checker against the selected repository."""
    with log.indent():
        log.info("Running Pylint for the repository " + repository)
        command = ("pushd repositories/{repo} >> /dev/null;" +
                   "./run-linter.sh > ../../{repo}.linter.txt;" +
                   "popd >> /dev/null").format(repo=repository)
        os.system(command)
        log.success("Done")
def run_dead_code_detector(repository):
    """Run dead code detector tool against the selected repository."""
    with log.indent():
        log.info("Running dead code detector for the repository " + repository)
        command = ("pushd repositories/{repo} >> /dev/null;" +
                   "./detect-dead-code.sh > ../../{repo}.dead_code.txt;" +
                   "popd >> /dev/null").format(repo=repository)
        os.system(command)
        log.success("Done")
def run_common_errors_detector(repository):
    """Run common issues detector tool against the selected repository."""
    with log.indent():
        log.info("Running common issues detector for the repository " + repository)
        command = ("pushd repositories/{repo} >> /dev/null;" +
                   "./detect-common-errors.sh > ../../{repo}.common_errors.txt;" +
                   "popd >> /dev/null").format(repo=repository)
        os.system(command)
        log.success("Done")
def run_all_loaded_tests(cfg, fuzzer_settings, tests, results):
    """Run all tests read from CSV file."""
    i = 1
    for test in tests:
        log.info("Starting test #{n} with name '{desc}'".format(
            n=i, desc=test["Name"]))
        with log.indent():
            run_test(cfg, fuzzer_settings, test, results)
        i += 1
def check_auth_token(api):
    """Check the authorization token for the core API."""
    log.info("Checking: authorization token for the core API")
    with log.indent():
        if api.check_auth_token_validity():
            log.success("ok")
        else:
            log.error("Fatal: wrong token(?)")
            sys.exit(1)
def check_api_endpoint(api):
    """Check that some API endpoint is callable."""
    log.info("Checking: core API endpoint")
    with log.indent():
        if not api.is_api_running():
            log.error("Fatal: tested system is not available")
            sys.exit(1)
        else:
            log.success("ok")
def run_docstyle_check(repository):
    """Run PyDocsStyle checker against the selected repository."""
    with log.indent():
        log.info("Running DocStyle checker for the repository " + repository)
        command = ("pushd repositories/{repo} >> /dev/null;" +
                   "./check-docstyle.sh > ../../{repo}.pydocstyle.txt;" +
                   "popd >> /dev/null").format(
            repo=repository)
        os.system(command)
        log.success("Done")
def generate_reports(tests, results, cfg, total_time):
    """Generate reports with all BAF tests."""
    log.info("Generate reports")
    with log.indent():
        # cfg contains information whether to generate HTML, CSV, TSV etc. outputs
        generate_text_report_if_enabled(cfg, results)
        generate_html_report_if_enabled(cfg, results, tests, total_time)
        generate_csv_report_if_enabled(cfg, results)
        generate_tsv_report_if_enabled(cfg, results)
        generate_xml_report_if_enabled(cfg, results)
def read_fuzzer_settings(filename):
    """Read fuzzer settings from the CSV file."""
    log.info("Read fuzzer settings")
    with log.indent():
        fuzzer_settings = read_csv_as_dicts(filename)
        if len(fuzzer_settings) == 1:
            log.success("Loaded 1 setting")
        else:
            log.success("Loaded {n} settings".format(n=len(fuzzer_settings)))
    return fuzzer_settings
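
# read_csv_as_dicts() is not shown in this listing; every fuzzer setting is
# expected to be a dict whose keys match those read by log_fuzzer_setting()
# above, e.g. "Iteration deep", "List min length", "List max length",
# "Dictionary min length", "Dictionary max length", "Allow NaN", "Allow Inf",
# "SQL injection strings", and "Gremlin injection strings".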
def main():
    """Entry point to the job checker."""
    log.setLevel(log.INFO)
    log.info("Setup")
    with log.indent():
        config = Config()
        repositories = Repositories(config)

    log.success("Setup done")

    ci_jobs = CIJobs()

    job_statuses = read_job_statuses(ci_jobs)

    with log.indent():
        for repository in repositories.repolist:
            run_checker_for_repository(ci_jobs, config, job_statuses,
                                       repository)

    log.success("Data prepared")
def main():
    """Entry point to the CC reporter."""
    log.setLevel(log.INFO)

    log.info("Config")
    with log.indent():
        config = Config()
        results = Results()
        repositories = Repositories(config)
    log.success("Done")

    log.info("Prepare data for repositories")
    with log.indent():
        prepare_data_for_repositories(repositories.repolist, results, config)
    log.success("Done")

    log.info("Generate coverage pages")
    with log.indent():
        generate_coverage_pages(results)
    log.success("Done")
def prepare_data_for_repositories(repositories, results, config):
    """Accumulate results."""
    results.repositories = repositories
    for repository in repositories:
        log.info(repository)
        with log.indent():
            results.source_files[repository] = get_source_files(repository)
            results.unit_test_coverage[repository] = []
            for week in range(0, 2):
                log.info("Week " + str(week))
                with log.indent():
                    coverage = read_unit_test_coverage_for_week(
                        repository, week)
                    results.unit_test_coverage[repository].append(coverage)

            update_improvement(results, repository)
            update_coverage_delta(results, repository)
            update_coverage_threshold_pass(results, repository, config)
    log.info("Improvements")
    log_improvements(repositories, results)
def main():
    """Entry point to the QA Dashboard."""
    log.setLevel(log.INFO)
    log.info("Setup")
    with log.indent():
        config = Config()
        repositories = Repositories(config)

    log.success("Setup done")

    ci_jobs = CIJobs()

    job_statuses = read_job_statuses(ci_jobs)

    with log.indent():
        for repository in repositories.repolist:

            for job_type in ci_job_types:
                with log.indent():
                    url = ci_jobs.get_job_url(repository, job_type)
                    name = ci_jobs.get_job_name(repository, job_type)
                    badge = ci_jobs.get_job_badge(repository, job_type)
                    job_status = job_statuses.get(name)

                    if url is not None:
                        api_query = jenkins_api_query_build_statuses(url)
                        response = requests.get(api_query)
                        builds = response.json()["builds"]
                        failures = 0
                        for b in builds:
                            if b["result"] != "SUCCESS":
                                failures += 1
                            else:
                                break
                        if failures >= FAILURE_THRESHOLD:
                            print("Repository: {}".format(repository))
                            print("URL to job: {}".format(url))
                            print("Failures:   {}".format(failures))
                            print()

    log.success("Data prepared")
def setup(cli_arguments):
    """Perform BAF setup."""
    log.info("Setup")
    with log.indent():
        input_file = get_input_file(cli_arguments)
        dry_run = cli_arguments.dry
        generate_text = cli_arguments.text
        generate_html = cli_arguments.html
        generate_csv = cli_arguments.csv
        generate_tsv = cli_arguments.tsv
        generate_xml = cli_arguments.xml
        tags = parse_tags(cli_arguments.tags)
        header = cli_arguments.header or "Fuzz tests"
        license_service_url = "N/A"
        refresh_token = None

        if not dry_run:
            check_api_tokens_presence()
            license_service_url = read_url_from_env_var("OSIO_AUTH_SERVICE")
            refresh_token = os.environ.get("RECOMMENDER_REFRESH_TOKEN")

            if not license_service_url:
                log.error("OSIO_AUTH_SERVICE is not set")
                sys.exit(-1)

        log.info("Dry run:          " + enabled_disabled(dry_run))
        log.info("Input file:       " + input_file)
        log.info("Text generator:   " + enabled_disabled(generate_text))
        log.info("HTML generator:   " + enabled_disabled(generate_html))
        log.info("CSV generator:    " + enabled_disabled(generate_csv))
        log.info("TSV generator:    " + enabled_disabled(generate_tsv))
        log.info("XML generator:    " + enabled_disabled(generate_xml))
        log.info("Auth service URL: " + license_service_url)
        log.info("Run tests:        " + tags_as_str(tags))
        log.info("Refresh token:    " + refresh_token_as_str(refresh_token))
        log.info("Header:           " + header)
        log.info("Success")

    access_token = get_access_token(dry_run, refresh_token,
                                    license_service_url)

    return {
        "access_token": access_token,
        "generate_html": generate_html,
        "generate_text": generate_text,
        "generate_csv": generate_csv,
        "generate_tsv": generate_tsv,
        "generate_xml": generate_xml,
        "tags": tags,
        "dry_run": dry_run,
        "input_file": input_file,
        "header": header
    }
def start_tests(cfg, fuzzer_settings, tests, results, tags):
    """Start all tests using the already loaded configuration and fuzzer settings."""
    log.info("Run tests")
    with log.indent():
        if not tests:
            log.error("No tests loaded!")
            sys.exit(-1)
        log_tests_loaded(tests)
        if not tags:
            run_all_loaded_tests(cfg, fuzzer_settings, tests, results)
        else:
            run_tests_with_tags(cfg, fuzzer_settings, tests, results, tags)
def generate_dashboard(results, ignored_files_for_pylint, ignored_files_for_pydocstyle):
    """Generate all pages with the dashboard and detailed information as well."""
    log.info("Generating output")

    with log.indent():
        log.info("Index page")
        generate_index_page(results)
        log.success("Index page generated")

    with log.indent():
        log.info("Details about repository")
        if results.code_quality_table_enabled:
            for repository in results.repositories:
                log.info(repository)
                generate_details_page_for_repository(repository, results,
                                                     ignored_files_for_pylint.get(repository, []),
                                                     ignored_files_for_pydocstyle.get(repository,
                                                                                      []))
                generate_charts_page_for_repository(repository, results)
        log.success("Details generated")
    log.success("Output generated")
def run_tests_with_removed_items(url, http_method, dry_run, original_payload,
                                 cfg, expected_status, test, results):
    """Run tests with items removed from the original payload."""
    iteration = 0
    with log.indent():
        items_count = len(original_payload)
        # lexicographic ordering
        remove_flags_list = list(
            itertools.product([True, False], repeat=items_count))
        # the last item contains (False, False, False...) and we are not interested
        # in removing ZERO items
        remove_flags_list = remove_flags_list[:-1]
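        # e.g. for a payload with two items the remaining flags are
        # (True, True), (True, False), (False, True); every True marks an item
        # to be removed in the given iteration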

        with log.indent():
            log.info("Iteration #{n}".format(n=iteration))
            with log.indent():
                for remove_flags in remove_flags_list:
                    run_tests_with_removed_items_one_iteration(
                        url, http_method, dry_run, original_payload, cfg,
                        expected_status, items_count, remove_flags, test,
                        results)
            iteration += 1
def run_checker_for_repository(ci_jobs, config, job_statuses, repository):
    """Run job checker for selected repository."""
    for job_type in ci_job_types:
        with log.indent():
            url = ci_jobs.get_job_url(repository, job_type)
            name = ci_jobs.get_job_name(repository, job_type)
            badge = ci_jobs.get_job_badge(repository, job_type)
            job_status = job_statuses.get(name)

            if url is not None:
                check_ci_status(url, repository)
                check_job_status(job_status)
                check_job_badge(badge)
def run_test(cfg, test, i, component_analysis, stack_analysis):
    """Run one selected test."""
    test_name = test["Name"]
    log.info("Starting test #{n} with name '{desc}'".format(n=i,
                                                            desc=test_name))
    with log.indent():
        start = time()

        threads = []
        queue = Queue()

        with log.indent():
            component_analysis_count = int(test["Component analysis"])
            stack_analysis_count = int(test["Stack analysis"])
            python_payload = test["Python payload"] in ("Yes", "yes")
            maven_payload = test["Maven payload"] in ("Yes", "yes")
            npm_payload = test["NPM payload"] in ("Yes", "yes")

            component_analysis_benchmark(queue, threads, component_analysis,
                                         component_analysis_count,
                                         python_payload, maven_payload,
                                         npm_payload)
            stack_analysis_benchmark(queue, threads, stack_analysis,
                                     stack_analysis_count, python_payload,
                                     maven_payload, npm_payload)

        wait_for_all_threads(threads)
        queue_size = queue.qsize()
        check_number_of_results(queue_size, component_analysis_count,
                                stack_analysis_count)

        end = time()
        # TODO: use better approach to join paths
        filename = RESULT_DIRECTORY + "/" + test_name.replace(" ", "_") + ".csv"
        log.info("Generating test report into file '{filename}'".format(
            filename=filename))
        generate_csv_report(queue, test, start, end, end - start, filename)
def log_report_type(report_type):
    """Display info which unit test report type has been detected."""
    with log.indent():
        log.info("{report_type} report detected".format(report_type=report_type))
def log_coverage(statements, missed, coverage):
    """Log info about the coverage read from reports."""
    with log.indent():
        log.info("statements {s}".format(s=statements))
        log.info("missed     {m}".format(m=missed))
        log.info("coverage   {c}".format(c=coverage))