Example no. 1
def main():
    """Entry point to the Analytics API Load Tests."""
    log.setLevel(log.INFO)
    cli_arguments = cli_parser.parse_args()
    if cli_arguments.version:
        show_version()
        sys.exit(0)
    else:
        cfg = setup(cli_arguments)

        coreapi_url = os.environ.get('F8A_SERVER_API_URL', None)
        component_analysis = ComponentAnalysis(coreapi_url,
                                               cfg["access_token"],
                                               cfg["user_key"], True)
        stack_analysis = StackAnalysis(coreapi_url, cfg["access_token"],
                                       cfg["user_key"], True)

        check_system(component_analysis)

        try:
            tests = read_csv_as_dicts(cfg["input_file"])
        except Exception as e:
            log.error("Test description can not be read")
            log.error(e)
            sys.exit(0)

        t1 = time()
        tags = cfg["tags"]
        start_tests(cfg, tests, tags, component_analysis, stack_analysis)
        t2 = time()
        log.info("Start time: {}".format(t1))
        log.info("End time:   {}".format(t2))
        log.info("Duration:   {}".format(t2 - t1))
Example no. 2
def main():
    """Entry point to the QA Dashboard."""
    log.setLevel(log.INFO)

    log.info("Started")

    with log.indent():
        log.info("Setup")
        config = Config()
        jenkins_url = config.get_jenkins_url()
        master_build_url = jenkins_url + config.get_master_build_job()
        log.success("Setup done")

    last_processed_build = read_last_processed()

    log.info("Last processed build: {n}".format(n=last_processed_build))

    last_build, last_build_status, total_builds_cnt, success_builds_cnt = \
        read_build_history(master_build_url)

    if last_build > last_processed_build:
        log.info("New build(s) detected!")
        with log.indent():
            process_new_build(config, last_build, last_build_status,
                              jenkins_url, master_build_url)

        write_last_processed(last_build)
    else:
        log.info("No new build(s) detected...")
Example no. 3
def main():
    """Entry point to the CC reporter."""
    log.setLevel(log.INFO)

    log.info("Config")
    with log.indent():
        config = Config()
        results = Results()
        repositories = Repositories(config)
    log.success("Done")

    log.info("Prepare data for repositories")
    with log.indent():
        prepare_data_for_repositories(repositories.repolist, results, config)
    log.success("Done")

    log.info("Generate coverage pages")
    with log.indent():
        generate_coverage_pages(results)
    log.success("Done")
Example no. 4
def main():
    """Entry point to the job checker."""
    log.setLevel(log.INFO)
    log.info("Setup")
    with log.indent():
        config = Config()
        repositories = Repositories(config)

    log.success("Setup done")

    ci_jobs = CIJobs()

    job_statuses = read_job_statuses(ci_jobs)

    with log.indent():
        for repository in repositories.repolist:
            run_checker_for_repository(ci_jobs, config, job_statuses,
                                       repository)

    log.success("Data prepared")
Example no. 5
def main():
    """Entry point to the QA Dashboard."""
    log.setLevel(log.INFO)
    log.info("Setup")
    with log.indent():
        config = Config()
        repositories = Repositories(config)

    log.success("Setup done")

    ci_jobs = CIJobs()

    job_statuses = read_job_statuses(ci_jobs)

    with log.indent():
        for repository in repositories.repolist:

            for job_type in ci_job_types:
                with log.indent():
                    url = ci_jobs.get_job_url(repository, job_type)
                    name = ci_jobs.get_job_name(repository, job_type)
                    badge = ci_jobs.get_job_badge(repository, job_type)
                    job_status = job_statuses.get(name)

                    if url is not None:
                        api_query = jenkins_api_query_build_statuses(url)
                        response = requests.get(api_query)
                        builds = response.json()["builds"]
                        failures = 0
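                        # count failed builds until the first successful one is reached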
                        for b in builds:
                            if b["result"] != "SUCCESS":
                                failures += 1
                            else:
                                break
                        if failures >= FAILURE_THRESHOLD:
                            print("Repository: {}".format(repository))
                            print("URL to job: {}".format(url))
                            print("Failures:   {}".format(failures))
                            print()

    log.success("Data prepared")
Example no. 6
def main():
    """Entry point to the Bayesian API Fuzzer."""
    log.setLevel(log.INFO)
    cli_arguments = cli_parser.parse_args()
    if cli_arguments.version:
        show_version()
        sys.exit(0)
    else:
        cfg = setup(cli_arguments)
        fuzzer_settings = read_fuzzer_settings("fuzzer_settings.csv")
        results = Results()

        try:
            tests = read_csv_as_dicts(cfg["input_file"])
        except Exception as e:
            log.error("Test description can not be read")
            log.error(e)
            sys.exit(0)

        t1 = time()
        tags = cfg["tags"]
        start_tests(cfg, fuzzer_settings, tests, results, tags)
        t2 = time()
        generate_reports(tests, results, cfg, t2 - t1)
Example no. 7
def main():
    """Entry point to the QA Dashboard."""
    log.setLevel(log.INFO)
    log.info("Setup")
    with log.indent():
        config = Config()
        cli_arguments = cli_parser.parse_args()
        repositories = Repositories(config)

        # some CLI arguments are used to DISABLE a given feature of the dashboard,
        # but let's not use double negation everywhere :)
        ci_jobs_table_enabled = not cli_arguments.disable_ci_jobs
        code_quality_table_enabled = not cli_arguments.disable_code_quality
        liveness_table_enabled = not cli_arguments.disable_liveness
        sla_table_enabled = not cli_arguments.disable_sla
        clone_repositories_enabled = cli_arguments.clone_repositories
        cleanup_repositories_enabled = cli_arguments.cleanup_repositories

        log.info("Environment variables check")
        with log.indent():
            check_environment_variables()
        log.success("Environment variables check done")

    log.success("Setup done")

    results = Results()

    # list of repositories to check
    results.repositories = repositories.repolist

    # we need to know which tables are enabled or disabled to properly process the template
    results.sla_table_enabled = sla_table_enabled
    results.liveness_table_enabled = liveness_table_enabled
    results.code_quality_table_enabled = code_quality_table_enabled
    results.ci_jobs_table_enabled = ci_jobs_table_enabled

    results.teams = teams
    results.sprint = config.get_sprint()
    log.info("Sprint: " + results.sprint)

    ci_jobs = CIJobs()

    job_statuses = read_job_statuses(ci_jobs, ci_jobs_table_enabled, liveness_table_enabled)

    results.smoke_tests_total_builds, results.smoke_tests_success_builds = \
        production_smoketests_status(ci_jobs)

    results.sprint_plan_url = config.get_sprint_plan_url()
    log.info("Sprint plan URL: " + results.sprint_plan_url)
    code_coverage_threshold = get_code_coverage_threshold(cli_arguments, config)

    for team in teams:
        results.issues_list_url[team] = config.get_list_of_issues_url(team)

    if liveness_table_enabled:
        prepare_data_for_liveness_table(results, ci_jobs, job_statuses)

    prepare_data_for_repositories(repositories.repolist, results, ci_jobs, job_statuses,
                                  clone_repositories_enabled, cleanup_repositories_enabled,
                                  code_quality_table_enabled, ci_jobs_table_enabled,
                                  code_coverage_threshold)

    if sla_table_enabled:
        prepare_data_for_sla_table(results)

    if code_quality_table_enabled and liveness_table_enabled:
        export_into_csv(results, repositories.repolist)

    generate_dashboard(results, ignored_files_for_pylint, ignored_files_for_pydocstyle)
    generate_charts(results)
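
Results here behaves as a plain attribute container that later stages fill in; a minimal stand-in, assuming it has no behaviour of its own (only issues_list_url needs to exist up front because it is indexed per team; every other attribute is assigned dynamically above):

class Results:
    """Mutable bag of dashboard results, filled in step by step by main()."""

    def __init__(self):
        # per-team dictionary used by results.issues_list_url[team] = ...
        self.issues_list_url = {}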
Example no. 8
from fastlog import log

log.setLevel(log.DEBUG)

log.info("log.info")
log.success("log.success")
log.failure("log.failure")

with log.indent():
    log.debug("log.debug")
    log.warning("log.warning")

log.separator()

log.hexdump(list(map(chr, range(256))))
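
The indent() context manager used across these examples groups related messages under a deeper indentation level; a rough illustration of the idea (not fastlog's actual implementation) could look like this:

from contextlib import contextmanager


class IndentingLog:
    """Toy logger that indents messages emitted inside an indent() block."""

    def __init__(self):
        self._level = 0

    @contextmanager
    def indent(self):
        self._level += 1
        try:
            yield
        finally:
            self._level -= 1

    def info(self, message):
        print("    " * self._level + message)


toy_log = IndentingLog()
toy_log.info("Setup")
with toy_log.indent():
    toy_log.info("nested message")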