Example #1
def main():
    """Entry point to the database integrity tests."""
    cli_arguments = cli_parser.parse_args()
    set_log_level(cli_arguments.log_level)

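    # the CLI options DISABLE the S3 and Gremlin tests, so store them as positive
    # flags to avoid double negation later in the code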
    s3_tests_enabled = not cli_arguments.disable_s3_tests
    gremlin_tests_enabled = not cli_arguments.disable_gremlin_tests

    s3_interface = None
    if s3_tests_enabled:
        s3_configuration = S3Configuration()
        s3_interface = S3Interface(s3_configuration)
        s3_interface.connect()

    gremlin_interface = None
    if gremlin_tests_enabled:
        gremlin_configuration = GremlinConfiguration()
        gremlin_interface = GremlinInterface(gremlin_configuration)

    initial_checks(s3_interface, gremlin_interface)

    if cli_arguments.check:
        logging.info("Only initial check is performed, exiting")
        sys.exit()

    check_packages_in_s3(s3_interface)
Example #2
def main():
    """Entry point to the Analytics API Load Tests."""
    log.setLevel(log.INFO)
    cli_arguments = cli_parser.parse_args()
    if cli_arguments.version:
        show_version()
        sys.exit(0)
    else:
        cfg = setup(cli_arguments)

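        # the server API URL is taken from the environment; the access token and
        # user key come from the configuration returned by setup()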
        coreapi_url = os.environ.get('F8A_SERVER_API_URL', None)
        component_analysis = ComponentAnalysis(coreapi_url,
                                               cfg["access_token"],
                                               cfg["user_key"], True)
        stack_analysis = StackAnalysis(coreapi_url, cfg["access_token"],
                                       cfg["user_key"], True)

        check_system(component_analysis)

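        # test descriptions are read from the CSV file specified in the configuration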
        try:
            tests = read_csv_as_dicts(cfg["input_file"])
        except Exception as e:
            log.error("Test description cannot be read")
            log.error(e)
            sys.exit(1)

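        # measure and log how long the whole test run took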
        t1 = time()
        tags = cfg["tags"]
        start_tests(cfg, tests, tags, component_analysis, stack_analysis)
        t2 = time()
        log.info("Start time: {}".format(t1))
        log.info("End time:   {}".format(t2))
        log.info("Duration:   {}".format(t2 - t1))
Example #3
def main():
    """Entry point to the performance tests."""
    cli_arguments = cli_parser.parse_args()
    check_environment_variables()

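    # API URLs, access tokens, and AWS credentials are read from environment variables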
    coreapi_url = os.environ.get('F8A_API_URL', None)
    jobs_api_url = os.environ.get('F8A_JOB_API_URL', None)
    gremlin_api_url = os.environ.get('F8A_GREMLIN_URL', None)

    recommender_api_token = os.environ.get('RECOMMENDER_API_TOKEN')
    job_api_token = os.environ.get('JOB_API_TOKEN')

    aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
    aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
    s3_region_name = os.environ.get('S3_REGION_NAME')
    deployment_prefix = os.environ.get('DEPLOYMENT_PREFIX', 'STAGE')

    core_api = CoreApi(coreapi_url, recommender_api_token)
    jobs_api = JobsApi(jobs_api_url, job_api_token)
    gremlin_api = GremlinApi(gremlin_api_url)

    s3 = S3Interface(aws_access_key_id, aws_secret_access_key, s3_region_name, deployment_prefix)

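    # basic sanity check of the tested system before any benchmark is started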
    check_system(core_api, jobs_api, s3)

    # set the flag that enables dumping JSON responses into files
    # that allow us to further analyze the data
    core_api.dump_json_responses = cli_arguments.dump
    jobs_api.dump_json_responses = cli_arguments.dump

    # if the user specifies a manifest file for the stack analysis, set
    # the appropriate attribute
    core_api.stack_analysis_manifest = cli_arguments.manifest

    if cli_arguments.sla:
        run_benchmarks_sla(core_api, jobs_api, s3)
    else:
        run_benchmarks(core_api, jobs_api, gremlin_api, s3,
                       cli_arguments.stack_analysis_benchmark,
                       cli_arguments.component_analysis_benchmark,
                       cli_arguments.package_query_to_graph_benchmark,
                       cli_arguments.package_version_query_to_graph_benchmark,
                       cli_arguments.parallel,
                       cli_arguments.thread_max)
Example #4
def main():
    """Entry point to the Bayesian API Fuzzer."""
    log.setLevel(log.INFO)
    cli_arguments = cli_parser.parse_args()
    if cli_arguments.version:
        show_version()
        sys.exit(0)
    else:
        cfg = setup(cli_arguments)
        fuzzer_settings = read_fuzzer_settings("fuzzer_settings.csv")
        results = Results()

        try:
            tests = read_csv_as_dicts(cfg["input_file"])
        except Exception as e:
            log.error("Test description cannot be read")
            log.error(e)
            sys.exit(1)

        t1 = time()
        tags = cfg["tags"]
        start_tests(cfg, fuzzer_settings, tests, results, tags)
        t2 = time()
        generate_reports(tests, results, cfg, t2 - t1)
Example #5
def main():
    """Entry point to the QA Dashboard."""
    #log.setLevel(log.critical)
    log.critical("Setup")
    #with log.indent():
    config = Config()
    cli_arguments = cli_parser.parse_args()
    repositories = Repositories(config)

    # some CLI arguments are used to DISABLE a given feature of the dashboard,
    # but let's not use double negation everywhere :)
    ci_jobs_table_enabled = not cli_arguments.disable_ci_jobs
    code_quality_table_enabled = not cli_arguments.disable_code_quality
    liveness_table_enabled = not cli_arguments.disable_liveness
    sla_table_enabled = not cli_arguments.disable_sla
    clone_repositories_enabled = cli_arguments.clone_repositories
    cleanup_repositories_enabled = cli_arguments.cleanup_repositories

    log.critical("Environment variables check")
    #with log.indent():
    check_environment_variables()
    log.critical("Environment variables check done")

    log.critical("Setup done")

    results = Results()

    # list of repositories to check
    results.repositories = repositories.repolist

    # we need to know which tables are enabled or disabled to properly process the template
    results.sla_table_enabled = sla_table_enabled
    results.liveness_table_enabled = liveness_table_enabled
    results.code_quality_table_enabled = code_quality_table_enabled
    results.ci_jobs_table_enabled = ci_jobs_table_enabled

    results.teams = teams
    results.sprint = config.get_sprint()
    log.critical("Sprint: " + results.sprint)

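    # read the build statuses of the relevant CI jobs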
    ci_jobs = CIJobs()

    job_statuses = read_job_statuses(ci_jobs, ci_jobs_table_enabled,
                                     liveness_table_enabled)

    results.smoke_tests_total_builds, results.smoke_tests_success_builds = \
        production_smoketests_status(ci_jobs)

    results.sprint_plan_url = config.get_sprint_plan_url()
    log.critical("Sprint plan URL: " + results.sprint_plan_url)
    code_coverage_threshold = get_code_coverage_threshold(
        cli_arguments, config)

    for team in teams:
        results.issues_list_url[team] = config.get_list_of_issues_url(team)

    if liveness_table_enabled:
        prepare_data_for_liveness_table(results, ci_jobs, job_statuses)

    prepare_data_for_repositories(repositories.repolist, results, ci_jobs,
                                  job_statuses, clone_repositories_enabled,
                                  cleanup_repositories_enabled,
                                  code_quality_table_enabled,
                                  ci_jobs_table_enabled,
                                  code_coverage_threshold)

    if sla_table_enabled:
        prepare_data_for_sla_table(results)

    if code_quality_table_enabled and liveness_table_enabled:
        export_into_csv(results, repositories.repolist)

    #generate_dashboard(results, ignored_files_for_pylint, ignored_files_for_pydocstyle)
    #print(results)
    #generate_charts(results)
    #generate_quality_labels(results)

    jobs = all_ci_badges(results)

    results_sla = {
        'Stage': results.stage,
        'production': results.production,
        'perf_tests_statistic': results.perf_tests_statistic,
        'generated_on': results.generated_on,
        'ci_jobs': jobs
    }
    results_json = []
    for repo in results.repositories:
        data = {
            'repository': repo,
            'source_count': results.source_files[repo]['count'],
            'source_lines': results.source_files[repo]['total_lines'],
            'linter_total': results.repo_linter_checks[repo]['total'],
            'linter_passed': results.repo_linter_checks[repo]['passed'],
            'linter_failed': results.repo_linter_checks[repo]['failed'],
            'linter_passed_percent': results.repo_linter_checks[repo]['passed%'],
            'linter_failed_percent': results.repo_linter_checks[repo]['failed%'],
            'docstyle_total': results.repo_docstyle_checks[repo]['total'],
            'docstyle_passed': results.repo_docstyle_checks[repo]['passed'],
            'docstyle_failed': results.repo_docstyle_checks[repo]['failed'],
            'docstyle_passed_percent': results.repo_docstyle_checks[repo]['passed%'],
            'docstyle_failed_percent': results.repo_docstyle_checks[repo]['failed%'],
            'code_coverage': results.unit_test_coverage[repo],
            'cyclomatic_complexity': results.repo_cyclomatic_complexity[repo],
            'maintainability_index': results.repo_maintainability_index[repo],
            'status': results.overall_status[repo],
            'remarks': results.remarks[repo],
        }
        results_json.append(data)

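    # combine the per-repository quality data with the overall results and dump everything into results.json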
    results_data = {'quality': results_json, 'others': results_sla}
    with open("results.json", "w") as f:
        json.dump(results_data, f)

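    # Firebase credentials are read from environment variables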
    firebase_api_key = os.environ.get("FIREBASE_API_KEY")
    auth_domain = os.environ.get("AUTH_DOMAIN")
    database_url = os.environ.get("DATABASEURL")
    storage_bucket = os.environ.get('STORAGE_BUCKET')
    config = {
        "apiKey": firebase_api_key,
        "authDomain": auth_domain,
        "databaseURL": database_url,
        "storageBucket": storage_bucket,
    }

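    # upload the generated results.json into the configured Firebase storage bucket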
    firebase = pyrebase.initialize_app(config)
    storage = firebase.storage()
    storage.child("dashboard_data/results.json").put("results.json")
Example #6
def main():
    """Entry point to the QA Dashboard."""
    log.setLevel(log.INFO)
    log.info("Setup")
    with log.indent():
        config = Config()
        cli_arguments = cli_parser.parse_args()
        repositories = Repositories(config)

        # some CLI arguments are used to DISABLE a given feature of the dashboard,
        # but let's not use double negation everywhere :)
        ci_jobs_table_enabled = not cli_arguments.disable_ci_jobs
        code_quality_table_enabled = not cli_arguments.disable_code_quality
        liveness_table_enabled = not cli_arguments.disable_liveness
        sla_table_enabled = not cli_arguments.disable_sla
        clone_repositories_enabled = cli_arguments.clone_repositories
        cleanup_repositories_enabled = cli_arguments.cleanup_repositories

        log.info("Environment variables check")
        with log.indent():
            check_environment_variables()
        log.success("Environment variables check done")

    log.success("Setup done")

    results = Results()

    # list of repositories to check
    results.repositories = repositories.repolist

    # we need to know which tables are enabled or disabled to properly process the template
    results.sla_table_enabled = sla_table_enabled
    results.liveness_table_enabled = liveness_table_enabled
    results.code_quality_table_enabled = code_quality_table_enabled
    results.ci_jobs_table_enabled = ci_jobs_table_enabled

    results.teams = teams
    results.sprint = config.get_sprint()
    log.info("Sprint: " + results.sprint)

    ci_jobs = CIJobs()

    job_statuses = read_job_statuses(ci_jobs, ci_jobs_table_enabled,
                                     liveness_table_enabled)

    results.smoke_tests_total_builds, results.smoke_tests_success_builds = \
        production_smoketests_status(ci_jobs)

    results.sprint_plan_url = config.get_sprint_plan_url()
    log.info("Sprint plan URL: " + results.sprint_plan_url)
    code_coverage_threshold = get_code_coverage_threshold(
        cli_arguments, config)

    for team in teams:
        results.issues_list_url[team] = config.get_list_of_issues_url(team)

    if liveness_table_enabled:
        prepare_data_for_liveness_table(results, ci_jobs, job_statuses)

    prepare_data_for_repositories(repositories.repolist, results, ci_jobs,
                                  job_statuses, clone_repositories_enabled,
                                  cleanup_repositories_enabled,
                                  code_quality_table_enabled,
                                  ci_jobs_table_enabled,
                                  code_coverage_threshold)

    if sla_table_enabled:
        prepare_data_for_sla_table(results)

    if code_quality_table_enabled and liveness_table_enabled:
        export_into_csv(results, repositories.repolist)

    generate_dashboard(results, ignored_files_for_pylint,
                       ignored_files_for_pydocstyle)
    generate_charts(results)
    generate_quality_labels(results)