def prepare_data_for_repositories(repositories, results, ci_jobs, job_statuses,
                                  clone_repositories_enabled,
                                  cleanup_repositories_enabled,
                                  code_quality_table_enabled,
                                  ci_jobs_table_enabled,
                                  code_coverage_threshold):
    """Perform clone/fetch repositories + run pylint + run docstyle script + accumulate results."""
    log.critical("Preparing data for QA Dashboard")
    #with log.indent():
    all_repos = len(repositories)
    i = 0
    for repository in repositories:
        i += 1
        log.critical("Repository {}  ({}/{})".format(repository, i, all_repos))

        # clone or fetch the repository, but only if the cloning/fetching
        # is not disabled via CLI arguments
        if clone_repositories_enabled:
            clone_or_fetch_repository(repository)

        if code_quality_table_enabled:
            # run all the code quality checkers over the checked-out sources
            run_pylint(repository)
            run_docstyle_check(repository)
            run_cyclomatic_complexity_tool(repository)
            run_maintainability_index(repository)
            run_dead_code_detector(repository)
            run_common_errors_detector(repository)

            # collect the source file list and the parsed output of each checker
            results.source_files[repository] = get_source_files(repository)
            results.repo_linter_checks[repository] = parse_pylint_results(
                repository)
            results.repo_docstyle_checks[repository] = parse_docstyle_results(
                repository)
            results.repo_cyclomatic_complexity[repository] = \
                parse_cyclomatic_complexity(repository)
            results.repo_maintainability_index[repository] = \
                parse_maintainability_index(repository)
            results.dead_code[repository] = parse_dead_code(repository)
            results.common_errors[repository] = parse_common_errors(repository)

        if cleanup_repositories_enabled:
            cleanup_repository(repository)

        if ci_jobs_table_enabled:
            # record the URL, badge, and current status of each CI job type
            for job_type in ci_job_types:
                url = ci_jobs.get_job_url(repository, job_type)
                name = ci_jobs.get_job_name(repository, job_type)
                badge = ci_jobs.get_job_badge(repository, job_type)
                job_status = job_statuses.get(name)
                results.ci_jobs_links[repository][job_type] = url
                results.ci_jobs_badges[repository][job_type] = badge
                results.ci_jobs_statuses[repository][job_type] = job_status
            results.unit_test_coverage[repository] = read_unit_test_coverage(
                ci_jobs, repository)
        if code_quality_table_enabled:
            update_overall_status(results, repository, code_coverage_threshold)

    log.critical("Data prepared")

def prepare_data_for_repositories(repositories, results, config):
    """Read the unit test coverage history for each repository and accumulate results."""
    results.repositories = repositories
    for repository in repositories:
        log.debug(repository)
        results.source_files[repository] = get_source_files(repository)
        results.unit_test_coverage[repository] = []
        # read the coverage for two weeks of history; the delta and the
        # improvement are computed from these values below
        for week in range(2):
            log.debug("Week " + str(week))
            coverage = read_unit_test_coverage_for_week(repository, week)
            results.unit_test_coverage[repository].append(coverage)

        update_improvement(results, repository)
        update_coverage_delta(results, repository)
        update_coverage_threshold_pass(results, repository, config)
    log.debug("Improvements")
    log_improvements(repositories, results)
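
# Hypothetical invocation of the history-based variant above; the shape of
# config is a guess, since only update_coverage_threshold_pass consumes it
# here:
#
#     results = Results()
#     config = {"code_coverage_threshold": 90}
#     prepare_data_for_repositories(repositories, results, config)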