# Assumed imports for this excerpt; only Decimal and pytest are certain.
# The project-specific names (check_all, Config, ModuleConfig, Pass, Fail,
# ReportMetadata, FileCoverageModel, CoverageSummaryModel, create_report)
# come from the coverage tool under test, whose module layout is not shown.
from decimal import Decimal

import pytest


def test_check_all_files_with_combined_coverage() -> None:
    report = create_report(
        meta=ReportMetadata(branch_coverage=True),
        files={
            "a.py":
            FileCoverageModel(summary=CoverageSummaryModel(
                covered_lines=5,
                num_statements=5,
                covered_branches=1,
                num_branches=5,
            )),
            "b.py":
            FileCoverageModel(summary=CoverageSummaryModel(
                covered_lines=5,
                num_statements=5,
                covered_branches=3,
                num_branches=5,
            )),
        },
    )

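    # Combined coverage appears to be (covered_lines + covered_branches)
    # / (num_statements + num_branches): a.py = (5+1)/(5+5) = 60% and
    # b.py = (5+3)/(5+5) = 80%, so only a.py trips the 80.0 minimum below.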
    assert (check_all(
        report, Config(file_combined_coverage_min=Decimal("60.0"))) == Pass())
    assert check_all(
        report, Config(file_combined_coverage_min=Decimal("80.0"))) == Fail([
            'File: "a.py" failed COMBINED line plus branch coverage metric' +
            ", expected 80.0, was 60.0000"
        ])


def test_check_all_files_with_branch_coverage() -> None:
    report = create_report(
        meta=ReportMetadata(branch_coverage=True),
        files={
            "a.py":
            FileCoverageModel(summary=CoverageSummaryModel(
                covered_lines=5,
                num_statements=5,
                covered_branches=1,
                num_branches=2,
            )),
            "b.py":
            FileCoverageModel(summary=CoverageSummaryModel(
                covered_lines=5,
                num_statements=5,
                covered_branches=3,
                num_branches=4,
            )),
        },
    )

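    # Branch coverage per file: a.py = 1/2 = 50%, b.py = 3/4 = 75%.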
    assert check_all(
        report, Config(file_branch_coverage_min=Decimal("50.0"))) == Pass()
    assert check_all(
        report, Config(file_branch_coverage_min=Decimal("75.0"))
    ) == Fail([
        'File: "a.py" failed BRANCH coverage metric, expected 75.0, was 50.0000'
    ])


def test_module_level_config() -> None:
    report = create_report(
        meta=ReportMetadata(branch_coverage=True),
        files={
            "src/model/a.py":
            FileCoverageModel(summary=CoverageSummaryModel(
                covered_lines=5,
                num_statements=5,
                covered_branches=1,
                num_branches=2,
            )),
            "src/model/b.py":
            FileCoverageModel(summary=CoverageSummaryModel(
                covered_lines=5,
                num_statements=5,
                covered_branches=3,
                num_branches=4,
            )),
            "src/cli/command.py":
            FileCoverageModel(summary=CoverageSummaryModel(
                covered_lines=5,
                num_statements=5,
                covered_branches=4,
                num_branches=4,
            )),
        },
    )

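    # Module thresholds are keyed by path prefix; the assertions below show
    # that a more specific prefix ("src/model/a") overrides a broader one
    # ("src/model/") for the files it matches.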
    assert (check_all(
        report,
        Config(modules={
            "src/model/":
            ModuleConfig(file_branch_coverage_min=Decimal("50.0"))
        }),
    ) == Pass())
    assert (check_all(
        report,
        Config(
            modules={
                "src/model/":
                ModuleConfig(file_branch_coverage_min=Decimal("75.0")),
                "src/model/a":
                ModuleConfig(file_branch_coverage_min=Decimal("50.0")),
            }),
    ) == Pass())
    assert check_all(
        report,
        Config(
            modules={
                "src/model/":
                ModuleConfig(file_branch_coverage_min=Decimal("80.0")),
                "src/model/a":
                ModuleConfig(file_branch_coverage_min=Decimal("50.0")),
            }),
    ) == Fail([
        'File: "src/model/b.py" failed BRANCH coverage metric' +
        ", expected 80.0, was 75.0000"
    ])


def test_check_totals() -> None:
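    # Totals: 3/4 = 75% meets a 75.0 minimum, while 2/3 ≈ 66.6667% misses 67.0.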
    assert (check_all(
        create_report(
            totals=CoverageSummaryModel(covered_lines=3, num_statements=4)),
        Config(line_coverage_min=Decimal("75.0")),
    ) == Pass())
    assert (check_all(
        create_report(
            totals=CoverageSummaryModel(covered_lines=2, num_statements=3)),
        Config(line_coverage_min=Decimal("67.0")),
    ) == Fail(
        ["Total line coverage metric failed, expected 67.0, was 66.6667"]))
def test_check_totals__with_number_missing_lines_max() -> None:
    report = create_report(
        totals=CoverageSummaryModel(covered_lines=3, num_statements=7))
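    # 7 statements with 3 covered leaves 4 missing lines, so the limit of 3
    # below is the first one to be exceeded.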
    assert (check_all(
        report, Config(line_coverage_min=Decimal(0),
                       number_missing_lines_max=5)) == Pass())
    assert (check_all(
        report, Config(line_coverage_min=Decimal(0),
                       number_missing_lines_max=4)) == Pass())
    assert check_all(
        report,
        Config(line_coverage_min=Decimal(0),
               number_missing_lines_max=3)) == Fail([
                   "Total number missing lines max failed, expected 3, was 4"
                ])


def test_check_all_files() -> None:
    report = create_report(
        files={
            "a.py":
            FileCoverageModel(summary=CoverageSummaryModel(covered_lines=1,
                                                           num_statements=2)),
            "b.py":
            FileCoverageModel(summary=CoverageSummaryModel(covered_lines=3,
                                                           num_statements=4)),
        })

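    # Line coverage per file: a.py = 1/2 = 50%, b.py = 3/4 = 75%.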
    assert check_all(report,
                     Config(file_line_coverage_min=Decimal("50.0"))) == Pass()
    assert check_all(
        report, Config(file_line_coverage_min=Decimal("75.0"))
    ) == Fail([
        'File: "a.py" failed LINE coverage metric, expected 75.0, was 50.0000'
    ])


def test_check_totals_with_branch_coverage() -> None:
    report = create_report(
        meta=ReportMetadata(branch_coverage=True),
        totals=CoverageSummaryModel(
            covered_lines=5,
            num_statements=5,
            covered_branches=3,
            num_branches=4,
        ),
    )
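    # Total branch coverage is 3/4 = 75.0000%, so a 75.001 minimum just fails.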
    assert (check_all(
        report,
        Config(branch_coverage_min=Decimal("75.0")),
    ) == Pass())
    assert (check_all(
        report,
        Config(branch_coverage_min=Decimal("75.001")),
    ) == Fail(
        ["Total branch coverage metric failed, expected 75.001, was 75.0000"]))
Example #8
def main() -> int:
    args = parser.parse_args(namespace=ArgsNamespace())
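    # Read the coverage JSON report and merge config-file settings with any
    # CLI overrides.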
    report = read_report(args.coverage_json)
    config_from_file = read_config(args.config)
    config = combine_config_with_args(args, config_from_file)
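    # Run every configured check; `result` is truthy when all checks pass.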
    all_checks = check_all(report, config)
    if all_checks.result:
        print(colors.OKGREEN + "Success!" + colors.ENDC)
    else:
        print(f"Failed with {len(all_checks.problems)} errors")
        for problem in all_checks.problems:
            print(colors.FAIL + problem + colors.ENDC)
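    # bool_to_return_status presumably maps True -> 0 and False -> a nonzero
    # exit status (an assumption; the helper is not shown in this excerpt).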
    return bool_to_return_status(all_checks.result)


def test_checking_branch_coverage_fails_without_branch_report() -> None:
    report = create_report(meta=ReportMetadata(branch_coverage=False))
    expected_error_message = "missing number of branches or number of branches covered"

    with pytest.raises(ValueError) as e:
        check_all(report, Config(branch_coverage_min=Decimal("50.0")))
    assert str(e.value) == expected_error_message

    with pytest.raises(ValueError) as e:
        check_all(report, Config(combined_coverage_min=Decimal("50.0")))
    assert str(e.value) == expected_error_message

    with pytest.raises(ValueError) as e:
        check_all(report, Config(file_branch_coverage_min=Decimal("75.0")))
    assert str(e.value) == expected_error_message