Example 1
def test_returning_bool(num_failing, num_na):
    failures = CReturnValues.objects.generate(failing=num_failing)
    CReturnValues.objects.generate(na=num_na)
    summary = run_validator(CReturnValues, "returning_bool")
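    # derive the expected status from the generated failures alone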
    expected_status = SummaryEx(failures=failures).complete().status
    assert summary == SummaryEx(
        status=expected_status,
        num_passing=None,
        num_na=None,
        num_allowed_to_fail=None,
    ).complete()
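Note: test_returning_bool takes num_failing and num_na as arguments, so the original test is presumably parametrized and the @pytest.mark.parametrize decorator was simply not captured in this listing (the same applies to test_returning_list_of_model_ids in Example 10). A minimal sketch of how such parameters might be supplied; the values below are illustrative assumptions, not taken from the source:

import pytest

@pytest.mark.parametrize("num_failing, num_na", [(0, 0), (5, 3)])
def test_returning_bool(num_failing, num_na):
    ...  # body as in Example 1 above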
Example 2
def test_returning_bool(num_failing, num_na):
    failures = IReturnValues.objects.generate(failing=num_failing)
    IReturnValues.objects.generate(na=num_na)
    summary = run_validator(IReturnValues, "returning_bool")
    assert summary == SummaryEx(num_passing=20,
                                num_na=num_na,
                                failures=failures).complete()
Example 3
def test_bad_related_names(caplog):
    summary = run_validator(RelatedFields, "bad_related_names")
    assert summary == SummaryEx(num_passing=20).complete()
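    # scan the captured log records for a warning from the datavalidation logger;
    # the for/else reaches the assert only if no such warning was found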
    for name, level, message in caplog.record_tuples:
        if name == "datavalidation" and level == logging.WARNING:
            break
    else:
        assert False, "expected a warning about related names"
Example 4
def test_summary_ex_pretty_print():
    summary = SummaryEx(num_passing=1, num_na=2, failures=[3,
                                                           4]).pretty_print()
    assert summary == ("PASSED: 1\n"
                       "FAILED: 2\n"
                       "NA: 2\n"
                       "Allowed to Fail: 0\n"
                       "Failing Ids: 3, 4")

    summary = SummaryEx(num_passing=None, num_na=None,
                        failures=[1, 2, 3, 4]).pretty_print()
    assert summary == ("FAILED: 4\n"
                       "Allowed to Fail: 0\n"
                       "Failing Ids: 1, 2, 3...")

    summary = SummaryEx(num_passing=1, exc_type="ValueError()",
                        exc_obj_pk=1).pretty_print()
    assert summary == "EXCEPTION: ValueError() (object pk=1)"
Example 5
def test_second_database():
    failure = SecondDatabase.objects.generate(failing=1)
    summary = run_validator(SecondDatabase, method_name="test_foobar")
    assert summary == SummaryEx(num_passing=20, num_na=0,
                                failures=failure).complete()
    # assert the FailingObject table was updated
    ct = ContentType.objects.get_for_model(SecondDatabase)
    assert FailingObject.objects.using("default").filter(
        content_type=ct).count() == 1
Example 6
def test_allowed_to_fail():
    failures = IReturnValues.objects.generate(failing=10)
    IReturnValues.objects.generate(na=5)
    summary = run_validator(IReturnValues, "allowed_to_fail")
    assert summary == SummaryEx(
        num_passing=20,
        num_na=5,
        num_allowed_to_fail=10,
        failures=failures,
    ).complete()
Example 7
def test_summary_ex_instantiation():
    """ creation of SummaryEx from summaries and return values """
    # should not throw errors
    SummaryEx()
    SummaryEx().complete()
    SummaryEx(num_passing=1, num_na=2, num_allowed_to_fail=3,
              failures=[]).complete()
    SummaryEx.from_exception_info(
        ExceptionInfo(exc_type="TypeError")).complete()
    SummaryEx.from_return_value(True)
    SummaryEx.from_return_value(False)
    SummaryEx(status=Status.PASSING).complete()
    SummaryEx(failures=list(TestModel.objects.all()[:2])).complete()

    # should throw errors
    try:
        SummaryEx(num_passing=None).complete()
        assert False, "expected an exception"
    except TypeError:
        pass

    try:
        SummaryEx(failures=True).complete()  # noqa
        assert False, "expected an exception"
    except TypeError:
        pass

    try:
        SummaryEx.from_summary(Summary(failures=None)).complete()  # noqa
        assert False, "expected an exception"
    except TypeError:
        pass
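The try/except/assert-False blocks above check that completing an inconsistent SummaryEx raises TypeError. With pytest available, the same intent can be expressed more compactly with pytest.raises; a sketch of an equivalent for the first case (the test name here is hypothetical):

import pytest

def test_summary_ex_requires_num_passing():
    # completing a SummaryEx whose num_passing is explicitly None should raise
    with pytest.raises(TypeError):
        SummaryEx(num_passing=None).complete()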
Example 8
def test_summary_ex_equality():
    assert SummaryEx(num_passing=1) == SummaryEx(num_passing=1)
    assert SummaryEx(failures=[1, 2]) == SummaryEx(failures=[2, 1])
    assert SummaryEx(exc_obj_pk=1) == SummaryEx(exc_obj_pk=1)
    assert (SummaryEx.from_summary(
        Summary(num_passing=1, failures=[2])) == SummaryEx(num_passing=1,
                                                           failures=[2]))

    assert SummaryEx(num_passing=1) != SummaryEx(num_passing=2)
    assert SummaryEx(num_passing=1) != SummaryEx(num_passing=None)
    assert SummaryEx(failures=[]) != SummaryEx(failures=None)
    assert SummaryEx(failures=[1, 2, 3]) != SummaryEx(failures=[2, 1])
Example 9
def test_returning_summary():
    summary = run_validator(CReturnValues, "returning_summary")
    assert summary == SummaryEx.from_summary(
        Summary(num_passing=20, num_na=0, failures=[])).complete()
Example 10
def test_returning_list_of_model_ids(num_failing, num_na):
    failures = CReturnValues.objects.generate(failing=num_failing)
    CReturnValues.objects.generate(na=num_na)
    summary = run_validator(CReturnValues, "returning_list_of_model_ids")
    assert summary == SummaryEx.from_return_value(failures).complete()
Example 11
def test_useless_select_related(caplog):
    summary = run_validator(RelatedFields, "useless_select_related")
    print(summary.__dict__)
    assert summary == SummaryEx.from_return_value(PASS).complete()
    assert_no_warnings(caplog)
Example 12
def test_select_related_m2m(caplog):
    summary = run_validator(RelatedFields, "prefetch_related_m2m")
    assert summary == SummaryEx(num_passing=20).complete()
    assert_no_warnings(caplog)
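assert_no_warnings is a helper from the project's test suite whose body is not shown in these examples. Judging from how caplog is inspected in test_bad_related_names (Example 3), it presumably does something along these lines; this is a hypothetical sketch, not the project's actual implementation:

import logging

def assert_no_warnings(caplog):
    # fail if the datavalidation logger emitted anything at WARNING or above
    warnings = [rec for rec in caplog.record_tuples
                if rec[0] == "datavalidation" and rec[1] >= logging.WARNING]
    assert not warnings, f"unexpected warnings: {warnings}"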