Example #1
0
def test_returning_none():
    """A validator returning None is recorded as a TypeError with no counts."""
    result = run_validator(IReturnValues, "returning_none")
    assert result.exc_type.startswith("TypeError")
    # Once the validator errored, no counts or failure details are available.
    for attr in ("num_passing", "num_na", "num_allowed_to_fail", "failures"):
        assert getattr(result, attr) is None
Example #2
0
def test_returning_bool(num_failing, num_na):
    """A bool-returning validator tallies passes/NA and collects failures."""
    expected_failures = IReturnValues.objects.generate(failing=num_failing)
    IReturnValues.objects.generate(na=num_na)
    expected = SummaryEx(
        num_passing=20,
        num_na=num_na,
        failures=expected_failures,
    ).complete()
    assert run_validator(IReturnValues, "returning_bool") == expected
Example #3
0
def test_raising_exception():
    """An exception raised inside the validator is captured on the summary."""
    result = run_validator(IReturnValues, "raising_exception")
    assert result.exc_type.startswith("ValueError")
    assert result.exc_traceback is not None
    # No counts or failures can be reported after the validator raised.
    for attr in ("num_passing", "num_na", "num_allowed_to_fail", "failures"):
        assert getattr(result, attr) is None
def test_bad_related_names(caplog):
    """Bad related names still validate but emit a datavalidation warning."""
    result = run_validator(RelatedFields, "bad_related_names")
    assert result == SummaryEx(num_passing=20).complete()
    warned = any(
        name == "datavalidation" and level == logging.WARNING
        for name, level, message in caplog.record_tuples
    )
    assert warned, "expected a warning about related names"
def test_second_database():
    """Validating a model on a second database records failures on default."""
    failure = SecondDatabase.objects.generate(failing=1)
    result = run_validator(SecondDatabase, method_name="test_foobar")
    expected = SummaryEx(num_passing=20, num_na=0, failures=failure).complete()
    assert result == expected
    # assert the FailingObject table was updated
    ct = ContentType.objects.get_for_model(SecondDatabase)
    qs = FailingObject.objects.using("default").filter(content_type=ct)
    assert qs.count() == 1
Example #6
0
def test_allowed_to_fail():
    """Failures flagged allowed-to-fail are tallied in their own counter."""
    expected_failures = IReturnValues.objects.generate(failing=10)
    IReturnValues.objects.generate(na=5)
    expected = SummaryEx(
        num_passing=20,
        num_na=5,
        num_allowed_to_fail=10,
        failures=expected_failures,
    ).complete()
    assert run_validator(IReturnValues, "allowed_to_fail") == expected
Example #7
0
def test_returning_bool(num_failing, num_na):
    """Class-based bool validators report only an overall status, no counts."""
    failing_objects = CReturnValues.objects.generate(failing=num_failing)
    CReturnValues.objects.generate(na=num_na)
    result = run_validator(CReturnValues, "returning_bool")
    # The status is whatever a summary with these failures would resolve to.
    status = SummaryEx(failures=failing_objects).complete().status
    expected = SummaryEx(
        status=status,
        num_passing=None,
        num_na=None,
        num_allowed_to_fail=None,
    ).complete()
    assert result == expected
Example #8
0
def test_returning_summary():
    """A validator may return a Summary object directly."""
    result = run_validator(CReturnValues, "returning_summary")
    expected = SummaryEx.from_summary(
        Summary(num_passing=20, num_na=0, failures=[])
    ).complete()
    assert result == expected
Example #9
0
def test_returning_list_of_model_ids(num_failing, num_na):
    """A validator may return the failing objects' ids as its result."""
    failing_objects = CReturnValues.objects.generate(failing=num_failing)
    CReturnValues.objects.generate(na=num_na)
    result = run_validator(CReturnValues, "returning_list_of_model_ids")
    assert result == SummaryEx.from_return_value(failing_objects).complete()
def test_useless_select_related(caplog):
    """A select_related the validator never uses passes without warnings.

    Fix: removed a leftover ``print(summary.__dict__)`` debugging statement
    that polluted test output.
    """
    summary = run_validator(RelatedFields, "useless_select_related")
    assert summary == SummaryEx.from_return_value(PASS).complete()
    assert_no_warnings(caplog)
def test_select_related_m2m(caplog):
    """Prefetching an M2M relation validates cleanly and emits no warnings."""
    result = run_validator(RelatedFields, "prefetch_related_m2m")
    assert result == SummaryEx(num_passing=20).complete()
    assert_no_warnings(caplog)