Example #1
File: tests.py Project: jkroman2/cove-ocds
def test_get_releases_aggregates():
    assert get_releases_aggregates({}) == EMPTY_RELEASE_AGGREGATE
    assert get_releases_aggregates({"releases": []}) == EMPTY_RELEASE_AGGREGATE
    release_aggregate_3_empty = EMPTY_RELEASE_AGGREGATE.copy()
    release_aggregate_3_empty["release_count"] = 3
    assert (get_releases_aggregates({"releases":
                                     [{}, {},
                                      {}]}) == release_aggregate_3_empty)

    with open(os.path.join("cove_ocds", "fixtures",
                           "release_aggregate.json")) as fp:
        data = json.load(fp)

    assert (get_releases_aggregates({"releases": data["releases"]
                                     }) == EXPECTED_RELEASE_AGGREGATE)

    # test if a release is duplicated
    actual = get_releases_aggregates(
        {"releases": data["releases"] + data["releases"]})
    actual_cleaned = {key: actual[key] for key in actual if "doc" not in key}
    actual_cleaned.pop("contracts_without_awards")

    expected_cleaned = {
        key: EXPECTED_RELEASE_AGGREGATE[key]
        for key in EXPECTED_RELEASE_AGGREGATE if "doc" not in key
    }
    expected_cleaned["tags"] = {"planning": 2, "tender": 2}
    expected_cleaned.pop("contracts_without_awards")
    expected_cleaned["release_count"] = 2
    expected_cleaned["duplicate_release_ids"] = ["1"]

    assert actual_cleaned == expected_cleaned

    with open(os.path.join("cove_ocds", "fixtures",
                           "samplerubbish.json")) as fp:
        data = json.load(fp)

    actual = get_releases_aggregates(data)
    actual_cleaned = {
        key: actual[key]
        for key in actual if isinstance(actual[key], (str, int, float))
    }

    assert actual_cleaned == EXPECTED_RELEASE_AGGREGATE_RANDOM

    with open(os.path.join("cove_ocds", "fixtures", "badfile.json")) as fp:
        data = json.load(fp)

    actual = get_releases_aggregates(data, ignore_errors=True)

    assert actual == {}
Example #2
def test_get_releases_aggregates():
    assert get_releases_aggregates({}) == EMPTY_RELEASE_AGGREGATE
    assert get_releases_aggregates({'releases': []}) == EMPTY_RELEASE_AGGREGATE
    release_aggregate_3_empty = EMPTY_RELEASE_AGGREGATE.copy()
    release_aggregate_3_empty['release_count'] = 3
    assert get_releases_aggregates({'releases': [{}, {}, {}]}) == release_aggregate_3_empty

    with open(os.path.join('cove_ocds', 'fixtures', 'release_aggregate.json')) as fp:
        data = json.load(fp)

    assert get_releases_aggregates({'releases': data['releases']}) == EXPECTED_RELEASE_AGGREGATE

    # test if a release is duplicated
    actual = get_releases_aggregates({'releases': data['releases'] + data['releases']})
    actual_cleaned = {key: actual[key] for key in actual if 'doc' not in key}
    actual_cleaned.pop('contracts_without_awards')

    expected_cleaned = {key: EXPECTED_RELEASE_AGGREGATE[key] for key in EXPECTED_RELEASE_AGGREGATE if 'doc' not in key}
    expected_cleaned['tags'] = {'planning': 2, 'tender': 2}
    expected_cleaned.pop('contracts_without_awards')
    expected_cleaned['release_count'] = 2
    expected_cleaned['duplicate_release_ids'] = ['1']

    assert actual_cleaned == expected_cleaned

    with open(os.path.join('cove_ocds', 'fixtures', 'samplerubbish.json')) as fp:
        data = json.load(fp)

    actual = get_releases_aggregates(data)
    actual_cleaned = {key: actual[key] for key in actual if isinstance(actual[key], (str, int, float))}

    assert actual_cleaned == EXPECTED_RELEASE_AGGREGATE_RANDOM

    with open(os.path.join('cove', 'fixtures', 'badfile.json')) as fp:
        data = json.load(fp)

    actual = get_releases_aggregates(data, ignore_errors=True)

    assert actual == {}
Example #3
def test_get_releases_aggregates():
    assert get_releases_aggregates({}) == EMPTY_RELEASE_AGGREGATE
    assert get_releases_aggregates({'releases': []}) == EMPTY_RELEASE_AGGREGATE
    release_aggregate_3_empty = EMPTY_RELEASE_AGGREGATE.copy()
    release_aggregate_3_empty['release_count'] = 3
    assert get_releases_aggregates({'releases': [{}, {}, {}]}) == release_aggregate_3_empty

    with open(os.path.join('cove_ocds', 'fixtures', 'release_aggregate.json')) as fp:
        data = json.load(fp)

    assert get_releases_aggregates({'releases': data['releases']}) == EXPECTED_RELEASE_AGGREGATE

    # test if a release is duplicated
    actual = get_releases_aggregates({'releases': data['releases'] + data['releases']})
    actual_cleaned = {key: actual[key] for key in actual if 'doc' not in key}
    actual_cleaned.pop('contracts_without_awards')

    expected_cleaned = {key: EXPECTED_RELEASE_AGGREGATE[key] for key in EXPECTED_RELEASE_AGGREGATE if 'doc' not in key}
    expected_cleaned['tags'] = {'planning': 2, 'tender': 2}
    expected_cleaned.pop('contracts_without_awards')
    expected_cleaned['release_count'] = 2
    expected_cleaned['duplicate_release_ids'] = ['1']

    assert actual_cleaned == expected_cleaned

    with open(os.path.join('cove_ocds', 'fixtures', 'samplerubbish.json')) as fp:
        data = json.load(fp)

    actual = get_releases_aggregates(data)
    actual_cleaned = {key: actual[key] for key in actual if isinstance(actual[key], (str, int, float))}

    assert actual_cleaned == EXPECTED_RELEASE_AGGREGATE_RANDOM

    with open(os.path.join('cove_ocds', 'fixtures', 'badfile.json')) as fp:
        data = json.load(fp)

    actual = get_releases_aggregates(data, ignore_errors=True)

    assert actual == {}
Example #4
def test_get_releases_aggregates_dict(json_data):
    assume(type(json_data) is dict)
    get_releases_aggregates(json_data)
Example #5
def test_get_releases_aggregates(json_data):
    get_releases_aggregates(json_data)
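Examples #4 and #5 call assume() and take json_data as a test parameter, which is the shape of hypothesis property-based tests whose @given wiring is not shown above. Below is a minimal sketch of that wiring, assuming hypothesis; the json_data strategy and the import of get_releases_aggregates are illustrative assumptions, not the project's actual setup.

# A minimal sketch, assuming hypothesis. The strategy below is an
# illustrative stand-in for whatever strategy the project actually uses,
# and get_releases_aggregates is assumed to be importable from the project
# under test (its import path is not shown in the examples).
from hypothesis import assume, given, strategies as st

# Arbitrary JSON-like values: scalars, plus lists and dicts built from them.
json_data_strategy = st.recursive(
    st.none() | st.booleans() | st.floats(allow_nan=False) | st.text(),
    lambda children: st.lists(children) | st.dictionaries(st.text(), children),
)

@given(json_data_strategy)
def test_get_releases_aggregates_dict(json_data):
    # Discard non-dict draws so only top-level JSON objects are exercised.
    assume(type(json_data) is dict)
    get_releases_aggregates(json_data)

assume() rejects non-dict inputs instead of failing on them, so the property being checked is simply that get_releases_aggregates never raises on an arbitrary dict.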
def common_checks_ocds(context,
                       upload_dir,
                       json_data,
                       schema_obj,
                       api=False,
                       cache=True):
    schema_name = schema_obj.pkg_schema_name
    common_checks = common_checks_context(upload_dir,
                                          json_data,
                                          schema_obj,
                                          schema_name,
                                          context,
                                          fields_regex=True,
                                          api=api,
                                          cache=cache)
    validation_errors = common_checks["context"]["validation_errors"]

    new_validation_errors = []
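    # Rewrite each validation error: swap in a lookup message where one
    # exists, make the message HTML-safe, and attach schema metadata below.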
    for (json_key, values) in validation_errors:
        error = json.loads(json_key)
        new_message = validation_error_lookup.get(error["message_type"])
        if new_message:
            error["message_safe"] = conditional_escape(new_message)
        else:
            if "message_safe" in error:
                error["message_safe"] = mark_safe(error["message_safe"])
            else:
                error["message_safe"] = conditional_escape(error["message"])

        schema_block, ref_info = lookup_schema(
            schema_obj.get_pkg_schema_obj(deref=True), error["path_no_number"])
        if schema_block and error["message_type"] != "required":
            if "description" in schema_block:
                error["schema_title"] = escape(schema_block.get("title", ""))
                error["schema_description_safe"] = mark_safe(
                    bleach.clean(md.render(schema_block["description"]),
                                 tags=bleach.sanitizer.ALLOWED_TAGS + ["p"]))
            if ref_info:
                ref = ref_info["reference"]["$ref"]
                if ref.endswith("release-schema.json"):
                    ref = ""
                else:
                    ref = ref.strip("#")
                ref_path = "/".join(ref_info["path"])
                schema = "release-schema.json"
            else:
                ref = ""
                ref_path = error["path_no_number"]
                schema = "release-package-schema.json"
            error["docs_ref"] = format_html("{},{},{}", schema, ref, ref_path)

        new_validation_errors.append(
            [json.dumps(error, sort_keys=True), values])
    common_checks["context"]["validation_errors"] = new_validation_errors

    context.update(common_checks["context"])

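    # Record packages get records aggregates; release packages get releases
    # aggregates plus additional (open/closed) codelist values.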
    if schema_name == "record-package-schema.json":
        context["records_aggregates"] = get_records_aggregates(
            json_data, ignore_errors=bool(validation_errors))
        # Do this for records, as there's no record-schema.json (this probably
        # causes problems for flatten-tool)
        context["schema_url"] = schema_obj.pkg_schema_url
    else:
        additional_codelist_values = get_additional_codelist_values(
            schema_obj, json_data)
        closed_codelist_values = {
            key: value
            for key, value in additional_codelist_values.items()
            if not value["isopen"]
        }
        open_codelist_values = {
            key: value
            for key, value in additional_codelist_values.items()
            if value["isopen"]
        }

        context.update({
            "releases_aggregates":
            get_releases_aggregates(json_data,
                                    ignore_errors=bool(validation_errors)),
            "additional_closed_codelist_values":
            closed_codelist_values,
            "additional_open_codelist_values":
            open_codelist_values,
        })

    additional_checks = run_additional_checks(json_data,
                                              TEST_CLASSES["additional"],
                                              ignore_errors=True,
                                              return_on_error=None)

    context.update({"additional_checks": additional_checks})

    context = add_conformance_rule_errors(context, json_data, schema_obj)
    return context