def test_schemaregistry_basic_forwards_compatibility():
    """
    Forward compatibility: A new schema is forward compatible if the previous
    schema can read data written in this schema.
    """
    # Each entry: (reader, writer, reason the pair must be compatible).
    compatible_cases = [
        (schema1, schema2, "adding a field is a forward compatible change"),
        (schema1, schema3, "adding a field is a forward compatible change"),
        (schema2, schema3, "adding a field is a forward compatible change"),
        (schema3, schema2, "adding a field is a forward compatible change"),
    ]
    for reader, writer, msg in compatible_cases:
        res = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
        assert res == SchemaCompatibilityResult.compatible(), msg

    msg = "removing a default is not a transitively compatible change"
    # Only schema 2 is checked!
    # res = ReaderWriterCompatibilityChecker().get_compatibility(schema3, schema1)
    # assert res != SchemaCompatibilityResult.compatible(), msg
    res = ReaderWriterCompatibilityChecker().get_compatibility(schema2, schema1)
    assert res == SchemaCompatibilityResult.compatible(), msg
def test_schema_compatibility_missing_union_branch():
    """A reader union that lacks a branch the writer union may emit is incompatible."""
    # Each case: (reader, writer, expected message set, expected location set).
    cases = [
        (INT_UNION_SCHEMA, INT_STRING_UNION_SCHEMA,
         {"reader union lacking writer type: STRING"}, {"/1"}),
        (STRING_UNION_SCHEMA, INT_STRING_UNION_SCHEMA,
         {"reader union lacking writer type: INT"}, {"/0"}),
        (INT_UNION_SCHEMA, UNION_INT_RECORD1,
         {"reader union lacking writer type: RECORD"}, {"/1"}),
        (INT_UNION_SCHEMA, UNION_INT_RECORD2,
         {"reader union lacking writer type: RECORD"}, {"/1"}),
        (UNION_INT_RECORD1, UNION_INT_RECORD2,
         {"reader union lacking writer type: RECORD"}, {"/1"}),
        (INT_UNION_SCHEMA, UNION_INT_ENUM1_AB,
         {"reader union lacking writer type: ENUM"}, {"/1"}),
        (INT_UNION_SCHEMA, UNION_INT_FIXED_4_BYTES,
         {"reader union lacking writer type: FIXED"}, {"/1"}),
        (INT_UNION_SCHEMA, UNION_INT_BOOLEAN,
         {"reader union lacking writer type: BOOLEAN"}, {"/1"}),
        (INT_UNION_SCHEMA, LONG_UNION_SCHEMA,
         {"reader union lacking writer type: LONG"}, {"/0"}),
        (INT_UNION_SCHEMA, FLOAT_UNION_SCHEMA,
         {"reader union lacking writer type: FLOAT"}, {"/0"}),
        (INT_UNION_SCHEMA, DOUBLE_UNION_SCHEMA,
         {"reader union lacking writer type: DOUBLE"}, {"/0"}),
        (INT_UNION_SCHEMA, BYTES_UNION_SCHEMA,
         {"reader union lacking writer type: BYTES"}, {"/0"}),
        (INT_UNION_SCHEMA, UNION_INT_ARRAY_INT,
         {"reader union lacking writer type: ARRAY"}, {"/1"}),
        (INT_UNION_SCHEMA, UNION_INT_MAP_INT,
         {"reader union lacking writer type: MAP"}, {"/1"}),
        (INT_UNION_SCHEMA, UNION_INT_NULL,
         {"reader union lacking writer type: NULL"}, {"/1"}),
        (INT_UNION_SCHEMA, INT_LONG_FLOAT_DOUBLE_UNION_SCHEMA,
         {
             "reader union lacking writer type: LONG",
             "reader union lacking writer type: FLOAT",
             "reader union lacking writer type: DOUBLE"
         },
         {"/1", "/2", "/3"}),
        (A_DINT_B_DINT_UNION_RECORD1, A_DINT_B_DINT_STRING_UNION_RECORD1,
         {"reader union lacking writer type: STRING"}, {"/fields/1/type/1"}),
    ]
    for reader, writer, expected_messages, expected_locations in cases:
        result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
        assert result.compatibility is SchemaCompatibilityType.incompatible
        assert result.messages == expected_messages
        assert result.locations == expected_locations
def test_schema_compatibility_reader_field_missing_default_value():
    """A reader field absent from the writer and lacking a default is incompatible."""
    cases = [
        (A_INT_RECORD1, EMPTY_RECORD1, "a", "/fields/0"),
        (A_INT_B_DINT_RECORD1, EMPTY_RECORD1, "a", "/fields/0"),
    ]
    for reader, writer, expected_msg, expected_loc in cases:
        result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
        assert result.compatibility is SchemaCompatibilityType.incompatible
        # Exactly one diagnostic is produced for the missing default.
        assert len(result.messages) == 1 and len(result.locations) == 1
        assert "".join(result.messages) == expected_msg
        assert "".join(result.locations) == expected_loc
def test_schema_compatibility_name_mismatch():
    """Named types (enum, record, fixed) with differing names are incompatible."""
    cases = [
        (ENUM1_AB_SCHEMA, ENUM2_AB_SCHEMA, "expected: Enum2", "/name"),
        (EMPTY_RECORD2, EMPTY_RECORD1, "expected: Record1", "/name"),
        (FIXED_4_BYTES, FIXED_4_ANOTHER_NAME, "expected: AnotherName", "/name"),
        (A_DINT_B_DENUM_1_RECORD1, A_DINT_B_DENUM_2_RECORD1,
         "expected: Enum2", "/fields/1/type/name"),
    ]
    for reader, writer, expected_msg, expected_loc in cases:
        result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
        assert result.compatibility is SchemaCompatibilityType.incompatible
        assert expected_msg in result.messages
        assert expected_loc in result.locations
def test_schema_compatibility_missing_enum_symbols():
    """A reader enum missing symbols the writer may produce is incompatible."""
    cases = [
        # str(set) representation
        (ENUM1_AB_SCHEMA, ENUM1_ABC_SCHEMA, "{'C'}", "/symbols"),
        (ENUM1_BC_SCHEMA, ENUM1_ABC_SCHEMA, "{'A'}", "/symbols"),
        (RECORD1_WITH_ENUM_AB, RECORD1_WITH_ENUM_ABC,
         "{'C'}", "/fields/0/type/symbols"),
    ]
    for reader, writer, expected_msg, expected_loc in cases:
        result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
        assert result.compatibility is SchemaCompatibilityType.incompatible
        assert expected_msg in result.messages
        assert expected_loc in result.locations
def test_schema_compatibility_fixed_size_mismatch():
    """Fixed schemas whose sizes differ must be reported as incompatible.

    BUG FIX: the assert failure messages previously referenced
    ``result.location`` and ``result.message`` (singular), attributes that do
    not exist — the result object exposes the plural ``locations`` and
    ``messages`` (as used throughout this file). A failing assertion would
    therefore have raised AttributeError instead of printing the diagnostic.
    """
    incompatible_fixed_pairs = [
        (FIXED_4_BYTES, FIXED_8_BYTES, "expected: 8, found: 4", "/size"),
        (FIXED_8_BYTES, FIXED_4_BYTES, "expected: 4, found: 8", "/size"),
        (A_DINT_B_DFIXED_8_BYTES_RECORD1, A_DINT_B_DFIXED_4_BYTES_RECORD1,
         "expected: 4, found: 8", "/fields/1/type/size"),
        (A_DINT_B_DFIXED_4_BYTES_RECORD1, A_DINT_B_DFIXED_8_BYTES_RECORD1,
         "expected: 8, found: 4", "/fields/1/type/size"),
    ]
    for (reader, writer, message, location) in incompatible_fixed_pairs:
        result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
        assert result.compatibility is SchemaCompatibilityType.incompatible
        assert location in result.locations, f"expected {location}, found {result.locations}"
        assert message in result.messages, f"expected {message}, found {result.messages}"
def test_schemaregistry_basic_backwards_transitive_compatibility():
    """
    Backward transitive compatibility: A new schema is backward compatible if
    it can be used to read the data written in all previous schemas.
    """
    # Each entry: (reader, writer, expect_compatible, reason).
    cases = [
        (schema8, schema1, True,
         "iteratively adding fields with defaults is a compatible change"),
        (schema8, schema2, True,
         "iteratively adding fields with defaults is a compatible change"),
        (schema2, schema1, True,
         "adding a field with default is a backward compatible change"),
        (schema3, schema2, True,
         "removing a default is a compatible change, but not transitively"),
        (schema3, schema2, True,
         "removing a default is not a transitively compatible change"),
        (schema3, schema1, False,
         "removing a default is not a transitively compatible change"),
    ]
    for reader, writer, expect_compatible, msg in cases:
        res = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
        if expect_compatible:
            assert res == SchemaCompatibilityResult.compatible(), msg
        else:
            assert res != SchemaCompatibilityResult.compatible(), msg
def test_basic_full_compatibility():
    """Full compatibility: A new schema is fully compatible if it is both
    backward and forward compatible."""
    def check(reader, writer):
        return ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)

    msg = "adding a field with default is a backward and a forward compatible change"
    assert check(schema2, schema1) == SchemaCompatibilityResult.compatible(), msg
    assert check(schema1, schema2) == SchemaCompatibilityResult.compatible(), msg

    msg = "transitively adding a field without a default is not a compatible change"
    # Only schema 2 is checked!
    # res = ReaderWriterCompatibilityChecker().get_compatibility(schema3, schema1)
    # assert res != SchemaCompatibilityResult.compatible(), msg
    # res = ReaderWriterCompatibilityChecker().get_compatibility(schema1, schema3)
    # assert res != SchemaCompatibilityResult.compatible(), msg
    assert check(schema3, schema2) == SchemaCompatibilityResult.compatible(), msg
    assert check(schema2, schema3) == SchemaCompatibilityResult.compatible(), msg

    msg = "transitively removing a field without a default is not a compatible change"
    # Only schema 2 is checked!
    # res = ReaderWriterCompatibilityChecker().get_compatibility(schema1, schema3)
    # assert res == SchemaCompatibilityResult.compatible(), msg
    # res = ReaderWriterCompatibilityChecker().get_compatibility(schema3, schema1)
    # assert res == SchemaCompatibilityResult.compatible(), msg
    # NOTE(review): the same (schema1, schema2) pair is checked twice here in
    # the original; presumably the second check was meant to be (schema2,
    # schema1) — confirm intent before changing.
    assert check(schema1, schema2) == SchemaCompatibilityResult.compatible(), msg
    assert check(schema1, schema2) == SchemaCompatibilityResult.compatible(), msg
def test_schemaregistry_basic_forwards_transitive_compatibility():
    """
    Forward transitive compatibility: A new schema is forward compatible if
    all previous schemas can read data written in this schema.
    """
    # Each entry: (reader, writer, expect_compatible, reason).
    cases = [
        (schema8, schema1, True,
         "iteratively removing fields with defaults is a compatible change"),
        (schema2, schema1, True,
         "iteratively removing fields with defaults is a compatible change"),
        (schema3, schema2, True,
         "adding default to a field is a compatible change"),
        (schema2, schema1, True,
         "removing a field with a default is a compatible change"),
        (schema2, schema1, True,
         "removing a default is not a transitively compatible change"),
        (schema3, schema1, False,
         "removing a default is not a transitively compatible change"),
    ]
    for reader, writer, expect_compatible, msg in cases:
        res = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
        if expect_compatible:
            assert res == SchemaCompatibilityResult.compatible(), msg
        else:
            assert res != SchemaCompatibilityResult.compatible(), msg
def test_schema_compatibility_type_mismatch():
    """Reader/writer primitive and container type mismatches are incompatible."""
    cases = [
        (NULL_SCHEMA, INT_SCHEMA,
         "reader type: NULL not compatible with writer type: INT", "/"),
        (NULL_SCHEMA, LONG_SCHEMA,
         "reader type: NULL not compatible with writer type: LONG", "/"),
        (BOOLEAN_SCHEMA, INT_SCHEMA,
         "reader type: BOOLEAN not compatible with writer type: INT", "/"),
        (INT_SCHEMA, NULL_SCHEMA,
         "reader type: INT not compatible with writer type: NULL", "/"),
        (INT_SCHEMA, BOOLEAN_SCHEMA,
         "reader type: INT not compatible with writer type: BOOLEAN", "/"),
        (INT_SCHEMA, LONG_SCHEMA,
         "reader type: INT not compatible with writer type: LONG", "/"),
        (INT_SCHEMA, FLOAT_SCHEMA,
         "reader type: INT not compatible with writer type: FLOAT", "/"),
        (INT_SCHEMA, DOUBLE_SCHEMA,
         "reader type: INT not compatible with writer type: DOUBLE", "/"),
        (LONG_SCHEMA, FLOAT_SCHEMA,
         "reader type: LONG not compatible with writer type: FLOAT", "/"),
        (LONG_SCHEMA, DOUBLE_SCHEMA,
         "reader type: LONG not compatible with writer type: DOUBLE", "/"),
        (FLOAT_SCHEMA, DOUBLE_SCHEMA,
         "reader type: FLOAT not compatible with writer type: DOUBLE", "/"),
        (DOUBLE_SCHEMA, STRING_SCHEMA,
         "reader type: DOUBLE not compatible with writer type: STRING", "/"),
        (FIXED_4_BYTES, STRING_SCHEMA,
         "reader type: FIXED not compatible with writer type: STRING", "/"),
        (STRING_SCHEMA, BOOLEAN_SCHEMA,
         "reader type: STRING not compatible with writer type: BOOLEAN", "/"),
        (STRING_SCHEMA, INT_SCHEMA,
         "reader type: STRING not compatible with writer type: INT", "/"),
        (BYTES_SCHEMA, NULL_SCHEMA,
         "reader type: BYTES not compatible with writer type: NULL", "/"),
        (BYTES_SCHEMA, INT_SCHEMA,
         "reader type: BYTES not compatible with writer type: INT", "/"),
        (A_INT_RECORD1, INT_SCHEMA,
         "reader type: RECORD not compatible with writer type: INT", "/"),
        (INT_ARRAY_SCHEMA, LONG_ARRAY_SCHEMA,
         "reader type: INT not compatible with writer type: LONG", "/items"),
        (INT_MAP_SCHEMA, INT_ARRAY_SCHEMA,
         "reader type: MAP not compatible with writer type: ARRAY", "/"),
        (INT_ARRAY_SCHEMA, INT_MAP_SCHEMA,
         "reader type: ARRAY not compatible with writer type: MAP", "/"),
        (INT_MAP_SCHEMA, LONG_MAP_SCHEMA,
         "reader type: INT not compatible with writer type: LONG", "/values"),
        (INT_SCHEMA, ENUM2_AB_SCHEMA,
         "reader type: INT not compatible with writer type: ENUM", "/"),
        (ENUM2_AB_SCHEMA, INT_SCHEMA,
         "reader type: ENUM not compatible with writer type: INT", "/"),
        (FLOAT_SCHEMA, INT_LONG_FLOAT_DOUBLE_UNION_SCHEMA,
         "reader type: FLOAT not compatible with writer type: DOUBLE", "/"),
        (LONG_SCHEMA, INT_FLOAT_UNION_SCHEMA,
         "reader type: LONG not compatible with writer type: FLOAT", "/"),
        (INT_SCHEMA, INT_FLOAT_UNION_SCHEMA,
         "reader type: INT not compatible with writer type: FLOAT", "/"),
        (INT_LIST_RECORD, LONG_LIST_RECORD,
         "reader type: INT not compatible with writer type: LONG", "/fields/0/type"),
        (NULL_SCHEMA, INT_SCHEMA,
         "reader type: NULL not compatible with writer type: INT", "/"),
    ]
    for reader, writer, expected_msg, expected_loc in cases:
        result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
        assert result.compatibility is SchemaCompatibilityType.incompatible
        assert expected_msg in result.messages
        assert expected_loc in result.locations
def test_schemaregistry_basic_backwards_compatibility():
    """
    Backward compatibility: A new schema is backward compatible if it can be
    used to read the data written in the previous schema.
    """
    # Each entry: (reader, writer, expect_compatible, reason).
    cases = [
        (schema2, schema1, True,
         "adding a field with default is a backward compatible change"),
        (schema3, schema1, False,
         "adding a field w/o default is NOT a backward compatible change"),
        (schema4, schema1, True,
         "changing field name with alias is a backward compatible change"),
        (schema6, schema1, True,
         "evolving a field type to a union is a backward compatible change"),
        (schema1, schema6, False,
         "removing a type from a union is NOT a backward compatible change"),
        (schema7, schema6, True,
         "adding a new type in union is a backward compatible change"),
        (schema6, schema7, False,
         "removing a type from a union is NOT a backward compatible change"),
    ]
    for reader, writer, expect_compatible, msg in cases:
        res = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
        if expect_compatible:
            assert res == SchemaCompatibilityResult.compatible(), msg
        else:
            assert res != SchemaCompatibilityResult.compatible(), msg
def test_simple_schema_promotion():
    """Field/record aliases let a reader resolve a writer's data; extra writer
    fields are ignored, and new nullable reader fields need a default."""
    def _record(name, fields, aliases=None):
        schema = {"name": name, "type": "record", "fields": fields}
        if aliases is not None:
            schema["aliases"] = aliases
        return parse_json_ignore_trailing(json.dumps(schema))

    def _nullable(name, typ="string"):
        return {"type": ["null", typ], "name": name, "default": None}

    reader = _record("foo", [{"type": "int", "name": "f1"}])
    field_alias_reader = _record(
        "foo", [{"type": "int", "name": "bar", "aliases": ["f1"]}])
    record_alias_reader = _record(
        "other", [{"type": "int", "name": "f1"}], aliases=["foo"])
    writer = _record("foo", [
        {"type": "int", "name": "f1"},
        {"type": "string", "name": "f2"},
    ])

    # alias testing
    res = ReaderWriterCompatibilityChecker().get_compatibility(
        field_alias_reader, writer)
    assert res.compatibility is SchemaCompatibilityType.compatible, res.locations
    res = ReaderWriterCompatibilityChecker().get_compatibility(
        record_alias_reader, writer)
    assert res.compatibility is SchemaCompatibilityType.compatible, res.locations

    res = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
    assert res == SchemaCompatibilityResult.compatible(), res
    res = ReaderWriterCompatibilityChecker().get_compatibility(writer, reader)
    assert res != SchemaCompatibilityResult.compatible(), res

    base_fields = [
        {"type": "string", "name": "provider"},
        _nullable("name"),
        _nullable("phone"),
        _nullable("email"),
        _nullable("reference"),
        _nullable("price", "double"),
    ]
    writer = parse_json_ignore_trailing(
        json.dumps({
            "type": "record",
            "name": "CA",
            "namespace": "ns1",
            "fields": base_fields,
        }))
    reader = parse_json_ignore_trailing(
        json.dumps({
            "type": "record",
            "name": "CA",
            "namespace": "ns1",
            "fields": base_fields + [_nullable("status_date")],
        }))
    res = ReaderWriterCompatibilityChecker().get_compatibility(
        writer=writer, reader=reader)
    assert res == SchemaCompatibilityResult.compatible(), res
def test_basic_full_transitive_compatibility():
    """
    Full transitive compatibility: A new schema is fully compatible if it is
    both transitively backward and transitively forward compatible with the
    entire schema history.
    """
    # Each entry: (reader, writer, expect_compatible, reason).
    cases = [
        (schema8, schema1, True,
         "iteratively adding fields with defaults is a compatible change"),
        (schema1, schema8, True,
         "iteratively adding fields with defaults is a compatible change"),
        (schema8, schema2, True,
         "iteratively adding fields with defaults is a compatible change"),
        (schema2, schema8, True,
         "iteratively adding fields with defaults is a compatible change"),
        (schema1, schema8, True,
         "iteratively removing fields with defaults is a compatible change"),
        (schema8, schema1, True,
         "iteratively removing fields with defaults is a compatible change"),
        (schema1, schema2, True,
         "iteratively removing fields with defaults is a compatible change"),
        (schema2, schema1, True,
         "iteratively removing fields with defaults is a compatible change"),
        (schema2, schema3, True,
         "adding default to a field is a compatible change"),
        (schema3, schema2, True,
         "adding default to a field is a compatible change"),
        (schema1, schema2, True,
         "removing a field with a default is a compatible change"),
        (schema2, schema1, True,
         "removing a field with a default is a compatible change"),
        (schema2, schema1, True,
         "adding a field with default is a compatible change"),
        (schema1, schema2, True,
         "adding a field with default is a compatible change"),
        (schema3, schema2, True,
         "removing a default from a field compatible change"),
        (schema2, schema3, True,
         "removing a default from a field compatible change"),
        (schema3, schema2, True,
         "transitively adding a field without a default is not a compatible change"),
        (schema3, schema1, False,
         "transitively adding a field without a default is not a compatible change"),
        (schema2, schema3, True,
         "transitively adding a field without a default is not a compatible change"),
        (schema1, schema3, True,
         "transitively adding a field without a default is not a compatible change"),
        (schema1, schema2, True,
         "transitively removing a field without a default is not a compatible change"),
        (schema1, schema3, True,
         "transitively removing a field without a default is not a compatible change"),
        (schema2, schema1, True,
         "transitively removing a field without a default is not a compatible change"),
        (schema3, schema1, False,
         "transitively removing a field without a default is not a compatible change"),
    ]
    for reader, writer, expect_compatible, msg in cases:
        res = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
        if expect_compatible:
            assert res == SchemaCompatibilityResult.compatible(), msg
        else:
            assert res != SchemaCompatibilityResult.compatible(), msg
def are_compatible(reader: Schema, writer: Schema) -> bool:
    """Return True when *reader* can read data written with *writer*."""
    result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
    return result.compatibility is SchemaCompatibilityType.compatible
def test_simple_schema_promotion_using_parse():
    """Schema promotion checks using ``parse`` directly.

    BUG FIX: this function was named ``test_simple_schema_promotion``,
    duplicating the earlier alias-testing function of the same name (flake8
    F811). The later definition shadowed the earlier one, so pytest silently
    never ran the alias tests. Renamed so both tests execute.
    """
    reader = parse(
        json.dumps({
            "name": "foo",
            "type": "record",
            "fields": [{
                "type": "int",
                "name": "f1"
            }]
        }))
    writer = parse(
        json.dumps({
            "name": "foo",
            "type": "record",
            "fields": [{
                "type": "int",
                "name": "f1"
            }, {
                "type": "string",
                "name": "f2"
            }]
        }))
    # Reader may ignore the writer's extra field ...
    res = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
    assert res.compatibility is SchemaCompatibilityType.compatible, res
    # ... but a reader field without a default cannot be resolved.
    res = ReaderWriterCompatibilityChecker().get_compatibility(writer, reader)
    assert res.compatibility is SchemaCompatibilityType.incompatible, res

    writer = parse(
        json.dumps({
            "type": "record",
            "name": "CA",
            "namespace": "ns1",
            "fields": [{
                "type": "string",
                "name": "provider"
            }, {
                "type": ["null", "string"],
                "name": "name",
                "default": None
            }, {
                "type": ["null", "string"],
                "name": "phone",
                "default": None
            }, {
                "type": ["null", "string"],
                "name": "email",
                "default": None
            }, {
                "type": ["null", "string"],
                "name": "reference",
                "default": None
            }, {
                "type": ["null", "double"],
                "name": "price",
                "default": None
            }]
        }))
    reader = parse(
        json.dumps({
            "type": "record",
            "name": "CA",
            "namespace": "ns1",
            "fields": [{
                "type": "string",
                "name": "provider"
            }, {
                "type": ["null", "string"],
                "name": "name",
                "default": None
            }, {
                "type": ["null", "string"],
                "name": "phone",
                "default": None
            }, {
                "type": ["null", "string"],
                "name": "email",
                "default": None
            }, {
                "type": ["null", "string"],
                "name": "reference",
                "default": None
            }, {
                "type": ["null", "double"],
                "name": "price",
                "default": None
            }, {
                "type": ["null", "string"],
                "name": "status_date",
                "default": None
            }]
        }))
    # New nullable reader field with a default is backward compatible.
    res = ReaderWriterCompatibilityChecker().get_compatibility(writer=writer, reader=reader)
    assert res.compatibility is SchemaCompatibilityType.compatible, res