def check_simple_subschema(
    simplified_reader_schema,
    simplified_writer_schema,
    original_reader_type,
    original_writer_type,
    location,
) -> SchemaCompatibilityResult:
    """Compare the lone entry of a single-element combiner directly against the other schema."""
    rec_result = compatibility_rec(simplified_reader_schema, simplified_writer_schema, location)

    if is_compatible(rec_result):
        return rec_result
    return type_mismatch(original_reader_type, original_writer_type, location)
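

# Illustrative sketch only, not called by the checker: a single-entry
# anyOf/oneOf/allOf accepts exactly the instances its lone subschema accepts,
# which is why the special case above can unwrap it and recurse directly.
# Assumes the third-party `jsonschema` package is available; the sample
# schemas are made up for the example.
def _example_simple_subschema_equivalence() -> None:
    from jsonschema import Draft7Validator

    wrapped = Draft7Validator({"anyOf": [{"type": "integer"}]})
    plain = Draft7Validator({"type": "integer"})
    for candidate in (1, "1", None):
        # Both schemas accept and reject exactly the same instances.
        assert wrapped.is_valid(candidate) == plain.is_valid(candidate)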


def count_uniquely_compatible_schemas(
    reader_type: Subschema,
    reader_schema,
    writer_schema,
    location: List[str],
) -> int:
    # allOf/anyOf/oneOf subschemas do not enforce order, as a consequence the
    # new schema may change the order of the entries without breaking
    # compatibility.
    #
    # Subschemas of these keywords evaluate the instance completely
    # independently such that the results of one such subschema MUST NOT
    # impact the results of sibling subschemas. Therefore subschemas may be
    # applied in any order.
    #
    # https://json-schema.org/draft/2020-12/json-schema-core.html#rfc.section.10.2
    @dataclass(unsafe_hash=True, frozen=True)
    class Node:
        reader: str
        pos: int

    reader_node_schema = [(Node("reader", reader_pos), schema) for reader_pos, schema in enumerate(reader_schema)]
    writer_node_schema = [(Node("writer", writer_pos), schema) for writer_pos, schema in enumerate(writer_schema)]

    compatible_edges = list()
    top_nodes = set()
    for (reader_node, reader_subschema), (writer_node, writer_subschema) in product(
        reader_node_schema, writer_node_schema
    ):
        rec_result = compatibility_rec(
            reader_subschema,
            writer_subschema,
            location + [reader_type.value, str(writer_node.pos)],
        )
        if is_compatible(rec_result):
            top_nodes.add(reader_node)
            compatible_edges.append((reader_node, writer_node))

    compatibility_graph = nx.Graph(compatible_edges)
    matching = nx.algorithms.bipartite.maximum_matching(compatibility_graph, top_nodes)

    # Dividing by two because the result has the arrows for both directions
    assert len(matching) % 2 == 0, "the length of the matching must be even"
    return len(matching) // 2
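

# Minimal, self-contained sketch (illustration only; the toy node names are
# made up) of the matching step above: every compatible (reader, writer)
# subschema pair becomes an edge, and the size of a maximum bipartite matching
# is the number of reader subschemas that are covered by *distinct* writer
# subschemas.
def _example_maximum_matching() -> None:
    import networkx as nx

    edges = [("reader_0", "writer_0"), ("reader_0", "writer_1"), ("reader_1", "writer_1")]
    graph = nx.Graph(edges)
    matching = nx.algorithms.bipartite.maximum_matching(graph, top_nodes={"reader_0", "reader_1"})
    # maximum_matching returns both directions of every matched edge, hence
    # the division by two, mirroring count_uniquely_compatible_schemas.
    assert len(matching) // 2 == 2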


def compatibility_subschemas(reader_schema, writer_schema, location: List[str]) -> SchemaCompatibilityResult:
    """Compatibility checks for the allOf/anyOf/oneOf combining keywords."""
    # https://json-schema.org/draft/2020-12/json-schema-core.html#rfc.section.10
    # pylint: disable=too-many-return-statements
    reader_subschemas_and_type = maybe_get_subschemas_and_type(reader_schema)
    writer_subschemas_and_type = maybe_get_subschemas_and_type(writer_schema)

    reader_subschemas: Optional[List[Any]]
    reader_type: JSONSCHEMA_TYPES
    if reader_subschemas_and_type is not None:
        reader_subschemas = reader_subschemas_and_type[0]
        reader_type = reader_subschemas_and_type[1]
        reader_has_subschema = reader_type in (Subschema.ALL_OF, Subschema.ANY_OF, Subschema.ONE_OF)
    else:
        reader_subschemas = None
        reader_type = get_type_of(reader_schema)
        reader_has_subschema = False

    writer_subschemas: Optional[List[Any]]
    writer_type: JSONSCHEMA_TYPES
    if writer_subschemas_and_type is not None:
        writer_subschemas = writer_subschemas_and_type[0]
        writer_type = writer_subschemas_and_type[1]
        writer_has_subschema = writer_type in (Subschema.ALL_OF, Subschema.ANY_OF, Subschema.ONE_OF)
    else:
        writer_subschemas = None
        writer_type = get_type_of(writer_schema)
        writer_has_subschema = False

    is_reader_special_case = reader_has_subschema and not writer_has_subschema and is_simple_subschema(reader_schema)
    is_writer_special_case = not reader_has_subschema and writer_has_subschema and is_simple_subschema(writer_schema)

    subschema_location = location + [get_name_of(reader_type)]

    if is_reader_special_case:
        assert reader_subschemas
        return check_simple_subschema(reader_subschemas[0], writer_schema, reader_type, writer_type, subschema_location)

    if is_writer_special_case:
        assert writer_subschemas
        return check_simple_subschema(reader_schema, writer_subschemas[0], reader_type, writer_type, subschema_location)

    if reader_type in (Subschema.ANY_OF, Subschema.ONE_OF) and not writer_has_subschema:
        assert isinstance(reader_type, Subschema)
        for reader_subschema in reader_schema[reader_type.value]:
            rec_result = compatibility_rec(reader_subschema, writer_schema, subschema_location)
            if is_compatible(rec_result):
                return rec_result
        return type_mismatch(reader_type, writer_type, subschema_location)

    if reader_subschemas is not None and writer_subschemas is not None:
        if reader_type not in (Subschema.ANY_OF, writer_type):
            return SchemaCompatibilityResult.incompatible(
                Incompatibility.combined_type_changed,
                message=f"incompatible subschema change, from {reader_type} to {writer_type}",
                location=subschema_location,
            )

        len_reader_subschemas = len(reader_subschemas)
        len_writer_subschemas = len(writer_subschemas)

        if reader_type == Subschema.ALL_OF and len_writer_subschemas < len_reader_subschemas:
            msg = (
                f"Not all required schemas were provided, number of required "
                f"schemas increased from {len_writer_subschemas} to "
                f"{len_reader_subschemas}"
            )
            return SchemaCompatibilityResult.incompatible(
                Incompatibility.product_type_extended,
                message=msg,
                location=subschema_location,
            )

        # oneOf/anyOf differ on annotation collection, not validation.
        if reader_type in (Subschema.ANY_OF, Subschema.ONE_OF) and len_writer_subschemas > len_reader_subschemas:
            msg = (
                f"Not all schemas are accepted, number of schemas "
                f"reduced from {len_writer_subschemas} to "
                f"{len_reader_subschemas}"
            )
            return SchemaCompatibilityResult.incompatible(
                Incompatibility.sum_type_narrowed,
                message=msg,
                location=subschema_location,
            )

        if reader_type == Subschema.ALL_OF:
            qty_of_required_compatible_subschemas = len_reader_subschemas
        else:
            qty_of_required_compatible_subschemas = len_writer_subschemas

        compatible_schemas_count = count_uniquely_compatible_schemas(
            reader_type,
            reader_subschemas,
            writer_subschemas,
            subschema_location,
        )
        if compatible_schemas_count < qty_of_required_compatible_subschemas:
            return SchemaCompatibilityResult.incompatible(
                Incompatibility.combined_type_subschemas_changed,
                message="subschemas are incompatible",
                location=subschema_location,
            )
        return SchemaCompatibilityResult.compatible()

    return type_mismatch(reader_type, writer_type, subschema_location)
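

# Illustrative sketch only, not called by the checker: why shrinking an
# anyOf/oneOf is reported as sum_type_narrowed above. Data produced under the
# old (writer) schema may use a branch that the new (reader) schema no longer
# accepts. Assumes the third-party `jsonschema` package is available; the
# sample schemas are made up for the example.
def _example_sum_type_narrowed() -> None:
    from jsonschema import Draft7Validator

    writer = Draft7Validator({"anyOf": [{"type": "integer"}, {"type": "string"}, {"type": "boolean"}]})
    reader = Draft7Validator({"anyOf": [{"type": "integer"}, {"type": "string"}]})
    document = True
    assert writer.is_valid(document)  # the old schema could have produced this document
    assert not reader.is_valid(document)  # the narrowed reader schema rejects it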