def prepare_operands(s1, s2):
    """Resolve $refs, then canonicalize and simplify both schemas.

    NOTE: behaviour is undefined for recursive/circular $refs.

    :param s1: left-hand-side schema (plain dict).
    :param s2: right-hand-side schema (plain dict).
    :returns: the (lhs, rhs) pair, canonicalized with checkers embedded.
    """
    # Round-trip through jsonref so every $ref is resolved before
    # canonicalization starts.
    s1 = jsonref.loads(json.dumps(s1))
    s2 = jsonref.loads(json.dumps(s2))

    # Canonicalize and embed checkers for each side; this also validates
    # both the input schemas and their canonicalized forms.
    print_db("LHS", s1)
    print_db()
    s1 = simplify_schema_and_embed_checkers(canonicalize_schema(s1))
    print_db("LHS_canonical", s1)
    print_db()

    print_db("RHS", s2)
    print_db()
    s2 = simplify_schema_and_embed_checkers(canonicalize_schema(s2))
    print_db("RHS_canonical", s2)
    print_db()

    return s1, s2
def main():
    """Validate the JSON file given on the command line against the BCO schema."""
    json_schema_uri = 'https://raw.githubusercontent.com/biocompute-objects/BCO_Specification/1.3.1/schemas/biocomputeobject.json'
    parser = argparse.ArgumentParser()
    # NOTE(review): the positional `json_schema_uri` argument is parsed but the
    # code below uses the local constant instead — confirm which is intended.
    parser.add_argument('json_schema_uri', default=json_schema_uri,
                        help="json schema uri")
    parser.add_argument('json', type=argparse.FileType('r'),
                        help="json to validate")
    args = parser.parse_args()
    data = json.load(args.json)
    #schema = jsonref.load(args.json_schema_uri, jsonschema=True)
    # Wrap the URI in a one-key $ref document so jsonref fetches it.
    schema = jsonref.loads(f'{{ "$ref": "{json_schema_uri}" }}', jsonschema=True)
    return jsonschema.validate(data, schema)


# Load JSON Schema from the repository by using URL or local file using absolute path and 'file:' prefix
# NOTE(review): `schema_uri` is not defined anywhere in view — this
# module-level demo code will raise NameError as written; confirm its source.
schema = jsonref.loads(f'{{ "$ref": "{schema_uri}" }}', jsonschema=True)
data = {}
# Use the extended validator to fill in `data` with default values from the schema
for err in DefaultValidatingDraft7Validator(schema).iter_errors(data):
    # print validation errors with a schema path to make them easier to read/trace
    print(
        f'{err.message} in the schema path {err.schema.get("$id") }#{"/".join(err.schema_path)}'
    )
# Pretty print `data` showing the assigned default values.
print(json.dumps(data, indent=2))
def open_jsonref(fileUrl):
    """Load a $ref-resolving JSON document from a URL or a local file.

    :param fileUrl: an http(s) URL or a filesystem path.
    :returns: the parsed document with $refs resolved lazily by jsonref.
    """
    import jsonref
    import requests
    if fileUrl.startswith("http"):  # es URL
        pointer = requests.get(fileUrl)
        return jsonref.loads(pointer.content.decode('utf-8'))
    # es file — `with` guarantees the handle is closed (the original
    # leaked the open file object).
    with open(fileUrl, "r") as file:
        return jsonref.loads(file.read())
def item_type_schema():
    """Item type Jsonschema for records."""
    # Read the schema bundled with the package and resolve its $refs.
    raw = resource_string(
        'rero_ils.modules.item_types.jsonschemas',
        '/item_types/item_type-v0.0.1.json')
    return jsonref.loads(raw.decode('utf8'))
def __init__(
    self,
    schema_filename=None,
    root_schema_dict=None,
    main_sheet_name="main",
    rollup=False,
    root_id="ocid",
    use_titles=False,
):
    """Build the parser from either a schema file/URL or an in-memory dict.

    Exactly one of ``schema_filename`` and ``root_schema_dict`` must be given.
    """
    self.sub_sheets = {}
    self.main_sheet = Sheet()
    self.sub_sheet_mapping = {}
    self.main_sheet_name = main_sheet_name
    self.rollup = rollup
    self.root_id = root_id
    self.use_titles = use_titles

    # The two schema sources are mutually exclusive and one is required.
    if root_schema_dict is None and schema_filename is None:
        raise ValueError("One of schema_filename or root_schema_dict must be supplied")
    if root_schema_dict is not None and schema_filename is not None:
        raise ValueError("Only one of schema_filename or root_schema_dict should be supplied")

    if not schema_filename:
        self.root_schema_dict = root_schema_dict
        return

    if schema_filename.startswith("http"):
        import requests

        response = requests.get(schema_filename)
        # OrderedDict keeps schema key order stable for sheet generation.
        self.root_schema_dict = jsonref.loads(response.text, object_pairs_hook=OrderedDict)
    else:
        with codecs.open(schema_filename, encoding="utf-8") as schema_file:
            self.root_schema_dict = jsonref.load(schema_file, object_pairs_hook=OrderedDict)
def acq_account_schema():
    """Acq account Jsonschema for records."""
    # Read the schema bundled with the package and resolve its $refs.
    raw = resource_string(
        'rero_ils.modules.acq_accounts.jsonschemas',
        '/acq_accounts/acq_account-v0.0.1.json')
    return jsonref.loads(raw.decode('utf8'))
def test_dumps(self):
    """dumps() must serialize the original $ref, not the resolved value."""
    source = """[1, 2, {"$ref": "#/0"}, 3]"""
    document = loads(source)
    # The reference resolves transparently when the value is used...
    assert str(document) == "[1, 2, 1, 3]"
    # ...but dumping writes the original reference back out.
    assert dumps(document) == source
def __init__(self, ui, schema, font_size=11, font_size_form=None, data=DATA): """ :param ui: object containing ui-schema :param schema: structure containing the schema """ self.ui = ui # Use jsonref.loads to resolve $ref in schema. If we want to use external # references we need external resolver. schema_resolve_external = OpenapiResolver(schema).resolve() self.schema = jsonref.loads(json.dumps(schema_resolve_external)) self.resolver = jsonschema.RefResolver.from_schema(self.schema) self.font_size = font_size self.font_size_form = font_size_form or font_size self.data = data or {} self.line_feed = 5 * self.font_size self.root = ET.Element("html") head = ET.SubElement(self.root, "head") style = ET.SubElement(head, "link", { "href": "form.css", "rel": "stylesheet" }) self.body = ET.SubElement(self.root, "body") self.form = ET.SubElement(self.body, "form", attrib={ "action": "", "method": "post" })
def test_list():
    """Proxies inside lists are replaced in place by the container callback."""
    spec = {
        'foo': [{"$ref": "#/definitions/user"}],
        'definitions': {
            'user': {'properties': {'first_name': {'type': 'string'}}}
        },
    }
    loaded = jsonref.loads(json.dumps(spec))
    # Before post-processing the list element is still a lazy JsonRef proxy.
    assert type(loaded['foo'][0]) == jsonref.JsonRef
    post_process_spec(
        loaded,
        on_container_callbacks=(replace_jsonref_proxies_callback,))
    # Afterwards it is a plain dict equal to the referenced definition.
    assert type(loaded['foo'][0]) == dict
    assert spec['definitions']['user'] == loaded['foo'][0]
def notification_schema():
    """Notifications Jsonschema for records."""
    # Read the schema bundled with the package and resolve its $refs.
    raw = resource_string(
        'rero_ils.modules.notifications.jsonschemas',
        '/notifications/notification-v0.0.1.json')
    return jsonref.loads(raw.decode('utf8'))
def _load_json_schema(self, filename):
    """Load ``filename`` from ``self.schema_location`` and resolve its $refs.

    :param filename: schema file name relative to ``self.schema_location``.
    :returns: the parsed schema with $refs resolved (jsonschema mode).
    """
    absolute_path = '{}/{}'.format(self.schema_location, filename)
    # NOTE(review): the base URI is the schema FILE's own path plus '/',
    # so a relative ref like "x.json" resolves under a directory named
    # after the file. Confirm whether refs compensate (e.g. "../x.json");
    # otherwise the base should be the containing directory.
    base_uri = 'file://{}/'.format(absolute_path)
    with open(absolute_path) as schema_file:
        schemas_file = schema_file.read()
    return jsonref.loads(schemas_file, base_uri=base_uri, jsonschema=True)
def __init__(self, schema_filename=None, root_schema_dict=None, rollup=False,
             root_id=None, use_titles=False, disable_local_refs=False,
             truncation_length=3, exclude_deprecated_fields=False):
    """Build the schema parser from a file/URL or an in-memory dict.

    Exactly one of ``schema_filename`` and ``root_schema_dict`` must be
    supplied; supplying both or neither raises ``ValueError``.
    """
    self.sub_sheets = {}
    self.main_sheet = Sheet()
    self.sub_sheet_mapping = {}
    # `do_rollup` records the caller's request; `rollup` itself is the
    # set of rollup paths discovered while parsing.
    self.do_rollup = rollup
    self.rollup = set()
    self.root_id = root_id
    self.use_titles = use_titles
    self.truncation_length = truncation_length
    self.title_lookup = TitleLookup()
    self.flattened = {}
    self.exclude_deprecated_fields = exclude_deprecated_fields
    if root_schema_dict is None and schema_filename is None:
        raise ValueError(
            'One of schema_filename or root_schema_dict must be supplied')
    if root_schema_dict is not None and schema_filename is not None:
        raise ValueError(
            'Only one of schema_filename or root_schema_dict should be supplied'
        )
    if schema_filename:
        if schema_filename.startswith('http'):
            import requests
            r = requests.get(schema_filename)
            # OrderedDict keeps schema key order stable downstream.
            self.root_schema_dict = jsonref.loads(
                r.text, object_pairs_hook=OrderedDict)
        else:
            if disable_local_refs:
                # Loader that refuses to follow local-file $refs.
                with codecs.open(schema_filename,
                                 encoding="utf-8") as schema_file:
                    self.root_schema_dict = jsonref.load(
                        schema_file, object_pairs_hook=OrderedDict,
                        loader=JsonLoaderLocalRefsDisabled())
            else:
                # Build a file:// base URI so relative $refs resolve
                # against the schema's directory (py2/py3 differ in how).
                if sys.version_info[:2] > (3, 0):
                    base_uri = pathlib.Path(
                        os.path.realpath(schema_filename)).as_uri()
                else:
                    base_uri = urlparse.urljoin(
                        'file:',
                        urllib.pathname2url(
                            os.path.abspath(schema_filename)))
                with codecs.open(schema_filename,
                                 encoding="utf-8") as schema_file:
                    self.root_schema_dict = jsonref.load(
                        schema_file, object_pairs_hook=OrderedDict,
                        base_uri=base_uri)
    else:
        self.root_schema_dict = root_schema_dict
def unimarc_languages(self, key, value):
    """Get languages.

    languages: 008 and 041 [$a, repetitive]
    """
    # Valid language codes come from the shared languages-schema enum.
    schema_in_bytes = resource_string(
        'rero_ils.jsonschemas',
        'common/languages-v0.0.1.json'
    )
    schema = jsonref.loads(schema_in_bytes.decode('utf8'))
    valid_codes = schema['language']['enum']

    languages = utils.force_list(value.get('a'))
    result = [
        {'value': code, 'type': 'bf:Language'}
        for code in languages
        if code in valid_codes
    ]

    # $c subfields list the original languages the work was translated from.
    translated_from = utils.force_list(value.get('c'))
    if translated_from:
        self['translatedFrom'] = list(translated_from)
    return result
def patron_transaction_event_schema():
    """Patron transaction event Jsonschema for records."""
    # Read the schema bundled with the package and resolve its $refs.
    raw = resource_string(
        'rero_ils.modules.patron_transaction_events.jsonschemas',
        'patron_transaction_events/patron_transaction_event-v0.0.1.json')
    return jsonref.loads(raw.decode('utf8'))
def render_schema(path, filename):
    """Serve a schema file, optionally with its $refs resolved.

    With a ``resolve`` query argument the schema is parsed, its $refs are
    resolved against the configured base URI and the result is returned as
    JSON; otherwise the raw file is sent as-is.

    :param path: filesystem path of the schema file.
    :param filename: name used for the raw-file response.
    """
    if "resolve" in flask.request.args.keys():
        # Removed two leftover debug print() calls that leaked the
        # server-side path and config dir to stdout on every request.
        with open(path) as schema:
            return flask.jsonify(
                jsonref.loads(schema.read(), base_uri=config.SCHEMAS_BASE_URI))
    return flask.send_from_directory(config.SCHEMAS_RELATIVE_PATH, filename)
def _derefence_json(schema: t.Union[str, dict]) -> t.Any:
    """Dereference JSON schema.

    Args:
        schema: JSON schema as a string or dictionary.

    Returns:
        Dereferenced schema.

    Raises:
        ValueError: Wrong `schema` type.
    """
    if isinstance(schema, dict):
        # Normalize to a JSON string so both input forms share one code path.
        schema = json.dumps(schema)
    elif not isinstance(schema, str):
        raise ValueError(schema)
    return jsonref.loads(schema, jsonschema=True)
def test_dump(self, tmpdir):
    """dump() must write the original $ref, not the resolved value."""
    source = """[1, 2, {"$ref": "#/0"}, 3]"""
    document = loads(source)
    # The reference resolves transparently when the value is used...
    assert str(document) == "[1, 2, 1, 3]"
    out_file = tmpdir.join("out.json")
    dump(document, out_file)
    # ...but the serialized file keeps the reference intact.
    assert out_file.read() == source
def _deref_schema(schema_str, schema_host):
    """Parse ``schema_str``, resolving $refs through a host-aware loader."""
    loader = CustomJsonrefLoader(schema_url=schema_host)
    dereferenced = jsonref.loads(
        schema_str, loader=loader, object_pairs_hook=OrderedDict)
    # jsonref resolves lazily; repr() walks the whole structure and forces
    # every reference to be fetched now instead of at first access.
    repr(dereferenced)
    return dereferenced
def patron_type_schema():
    """Patron type Jsonschema for records."""
    # Read the schema bundled with the package and resolve its $refs.
    raw = resource_string(
        'rero_ils.modules.patron_types.jsonschemas',
        '/patron_types/patron_type-v0.0.1.json',
    )
    return jsonref.loads(raw.decode('utf8'))
def load_config(file):
    """Load a YAML config from a path or file-like object and expand $refs."""
    if hasattr(file, 'read'):
        raw = yaml.safe_load(file)
    else:
        with open(file, 'r') as handle:
            raw = yaml.safe_load(handle)
    # Round-trip through JSON so jsonref can resolve $refs, then strip the
    # proxy objects with unref().
    return unref(jsonref.loads(json.dumps(raw)))
def organisation_schema():
    """Organisation Jsonschema for records."""
    # Read the schema bundled with the package and resolve its $refs.
    raw = resource_string(
        'rero_ils.modules.organisations.jsonschemas',
        'organisations/organisation-v0.0.1.json',
    )
    return jsonref.loads(raw.decode('utf8'))
def get_schema(self, **kwargs):
    """Fetch a JSON schema object from S3 and decode it.

    :param kwargs: optional ``schema_name`` (defaults to ``schema.json``).
    :returns: the parsed schema.
    """
    schema_name = kwargs.get('schema_name', 'schema.json')
    object_key = f'{self._folder_name}/{schema_name}'
    s3 = boto3.resource('s3')
    raw_schema = s3.Object(self._bucket_name, object_key).get()['Body'].read()
    # AlgDecoder handles the custom encoding used when the schema was stored.
    return jsonref.loads(raw_schema, cls=AlgDecoder)
def validate_with_refs(schema_path, data):
    """Validate ``data`` against a schema whose relative $refs live beside it."""
    # Relative $refs resolve against the directory containing the schema.
    schema_dir = os.path.abspath(os.path.dirname(schema_path))
    base_uri = 'file://{}/'.format(schema_dir)
    with open(schema_path) as raw_schema:
        schema = jsonref.loads(raw_schema.read(), base_uri=base_uri,
                               jsonschema=True)
    validate(instance=data, schema=schema)
def circ_policy_schema():
    """Patron Jsonschema for records."""
    # Read the schema bundled with the package and resolve its $refs.
    raw = resource_string(
        'rero_ils.modules.circ_policies.jsonschemas',
        'circ_policies/circ_policy-v0.0.1.json',
    )
    return jsonref.loads(raw.decode('utf8'))
def get_schema(monkeypatch, schema_in_bytes):
    """Get json schema and replace $refs.

    For the resolving of the $ref we have to catch the request.get and get
    the referenced json schema directly from the resource.

    :param monkeypatch: https://docs.pytest.org/en/stable/monkeypatch.html
    :schema_in_bytes: schema in bytes.
    :returns: resolved json schema.
    """
    # Route requests.get through the mock so $refs are served locally.
    monkeypatch.setattr(requests, "get", mocked_requests_get)
    schema = jsonref.loads(schema_in_bytes.decode('utf8'))
    # Keep round-tripping through dumps/loads until it is a fixed point,
    # i.e. no $refs remain anywhere in the document.
    roundtripped = jsonref.loads(jsonref.dumps(schema))
    while schema != roundtripped:
        schema = roundtripped
        roundtripped = jsonref.loads(jsonref.dumps(schema))
    return schema
def json_schema(request):
    """
    JSON objects that correspond to JSON schemas.
    """
    # Dereference schemas as a workaround for
    # https://github.com/Julian/jsonschema/issues/447
    schema_text = root.joinpath(request.param).read_text()
    return jsonref.loads(schema_text, jsonschema=True)
def open_jsonref(fileUrl):
    """Best-effort load of a $ref-resolving JSON document from URL or file.

    :param fileUrl: an http(s) URL or a filesystem path.
    :returns: the parsed document, or "" on any failure (callers rely on
        the empty-string sentinel).
    """
    import requests
    if fileUrl.startswith("http"):  # es URL
        try:
            pointer = requests.get(fileUrl)
            # load_on_repr=False: printing the document must not force refs.
            output = jsonref.loads(pointer.content.decode('utf-8'),
                                   load_on_repr=False)
            return output
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit); still best-effort by design.
            return ""
    else:  # es file
        try:
            # `with` closes the handle (the original leaked it).
            with open(fileUrl, "r") as file:
                return jsonref.loads(file.read())
        except Exception:
            return ""
def process_bundles(source_dir=None, target_dir=None):
    """Validate and index policy-bundle JSON files under <source_dir>/bundles.

    Each *.json file is hashed (sha256), validated against the PolicyBundle
    schema from the engine's swagger document, and summarized into a record.

    :param source_dir: directory containing a ``bundles`` subdirectory.
    :param target_dir: accepted but unused in this function.
    :returns: dict mapping bundle name -> record (location, digest, ...).
    """
    bundles = {}
    source_dir = source_dir  # no-op self-assignment kept from the original
    bundle_dir = os.path.join(source_dir, "bundles")
    # The PolicyBundle json-schema lives inside the engine's swagger doc.
    with open("./schemas/anchore-engine-api.yaml", 'r') as FH:
        anchore_engine_api_swagger = jsonref.loads(
            json.dumps(yaml.safe_load(FH)))
    bundleschema = anchore_engine_api_swagger['definitions'][
        'PolicyBundle']
    if os.path.isdir(bundle_dir):
        for bundlefile in os.listdir(bundle_dir):
            # NOTE(review): pattern should be a raw string (r"(.*)\.json$")
            # to avoid the invalid-escape deprecation warning.
            patt = re.match("(.*)\.json$", bundlefile)
            if patt:
                bundlename = patt.group(1)
                thefile = os.path.join(bundle_dir, bundlefile)
                try:
                    print("\tOpening bundle {}...".format(thefile), end='')
                    with open(thefile, 'r') as FH:
                        print("done!")
                        buf = FH.read()
                        # Digest of the raw file bytes names the artifact.
                        digest = "{}:{}".format(
                            "sha256",
                            hashlib.sha256(buf.encode('utf8')).hexdigest())
                        bundle = json.loads(buf)
                        print("\tValidating bundle {}...".format(thefile),
                              end='')
                        jsonschema.validate(bundle, bundleschema)
                        print("done!")
                        print("\tProcessing bundle {}...".format(thefile),
                              end='')
                        bundle_record = {
                            'location': '/'.join([
                                'bundles', bundlename,
                                "{}.json".format(digest)
                            ]),
                            'type': 'bundle',
                            'name': bundlename,  #bundle.get('name', 'N/A'),
                            'description': bundle.get(
                                'description',
                                bundle.get('comment', "N/A")),
                            'digest': digest,
                        }
                        bundles[bundlename] = bundle_record
                        print("done!")
                except Exception as err:
                    print("\nERROR parsing bundle {} - exception: {}".format(
                        bundlefile, err))
                    raise err
    return (bundles)
def load_jsonschema(config_file):
    """Load the versioned config schema matching ``config_file['version']``."""
    base_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'schemas', config_file['version'])
    filename = os.path.join(base_path, "config_schema.json")
    if not os.path.exists(filename):
        raise Exception('Version in "{}" is unsupported'.format(filename))
    with open(filename, "r") as f:
        schema_text = f.read()
    # Resolve relative $refs against this version's schema directory.
    return loads(schema_text, base_uri='file://{}/'.format(base_path),
                 jsonschema=True)
def __init__(self, file_name):
    """Populate self (a mapping) from a JSON schema file next to this module."""
    base_path = os.path.dirname(os.path.abspath(__file__))
    base_uri = f"file://{base_path}/"
    schema_path = os.path.join(base_path, file_name)
    with open(schema_path) as input_file:
        schema_text = input_file.read()
    # Relative $refs inside the schema resolve against this module's dir.
    self.update(
        jsonref.loads(schema_text, base_uri=base_uri, jsonschema=True))
def _get_combined_schema(self, schema):
    """Flatten a (possibly allOf-composed) OpenAPI schema into one dict.

    :param schema: name of the schema under ``components.schemas``.
    :returns: merged schema dict with ``additionalProperties`` disabled.
    """
    swagger = self._get_api_doc()
    definitions = jsonref.loads(json.dumps(swagger))['components']['schemas']
    target = definitions[schema]
    # An allOf schema is merged member by member; a plain one is used as-is.
    parts = target['allOf'] if target.get('allOf') else [target]
    combined_schema = {}
    for part in parts:
        combined_schema.update(part)
    # Reject any property not explicitly declared.
    combined_schema['additionalProperties'] = False
    return combined_schema
def __init__(self, schema_filename=None, root_schema_dict=None, rollup=False,
             root_id=None, use_titles=False, disable_local_refs=False,
             truncation_length=3, exclude_deprecated_fields=False):
    """Build the schema parser from a file/URL or an in-memory dict.

    Exactly one of ``schema_filename`` and ``root_schema_dict`` must be
    supplied; supplying both or neither raises ``ValueError``.
    """
    self.sub_sheets = {}
    self.main_sheet = Sheet()
    self.sub_sheet_mapping = {}
    self.rollup = rollup
    self.root_id = root_id
    self.use_titles = use_titles
    self.truncation_length = truncation_length
    self.title_lookup = TitleLookup()
    self.flattened = {}
    self.exclude_deprecated_fields = exclude_deprecated_fields
    if root_schema_dict is None and schema_filename is None:
        raise ValueError('One of schema_filename or root_schema_dict must be supplied')
    if root_schema_dict is not None and schema_filename is not None:
        raise ValueError('Only one of schema_filename or root_schema_dict should be supplied')
    if schema_filename:
        if schema_filename.startswith('http'):
            import requests
            r = requests.get(schema_filename)
            # OrderedDict keeps schema key order stable downstream.
            self.root_schema_dict = jsonref.loads(r.text, object_pairs_hook=OrderedDict)
        else:
            if disable_local_refs:
                # Loader that refuses to follow local-file $refs.
                with codecs.open(schema_filename, encoding="utf-8") as schema_file:
                    self.root_schema_dict = jsonref.load(schema_file, object_pairs_hook=OrderedDict,
                                                         loader=JsonLoaderLocalRefsDisabled())
            else:
                # Build a file:// base URI so relative $refs resolve
                # against the schema's directory (py2/py3 differ in how).
                if sys.version_info[:2] > (3, 0):
                    base_uri = pathlib.Path(os.path.realpath(schema_filename)).as_uri()
                else:
                    base_uri = urlparse.urljoin('file:', urllib.pathname2url(os.path.abspath(schema_filename)))
                with codecs.open(schema_filename, encoding="utf-8") as schema_file:
                    self.root_schema_dict = jsonref.load(schema_file, object_pairs_hook=OrderedDict,
                                                         base_uri=base_uri)
    else:
        self.root_schema_dict = root_schema_dict
def test_dict():
    """A dict-valued proxy is replaced in place with the referenced dict."""
    spec = {
        'foo': {"$ref": "#/definitions/user"},
        'definitions': {
            'user': {'properties': {'first_name': {'type': 'string'}}}
        },
    }
    loaded = jsonref.loads(json.dumps(spec))
    # jsonref leaves a lazy proxy at the reference site...
    assert isinstance(loaded['foo'], jsonref.JsonRef)
    replace_jsonref_proxies(loaded)
    # ...which becomes a plain dict equal to the referenced definition.
    assert isinstance(loaded['foo'], dict)
    assert spec['definitions']['user'] == loaded['foo']
def test_nested_dict():
    """Proxies nested inside referenced objects are replaced by the callback."""
    spec = {
        'foo': {"$ref": "#/definitions/user"},
        'definitions': {
            'user': {
                'type': 'object',
                'properties': {
                    'first_name': {'type': 'string'},
                    'address': {'$ref': '#/definitions/address'},
                },
            },
            'address': {
                'type': 'object',
                'properties': {'street': {'type': 'string'}},
            },
        },
    }
    loaded = jsonref.loads(json.dumps(spec))
    # Both the outer and the nested reference sites start as proxies.
    assert type(loaded['foo']) == jsonref.JsonRef
    assert type(loaded['foo']['properties']['address']) == jsonref.JsonRef
    post_process_spec(
        loaded,
        on_container_callbacks=(replace_jsonref_proxies_callback,))
    # After the callback pass every proxy is a plain dict.
    assert type(loaded['foo']) == dict
    assert type(loaded['foo']['properties']['address']) == dict
    assert spec['definitions']['address'] == \
        loaded['foo']['properties']['address']
def test_nested_dict():
    """Proxies nested inside referenced objects are replaced recursively."""
    spec = {
        'foo': {"$ref": "#/definitions/user"},
        'definitions': {
            'user': {
                'type': 'object',
                'properties': {
                    'first_name': {'type': 'string'},
                    'address': {'$ref': '#/definitions/address'},
                },
            },
            'address': {
                'type': 'object',
                'properties': {'street': {'type': 'string'}},
            },
        },
    }
    loaded = jsonref.loads(json.dumps(spec))
    # Both the outer and the nested reference sites start as proxies.
    assert isinstance(loaded['foo'], jsonref.JsonRef)
    assert isinstance(loaded['foo']['properties']['address'], jsonref.JsonRef)
    replace_jsonref_proxies(loaded)
    # After replacement every proxy is a plain dict.
    assert isinstance(loaded['foo'], dict)
    assert isinstance(loaded['foo']['properties']['address'], dict)
    assert spec['definitions']['address'] == \
        loaded['foo']['properties']['address']
def test_list():
    """Proxies inside lists are replaced in place with the referenced dict."""
    spec = {
        'foo': [{"$ref": "#/definitions/user"}],
        'definitions': {
            'user': {'properties': {'first_name': {'type': 'string'}}}
        },
    }
    loaded = jsonref.loads(json.dumps(spec))
    # The list element starts as a lazy JsonRef proxy.
    assert type(loaded['foo'][0]) == jsonref.JsonRef
    replace_jsonref_proxies(loaded)
    # After replacement it is a plain dict equal to the definition.
    assert type(loaded['foo'][0]) == dict
    assert spec['definitions']['user'] == loaded['foo'][0]
def get_schema_fields(schema_filename):
    """Download a schema and return the set of its flattened field paths."""
    response = requests.get(schema_filename)
    # OrderedDict preserves key order while the generator walks the schema.
    schema = jsonref.loads(response.text, object_pairs_hook=OrderedDict)
    return set(schema_dict_fields_generator(schema))
def csl_data_validator():
    """Build a jsonschema validator for the CSL-data schema ($refs resolved)."""
    schema_text = root.joinpath('csl-data.json').read_text()
    schema = jsonref.loads(schema_text, jsonschema=True)
    # Pick the validator class matching the schema's declared draft.
    validator_cls = jsonschema.validators.validator_for(schema)
    return validator_cls(schema)
# -*- coding:utf-8 -*-
from pprint import pprint

import jsonref

# An example json document containing an internal $ref.
json_str = """{"real": [1, 2, 3, 4], "ref": {"$ref": "#/real"}}"""

data = jsonref.loads(json_str)
# The reference is not evaluated until it is accessed here by pprint:
pprint(data)
# {'real': [1, 2, 3, 4], 'ref': [1, 2, 3, 4]}
# -*- coding:utf-8 -*-
import os.path
import pprint as p

import jsonref

s = """
{
  "x": {"$ref": "./files/x.json#x"},
  "y": {"$ref": "./files/y.json#y"}
}
"""

# absolute path is ok — rewrite the relative refs into file:// URIs so
# jsonref can resolve them regardless of the working directory.
cwd = os.path.dirname(os.path.abspath(__file__))
s = s.replace('"./', '"file://{}/'.format(cwd))
print(s)

data = jsonref.loads(s)
p.pprint(data)
def test_loads(self):
    """loads() resolves a simple internal $ref transparently."""
    document = """{"a": 1, "b": {"$ref": "#/a"}}"""
    assert loads(document) == {"a": 1, "b": 1}
def test_loads_kwargs(self):
    """Extra keyword args (here parse_float) are forwarded to json.loads."""
    document = """{"a": 5.5, "b": {"$ref": "#/a"}}"""
    result = loads(document, parse_float=lambda x: int(float(x)))
    # Both the literal and the resolved reference see the custom parsing.
    assert result["a"] == result["b"] == 5