def merge(self, base, head, meta=None, merge_options=None):
    """Merge head into base.

    base -- Old JSON document you are merging into.
    head -- New JSON document for merging into base.
    merge_options -- Optional dictionary with merge options. Keys of
    merge_options must be names of the strategies. Values must be
    dictionaries of merge options as in the mergeOptions schema element.
    Options in merge_options are applied to all instances of a strategy.
    Values in schema override values given in merge_options.

    Returns an updated base document
    """
    schema = JSONValue(self.schema)

    if base is None:
        base = JSONValue(undef=True)
    else:
        base = JSONValue(base)

    head = JSONValue(head)

    if merge_options is None:
        merge_options = {}
    else:
        # shallow-copy so the 'version' entry added below does not
        # mutate the caller's dictionary
        merge_options = dict(merge_options)

    # backwards compatibility jsonmerge<=1.6.0
    if meta is not None:
        merge_options['version'] = {
            'metadata': meta
        }

    walk = WalkInstance(self, base, head, merge_options)
    return walk.descend(schema, base, head).val
def get_schema(self, meta=None):
    """Get JSON schema for the merged document.

    meta -- Optional JSON schema for the meta-data.

    Returns a JSON schema for documents returned by the merge()
    method.
    """
    if meta is not None:
        # The meta-data schema may itself reference external schemas.
        # We already have machinery to resolve such references for the
        # merge schema, so (ab)use it here via a throwaway Merger that
        # shares our resolver store.
        meta_merger = Merger(meta)
        meta_merger.validator.resolver.store.update(
            self.validator.resolver.store)
        meta_walk = WalkSchema(meta_merger)
        meta = meta_walk._resolve_refs(
            JSONValue(meta), resolve_base=True).val

    walk = WalkSchema(self)
    return walk.descend(JSONValue(self.schema), meta).val
def merge(self, walk, base, head, schema, meta, limit=None, unique=None, ignoreDups=True, **kwargs):
    """Append head (wrapped with meta-data) to the versioned array in base.

    Consecutive duplicate values are skipped unless ignoreDups is off;
    limit trims the array to its newest entries after an append.
    """
    # 'unique' is the backwards-compatible spelling of (not ignoreDups)
    if unique is False:
        ignoreDups = False

    if base.is_undef():
        entries = []
        base = JSONValue(val=entries, ref=base.ref)
    else:
        entries = list(base.val)
        base = JSONValue(entries, base.ref)

    is_dup = bool(ignoreDups and entries and entries[-1]['value'] == head.val)
    if not is_dup:
        entries.append(walk.add_meta(head.val, meta))
        if limit is not None:
            # keep only the newest 'limit' entries
            base.val = entries[-limit:]

    return base
def merge(self, walk, base, head, schema, meta, objclass_menu=None, objClass='_default', **kwargs):
    """Merge two objects key by key, descending into each value.

    objclass_menu -- maps objClass names to callables producing the
    mapping type used for the result (defaults to plain dict).
    objClass -- name of the mapping type to use, looked up in the menu.

    Raises HeadInstanceError/BaseInstanceError if head/base is not an
    object, SchemaError if objClass is unknown.
    """
    if not walk.is_type(head, "object"):
        raise HeadInstanceError(
            "Head for an 'object' merge strategy is not an object", head)

    if objclass_menu is None:
        objclass_menu = {'_default': dict}

    objcls = objclass_menu.get(objClass)
    if objcls is None:
        raise SchemaError("objClass '%s' not recognized" % objClass, schema)

    if base.is_undef():
        base = JSONValue(objcls(), base.ref)
    else:
        if not walk.is_type(base, "object"):
            raise BaseInstanceError(
                "Base for an 'object' merge strategy is not an object", base)
        # copy into a fresh container so the caller's base is untouched
        base = JSONValue(objcls(base.val), base.ref)

    for k, v in head.items():
        subschema = JSONValue(undef=True)

        # get subschema for this element; precedence is
        # properties > patternProperties > additionalProperties
        if not schema.is_undef():
            p = schema.get('properties')
            if not p.is_undef():
                subschema = p.get(k)

            if subschema.is_undef():
                p = schema.get('patternProperties')
                if not p.is_undef():
                    for pattern, s in p.items():
                        if re.search(pattern, k):
                            subschema = s

            if subschema.is_undef():
                p = schema.get('additionalProperties')
                # additionalProperties can be boolean in draft 4
                if not p.is_undef() and walk.is_type(p, "object"):
                    subschema = p

        base[k] = walk.descend(subschema, base.get(k), v, meta)

    return base
def merge(self, walk, base, head, schema, meta, idRef="id", ignoreId=None, **kwargs):
    """Merge two arrays item-wise, matching items by an id fragment.

    idRef -- JSON pointer fragment locating the id inside each item.
    ignoreId -- items whose id equals this value are skipped.

    Ids must be unique within head and within base; violations raise
    HeadInstanceError/BaseInstanceError respectively.
    """
    if not walk.is_type(head, "array"):
        raise HeadInstanceError("Head for an 'arrayMergeById' merge strategy is not an array", head)  # nopep8

    if base.is_undef():
        base = JSONValue([], base.ref)
    else:
        if not walk.is_type(base, "array"):
            raise BaseInstanceError("Base for an 'arrayMergeById' merge strategy is not an array", base)  # nopep8
        base = JSONValue(list(base.val), base.ref)

    subschema = schema.get('items')

    if walk.is_type(subschema, "array"):
        raise SchemaError("'arrayMergeById' not supported when 'items' is an array", subschema)

    def iter_index_key_item(jv):
        # yield (index, id, item); items whose id fragment cannot be
        # resolved are silently skipped
        for i, item in enumerate(jv):
            try:
                key = walk.resolver.resolve_fragment(item.val, idRef)
            except jsonschema.RefResolutionError:
                continue
            yield i, key, item

    # reject head arrays containing duplicate ids (compare each pair once)
    for i, key_1, item_1 in iter_index_key_item(head):
        for j, key_2, item_2 in iter_index_key_item(head):
            if j < i:
                if key_1 == key_2:
                    raise HeadInstanceError("Id '%s' was not unique in head" % (key_1,), item_1)
            else:
                break

    for i, head_key, head_item in iter_index_key_item(head):
        if head_key == ignoreId:
            continue

        # collect all base indices carrying this id before touching base
        matching_j = []
        for j, base_key, base_item in iter_index_key_item(base):
            if base_key == head_key:
                matching_j.append(j)
                matched_item = base_item

        if len(matching_j) == 1:
            # If there was exactly one match, we replace it with a merged item
            j = matching_j[0]
            base[j] = walk.descend(subschema, matched_item, head_item, meta)
        elif len(matching_j) == 0:
            # If there wasn't a match, we append a new object
            base.append(walk.descend(subschema, JSONValue(undef=True), head_item, meta))
        else:
            j = matching_j[1]
            raise BaseInstanceError("Id '%s' was not unique in base" % (base_key,), base[j])

    return base
def merge(self, walk, base, head, schema, limit=None, unique=None, ignoreDups=True, metadata=None, **kwargs):
    """Append head to a versioned array, wrapping it with metadata.

    metadata -- optional object merged into each appended entry.
    limit -- keep only the newest 'limit' entries after an append.
    ignoreDups -- skip the append if head equals the newest entry's
    'value' (the 'unique' parameter is the deprecated spelling).

    Raises SchemaError for a non-object metadata option and
    BaseInstanceError when base was not produced by this strategy.
    """
    # backwards compatibility
    if unique is False:
        ignoreDups = False

    if metadata is not None:
        if not walk.is_type(JSONValue(val=metadata), "object"):
            raise SchemaError(
                "'metadata' option does not contain an object")

    if base.is_undef():
        base = JSONValue(val=[], ref=base.ref)
        last_entry = JSONValue(undef=True)
    else:
        if not walk.is_type(base, "array"):
            raise BaseInstanceError(
                "Base is not an array. "
                "Base not previously generated with this strategy?", base)
        base = JSONValue(list(base.val), base.ref)

        if base.val:
            # validate the newest entry: it must be an object with a
            # 'value' property, as this strategy would have written it
            last_entry = base[-1]

            if not walk.is_type(last_entry, "object"):
                raise BaseInstanceError(
                    "Last entry in the versioned array is not an object. "
                    "Base not previously generated with this strategy?",
                    last_entry)

            if 'value' not in last_entry.val:
                raise BaseInstanceError(
                    "Last entry in the versioned array has no 'value' property. "
                    "Base not previously generated with this strategy?",
                    last_entry)
        else:
            last_entry = JSONValue(undef=True)

    if not ignoreDups or last_entry.is_undef(
    ) or last_entry['value'].val != head.val:
        base.val.append(self.add_metadata(head.val, metadata))
        if limit is not None:
            base.val = base.val[-limit:]

    return base
def merge(self, walk, base, head, schema, meta, idRef="id", ignoreId=None, **kwargs):
    """Merge two arrays item-wise, matching items by an id fragment.

    idRef -- JSON pointer fragment locating the id inside each item.
    ignoreId -- items whose id equals this value are skipped.
    """
    if not walk.is_type(head, "array"):
        raise HeadInstanceError("Head for an 'arrayMergeById' merge strategy is not an array")  # nopep8

    if base.is_undef():
        base = JSONValue([], base.ref)
    else:
        if not walk.is_type(base, "array"):
            raise BaseInstanceError("Base for an 'arrayMergeById' merge strategy is not an array")  # nopep8
        base = JSONValue(list(base.val), base.ref)

    subschema = schema.get('items')

    if walk.is_type(subschema, "array"):
        raise SchemaError("'arrayMergeById' not supported when 'items' is an array")

    def iter_index_key_item(jv):
        # yield (index, id, item); items whose id fragment cannot be
        # resolved are silently skipped
        for i, item in enumerate(jv):
            try:
                key = walk.resolver.resolve_fragment(item.val, idRef)
            except jsonschema.RefResolutionError:
                continue
            yield i, key, item

    # reject head arrays containing duplicate ids
    for i, key_1, item_1 in iter_index_key_item(head):
        for j, key_2, item_2 in iter_index_key_item(head):
            if j < i:
                if key_1 == key_2:
                    raise HeadInstanceError("Id was not unique")
            else:
                break

    for i, head_key, head_item in iter_index_key_item(head):
        if head_key == ignoreId:
            continue
        key_count = 0
        for j, base_key, base_item in iter_index_key_item(base):
            if base_key == head_key:
                key_count += 1
                # If there was a match, we replace with a merged item
                # NOTE(review): with duplicate ids in base every match
                # is merged before the error below is raised
                base.val[j] = walk.descend(subschema, base_item, head_item, meta).val
        if key_count == 0:
            # If there wasn't a match, we append a new object
            base.val.append(walk.descend(subschema, JSONValue(undef=True), head_item, meta).val)
        if key_count > 1:
            raise BaseInstanceError("Id was not unique")
    return base
def merge(self, walk, base, head, schema, meta, **kwargs):
    """Concatenate the head array onto a copy of the base array."""
    if not walk.is_type(head, "array"):
        raise HeadInstanceError("Head for an 'append' merge strategy is not an array", head)

    if base.is_undef():
        merged = JSONValue([], base.ref)
    elif not walk.is_type(base, "array"):
        raise BaseInstanceError("Base for an 'append' merge strategy is not an array", base)
    else:
        # work on a copy so the caller's base list is not mutated
        merged = JSONValue(list(base.val), base.ref)

    merged.val.extend(head.val)
    return merged
def merge(self, walk, base, head, schema, meta, incompatible=None, **kwargs):
    """Merge head into base as a set-like array.

    Values already present in base are skipped. For each new value,
    any value sharing an 'incompatible' set with it is removed from
    base before the new value is appended.

    incompatible -- optional list of groups of mutually exclusive
    values.
    """
    if incompatible is None:
        incompatible = []
    else:
        incompatible = [set(terms) for terms in incompatible]

    if head is None or head.is_undef():
        return base

    # a scalar head is treated as a one-element array
    if not walk.is_type(head, "array"):
        head = JSONValue(val=[head.val], ref=head.ref)

    if base.is_undef():
        # BUG FIX: was JSONValue({}, ...) -- a dict has no .append(),
        # so the first merge into an undefined base crashed
        base = JSONValue([], base.ref)
    elif not walk.is_type(base, "array"):
        base = JSONValue(val=[base.val], ref=base.ref)
    else:
        base = JSONValue(list(base.val), base.ref)

    def valInArray(val, ary):
        # membership test tolerant of JSONValue wrappers on either side
        # NOTE(review): a JSONValue with a falsy .val falls back to the
        # wrapper itself here -- presumably never hit in practice; verify
        if isinstance(val, JSONValue):
            val = val.val
        return val in [(isinstance(v, JSONValue) and v.val) or v for v in ary]

    def findIncompat(val):
        # return the first incompatibility group containing val
        for _set in incompatible:
            if val in _set:
                return _set
        return set()

    for newval in head.val:
        if valInArray(newval, base.val):
            continue
        choices = findIncompat(newval)
        for choice in choices:
            if newval == choice:
                continue
            elif valInArray(choice, base):
                # evict the conflicting value before adding the new one
                base.val = [v for v in base.val if v != choice]
        base.val.append(newval)

    return base
def merge(self, walk, base, head, schema, idRef="id", ignoreId=None, **kwargs):
    """Merge two arrays item-wise, matching items by an id fragment.

    idRef -- JSON pointer fragment locating the id inside each item.
    ignoreId -- items whose id equals this value are skipped.

    Ids must be unique within head and within base; violations raise
    HeadInstanceError/BaseInstanceError respectively.
    """
    if not walk.is_type(head, "array"):
        raise HeadInstanceError("Head is not an array", head)  # nopep8

    if base.is_undef():
        base = JSONValue([], base.ref)
    else:
        if not walk.is_type(base, "array"):
            raise BaseInstanceError("Base is not an array", base)  # nopep8
        base = JSONValue(list(base.val), base.ref)

    subschema = schema.get('items')

    if walk.is_type(subschema, "array"):
        raise SchemaError("This strategy is not supported when 'items' is an array", subschema)

    # reject head arrays containing duplicate ids (compare each pair once)
    for i, key_1, item_1 in self.iter_index_key_item(walk, head, idRef):
        for j, key_2, item_2 in self.iter_index_key_item(walk, head, idRef):
            if j < i:
                if key_1 == key_2:
                    raise HeadInstanceError("Id '%s' was not unique in head" % (key_1,), item_1)
            else:
                break

    for i, head_key, head_item in self.iter_index_key_item(walk, head, idRef):
        if head_key == ignoreId:
            continue

        # collect all base indices carrying this id before touching base
        matching_j = []
        for j, base_key, base_item in self.iter_index_key_item(walk, base, idRef):
            if base_key == head_key:
                matching_j.append(j)
                matched_item = base_item

        if len(matching_j) == 1:
            # If there was exactly one match, we replace it with a merged item
            j = matching_j[0]
            base[j] = walk.descend(subschema, matched_item, head_item)
        elif len(matching_j) == 0:
            # If there wasn't a match, we append a new object
            base.append(walk.descend(subschema, JSONValue(undef=True), head_item))
        else:
            j = matching_j[1]
            raise BaseInstanceError("Id '%s' was not unique in base" % (base_key,), base[j])

    return base
def get_schema(self, walk, schema, limit=None, metadataSchema=None, **kwargs):
    """Build the schema of the versioned array produced by this strategy.

    Each array item follows metadataSchema (with refs resolved) plus a
    'value' property holding the original schema; limit becomes
    maxItems.
    """
    if metadataSchema is None:
        item_schema = {}
    else:
        item_schema = dict(walk.resolve_subschema_option_refs(metadataSchema))

    # shallow-copy 'properties' so the caller's metadataSchema is not
    # mutated by the 'value' insertion below
    item_schema['properties'] = dict(item_schema.get('properties', {}))
    item_schema['properties']['value'] = schema.val

    result = {"type": "array", "items": item_schema}
    if limit is not None:
        result['maxItems'] = limit

    return JSONValue(result, schema.ref)
def get_schema(self, meta=None, merge_options=None):
    """Get JSON schema for the merged document.

    merge_options -- Optional dictionary with merge options. Keys of
    merge_options must be names of the strategies. Values must be
    dictionaries of merge options as in the mergeOptions schema element.
    Options in merge_options are applied to all instances of a strategy.
    Values in schema override values given in merge_options.

    Returns a JSON schema for documents returned by the merge()
    method.
    """
    if merge_options is None:
        merge_options = {}
    else:
        # shallow-copy so the 'version' entry added below does not
        # mutate the caller's dictionary
        merge_options = dict(merge_options)

    # backwards compatibility jsonmerge<=1.6.0
    if meta is not None:
        merge_options['version'] = {
            'metadataSchema': meta
        }

    schema = JSONValue(self.schema)

    walk = WalkSchema(self, merge_options)
    return walk.descend(schema).val
def merge(self, walk, base, head, schema, meta, **kwargs):
    """Always produce the constant value "foo", preserving base's ref."""
    ref = "" if base is None else base.ref
    return JSONValue("foo", ref)
def descend_schema(self, walk, schema, meta):
    """Descend through a '$ref' in schema, rewriting the target in place.

    Returns None when schema has no '$ref' (caller falls through to
    normal handling). Already-visited and remote refs are returned
    untouched to avoid infinite recursion and remote rewrites.
    """
    ref = schema.val.get("$ref")
    if ref is None:
        return None

    if ref in self.refs_descended:
        return schema

    if walk.resolver.is_remote_ref(ref):
        return schema

    # record before recursing so self-referential schemas terminate
    self.refs_descended.add(ref)

    with walk.resolver.resolving(ref) as resolved:
        rinstance = JSONValue(resolved, ref)

        if not walk.is_type(rinstance, 'object'):
            raise SchemaError("'$ref' does not point to an object")

        result = walk.descend(rinstance, meta)

        # mutate the resolved target in place so every other '$ref'
        # to it sees the processed schema
        resolved.clear()
        resolved.update(result.val)

    return schema
def get_key(self, walk, item, idRef):
    """Resolve the id key for item.

    idRef may be a single JSON pointer fragment, or a list of
    fragments producing a compound (list) key.
    """
    if not walk.is_type(JSONValue(idRef), 'array'):
        return walk.resolver.resolve_fragment(item.val, idRef)

    # compound key: one resolved fragment per pointer in the list
    return [walk.resolver.resolve_fragment(item.val, pointer)
            for pointer in idRef]
def descend_instance(self, walk, schema, base, head, meta):
    """Follow a '$ref' in schema and descend into its target.

    Returns None when schema has no '$ref'.
    """
    pointer = schema.val.get("$ref")
    if pointer is None:
        # nothing to follow
        return None

    # descend while the resolution scope is still pushed, so nested
    # refs inside the target resolve relative to it
    with walk.resolver.resolving(pointer) as target:
        subschema = JSONValue(target, pointer)
        return walk.descend(subschema, base, head, meta)
def _resolve_refs(self, schema, resolve_base=False):
    """Recursively inline every '$ref' found in schema.

    resolve_base -- also resolve refs while still in the context of
    the original schema (where they would otherwise remain valid).
    """
    assert isinstance(schema, JSONValue)

    if not resolve_base and self.is_base_context():
        # no need to resolve refs in the context of the original
        # schema - they are still valid
        return schema

    if self.is_type(schema, "array"):
        resolved_items = [self._resolve_refs(elem).val for elem in schema]
        return JSONValue(resolved_items, schema.ref)

    if self.is_type(schema, "object"):
        ref = schema.val.get("$ref")
        if ref is None:
            resolved_dict = dict(
                (key, self._resolve_refs(child).val)
                for key, child in schema.items()
            )
            return JSONValue(resolved_dict, schema.ref)
        # recurse while the resolution scope is pushed so nested refs
        # resolve relative to the target
        with self.resolver.resolving(ref) as target:
            return self._resolve_refs(JSONValue(target, ref))

    # scalars carry no refs
    return schema
def work(self, strategy, schema, meta, **kwargs):
    """Strip merge directives from schema and delegate to the strategy."""
    assert isinstance(schema, JSONValue)

    # hand the strategy a copy without the directives that selected it
    cleaned = dict(schema.val)
    cleaned.pop("mergeStrategy", None)
    cleaned.pop("mergeOptions", None)

    rv = strategy.get_schema(self, JSONValue(cleaned, schema.ref), meta, **kwargs)
    assert isinstance(rv, JSONValue)

    return rv
def merge(self, walk, base, head, schema, meta, **kwargs):
    """Merge two objects key by key, descending into each value.

    Raises HeadInstanceError/BaseInstanceError if head/base is not an
    object.
    """
    if not walk.is_type(head, "object"):
        raise HeadInstanceError("Head for an 'object' merge strategy is not an object")

    if base.is_undef():
        base = JSONValue({}, base.ref)
    else:
        if not walk.is_type(base, "object"):
            raise BaseInstanceError("Base for an 'object' merge strategy is not an object")
        # copy so the caller's base dict is not mutated
        base = JSONValue(dict(base.val), base.ref)

    for k, v in head.items():
        subschema = JSONValue(undef=True)

        # get subschema for this element; precedence is
        # properties > patternProperties > additionalProperties
        if not schema.is_undef():
            p = schema.get('properties')
            if not p.is_undef():
                subschema = p.get(k)

            if subschema.is_undef():
                p = schema.get('patternProperties')
                if not p.is_undef():
                    for pattern, s in p.items():
                        if re.search(pattern, k):
                            subschema = s

            if subschema.is_undef():
                p = schema.get('additionalProperties')
                # BUG FIX: additionalProperties can be a boolean in
                # draft 4 -- only the object form is a usable subschema
                if not p.is_undef() and walk.is_type(p, "object"):
                    subschema = p

        base.val[k] = walk.descend(subschema, base.get(k), v, meta).val

    return base
def resolve_subschema_option_refs(self, subschema):
    """Resolve external '$ref's inside a schema passed as a merge option.

    The option schema may itself reference external schemas. We already
    have machinery to resolve such references for the merge schema, so
    (ab)use it here via a throwaway Merger that shares our resolver
    store.
    """
    helper = Merger(subschema)
    helper.validator.resolver.store.update(self.resolver.store)

    helper_walk = WalkSchema(helper, merge_options={})
    return helper_walk._resolve_refs(
        JSONValue(subschema), resolve_base=True).val
def merge(self, base, head, meta=None):
    """Merge head into base.

    base -- Old JSON document you are merging into.
    head -- New JSON document for merging into base.
    meta -- Optional dictionary with meta-data. Any elements in the
    meta dictionary will be added to the dictionaries appended by the
    version strategies.

    Returns an updated base document
    """
    schema = JSONValue(self.schema)
    head = JSONValue(head)
    base = JSONValue(undef=True) if base is None else JSONValue(base)

    walk = WalkInstance(self, base, head)
    return walk.descend(schema, base, head, meta).val
def get_schema(self, walk, schema, meta, **kwargs):
    """Build the merged-document schema for an object merge.

    Descends into every subschema under properties,
    patternProperties and additionalProperties.
    """
    schema2 = JSONValue(dict(schema.val), schema.ref)

    def descend_keyword(keyword):
        # descend into each subschema under the given keyword
        p = schema.get(keyword)
        if not p.is_undef():
            for k, v in p.items():
                schema2.val[keyword][k] = walk.descend(v, meta).val

    descend_keyword("properties")
    descend_keyword("patternProperties")

    p = schema.get("additionalProperties")
    # BUG FIX: additionalProperties can be a boolean in draft 4 --
    # only descend into the object form
    if not p.is_undef() and walk.is_type(p, "object"):
        schema2.val["additionalProperties"] = walk.descend(p, meta).val

    return schema2
def get_schema(self, walk, schema, meta, **kwargs):
    """Build the merged-document schema for an object merge.

    Descends into every subschema under properties,
    patternProperties and additionalProperties.
    """
    schema2 = JSONValue(dict(schema.val), schema.ref)

    def descend_keyword(keyword):
        # descend into each subschema under the given keyword
        p = schema.get(keyword)
        if not p.is_undef():
            for k, v in p.items():
                schema2[keyword][k] = walk.descend(v, meta)

    descend_keyword("properties")
    descend_keyword("patternProperties")

    # additionalProperties can be boolean in draft 4
    p = schema.get("additionalProperties")
    if not p.is_undef() and walk.is_type(p, "object"):
        schema2["additionalProperties"] = walk.descend(p, meta)

    return schema2
def get_schema(self, walk, schema, meta, limit=None, **kwargs):
    """Build the schema of the versioned array produced by this strategy.

    Each array item follows meta plus a 'value' property holding the
    original schema; limit becomes maxItems.
    """
    if meta is not None:
        item = dict(meta)
    else:
        item = {}

    if 'properties' not in item:
        item['properties'] = {}
    else:
        # BUG FIX: shallow-copy so the 'value' insertion below does
        # not mutate the caller's meta['properties'] dict
        item['properties'] = dict(item['properties'])

    item['properties']['value'] = schema.val

    rv = {"type": "array", "items": item}

    if limit is not None:
        rv['maxItems'] = limit

    return JSONValue(rv, schema.ref)
def descend(self, schema, *args):
    """Descend one level into schema, dispatching to a merge strategy.

    Follows a '$ref' recursively if present; otherwise picks the
    strategy named by 'mergeStrategy' (falling back to
    default_strategy) and runs it via self.work().
    """
    assert isinstance(schema, JSONValue)

    # self.lvl tracks recursion depth, used only for log indentation
    self.lvl += 1

    log.debug("descend: %sschema %s" % (
        self._indent(), schema.ref,))

    if not schema.is_undef():
        # sanity check: schema.ref must resolve back to schema.val
        with self.resolver.resolving(schema.ref) as resolved:
            assert schema.val == resolved

    if not schema.is_undef():
        ref = schema.val.get("$ref")
        if ref is not None:
            # recurse into the ref target within its resolution scope
            with self.resolver.resolving(ref) as resolved:
                rv = self.descend(JSONValue(resolved, ref), *args)
                self.lvl -= 1
                return rv
        else:
            name = schema.val.get("mergeStrategy")
            opts = schema.val.get("mergeOptions")
            if opts is None:
                opts = {}
    else:
        name = None
        opts = {}

    if name is None:
        name = self.default_strategy(schema, *args, **opts)

    log.debug("descend: %sinvoke strategy %s" % (self._indent(), name))

    strategy = self.merger.strategies[name]

    rv = self.work(strategy, schema, *args, **opts)

    self.lvl -= 1
    return rv
def get_schema(self, walk, schema, meta, **kwargs):
    """Build the merged-document schema for an object merge.

    Rejects schemas combined with oneOf/allOf/anyOf (the effective
    type would be ambiguous), then descends into every subschema
    under properties, patternProperties and additionalProperties.
    """
    for forbidden in ("oneOf", "allOf", "anyOf"):
        if forbidden in schema.val:
            raise SchemaError("Type ambiguous schema")

    schema2 = JSONValue(dict(schema.val), schema.ref)

    def descend_keyword(keyword):
        # descend into each subschema under the given keyword
        p = schema.get(keyword)
        if not p.is_undef():
            for k, v in p.items():
                schema2.val[keyword][k] = walk.descend(v, meta).val

    descend_keyword("properties")
    descend_keyword("patternProperties")

    p = schema.get("additionalProperties")
    # BUG FIX: additionalProperties can be a boolean in draft 4 --
    # only descend into the object form
    if not p.is_undef() and walk.is_type(p, "object"):
        schema2.val["additionalProperties"] = walk.descend(p, meta).val

    return schema2
def test_get_attr(self):
    """Indexing a JSONValue yields the child value with a derived ref."""
    doc = JSONValue({'a': 'b'})
    child = doc['a']
    self.assertEqual('b', child.val)
    self.assertEqual('#/a', child.ref)
def test_get(self):
    """JSONValue.get() yields the child value with a derived ref."""
    v = JSONValue({'a': 'b'})
    va = v.get('a')
    # BUG FIX: was assertTrue('b', va.val), which treats the second
    # argument as a failure message and passes unconditionally
    self.assertEqual('b', va.val)
    self.assertEqual('#/a', va.ref)
def test_get_default(self):
    """get() on a missing key yields an undefined JSONValue."""
    empty_doc = JSONValue({})
    missing = empty_doc.get('a')
    self.assertTrue(missing.is_undef())
def test_get_attr_escape_tilde(self):
    """'~' in a key is escaped to '~0' in the derived JSON pointer ref."""
    doc = JSONValue({'~0': 'a'})
    child = doc['~0']
    self.assertEqual('a', child.val)
    self.assertEqual('#/~00', child.ref)