def test_parsing_a_context_expands_prefixes():
    """Terms defined via prefixes ('x:...') resolve to full IRIs on parse."""
    ctx = Context({
        '@vocab': 'http://example.org/ns/',
        'x': 'http://example.org/ns/',
        'label': 'x:label',
        'x:updated': {'@type': 'x:date'},
    })

    # The 'label' term is declared as a compact IRI and must be expanded.
    label_term = ctx.terms.get('label')
    assert label_term.id == 'http://example.org/ns/label'

    # A prefixed term key is itself expanded, as is its '@type'.
    updated_term = ctx.terms.get('x:updated')
    assert updated_term.id == 'http://example.org/ns/updated'
    assert updated_term.type == 'http://example.org/ns/date'

    # test_expanding_terms():
    assert ctx.expand('term') == 'http://example.org/ns/term'
    assert ctx.expand('x:term') == 'http://example.org/ns/term'

    # test_shrinking_iris():
    assert ctx.shrink_iri('http://example.org/ns/term') == 'x:term'
    assert ctx.to_symbol('http://example.org/ns/term') == 'term'
def test_parsing_a_context_expands_prefixes():
    """Prefix ('x:') and vocab mappings expand term ids, types, and symbols."""
    # NOTE(review): this re-defines a function of the same name; only the
    # last definition in the module is actually collected by pytest.
    source = {
        "@vocab": "http://example.org/ns/",
        "x": "http://example.org/ns/",
        "label": "x:label",
        "x:updated": {"@type": "x:date"},
    }
    ctx = Context(source)

    term = ctx.terms.get("label")
    assert term.id == "http://example.org/ns/label"

    term = ctx.terms.get("x:updated")
    assert term.id == "http://example.org/ns/updated"
    assert term.type == "http://example.org/ns/date"

    # test_expanding_terms():
    for key in ("term", "x:term"):
        assert ctx.expand(key) == "http://example.org/ns/term"

    # test_shrinking_iris():
    assert ctx.shrink_iri("http://example.org/ns/term") == "x:term"
    assert ctx.to_symbol("http://example.org/ns/term") == "term"
def test_loading_contexts():
    """Remote context references (a URL or a list of URLs) are fetched and merged."""
    # Given context data:
    base_url = "http://example.org/base.jsonld"
    ctx_url = "http://example.org/context.jsonld"
    SOURCES[base_url] = {'@context': {"@vocab": "http://example.org/vocab/"}}
    SOURCES[ctx_url] = {'@context': [base_url, {"n": "name"}]}

    # Create a context:
    ctx = Context(ctx_url)
    assert ctx.expand('n') == 'http://example.org/vocab/name'

    # Context can be a list:
    ctx = Context([ctx_url])
    assert ctx.expand('n') == 'http://example.org/vocab/name'
def test_loading_contexts():
    """A context given by URL pulls in its own nested context references."""
    # NOTE(review): duplicate of the preceding test_loading_contexts; pytest
    # only runs the last definition with this name.
    source1 = "http://example.org/base.jsonld"
    source2 = "http://example.org/context.jsonld"

    # Given context data: source2 chains to source1 and adds a local term.
    SOURCES[source1] = {'@context': {"@vocab": "http://example.org/vocab/"}}
    SOURCES[source2] = {'@context': [source1, {"n": "name"}]}

    expected = 'http://example.org/vocab/name'

    # Create a context:
    assert Context(source2).expand('n') == expected

    # Context can be a list:
    assert Context([source2]).expand('n') == expected
def test_parsing_a_context_expands_prefixes():
    """Prefix declarations expand term ids/types; IRIs shrink back to terms."""
    # NOTE(review): third definition with this name in the file; the earlier
    # two are shadowed and never collected.
    ns = 'http://example.org/ns/'
    ctx = Context({
        '@vocab': 'http://example.org/ns/',
        'x': 'http://example.org/ns/',
        'label': 'x:label',
        'x:updated': {'@type': 'x:date'},
    })

    assert ctx.terms.get('label').id == ns + 'label'

    updated = ctx.terms.get('x:updated')
    assert updated.id == ns + 'updated'
    assert updated.type == ns + 'date'

    # test_expanding_terms():
    assert ctx.expand('term') == ns + 'term'
    assert ctx.expand('x:term') == ns + 'term'

    # test_shrinking_iris():
    assert ctx.shrink_iri(ns + 'term') == 'x:term'
    assert ctx.to_symbol(ns + 'term') == 'term'
# Serialize one item's '@graph' as RDF, stripping per-item @prefix lines.
# NOTE(review): this fragment references `item`, `counter`, `ctx`, `fout`,
# `global_context` and `format` from an enclosing scope (presumably a loop
# over query results) that is not visible here — confirm against the caller.
# break
# if (counter % 100) == 0:
#     logger.warning('Counter: ' + str(counter))
g = ConjunctiveGraph()
try:
    doc = item['@graph'].to_dict()
    doc_str = json.dumps(doc)
except Exception:
    logger.error(
        'Item without @graph -- is the selected index in JSON-LD format?'
    )
    # BUG FIX: the original had a bare `exit`, which only *references* the
    # builtin and never calls it, so execution fell through to `doc['@id']`
    # with `doc` possibly unbound (NameError). Abort for real, as the error
    # message intends.
    raise SystemExit(1)

itemId = doc['@id']
expandedId = ctx.expand(itemId)
if expandedId == itemId:
    # The id did not expand against the context; warn unless it is already
    # a valid absolute IRI.
    try:
        parse(itemId, rule='IRI')
    except Exception:
        logger.warning('ID cannot be expanded to URI: ' + itemId)

g.parse(data=doc_str, context=global_context, format='json-ld')
serialized_data = g.serialize(format=format)

# Drop @prefix declarations — presumably so the concatenated output does
# not repeat them per item; TODO confirm the consumer supplies a prologue.
lines = serialized_data.split('\n')
lines = [line for line in lines if not line.startswith('@prefix')]
serialized_data = '\n'.join(lines)
fout.write(serialized_data)