Code example #1
 def quads_to_jsonld(quads):
     from pyld import jsonld
     context = AssemblQuadStorageManager.get_jsonld_context(True)
     jsonf = jsonld.from_rdf(quads)
     jsonc = jsonld.compact(jsonf, context)
     jsonc['@context'] = AssemblQuadStorageManager.get_jsonld_context(False)
     return jsonc
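
For orientation, a minimal self-contained sketch of the same from_rdf-then-compact pipeline, with a made-up triple and context standing in for the Assembl-specific helpers:

from pyld import jsonld

# One triple in N-Quads form, which from_rdf parses when given the format option.
nquads = '<http://example.org/a> <http://example.org/name> "Alice" .\n'

# from_rdf returns the expanded document form: full IRIs, no context.
expanded = jsonld.from_rdf(nquads, {'format': 'application/nquads'})

# compact rewrites the expanded form against a context, shortening IRIs to terms.
context = {'@context': {'name': 'http://example.org/name'}}
print(jsonld.compact(expanded, context))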
Code example #2
def rdf_to_json(format, path: Path) -> dict:
    try:
        import rdflib
        from pyld import jsonld
    except ImportError:
        eprint("In order to process TTL manifests, you need RDFlib and PyLD:")
        eprint("  python -m pip install rdflib PyLD")
        return None
    g = rdflib.Graph()
    with path.open() as f:
        g.load(f, format=format)
    nq = g.serialize(format="ntriples").decode("utf-8")

    extended = jsonld.from_rdf(nq)
    with DIR.joinpath("manifest-context.jsonld").open() as f:
        frame = json.load(f)
    manifest = jsonld.frame(extended, frame)

    # ugly hack to "relativize" IRIs
    manifest = json.dumps(manifest)
    manifest = manifest.replace(f'file://{path.absolute()}#', "#")
    manifest = manifest.replace(f'file://{path.parent.absolute()}/', "")
    manifest = json.loads(manifest)

    return manifest
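
The frame loaded from manifest-context.jsonld above isn't shown; as a rough illustration of what jsonld.frame does to an expanded document, here is a minimal sketch using an invented vocabulary:

from pyld import jsonld

# Expanded input, in the shape jsonld.from_rdf returns.
doc = [{
    '@id': 'http://example.org/lib',
    '@type': ['http://example.org/Library'],
    'http://example.org/contains': [{'@id': 'http://example.org/book'}],
}]

# A frame selects nodes by @type and applies its @context to the output.
frame = {'@context': {'@vocab': 'http://example.org/'}, '@type': 'Library'}

print(jsonld.frame(doc, frame))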
Code example #3
    def write_resources(self,
                        graph_id=None,
                        resourceinstanceids=None,
                        **kwargs):
        super(RdfWriter,
              self).write_resources(graph_id=graph_id,
                                    resourceinstanceids=resourceinstanceids,
                                    **kwargs)
        g = self.get_rdf_graph()
        value = g.serialize(format="nquads").decode("utf-8")

        # print(f"Got graph: {value}")
        js = from_rdf(value, {
            "format": "application/nquads",
            "useNativeTypes": True
        })

        assert len(
            resourceinstanceids
        ) == 1  # currently, this should be limited to a single top resource

        archesproject = Namespace(settings.ARCHES_NAMESPACE_FOR_DATA_EXPORT)
        resource_inst_uri = archesproject[reverse(
            "resources", args=[resourceinstanceids[0]]).lstrip("/")]

        context = self.graph_model.jsonldcontext
        framing = {
            "@omitDefault": True,
            "@omitGraph": False,
            "@id": str(resource_inst_uri)
        }

        if context:
            framing["@context"] = context

        js = frame(js, framing)

        try:
            context = JSONDeserializer().deserialize(context)
        except ValueError:
            if context == "":
                context = {}
            context = {"@context": context}
        except AttributeError:
            context = {"@context": {}}

        # @omitGraph is not currently honored by PyLD even though the data is
        # compacted, so simulate omitGraph here:
        if "@graph" in js and len(js["@graph"]) == 1:
            # merge up
            for (k, v) in list(js["@graph"][0].items()):
                js[k] = v
            del js["@graph"]

        out = json.dumps(js, indent=kwargs.get("indent", None), sort_keys=True)
        dest = StringIO(out)

        full_file_name = os.path.join("{0}.{1}".format(self.file_name,
                                                       "jsonld"))
        return [{"name": full_file_name, "outputfile": dest}]
Code example #4
File: rdf2es.py Project: linked-swissbib/utilities
 def rdf2jsonld(self):
     """
     Splits the N-Quads input and converts the partitions to the compacted JSON-LD document form
     """
     # Extract tokens from offset + 1 to <n-th offset after> (n = args.docs)
     i = 0
     while i < len(self.newdoc) - 1:
         if i + self.docs >= len(self.newdoc) - 1:
             j = len(self.newdoc) - 1
         else:
             j = i + self.docs
         # Serializing RDF into JSON-LD with from_rdf yields the so-called
         # expanded document form, i.e. a format that doesn't contain any namespaces
         print("Serializing RDF file to JSON-LD")
         expand = jsonld.from_rdf(self.nquads[self.offsets[self.newdoc[i]] + 1:self.offsets[self.newdoc[j]]])
         i = j
         # The compacted JSON-LD document form offers the possibility to include a context
         # (i.e. namespaces) and thus reduces redundancy
         print("Converting to compacted document form")
         compacted = jsonld.compact(expand, self.loadjson(self.frame))
         # TODO: Insert function to append data of contributors to the bibliographicResource
         print("Indexing documents")
         for graph in compacted["@graph"]:
             if self.extcont is True:
                 graph["@context"] = path.abspath(args.frame)
             else:
                 graph["@context"] = compacted["@context"]
             self.output(graph)
Code example #5
File: publish.py Project: oeg-upm/agora-wot
def serialize_in_json(g, uri):
    context = build_graph_context(g)
    cg = skolemize(g)
    ted_nquads = cg.serialize(format='nquads')
    ld = jsonld.from_rdf(ted_nquads)
    type = list(cg.objects(uri, RDF.type)).pop()
    ld = jsonld.frame(ld, {'context': context, '@type': str(type)})
    return json.dumps(jsonld.compact(ld, context), indent=3, sort_keys=True)
Code example #6
    def write_resources(self,
                        graph_id=None,
                        resourceinstanceids=None,
                        **kwargs):
        super(RdfWriter,
              self).write_resources(graph_id=graph_id,
                                    resourceinstanceids=resourceinstanceids,
                                    **kwargs)
        g = self.get_rdf_graph()
        value = g.serialize(format='nquads')
        js = from_rdf(value, {
            'format': 'application/nquads',
            'useNativeTypes': True
        })

        assert len(
            resourceinstanceids
        ) == 1  # currently, this should be limited to a single top resource

        archesproject = Namespace(settings.ARCHES_NAMESPACE_FOR_DATA_EXPORT)
        resource_inst_uri = archesproject[reverse(
            'resources', args=[resourceinstanceids[0]]).lstrip('/')]

        context = self.graph_model.jsonldcontext
        framing = {
            "@omitDefault": True,
            "@omitGraph": False,
            "@id": str(resource_inst_uri)
        }

        if context:
            framing["@context"] = context

        js = frame(js, framing)

        try:
            context = JSONDeserializer().deserialize(context)
        except ValueError:
            if context == '':
                context = {}
            context = {"@context": context}
        except AttributeError:
            context = {"@context": {}}

        # @omitGraph is not currently honored by PyLD even though the data is
        # compacted, so simulate omitGraph here:
        if '@graph' in js and len(js['@graph']) == 1:
            # merge up
            for (k, v) in js['@graph'][0].items():
                js[k] = v
            del js['@graph']

        out = json.dumps(js, indent=kwargs.get('indent', None), sort_keys=True)
        dest = StringIO(out)

        full_file_name = os.path.join('{0}.{1}'.format(self.file_name,
                                                       'jsonld'))
        return [{'name': full_file_name, 'outputfile': dest}]
Code example #7
    def get(self, request, conceptid=None):
        if user_can_read_concepts(user=request.user):
            allowed_formats = ['json', 'json-ld']
            format = request.GET.get('format', 'json-ld')
            if format not in allowed_formats:
                return JSONResponse(status=406, reason='incorrect format specified, only %s formats allowed' % allowed_formats)

            include_subconcepts = request.GET.get('includesubconcepts', 'true') == 'true'
            include_parentconcepts = request.GET.get('includeparentconcepts', 'true') == 'true'
            include_relatedconcepts = request.GET.get('includerelatedconcepts', 'true') == 'true'

            depth_limit = request.GET.get('depthlimit', None)
            lang = request.GET.get('lang', settings.LANGUAGE_CODE)

            try:
                indent = int(request.GET.get('indent', None))
            except Exception:
                indent = None
            if conceptid:
                try:
                    ret = []
                    concept_graph = Concept().get(id=conceptid, include_subconcepts=include_subconcepts,
                                                  include_parentconcepts=include_parentconcepts, include_relatedconcepts=include_relatedconcepts,
                                                  depth_limit=depth_limit, up_depth_limit=None, lang=lang)

                    ret.append(concept_graph)
                except models.Concept.DoesNotExist:
                    return JSONResponse(status=404)
                except Exception as e:
                    return JSONResponse(status=500, reason=e)
            else:
                return JSONResponse(status=500)
        else:
            return JSONResponse(status=500)

        if format == 'json-ld':
            try:
                skos = SKOSWriter()
                value = skos.write(ret, format="nt")
                # the options key must be the literal string 'format'
                js = from_rdf(str(value), options={'format': 'application/nquads'})

                context = [{
                    "@context": {
                        "skos": SKOS,
                        "dcterms": DCTERMS,
                        "rdf": str(RDF)
                    }
                }, {
                    "@context": settings.RDM_JSONLD_CONTEXT
                }]

                ret = compact(js, context)
            except Exception as e:
                return JSONResponse(status=500, reason=e)

        return JSONResponse(ret, indent=indent)
Code example #8
File: serialize.py Project: oeg-upm/agora-gw
def serialize_graph(g, format=TURTLE, frame=None):
    if format == TURTLE:
        return g.serialize(format='turtle')

    context = build_graph_context(g)
    cg = skolemize(g)
    ted_nquads = cg.serialize(format='nquads')
    ld = jsonld.from_rdf(ted_nquads)
    if frame is not None:
        ld = jsonld.frame(ld, {'context': context, '@type': str(frame)})
    return json.dumps(jsonld.compact(ld, context), indent=3, sort_keys=True)
Code example #9
File: virtuoso_mapping.py Project: iilab/assembl
 def quads_to_jsonld(self, quads):
     from pyld import jsonld
     context = json.load(open(join(dirname(__file__), 'ontology',
                                   'context.jsonld')))
     server_uri = self.local_uri()
     context["@context"]['local'] = server_uri
     jsonf = jsonld.from_rdf(quads)
     jsonc = jsonld.compact(jsonf, context)
     jsonc['@context'] = [
         context_url, {'local': server_uri}]
     return jsonc
Code example #10
File: rdffile.py Project: archesproject/arches
    def build_json(self, graph_id=None, resourceinstanceids=None, **kwargs):
        # Build the JSON separately from serializing it, so it can be used internally
        super(RdfWriter,
              self).write_resources(graph_id=graph_id,
                                    resourceinstanceids=resourceinstanceids,
                                    **kwargs)
        g = self.get_rdf_graph()
        value = g.serialize(format="nquads").decode("utf-8")

        js = from_rdf(value, {
            "format": "application/nquads",
            "useNativeTypes": True
        })

        assert len(
            resourceinstanceids
        ) == 1  # currently, this should be limited to a single top resource

        archesproject = Namespace(settings.ARCHES_NAMESPACE_FOR_DATA_EXPORT)
        resource_inst_uri = archesproject[reverse(
            "resources", args=[resourceinstanceids[0]]).lstrip("/")]

        context = self.graph_model.jsonldcontext
        framing = {
            "@omitDefault": True,
            "@omitGraph": False,
            "@id": str(resource_inst_uri)
        }

        if context:
            framing["@context"] = context

        js = frame(js, framing)

        try:
            context = JSONDeserializer().deserialize(context)
        except ValueError:
            if context == "":
                context = {}
            context = {"@context": context}
        except AttributeError:
            context = {"@context": {}}

        # @omitGraph is not currently honored by PyLD even though the data is
        # compacted, so simulate omitGraph here:
        if "@graph" in js and len(js["@graph"]) == 1:
            # merge up
            for (k, v) in list(js["@graph"][0].items()):
                js[k] = v
            del js["@graph"]
        return js
Code example #11
def create_jsonLD(graph_data, filter_frame):
    """Create JSON-LD output for the given subject."""
    graph = ConjunctiveGraph()
    graph.parse(data=graph_data, format="turtle")
    try:
        # pyld likes nquads, by default
        expanded = jsonld.from_rdf(graph.serialize(format="nquads"))
        framed = jsonld.frame(expanded, json.loads(filter_frame))
        result = json.dumps(framed, indent=1, sort_keys=True)
        app_logger.info('Serialized as JSON-LD compact with the frame.')
        return result
    except Exception as error:
        app_logger.error('JSON-LD frame failed with error: {0}'.format(error))
        return error
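
A hypothetical call to the function above, assuming the module-level imports (json, jsonld, ConjunctiveGraph) and a configured app_logger; both arguments are strings, the graph as Turtle and the frame as serialized JSON:

turtle = '<http://example.org/a> <http://example.org/name> "Alice" .'
frame = '{"@context": {"name": "http://example.org/name"}}'
print(create_jsonLD(turtle, frame))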
Code example #12
File: save_jsonld.py Project: luca/ontology
def convert(context, src, dest):
    #context = abspath(context)
    with open(context) as f:
        context = load(f)
    g = ConjunctiveGraph()
    with open(src) as f:
        g.parse(data=f.read(), format='trig')

    with open(dest, 'w') as f:
        # f.write(g.serialize(format='json-ld', indent=4, context=context))
        # Bug in rdflib: Above loses the TextPositionSelector.
        quads = g.serialize(format='nquads')
        json = jsonld.from_rdf(quads)
        jsonc = jsonld.compact(json, context)
        dump(jsonc, f, indent="    ")
Code example #13
File: import.py Project: transformaps/oerworldmap
def convert(input_path, output_path):
    skos = jsonld.from_rdf(get_skos(input_path).decode('unicode_escape').encode('utf-8','ignore'))
    context = {
        "@vocab": "http://www.w3.org/2004/02/skos/core#",
        "name": {
            "@id": "http://www.w3.org/2004/02/skos/core#prefLabel",
            "@container": "@set"
        },
        "alternateName": {
            "@id": "http://www.w3.org/2004/02/skos/core#altLabel",
            "@container": "@set"
        },
        "narrower": {
            "@id": "http://www.w3.org/2004/02/skos/core#narrower",
            "@container": "@set"
        },
        "description": {
            "@id": "http://purl.org/dc/terms/description",
            "@container": "@set"
        },
        "scopeNote": {
            "@container": "@set"
        },
        "notation": {
            "@container": "@set"
        },
        "publisher": "http://purl.org/dc/terms/publisher",
        "title": "http://purl.org/dc/terms/title",
        "preferredNamespacePrefix": "http://purl.org/vocab/vann/preferredNamespacePrefix",
        "preferredNamespaceUri": "http://purl.org/vocab/vann/preferredNamespaceUri",
        "source": "http://purl.org/dc/terms/source"
    }
    frame = {
        "@context": context,
        "@type": "ConceptScheme",
        "@explicit": True,
        "hasTopConcept": {
            "@type": "Concept",
            "narrower": {
                "@type": "Concept"
            }
        }
    }
    framed = jsonld.compact(jsonld.frame(skos, frame), context)
    del framed['@context']
    with open(output_path, 'w') as output_file:
        json.dump(framed, output_file, indent=2, ensure_ascii=False)
    print "Wrote data for " + input_path + " to " + output_path
Code example #14
    def produceGeoJsonLd(data, structure, types, frame):

        result = EOCollector.collect(data)

        result = GeoJsonProducer.annotate(result, structure)

        result = GeoJsonProducer.formatValues(result)

        graph = EOGraph()

        graph.addEoTriples(structure, result, types)

        g = jsonld.from_rdf(graph.serialize())

        framed = jsonld.frame(g, frame)

        return framed
Code example #15
File: rdffile.py Project: mrcnc/arches
    def write_resources(self,
                        graph_id=None,
                        resourceinstanceids=None,
                        **kwargs):
        super(RdfWriter,
              self).write_resources(graph_id=graph_id,
                                    resourceinstanceids=resourceinstanceids,
                                    **kwargs)
        g = self.get_rdf_graph()
        value = g.serialize(format='nt')
        # the options key must be the literal string 'format'
        js = from_rdf(str(value), options={'format': 'application/nquads'})

        framing = {
            "@omitDefault":
            True,
            "@type":
            "%sgraph/%s" %
            (settings.ARCHES_NAMESPACE_FOR_DATA_EXPORT, self.graph_id)
        }

        js = frame(js, framing)

        context = self.graph_model.jsonldcontext
        try:
            context = JSONDeserializer().deserialize(context)
        except ValueError:
            if context == '':
                context = {}
            context = {"@context": context}
        except AttributeError:
            context = {"@context": {}}

        out = compact(js, context)
        out = json.dumps(out,
                         indent=kwargs.get('indent', None),
                         sort_keys=True)
        dest = StringIO(out)

        full_file_name = os.path.join('{0}.{1}'.format(self.file_name,
                                                       'jsonld'))
        return [{'name': full_file_name, 'outputfile': dest}]
Code example #16
def result_data_to_jsonld(result_data, context):

    if set(result_data["head"]["vars"]) != set(
        ["subject", "predicate", "object"]):
        raise Exception("result not in s/p/o format")

    result = {
        "@default": map(reformat_node, result_data["results"]["bindings"])
    }

    # print json.dumps(result, indent=4)
    compacted_graph = jsonld.compact(jsonld.from_rdf(result),
                                     context)["@graph"]

    # remove bad data generated by earlier code
    compacted_graph = [
        fix_leaves(s) for s in compacted_graph if not bad_item(s)
    ]

    contained = dict(contained_items(compacted_graph))

    return compacted_graph, contained
Code example #17
File: oajson.py Project: spyysalo/restful-oa
def from_rdf(rdf, context=None, base=None, remove_context=False):
    """Serialize RDF as OA JSON-LD, return compacted JSON-LD."""

    # From http://www.w3.org/TR/json-ld/#h3_serializing-deserializing-rdf:
    #
    #    Deserializing [expanded and flattened JSON-LD] to RDF now is
    #    a straightforward process of turning each node object into
    #    one or more RDF triples. [...] The process of serializing RDF
    #    as JSON-LD can be thought of as the inverse of this last
    #    step, creating an expanded JSON-LD document closely matching
    #    the triples from RDF, using a single node object for all
    #    triples having a common subject, and a single property for
    #    those triples also having a common predicate.
    #
    # See also: http://www.w3.org/TR/2014/REC-json-ld-api-20140116/#rdf-serialization-deserialization-algorithms

    if context is None:
        context = default_context()
    if base is None:
        base = default_base()

    document = jsonld.from_rdf(rdf, {'format': 'application/nquads'})
    return compact(document, context, base, remove_context)
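
The spec passage quoted in the comment describes from_rdf as the inverse of JSON-LD-to-RDF serialization; a quick round trip (with an invented document) makes that concrete:

from pyld import jsonld

doc = {
    '@context': {'name': 'http://example.org/name'},
    '@id': 'http://example.org/a',
    'name': 'Alice',
}

# Triples sharing a subject come back from from_rdf as one node object.
nquads = jsonld.to_rdf(doc, {'format': 'application/nquads'})
print(jsonld.from_rdf(nquads, {'format': 'application/nquads'}))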
Code example #18
File: serialize.py Project: fserena/agora-gw
def serialize_graph(g, format=TURTLE, frame=None, skolem=True):
    if skolem:
        cg = skolemize(g)
    else:
        cg = ConjunctiveGraph()
        cg.__iadd__(g)

    context = build_graph_context(g)

    if format == TURTLE:
        for prefix, uri in g.namespaces():
            if prefix in context:
                cg.bind(prefix, uri)

        return cg.serialize(format='turtle')

    ted_nquads = cg.serialize(format='nquads')
    ld = jsonld.from_rdf(ted_nquads)
    if frame is not None:
        ld = jsonld.frame(ld, {'context': context, '@type': str(frame)})
    ld = jsonld.compact(ld, context)

    return json.dumps(ld, indent=3, sort_keys=True)
Code example #19
def run_testcases2(profile, executable, halt_on_error, limit_testcase_count,
                   iri):
    conn = my_ag_conn()
    if iri == None:
        pointer = select_one_result_and_one_binding(
            conn, "localhost:last_tau_testcases_parsed rdf:value ?pointer.")
    else:
        pointer = franz.openrdf.model.URI(iri)
    #print(pointer)
    graph, result = read_pointer(conn, pointer)
    quads = read_quads_from_context(conn, graph)
    jld = jsonld.from_rdf({'@default': quads}, {})

    data = frame_result(jld, result)
    testcases = data['@graph'][0]['rdf:value']['@list']
    for tc_idx, tc in enumerate(testcases):
        if tc_idx == limit_testcase_count:
            break
        queries = tc['tc:queries']['@list']
        for q in queries:
            query_pointer_uri = construct_pointer(conn, q, graph)
            #query_pointer_uri is a single uri, and you can read the default graph to figure out the value it points to, and what the relevant graph is

            if profile == 'pyco3':
                if executable == None:
                    executable = 'pyco3'

            args = [
                executable, '-g', 'main', '--', '--task', query_pointer_uri
            ]
            logging.getLogger(__name__).info(f'#spawning: {shlex.join(args)}')
            cmpl = subprocess.run(args)

            if halt_on_error:
                if cmpl.returncode != 0:
                    exit()
Code example #20
File: tests.py Project: nypen/insight
    for key in result:
        print(key, " : ", result[key])

    print(
        "---------------------------------------------------------------------------------------------------"
    )
    print(id + " JSONLD")
    print(
        "---------------------------------------------------------------------------------------------------"
    )

    graph = EOGraph()

    graph.addEoTriples(structure1, result)

    g = jsonld.from_rdf(graph.serialize())

    filename = "./Output/rdfGraph_{}.json".format(id)
    graph.printRdf(filename)

    # compacted = jsonld.compact(g, "https://schema.org/docs/jsonldcontext.jsonld")

    framed = jsonld.frame(g, frame)
    print(json.dumps(framed, indent=2))
    print()
    print()

# platformId = GCMDApi.getPlatformId("Sentinel-2A")

# print(platformId)
Code example #21
File: rdfindex.py Project: no-reply/lotnd
    endpoint.setQuery(thesisQuery)
    endpoint.setReturnFormat(XML)
    thesisXML = endpoint.query().convert()
    #add the thesis to the temporary graph
#    print thesisXML.serialize(format='n3')
    tmpgraph = rdflib.Graph(g.store, osuNs['theses'])
    tmpgraph.parse(data=thesisXML.serialize(format='xml'))

    for o in tmpgraph.objects():
        if isinstance(o, rdflib.URIRef):
            query = "DESCRIBE <" + o + ">"
            endpoint.setQuery(query)
            desc = endpoint.query().convert()
            tmpgraph.parse(data=desc.serialize(format='xml'))

    j = jsonld.compact(jsonld.from_rdf(g.serialize(format='nquads')), contexts)
    if 0 in j['@graph']:
        try:
            outfile.write(json.dumps(j['@graph'][0], indent=1))
            j = jsonld.frame(j, json.load(urllib2.urlopen('http://achelo.us/thesis_frame.jsonld')))
            outfile_f.write(json.dumps(j['@graph'][0], indent=1))
            elastic.index(j['@graph'][0], 'theses', 'thesis')
        except Exception as e:
            print e 
            print json.dumps(j['@graph'])
            outfile_e.write(json.dumps(j))
            continue

    if (count % 100) == 0:
        print count
Code example #22
def pyld_json_from_rdflib_graph(graph):
    """Get PyLD JSON object from and rdflib input graph."""
    default_graph = pyld_graph_from_rdflib_graph(graph)
    return jsonld.from_rdf({'@default': default_graph})
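
pyld_graph_from_rdflib_graph isn't shown here, but the dict passed to from_rdf follows PyLD's parsed-dataset shape: graph names (here only '@default') mapped to lists of triples whose terms are {'type': ..., 'value': ...} dicts. A hand-built sketch:

from pyld import jsonld

dataset = {
    '@default': [{
        'subject': {'type': 'IRI', 'value': 'http://example.org/a'},
        'predicate': {'type': 'IRI', 'value': 'http://example.org/name'},
        'object': {
            'type': 'literal',
            'value': 'Alice',
            'datatype': 'http://www.w3.org/2001/XMLSchema#string',
        },
    }]
}

print(jsonld.from_rdf(dataset))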
Code example #23
File: serialize.py Project: fserena/agora-gw
def _ted_as_json_ld(sg):
    g = ConjunctiveGraph()
    g.__iadd__(sg)

    for res in g.query("""SELECT ?p ?name WHERE { ?p a <%s> ; <%s> ?name}""" %
                       (WOT.Property, WOT.interactionName)):
        g.remove((res.p, WOT.interactionName, res.name))
        g.add((res.p, WOT.propertyName, res.name))

    for res in g.query("""SELECT ?p ?name WHERE { ?p a <%s> ; <%s> ?name}""" %
                       (WOT.Action, WOT.interactionName)):
        g.remove((res.p, WOT.interactionName, res.name))
        g.add((res.p, WOT.actionName, res.name))

    for res in g.query("""SELECT ?p ?name WHERE { ?p a <%s> ; <%s> ?name}""" %
                       (WOT.Event, WOT.interactionName)):
        g.remove((res.p, WOT.interactionName, res.name))
        g.add((res.p, WOT.eventName, res.name))

    context = build_context(g)

    if 'pid' in context:
        context['pid'] = str(WOT.interactionName)
    if 'aid' in context:
        context['aid'] = str(WOT.interactionName)
    if 'eid' in context:
        context['eid'] = str(WOT.interactionName)

    cg = skolemize(g)
    ted_nquads = cg.serialize(format='nquads')
    ld = jsonld.from_rdf(ted_nquads)

    td_frame = jsonld.compact(
        jsonld.frame(ld, {
            'context': context,
            '@type': str(CORE.ThingDescription)
        }), context)

    td_context = td_frame['@context']
    del td_frame['@context']
    ted_frame = jsonld.compact(
        jsonld.frame(ld, {
            'context': context,
            '@type': str(CORE.ThingEcosystemDescription)
        }), context)
    ted_context = ted_frame['@context']
    del ted_frame['@context']

    component_ids = []
    ted_components = ted_frame.get('describes', {}).get('components', [])
    if isinstance(ted_components, dict) or isinstance(ted_components, str):
        ted_components = [ted_components]
    for component in ted_components:
        # if it does not contain 'describedBy' it is a resource
        cid = component['@id'] if isinstance(
            component, dict) and 'describedBy' in component else component
        component_ids.append(cid)
    if component_ids:
        ted_frame['describes']['components'] = component_ids
    if '@graph' not in td_frame:
        source_td_frame = copy.deepcopy(td_frame)
        td_frame = {'@graph': []}
        if source_td_frame:
            td_frame['@graph'].append(source_td_frame)

    td_frame['@graph'].append(ted_frame)
    td_frame['@context'] = merge_two_dicts(td_context, ted_context)
    try:
        for pdata in path_data("$..interactions", td_frame['@graph']):
            if isinstance(pdata, list):
                for int_dict in pdata:
                    replace_interaction_name(int_dict)
            else:
                replace_interaction_name(pdata)
    except TypeError:
        pass

    return json.dumps(td_frame, indent=3, sort_keys=True)
Code example #24
File: runtests.py Project: icaromedeiros/pyld
    def main(self):
        print 'PyLD Unit Tests'
        print 'Use -h or --help to view options.'

        # add program options
        self.parser.add_option('-f', '--file', dest='file',
            help='The single test file to run', metavar='FILE')
        self.parser.add_option('-d', '--directory', dest='directory',
            help='The directory full of test files', metavar='DIR')
        self.parser.add_option('-v', '--verbose', dest='verbose',
            action='store_true', default=False,
            help='Prints verbose test data')

        # parse options
        (self.options, args) = self.parser.parse_args()

        # check if file or directory were specified
        if self.options.file == None and self.options.directory == None:
            raise Exception('No test file or directory specified.')

        # check if file was specified, exists, and is file
        if self.options.file is not None:
            if (os.path.exists(self.options.file) and
                os.path.isfile(self.options.file)):
                # add manifest file to the file list
                self.manifest_files.append(os.path.abspath(self.options.file))
            else:
                raise Exception('Invalid test file: "%s"' % self.options.file)

        # check if directory was specified, exists and is dir
        if self.options.directory is not None:
            if (os.path.exists(self.options.directory) and
                os.path.isdir(self.options.directory)):
                # load manifest files from test directory
                for test_dir, dirs, files in os.walk(self.options.directory):
                    for manifest in files:
                        # add all .jsonld manifest files to the file list
                        if (manifest.find('manifest') != -1 and
                            manifest.endswith('.jsonld')):
                            self.manifest_files.append(
                                join(test_dir, manifest))
            else:
                raise Exception('Invalid test directory: "%s"' %
                    self.options.directory)

        # see if any manifests have been specified
        if len(self.manifest_files) == 0:
            raise Exception('No manifest files found.')

        passed = 0
        failed = 0
        total = 0

        # run the tests from each manifest file
        for manifest_file in self.manifest_files:
            test_dir = os.path.dirname(manifest_file)
            manifest = json.load(open(manifest_file, 'r'))
            count = 1

            for test in manifest['sequence']:
                # skip unsupported types
                skip = True
                test_type = test['@type']
                for tt in TEST_TYPES:
                    if tt in test_type:
                        skip = False
                        break
                if skip:
                    print 'Skipping test: "%s" ...' % test['name']
                    continue

                print 'JSON-LD/%s %04d/%s...' % (
                    manifest['name'], count, test['name']),

                total += 1
                count += 1

                # read input file
                with open(join(test_dir, test['input'])) as f:
                    if test['input'].endswith('.jsonld'):
                        input = json.load(f)
                    else:
                        input = f.read().decode('utf8')
                # read expect file
                with open(join(test_dir, test['expect'])) as f:
                    if test['expect'].endswith('.jsonld'):
                        expect = json.load(f)
                    else:
                        expect = f.read().decode('utf8')
                result = None

                # JSON-LD options
                options = {
                    'base': 'http://json-ld.org/test-suite/tests/' +
                        test['input']}

                try:
                    if 'jld:NormalizeTest' in test_type:
                        options['format'] = 'application/nquads'
                        result = jsonld.normalize(input, options)
                    elif 'jld:ExpandTest' in test_type:
                        result = jsonld.expand(input, options)
                    elif 'jld:CompactTest' in test_type:
                        ctx = json.load(open(join(test_dir, test['context'])))
                        result = jsonld.compact(input, ctx, options)
                    elif 'jld:FrameTest' in test_type:
                        frame = json.load(open(join(test_dir, test['frame'])))
                        result = jsonld.frame(input, frame, options)
                    elif 'jld:FromRDFTest' in test_type:
                        result = jsonld.from_rdf(input, options)
                    elif 'jld:ToRDFTest' in test_type:
                        options['format'] = 'application/nquads'
                        result = jsonld.to_rdf(input, options)
    
                    # check the expected value against the test result
                    success = deep_compare(expect, result)
    
                    if success:
                        passed += 1
                        print 'PASS'
                    else:
                        failed += 1
                        print 'FAIL'
    
                    if not success or self.options.verbose:
                        print 'Expect:', json.dumps(expect, indent=2)
                        print 'Result:', json.dumps(result, indent=2)
                except jsonld.JsonLdError as e:
                    print '\nError: ', e
                    failed += 1
                    print 'FAIL'

        print 'Done. Total:%d Passed:%d Failed:%d' % (total, passed, failed)
Code example #25
        "contains": "http://example.org/library/the-republic#introduction"
    }, {
        "@id": "http://example.org/library/the-republic#introduction",
        "@type": "Chapter",
        "description": "An introductory chapter on The Republic.",
        "title": "The Introduction"
    }]
}

from pyld import jsonld

rdf = jsonld.normalize(data)

print "Normalized"
print fmt(rdf)

frame = {
    "@context": {"@vocab": "http://example.org/"},
    "@type": "Library",
    "contains": {
          "@type": "Book",
          "contains": {
                  "@type": "Chapter"
              }
      }
}

print "Framed"
print fmt(jsonld.frame(jsonld.from_rdf(rdf), frame))

Code example #26
File: runtests.py Project: allenwade3/pyld
    def main(self):
        print('PyLD Unit Tests')
        print('Use -h or --help to view options.')

        # add program options
        self.parser.add_option('-f', '--file', dest='file',
            help='The single test file to run', metavar='FILE')
        self.parser.add_option('-d', '--directory', dest='directory',
            help='The directory full of test files', metavar='DIR')
        self.parser.add_option('-e', '--earl', dest='earl',
            help='The filename to write the EARL report to', metavar='EARL')
        self.parser.add_option('-v', '--verbose', dest='verbose',
            action='store_true', default=False,
            help='Prints verbose test data')

        # parse options
        (self.options, args) = self.parser.parse_args()

        # check if file or directory were specified
        if self.options.file == None and self.options.directory == None:
            raise Exception('No test file or directory specified.')

        # check if file was specified, exists, and is file
        if self.options.file is not None:
            if (os.path.exists(self.options.file) and
                os.path.isfile(self.options.file)):
                # add manifest file to the file list
                self.manifest_files.append(os.path.abspath(self.options.file))
            else:
                raise Exception('Invalid test file: "%s"' % self.options.file)

        # check if directory was specified, exists and is dir
        if self.options.directory is not None:
            if (os.path.exists(self.options.directory) and
                os.path.isdir(self.options.directory)):
                # load manifest files from test directory
                for test_dir, dirs, files in os.walk(self.options.directory):
                    for manifest in files:
                        # add all .jsonld manifest files to the file list
                        if (manifest.find('manifest') != -1 and
                            manifest.endswith('.jsonld')):
                            self.manifest_files.append(
                                join(test_dir, manifest))
            else:
                raise Exception('Invalid test directory: "%s"' %
                    self.options.directory)

        # see if any manifests have been specified
        if len(self.manifest_files) == 0:
            raise Exception('No manifest files found.')

        passed = 0
        failed = 0
        total = 0

        # run the tests from each manifest file
        for manifest_file in self.manifest_files:
            test_dir = os.path.dirname(manifest_file)
            manifest = json.load(open(manifest_file, 'r'))
            count = 1

            for test in manifest['sequence']:
                # skip unsupported types
                skip = True
                test_type = test['@type']
                for tt in test_type:
                    if tt in SKIP_TEST_TYPES:
                        skip = True
                        break
                    if tt in TEST_TYPES:
                        skip = False
                if skip:
                    # print 'Skipping test: "%s" ...' % test['name']
                    continue

                print('JSON-LD/%s %04d/%s...' % (
                    manifest['name'], count, test['name']), end=' ')

                total += 1
                count += 1

                # read input file
                with open(join(test_dir, test['input'])) as f:
                    if test['input'].endswith('.jsonld'):
                        input = json.load(f)
                    else:
                        input = f.read().decode('utf8')
                # read expect file
                with open(join(test_dir, test['expect'])) as f:
                    if test['expect'].endswith('.jsonld'):
                        expect = json.load(f)
                    else:
                        expect = f.read().decode('utf8')
                result = None

                # JSON-LD options
                options = {
                    'base': 'http://json-ld.org/test-suite/tests/' +
                        test['input'],
                    'useNativeTypes': True
                }

                success = False
                try:
                    if 'jld:ExpandTest' in test_type:
                        result = jsonld.expand(input, options)
                    elif 'jld:CompactTest' in test_type:
                        ctx = json.load(open(join(test_dir, test['context'])))
                        result = jsonld.compact(input, ctx, options)
                    elif 'jld:FlattenTest' in test_type:
                        result = jsonld.flatten(input, None, options)
                    elif 'jld:FrameTest' in test_type:
                        frame = json.load(open(join(test_dir, test['frame'])))
                        result = jsonld.frame(input, frame, options)
                    elif 'jld:FromRDFTest' in test_type:
                        result = jsonld.from_rdf(input, options)
                    elif 'jld:ToRDFTest' in test_type:
                        options['format'] = 'application/nquads'
                        result = jsonld.to_rdf(input, options)
                    elif 'jld:NormalizeTest' in test_type:
                        options['format'] = 'application/nquads'
                        result = jsonld.normalize(input, options)

                    # check the expected value against the test result
                    success = deep_compare(expect, result)

                    if success:
                        passed += 1
                        print('PASS')
                    else:
                        failed += 1
                        print('FAIL')

                    if not success or self.options.verbose:
                        print('Expect:', json.dumps(expect, indent=2))
                        print('Result:', json.dumps(result, indent=2))
                except jsonld.JsonLdError as e:
                    print('\nError: ', e)
                    failed += 1
                    print('FAIL')

                # add EARL report assertion
                EARL['subjectOf'].append({
                    '@type': 'earl:Assertion',
                    'earl:assertedBy': EARL['doap:developer']['@id'],
                    'earl:mode': 'earl:automatic',
                    'earl:test': ('http://json-ld.org/test-suite/tests/' +
                        os.path.basename(manifest_file) + test.get('@id', '')),
                    'earl:result': {
                        '@type': 'earl:TestResult',
                        'dc:date': datetime.datetime.utcnow().isoformat(),
                        'earl:outcome': ('earl:' + 'passed' if success else
                            'failed')
                    }
                })

        if self.options.earl:
            f = open(self.options.earl, 'w')
            f.write(json.dumps(EARL, indent=2))
            f.close()

        print('Done. Total:%d Passed:%d Failed:%d' % (total, passed, failed))
Code example #27
}

from pyld import jsonld

compacted = jsonld.compact(data, context)
print fmt(compacted)

doc = fmt(jsonld.expand(compacted))

rdf = jsonld.normalize(compacted)

print "Normalized"
print fmt(rdf)

print "Compacted"
print fmt(jsonld.compact(jsonld.from_rdf(rdf), context))

print "Framed"
print fmt(jsonld.frame(compacted, frame))

print "Reverse framed"
print fmt(jsonld.frame(compacted, rev_frame))

# usual caveats re YAML safety here..
print "YAML"
print pyaml.dump(jsonld.frame(compacted, frame), indent=4)

exit()

from rdflib import Graph, plugin
from rdflib.serializer import Serializer
Code example #28
    async def derive_proof(
        self,
        *,
        proof: dict,
        document: dict,
        reveal_document: dict,
        document_loader: DocumentLoaderMethod,
        nonce: bytes = None,
    ):
        """Derive proof for document, return dict with derived document and proof."""

        # Validate that the input proof document has a proof compatible with this suite
        if proof.get("type") not in self.supported_derive_proof_types:
            raise LinkedDataProofException(
                f"Proof document proof incompatible, expected proof types of"
                f" {self.supported_derive_proof_types}, received " +
                proof["type"])

        # Extract the BBS signature from the input proof
        signature = b64_to_bytes(proof["proofValue"])

        # Initialize the BBS signature suite
        # This is used for creating the input document verification data
        # NOTE: both suite._create_verify_xxx_data and self._create_verify_xxx_data
        # are used in this file. They have small changes in behavior
        suite = BbsBlsSignature2020(key_pair=self.key_pair)

        # Initialize the derived proof
        derived_proof = self.proof.copy() if self.proof else {}

        # Ensure proof type is set
        derived_proof["type"] = self.signature_type

        # Get the input document and proof statements
        document_statements = suite._create_verify_document_data(
            document=document, document_loader=document_loader)
        proof_statements = suite._create_verify_proof_data(
            proof=proof, document=document, document_loader=document_loader)

        # Transform any blank node identifiers for the input
        # document statements into actual node identifiers
        # e.g _:c14n0 => urn:bnid:_:c14n0
        transformed_input_document_statements = (
            self._transform_blank_node_ids_into_placeholder_node_ids(
                document_statements))

        # Transform the resulting RDF statements back into JSON-LD
        compact_input_proof_document = jsonld.from_rdf(
            "\n".join(transformed_input_document_statements))

        # Frame the result to create the reveal document result
        reveal_document_result = jsonld.frame(
            compact_input_proof_document,
            reveal_document,
            {"documentLoader": document_loader},
        )

        # Canonicalize the resulting reveal document
        reveal_document_statements = suite._create_verify_document_data(
            document=reveal_document_result, document_loader=document_loader)

        # Get the indices of the revealed statements from the transformed input document
        # offset by the number of proof statements
        number_of_proof_statements = len(proof_statements)

        # Always reveal all the statements associated to the original proof
        # these are always the first statements in the normalized form
        proof_reveal_indices = [
            indice for indice in range(number_of_proof_statements)
        ]

        # Reveal the statements indicated from the reveal document
        document_reveal_indices = list(
            map(
                lambda reveal_statement: transformed_input_document_statements.
                index(reveal_statement) + number_of_proof_statements,
                reveal_document_statements,
            ))

        # Check there is not a mismatch
        if len(document_reveal_indices) != len(reveal_document_statements):
            raise LinkedDataProofException(
                "Some statements in the reveal document not found in original proof"
            )

        # Combine all indices to get the resulting list of revealed indices
        reveal_indices = [*proof_reveal_indices, *document_reveal_indices]

        # Create a nonce if one is not supplied
        nonce = nonce or urandom(50)

        derived_proof["nonce"] = bytes_to_b64(nonce,
                                              urlsafe=False,
                                              pad=True,
                                              encoding="utf-8")

        # Combine all the input statements that were originally signed
        # NOTE: we use plain strings here as input for the bbs lib.
        # the MATTR lib uses bytes, but the wrapper expects strings
        # it also works if we pass bytes as input
        all_input_statements = [*proof_statements, *document_statements]

        # Fetch the verification method
        verification_method = self._get_verification_method(
            proof=proof, document_loader=document_loader)

        # Create key pair from public key in verification method
        key_pair = self.key_pair.from_verification_method(verification_method)

        # Get the proof messages (revealed or not)
        proof_messages = []
        for input_statement_index in range(len(all_input_statements)):
            # if input statement index in revealed messages indexes use revealed type
            # otherwise use blinding
            proof_type = (ProofMessageType.Revealed
                          if input_statement_index in reveal_indices else
                          ProofMessageType.HiddenProofSpecificBlinding)
            proof_messages.append(
                ProofMessage(
                    message=all_input_statements[input_statement_index],
                    proof_type=proof_type,
                ))

        # get bbs key from bls key pair
        bbs_public_key = BlsKeyPair(
            public_key=key_pair.public_key).get_bbs_key(
                len(all_input_statements))

        # Compute the proof
        proof_request = CreateProofRequest(
            public_key=bbs_public_key,
            messages=proof_messages,
            signature=signature,
            nonce=nonce,
        )

        output_proof = bls_create_proof(proof_request)

        # Set the proof value on the derived proof
        derived_proof["proofValue"] = bytes_to_b64(output_proof,
                                                   urlsafe=False,
                                                   pad=True,
                                                   encoding="utf-8")

        # Set the relevant proof elements on the derived proof from the input proof
        derived_proof["verificationMethod"] = proof["verificationMethod"]
        derived_proof["proofPurpose"] = proof["proofPurpose"]
        derived_proof["created"] = proof["created"]

        return DeriveProofResult(document={**reveal_document_result},
                                 proof=derived_proof)
Code example #29
File: convert.py Project: zimeon/rdflib-pyld-compat
def pyld_jsonld_from_rdflib_graph(graph):
    """Get PyLD JSON-LD object from and rdflib input graph."""
    default_graph = _pyld_dataset_from_rdflib_graph(graph)
    json = jsonld.from_rdf({'@default': default_graph})
    _fix_type_null(json)
    return json