Code example #1
def canonize_node(g, node, authority, id=None):
    skolem = ConjunctiveGraph()

    if isinstance(node, URIRef):
        node_parse = urlparse(node)
        node_id = node_parse.path.lstrip('/')
        if node_id != id:
            return g

    if not id:
        id = str(node)

    authority = authority.rstrip('/')
    skolem_uri = URIRef('/'.join([authority, str(id)]))
    for s, p, o in g:
        if s == node:
            s = skolem_uri
        if o == node:
            o = skolem_uri
        skolem.add((s, p, o))
    return skolem
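
A minimal way to exercise this helper, assuming the rdflib and urllib imports the snippet leaves out and a made-up authority IRI:

from rdflib import ConjunctiveGraph, BNode, Literal, URIRef
from urllib.parse import urlparse  # needed by canonize_node's URIRef branch

g = ConjunctiveGraph()
person = BNode()
g.add((person, URIRef('http://xmlns.com/foaf/0.1/name'), Literal('Alice')))
# every occurrence of the blank node is rewritten to https://example.org/ids/<bnode-id>
skolemized = canonize_node(g, person, 'https://example.org/ids')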
Code example #2
File: reader.py Project: arcangelo7/oc_ocdm
    def _load_graph(self, file_path: str) -> ConjunctiveGraph:
        formats: List[str] = [
            "json-ld", "rdfxml", "turtle", "trig", "nt11", "nquads"
        ]

        loaded_graph: ConjunctiveGraph = ConjunctiveGraph()

        errors: str = ""
        for cur_format in formats:
            try:
                if cur_format == "json-ld":
                    with open(file_path, "rt") as f:
                        json_ld_file: Any = json.load(f)
                        if isinstance(json_ld_file, dict):
                            json_ld_file: List[Any] = [json_ld_file]

                        for json_ld_resource in json_ld_file:
                            # Trick to force the use of a pre-loaded context if the format
                            # specified is JSON-LD
                            if "@context" in json_ld_resource:
                                cur_context: str = json_ld_resource["@context"]
                                if cur_context in self.context_map:
                                    context_json: Any = self.context_map[
                                        cur_context]["@context"]
                                    json_ld_resource["@context"] = context_json

                            loaded_graph.parse(data=json.dumps(
                                json_ld_resource, ensure_ascii=False),
                                               format=cur_format)
                else:
                    loaded_graph.parse(file_path, format=cur_format)

                return loaded_graph
            except Exception as e:
                errors += f" | {e}"  # Try another format

        raise IOError(
            "1",
            f"It was impossible to handle the format used for storing the file '{file_path}'{errors}"
        )
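
The same try-each-format fallback in isolation; the file name and format list here are placeholders:

from rdflib import ConjunctiveGraph

g = ConjunctiveGraph()
for fmt in ("json-ld", "turtle", "trig", "nquads"):
    try:
        g.parse("data.rdf", format=fmt)  # hypothetical input file
        break
    except Exception:
        continue  # parsing failed, try the next format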
Code example #3
File: rdf_views.py Project: pdc/jeremydaysite
def strip_rdf(request, number, format):
    ordinal = int(number)
    if ordinal < 1:
        raise Http404
    strips = twslib.get_tws(settings.TWS_FILE, settings.TWS_SRC_PREFIX)
    if ordinal > len(strips):
        raise Http404

    # Strips are organized by date, not number,
    # so we need to work out which strip has this number.
    indexes_by_number = dict(
        (strip['number'], i) for (i, strip) in enumerate(strips))
    index = indexes_by_number[ordinal]
    strip = strips[index]

    graph = ConjunctiveGraph()
    graph.bind('alleged', ALLEGED)
    graph.bind('dc', DC)
    graph.bind('foaf', FOAF)
    graph.bind('tws', TWS)

    strip_subject = TWS['strip%d' % ordinal]
    graph.add((strip_subject, RDF.type, ALLEGED['comic']))
    graph.add((strip_subject, DC['creator'], Literal('Jeremy Day')))
    graph.add((strip_subject, DC['title'], Literal(strip['title'])))
    graph.add((strip_subject, DC['date'], Literal(strip['date'])))

    graph.add((strip_subject, ALLEGED['excerpt'], URIRef(strip['icon_src'])))
    graph.add((strip_subject, ALLEGED['image'], URIRef(strip['image_src'])))

    if ordinal > 1:
        graph.add((strip_subject, ALLEGED['prev-page'],
                   TWS['strip%d' % (ordinal - 1)]))
    if ordinal < len(strips):
        graph.add((strip_subject, ALLEGED['next-page'],
                   TWS['strip%d' % (ordinal + 1)]))

    response = HttpResponse(graph.serialize(format=format),
                            content_type=format_content_types[format])
    return response
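
format_content_types is a module-level table this view relies on but the snippet does not show; a hypothetical version covering two formats rdflib can serialize might be:

format_content_types = {
    'xml': 'application/rdf+xml',  # hypothetical entries
    'n3': 'text/n3',
}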
Code example #4
File: user.py Project: Cheshire-Grampa/cheshire3
    def __init__(self, conn, name):
        user = irods.irodsUser(conn, name)
        self.id = user.getId()
        self.username = name
        self.email = ""
        self.address = ""
        self.tel = ""
        self.realName = ""
        self.description = ""
        self.flags = {}

        umd = user.getUserMetadata()
        for u in umd:
            if u[0] == 'rdf':
                # try to parse
                try:
                    g = ConjunctiveGraph()
                except Exception:
                    continue
                for (key, val) in NS.items():
                    g.bind(key, val)
                data = StringInputSource(u[1])
                try:
                    if u[2]:
                        g.parse(data, u[2])
                    else:
                        g.parse(data)
                    me = NS['demo']['users/%s' % self.id]
                    for (p, o) in g.predicate_objects(me):
                        if p in predicateMap:
                            setattr(self, predicateMap[p], str(o))
                except:
                    # rdf exists, could parse, but is broken
                    raise
            elif u[0] in self.simpleFields:
                setattr(self, u[0], u[1])
            elif u[0] == 'flags':
                # should be a {} of flag : [obj, obj]
                # xxx
                pass
Code example #5
def resources(request, uri, ext=None):
    if request.user.is_authenticated():
        perms = ProjectPermission.objects.filter(user=request.user)
    else:
        perms = []

    uri = uri.rstrip('/')
    store_g = Graph(store=rdfstore(), identifier=URIRef(uri))
    g = Graph()
    g += store_g

    if len(g) > 0:
        for i in perms:
            anno_uri = settings.URI_MINT_BASE \
                + "/projects/" + i.identifier \
                + "/resources/" + uri \
                + "/annotations/"
            anno_url = reverse('semantic_store_project_annotations',
                               kwargs={'project_uri': i.identifier}) \
                               + "?uri=" + uri
            g.add((URIRef(uri), NS.ore['aggregates'], URIRef(anno_uri)))
            g.add(
                (URIRef(anno_uri), NS.ore['isDescribedBy'], URIRef(anno_url)))
            g.add((URIRef(anno_uri), NS.rdf['type'], NS.ore['Aggregation']))
            g.add((URIRef(anno_uri), NS.rdf['type'], NS.rdf['List']))
            g.add((URIRef(anno_uri), NS.rdf['type'], NS.dms['AnnotationList']))
        return NegotiatedGraphResponse(request, g)
    else:
        main_graph_store = ConjunctiveGraph(store=rdfstore(),
                                            identifier=default_identifier)
        main_graph = Graph()
        main_graph += main_graph_store
        g = Graph()
        bind_namespaces(g)
        for t in main_graph.triples((URIRef(uri), None, None)):
            g.add(t)
        if len(g) > 0:
            return NegotiatedGraphResponse(request, g)
        else:
            return HttpResponseNotFound()
Code example #6
File: wikidata.py Project: BLADErangu/onto
def build_item_data_graph(item_type,
                          item_properties,
                          item_data_graph_path,
                          endpoint_url,
                          max_num_objects,
                          create=True):
    item_data_graph = ConjunctiveGraph("Sleepycat")

    item_data_graph.open(item_data_graph_path, create=create)

    for item_property in item_properties:
        item_data_query = RETRIEVE_ITEM_PROPERTIES_QUERY % (
            item_type, item_type, item_type, item_type, item_type,
            item_property)
        sparql_client = SPARQLWrapper(endpoint_url, returnFormat=JSON)
        sparql_client.setTimeout(604800)
        sparql_client.setQuery(item_data_query)
        results = sparql_client.queryAndConvert()
        num_bindings = len(results["results"]["bindings"])
        added_triples = defaultdict(lambda: defaultdict(lambda: 0))
        for i, binding in enumerate(results["results"]["bindings"]):
            print("[{}/{}]".format(i + 1, num_bindings))
            subject = URIRef(binding["s"]["value"])
            predicate = URIRef(binding["p"]["value"])
            if binding["o"]["type"] == "literal":
                # plain literals may lack a "datatype" key, so use .get()
                obj = Literal(binding["o"]["value"],
                              datatype=binding["o"].get("datatype"))
            else:
                obj = URIRef(binding["o"]["value"])
            triple = (subject, predicate, obj)
            if max_num_objects is not None:
                if added_triples[subject][predicate] < max_num_objects:
                    added_triples[subject][predicate] += 1
                    item_data_graph.add(triple)
            else:
                item_data_graph.add(triple)

    item_data_graph.close()
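
The "Sleepycat" store persists the graph in Berkeley DB files and needs the bsddb3 bindings installed (older rdflib releases); a minimal open/add/close round trip with made-up paths and terms:

from rdflib import ConjunctiveGraph, URIRef, Literal

g = ConjunctiveGraph("Sleepycat")
g.open("/tmp/item-store", create=True)
g.add((URIRef("urn:s"), URIRef("urn:p"), Literal("o")))
g.close()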
Code example #7
def testFinalNewline():
    """
    http://code.google.com/p/rdflib/issues/detail?id=5
    """

    graph = ConjunctiveGraph()
    graph.add(
        (
            URIRef("http://ex.org/a"),
            URIRef("http://ex.org/b"),
            URIRef("http://ex.org/c"),
        )
    )

    failed = set()
    for p in rdflib.plugin.plugins(None, rdflib.plugin.Serializer):
        v = graph.serialize(format=p.name)
        lines = v.split("\n".encode("latin-1"))
        if "\n".encode("latin-1") not in v or (lines[-1] != "".encode("latin-1")):
            failed.add(p.name)
    assert len(failed) == 0, "No final newline for formats: '%s'" % failed
Code example #8
 def initConnection(self, clear=True):
     if not (self.conn and self.conn.opened):
         if self.url.endswith('.fs'):
             from ZODB.FileStorage import FileStorage
             if clear and os.path.exists(self.path):
                 # remove the FileStorage and its sidecar files
                 for suffix in ('', '.index', '.tmp', '.lock'):
                     os.unlink(self.path + suffix)
             openstr = os.path.abspath(os.path.expanduser(self.url[7:]))
             fs = FileStorage(openstr)
         else:
             from ZEO.ClientStorage import ClientStorage
             schema, opts = _parse_rfc1738_args(self.url)
             fs = ClientStorage((opts['host'], int(opts['port'])))
         self.zdb = ZODB.DB(fs)
         self.conn = self.zdb.open()
     root = self.conn.root()
     if 'rdflib' not in root:
         root['rdflib'] = ConjunctiveGraph(self.store_name)
     self.graph = self.g = root['rdflib']
     transaction.commit()
Code example #9
 def getDataBySP(self, thingy, propthingy, context=None):
     qdict = {}
     if context:
         qdict['c'] = namespaces.n3encode(context)
     #print "THINGY", thingy
     if len(thingy.split(':')) > 1:
         qdict['s'] = namespaces.n3encode(thingy)
     else:
         qdict['s'] = thingy
     qdict['p'] = namespaces.n3encode(propthingy)
     #print "qdict", qdict
     data = self.tsc.query_statements(qdict)
     #print data
     bg = ConjunctiveGraph()
     namespaces.bindgraph(bg)
     #abnode=BNode()
     res = bg.parse(io.StringIO(data))
     listofo = []
     # this bnode handling is very fragile TODO: replace
     for trip in res:
         listofo.append(str(trip[2]))
     return listofo
Code example #10
    def setUp(self):
        graph = ConjunctiveGraph()
        graph.parse(StringInputSource('''
          @prefix p: <http://example.com/pic/> .
          @prefix : <http://photo.bigasterisk.com/0.1/> .
          @prefix foaf: <http://xmlns.com/foaf/0.1/> .
          @prefix xs: <http://www.w3.org/2001/XMLSchema#> .
          @prefix exif: <http://www.kanzaki.com/ns/exif#> .

          p:a a foaf:Image; exif:dateTime "2014-01-01T00:00:00Z"^^xs:dateTime .
          p:b a foaf:Image; exif:dateTime "2014-01-02T00:00:00Z"^^xs:dateTime .
          p:c a foaf:Image; exif:dateTime "2014-01-03T00:00:00Z"^^xs:dateTime .
          p:d a foaf:Image; exif:dateTime "2014-01-04T00:00:00Z"^^xs:dateTime .
        '''),
                    format='n3')

        bindAll(graph)

        index = imageset.ImageIndex(graph)
        index.finishBackgroundIndexing()
        self.imageSet = imageset.ImageSet(graph, index)
        self.request = self.imageSet.request
Code example #11
 def remove(self, triple, context=None):
     (subject, predicate, object_) = triple
     lock = destructiveOpLocks['remove']
     lock = lock if lock else threading.RLock()
     with lock:
         # Need to determine which quads will be removed if any term is a
         # wildcard
         context = context.__class__(
             self.store,
             context.identifier) if context is not None else None
         ctxId = context.identifier if context is not None else None
         if None in [subject, predicate, object_, context]:
             if ctxId:
                 for s, p, o in context.triples(
                     (subject, predicate, object_)):
                     try:
                         self.reverseOps.remove((s, p, o, ctxId, 'remove'))
                     except ValueError:
                         self.reverseOps.append((s, p, o, ctxId, 'add'))
             else:
                 for s, p, o, ctx in ConjunctiveGraph(self.store).quads(
                     (subject, predicate, object_)):
                     try:
                         self.reverseOps.remove(
                             (s, p, o, ctx.identifier, 'remove'))
                     except ValueError:
                         self.reverseOps.append(
                             (s, p, o, ctx.identifier, 'add'))
         else:
             if not list(
                     self.triples((subject, predicate, object_), context)):
                 return  # triple not present in store, do nothing
             try:
                 self.reverseOps.remove(
                     (subject, predicate, object_, ctxId, 'remove'))
             except ValueError:
                 self.reverseOps.append(
                     (subject, predicate, object_, ctxId, 'add'))
         self.store.remove((subject, predicate, object_), context)
Code example #12
def updateObservationProps(stationcod, typeslist):
    for obstype in typeslist:
        uri = RESOURCE_URI + 'station/' + stationcod

        #Initialization of the graph
        ssn = Namespace("http://purl.oclc.org/NET/ssnx/ssn#")

        store = IOMemory()

        g = ConjunctiveGraph(store=store)
        g.bind("ssn", ssn)

        cpr = URIRef(uri)
        gpr = Graph(store=store, identifier=cpr)

        #Add data to the graph
        gpr.add((cpr, RDF.type, ssn['Sensor']))
        gpr.add((cpr, ssn['observes'], URIRef(RESOURCE_URI + 'prop/' + obstype)))

        #Update RDF
        print(uri + ' | ' + obstype)
        insertGraph(g=gpr, sparql=VIRTUOSO_URL, resourceuri=RESOURCE_URI)
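
The pattern above, a named graph (gpr) sharing its store with a conjunctive view (g), works on its own as well; the IOMemory import path below is the one used by older rdflib releases:

from rdflib import ConjunctiveGraph, Graph, URIRef, Literal
from rdflib.plugins.memory import IOMemory

store = IOMemory()
cg = ConjunctiveGraph(store=store)
named = Graph(store=store, identifier=URIRef("http://example.org/g1"))
named.add((URIRef("urn:s"), URIRef("urn:p"), Literal("o")))
# a triple added to the named graph is visible through the conjunctive view
assert (URIRef("urn:s"), URIRef("urn:p"), Literal("o")) in cg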
Code example #13
File: core.py Project: mindis/QuitStore
    def getFileReferenceAndContext(self, blob, commit):
        """Get the FielReference and Context for a given blob (name, oid) of a commit.

        On Cache miss this method also updates teh commits cache.
        """
        uriFileMap = self.config.getgraphurifilemap()

        if blob not in self._blobs:
            (name, oid) = blob
            content = commit.node(path=name).content
            # content = self.repository._repository[oid].data
            graphUris = self.config.getgraphuriforfile(name)
            graphsFromConfig = set((Graph(identifier=i) for i in graphUris))
            tmp = ConjunctiveGraph()
            tmp.parse(data=content, format='nquads')
            contexts = set(
                (context for context in tmp.contexts(None)
                 if context.identifier in uriFileMap)) | graphsFromConfig
            quitWorkingData = (FileReference(name, content), contexts)
            self._blobs.set(blob, quitWorkingData)
            return quitWorkingData
        return self._blobs.get(blob)
Code example #14
def test_date():
    with CSVW(csv_path="tests/datatypes.date.csv",
              metadata_path="tests/datatypes.date.csv-metadata.json") as csvw:
        rdf_output = csvw.to_rdf()

    g = ConjunctiveGraph()
    g.parse(data=rdf_output, format="turtle")

    date1_lit = Literal("2017-01-09", datatype=XSD.date)
    assert len(list(g.triples((NS['event/1'], NS['date1'], date1_lit)))) == 1

    date2_lit = Literal("2017-01-10Z", datatype=XSD.date)
    assert len(list(g.triples((NS['event/1'], NS['date2'], date2_lit)))) == 1

    date3_lit = Literal("2017-01-11", datatype=XSD.date)
    assert len(list(g.triples((NS['event/1'], NS['date3'], date3_lit)))) == 1

    date4_lit = Literal("2002-09-24-06:00", datatype=XSD.date)
    assert len(list(g.triples((NS['event/1'], NS['date4'], date4_lit)))) == 1

    date5_lit = Literal("2002-09-24+04:00", datatype=XSD.date)
    assert len(list(g.triples((NS['event/1'], NS['date5'], date5_lit)))) == 1
Code example #15
 def participants_private_as_graph(self, discussion_id):
     from assembl.models import Discussion, AgentProfile
     local_uri = self.local_uri()
     discussion = Discussion.get(discussion_id)
     d_storage_name = self.private_user_storage.name
     d_graph_iri = self.private_user_storage.sections[0].graph_iri
     cg = ConjunctiveGraph(identifier=d_graph_iri)
     v = get_virtuoso(self.session, d_storage_name)
     v_main = get_virtuoso(self.session, self.discussion_storage_name())
     participant_ids = discussion.get_participants(True)
     profiles = {
         URIRef(AgentProfile.uri_generic(id, local_uri))
         for id in participant_ids
     }
     self.add_subject_data(v, cg, profiles)
     accounts = [
         account for ((account, p, profile),
                      g) in v_main.triples((None, SIOC.account_of, None))
         if profile in profiles
     ]
     self.add_subject_data(v, cg, accounts)
     return cg
Code example #16
    def html_to_rdf_extruct(html_source) -> ConjunctiveGraph:
        data = extruct.extract(
            html_source, syntaxes=["microdata", "rdfa", "json-ld"], errors="ignore"
        )
        kg = ConjunctiveGraph()

        base_path = Path(__file__).parent.parent  # current directory
        static_file_path = str((base_path / "static/data/jsonldcontext.json").resolve())

        for md in data["json-ld"]:
            if "@context" in md.keys():
                print(md["@context"])
                if ("https://schema.org" in md["@context"]) or (
                    "http://schema.org" in md["@context"]
                ):
                    md["@context"] = static_file_path
            kg.parse(data=json.dumps(md, ensure_ascii=False), format="json-ld")
        for md in data["rdfa"]:
            if "@context" in md.keys():
                if ("https://schema.org" in md["@context"]) or (
                    "http://schema.org" in md["@context"]
                ):
                    md["@context"] = static_file_path
            kg.parse(data=json.dumps(md, ensure_ascii=False), format="json-ld")
        for md in data["microdata"]:
            if "@context" in md.keys():
                if ("https://schema.org" in md["@context"]) or (
                    "http://schema.org" in md["@context"]
                ):
                    md["@context"] = static_file_path
            kg.parse(data=json.dumps(md, ensure_ascii=False), format="json-ld")

        logging.debug(kg.serialize(format="turtle"))

        kg.namespace_manager.bind("sc", URIRef("http://schema.org/"))
        kg.namespace_manager.bind("bsc", URIRef("https://bioschemas.org/"))
        kg.namespace_manager.bind("dct", URIRef("http://purl.org/dc/terms/"))

        return kg
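
Assuming the method is exposed as a static helper, usage might look like this (the URL is a placeholder and requests is not part of the snippet):

import requests

html = requests.get("https://example.org/dataset-page").text
kg = html_to_rdf_extruct(html)
print(kg.serialize(format="turtle"))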
Code example #17
def populate_ontology():
    ont_path = path_kg + 'traffic_ontology.xml'
    metadata = pd.read_csv(path_src + 'trafficMetaData.csv', sep=',')
    g = ConjunctiveGraph()
    g.load(ont_path)
    g.add((URIRef(base_uri), RDF.type, OWL.Ontology))
    g.bind("owl", OWL)
    g.bind("rdf", RDF)
    g.bind("rdfs", RDFS)
    # g.bind("city", base_uri)
    # populate from metadata: [Path, from[name], to[name], from[has[street]], to[has[street]]]
    populate_from_metadata(metadata, g)
    poi = parse_log()
    for entry in poi:
        point = entry[0][0].split('_')[0] + "_" + entry[0][0].split('_')[1]
        metadata_entry = metadata[metadata['REPORT_ID'] == int(entry[0][0].split('_')[2])]
        address_id = metadata_entry[point + '_NAME'].values[0]

        poi_list = entry[0][1]
        for tmp_poi in poi_list:
            # generate an id for the poi
            tmp_poi_id = str(abs(hash(point + '_' + str(address_id) + '_' + tmp_poi)))
            g.add((base_uri[tmp_poi_id], RDF.type, base_uri['Point_of_interest']))
            g.add((base_uri[tmp_poi_id], RDF.type, base_uri[tmp_poi[0].upper() + tmp_poi[1:]]))
            g.add((base_uri[tmp_poi_id], base_uri['locatedAt'], base_uri[str(address_id)]))

    simple_sequence = []
    events = pd.read_csv(path_processed + 'events.csv')
    mapping = pd.read_csv(path_processed + 'mapping.csv').T.to_dict()
    for k, v in mapping.items():
        g.add((base_uri[v['Unnamed: 0']], base_uri['occursAt'], base_uri[str(v['occursAt'])]))
        g.add((base_uri[v['Unnamed: 0']], RDF.type, base_uri[v['type']]))

    for e in events['Id']:
        simple_sequence.append(str(e))
    with open(path_processed + 'sequence.txt', "w") as seq_file:
        seq_file.write(','.join(simple_sequence))
    g.serialize(path_kg + 'traffic_individuals.xml', format='xml')
Code example #18
    def __process_users_rdf(self):
        print("Processing Users to RDF")
        # User Vertices - Load, rename column with type, and save

        user_df = pd.read_csv(os.path.join(
            self.raw_directory, 'ml-100k/u.user'), sep='|', encoding='ISO-8859-1',
            names=['~id', 'age:Int', 'gender', 'occupation', 'zip_code'])
        user_df['~id'] = user_df['~id'].apply(
            lambda x: f'user_{x}'
        )

        users_graph = ConjunctiveGraph()

        for index, row in user_df.iterrows():
            users_graph.add((
                self.ns_resource[row['~id']], RDF.type, self.ns_ontology.User, self.ns_ontology.DefaultNamedGraph
            ))
            users_graph.add((
                self.ns_resource[row['~id']], self.ns_ontology.age,
                Literal(row['age:Int'], datatype=XSD.integer), self.ns_ontology.Users
            ))
            users_graph.add((
                self.ns_resource[row['~id']], self.ns_ontology.occupation,
                Literal(row['occupation'], datatype=XSD.string),
                self.ns_ontology.Users
            ))
            users_graph.add((
                self.ns_resource[row['~id']], self.ns_ontology.gender, Literal(row['gender'], datatype=XSD.string),
                self.ns_ontology.Users
            ))
            users_graph.add((
                self.ns_resource[row['~id']], self.ns_ontology.zipCode,
                Literal(row['zip_code'], datatype=XSD.string),
                self.ns_ontology.Users
            ))

        users_rdf_file = os.path.join(self.formatted_directory, 'users.nq')
        users_graph.serialize(format='nquads', destination=users_rdf_file)
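
ConjunctiveGraph.add accepts a fourth element naming the target context, which is what lets the nquads serialization above carry graph names; a reduced sketch with invented names:

from rdflib import ConjunctiveGraph, URIRef, Literal
from rdflib.namespace import XSD

g = ConjunctiveGraph()
users = URIRef("http://example.org/graphs/Users")
g.add((URIRef("urn:user_1"), URIRef("urn:age"),
       Literal(25, datatype=XSD.integer), users))
g.serialize(destination="users.nq", format="nquads")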
Code example #19
File: storer.py Project: zuphilip/bcite
    def store(self, cur_g, base_dir, base_iri, context_path, tmp_dir=None,
              override=False, already_processed={}, store_now=True):
        self.repok.new_article()
        self.reperr.new_article()

        if len(cur_g) > 0:
            cur_dir_path, cur_file_path = self.dir_and_file_paths(cur_g, base_dir, base_iri)

            try:
                if not os.path.exists(cur_dir_path):
                    os.makedirs(cur_dir_path)

                final_g = ConjunctiveGraph()
                final_g.addN([item + (cur_g.identifier,) for item in list(cur_g)])

                # Merging the data
                if not override:
                    if cur_file_path in already_processed:
                        stored_g = already_processed[cur_file_path]
                        stored_g.addN(final_g.quads((None, None, None, None)))
                        final_g = stored_g
                    elif os.path.exists(cur_file_path):
                        # This is a conjunctive graph that contains all the triples (and graphs)
                        # the file actually defines - they could be more than those using
                        # 'cur_subject' as subject.
                        final_g = self.load(cur_file_path, cur_g, tmp_dir)

                already_processed[cur_file_path] = final_g

                if store_now:
                    self.__store_in_file(final_g, cur_file_path, context_path)

                return already_processed
            except Exception as e:
                self.reperr.add_sentence("[5] It was impossible to store the RDF statements in %s. %s" %
                                         (cur_file_path, str(e)))

        return None
Code example #20
 def discussion_as_graph(self, discussion_id):
     from assembl.models import Discussion, AgentProfile
     local_uri = self.local_uri()
     discussion = Discussion.get(discussion_id)
     d_storage_name = self.discussion_storage_name()
     d_graph_iri = URIRef(self.discussion_graph_iri())
     v = get_virtuoso(self.session, d_storage_name)
     discussion_uri = URIRef(
         Discussion.uri_generic(discussion_id, local_uri))
     subjects = [
         s for (s, ) in v.query("""SELECT DISTINCT ?s WHERE {
         ?s assembl:in_conversation %s }""" % (discussion_uri.n3()))
     ]
     subjects.append(discussion_uri)
     participant_ids = list(discussion.get_participants(True))
     profiles = {
         URIRef(AgentProfile.uri_generic(id, local_uri))
         for id in participant_ids
     }
     subjects.extend(profiles)
     # add pseudo-accounts
     subjects.extend((URIRef("%sAgentAccount/%d" % (local_uri, id))
                      for id in participant_ids))
     # print len(subjects)
     cg = ConjunctiveGraph(identifier=d_graph_iri)
     self.add_subject_data(v, cg, subjects)
     # add relationships of non-pseudo accounts
     for ((account, p, profile), g) in v.triples(
         (None, SIOC.account_of, None)):
         if profile in profiles:
             cg.add((account, SIOC.account_of, profile, g))
             # Tempting: simplify with this.
             # cg.add((profile, FOAF.account, account, g))
     for (s, o, g) in v.query('''SELECT ?s ?o ?g WHERE {
             GRAPH ?g {?s catalyst:expressesIdea ?o } .
             ?o assembl:in_conversation %s }''' % (discussion_uri.n3())):
         cg.add((s, CATALYST.expressesIdea, o, g))
     return cg
Code example #21
    def test_create_match_graph(self):
        # TODO: verify test accuracy; then remake into test generator (see nose)

        graph = ConjunctiveGraph()

        def classAndSubClass(C1, C2):
            graph.add((C1, RDF.type, RDFS.Class))
            graph.add((C2, RDF.type, RDFS.Class))
            graph.add((C2, RDFS.subClassOf, C1))

        classAndSubClass(ont.T1, ont.T2)
        classAndSubClass(ont.T2, ont.T3)
        classAndSubClass(ont.T3, ont.T4)
        classAndSubClass(ont.T4, ont.T5)

        item1 = URIRef("urn:item1")
        item2 = URIRef("urn:item2")
        item3 = URIRef("urn:item3")
        item4 = URIRef("urn:item4")
        item5 = URIRef("urn:item5")

        graph.add((item1, RDF.type, ont.T1))
        graph.add((item2, RDF.type, ont.T2))
        graph.add((item3, RDF.type, ont.T3))
        graph.add((item4, RDF.type, ont.T4))
        graph.add((item5, RDF.type, ont.T5))

        matchGraph = self.display.create_match_graph(graph)

        def get_aspect(resource):
            for rdfType in matchGraph.objects(resource, RDF.type):
                return self.display.typeAspects.get(rdfType)

        assert get_aspect(item1) == "T1 handler"
        assert get_aspect(item2) == "T1 handler"
        assert get_aspect(item3) == "T1 handler"
        assert get_aspect(item4) == "T1 handler"
        assert get_aspect(item5) == "T5 handler"
Code example #22
File: rdfview.py Project: Sunnepah/oort.python
    def to_graph(self, newgraph=None, autotype=False, shallow=False, deep=()):
        """
        Create a new Graph (or populate ``newgraph``) from contained data.

        If ``autotype`` is True, add the class's defined RDF_TYPE as the
        subject's type, but only if the current subject has no known type.

        If ``shallow`` is True, do not add subsequent statements about objects
        that are ``URIRef``:s, unless they are referenced by one of the
        properties listed in ``deep``.
        """
        subject = self._subject or BNode()  # FIXME: is this ok?
        if not subject: return  # FIXME, see fixme in __init__

        lgraph = newgraph or ConjunctiveGraph()
        if not newgraph:
            for key, ns in self._graph.namespaces():
                lgraph.bind(key, ns)

        for t in self._graph.objects(subject, RDF.type):
            lgraph.add((subject, RDF.type, t))

        if autotype and not self._graph.value(self.uri, RDF.type, None):
            if self.RDF_TYPE:
                lgraph.set((self.uri, RDF.type, self.RDF_TYPE))

        for selector in self._selectors.values():
            value = selector.__get__(self)
            if not value:
                continue
            # TODO: never deep for ..where_self_is.. if shallow?
            selector.back_to_graph(lgraph, subject, value, shallow
                                   and selector.predicate not in deep)

        # FIXME: why is this happening; how can we prevent it?
        for t in lgraph:
            if None in t: lgraph.remove(t)
        return lgraph
Code example #23
    def get_util(self):
        try:
            return self._util
        except AttributeError:
            #ns_mgr = Graph().parse('sys/context/base.jsonld',
            #        format='json-ld').namespace_manager
            #ns_mgr.bind("", vocab_uri)
            #graphcache = GraphCache(config['GRAPH_CACHE'])
            #graphcache.graph.namespace_manager = ns_mgr
            #vocabgraph = graphcache.load(config['VOCAB_SOURCE'])
            #vocabgraph.namespace_manager = ns_mgr
            # TODO: load base vocabularies for labels, inheritance here,
            # or in vocab build step? (Or not at all...)
            #for url in vocabgraph.objects(None, OWL.imports):
            #    graphcache.load(vocab_source_map.get(str(url), url))
            vocabgraph = ConjunctiveGraph()
            vocabgraph.parse(data=json.dumps(self.vocab_data[GRAPH]),
                             context=self.context_data,
                             format='json-ld')
            vocabgraph.namespace_manager.bind("", self.vocab_uri)

            self._util = VocabUtil(vocabgraph, self.lang)
            return self._util
Code example #24
    def setUp(self):
        try:
            self.graph = ConjunctiveGraph(store=self.store)
        except ImportError:
            raise SkipTest("Dependencies for store '%s' not available!" % self.store)
        if self.store == "SQLite":
            _, self.tmppath = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
        else:
            self.tmppath = mkdtemp()
        self.graph.open(self.tmppath, create=True)
        self.michel = URIRef("michel")
        self.tarek = URIRef("tarek")
        self.bob = URIRef("bob")
        self.likes = URIRef("likes")
        self.hates = URIRef("hates")
        self.pizza = URIRef("pizza")
        self.cheese = URIRef("cheese")

        self.c1 = URIRef("context-1")
        self.c2 = URIRef("context-2")

        # delete the graph for each test!
        self.graph.remove((None, None, None))
Code example #25
def testFinalNewline():
    """
    http://code.google.com/p/rdflib/issues/detail?id=5
    """
    import sys
    import platform
    if getattr(sys, 'pypy_version_info', None) or platform.system() == 'Java':
        from nose import SkipTest
        raise SkipTest(
            'Testing under pypy and Jython2.5 fails to detect that ' + \
            'IOMemory is a context_aware store')

    graph = ConjunctiveGraph()
    graph.add((URIRef("http://ex.org/a"), URIRef("http://ex.org/b"),
               URIRef("http://ex.org/c")))

    failed = set()
    for p in rdflib.plugin.plugins(None, rdflib.plugin.Serializer):
        v = graph.serialize(format=p.name)
        lines = v.split(b("\n"))
        if b("\n") not in v or (lines[-1] != b('')):
            failed.add(p.name)
    assert len(failed) == 0, "No final newline for formats: '%s'" % failed
Code example #26
File: serialize.py Project: fserena/agora-gw
def serialize_graph(g, format=TURTLE, frame=None, skolem=True):
    if skolem:
        cg = skolemize(g)
    else:
        cg = ConjunctiveGraph()
        cg += g

    context = build_graph_context(g)

    if format == TURTLE:
        for prefix, uri in g.namespaces():
            if prefix in context:
                cg.bind(prefix, uri)

        return cg.serialize(format='turtle')

    ted_nquads = cg.serialize(format='nquads')
    ld = jsonld.from_rdf(ted_nquads)
    if frame is not None:
        ld = jsonld.frame(ld, {'context': context, '@type': str(frame)})
    ld = jsonld.compact(ld, context)

    return json.dumps(ld, indent=3, sort_keys=True)
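
skolemize here is a helper of the surrounding project; plain rdflib offers a built-in Graph.skolemize() that rewrites blank nodes to .well-known/genid IRIs, which may be all a simpler setup needs:

from rdflib import Graph, BNode, URIRef, Literal

g = Graph()
g.add((BNode(), URIRef("urn:p"), Literal("o")))
sk = g.skolemize()  # returns a new graph with blank nodes replaced by IRIs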
Code example #27
def verify_rdf(rdf_output):
    ids_ns = Namespace("http://foo.example.org/CSV/People-IDs/")
    ages_ns = Namespace("http://foo.example.org/CSV/People-Ages/")
    g = ConjunctiveGraph()
    g.parse(data=rdf_output, format="turtle")

    all_subjects = {x for x in g.subjects()}
    assert len(all_subjects) == 2

    bob_subj = ids_ns['1']
    joe_subj = ids_ns['2']
    assert bob_subj in all_subjects
    assert joe_subj in all_subjects

    # Bob's details
    assert len(list(g.triples((bob_subj, ids_ns.id, Literal(1))))) == 1
    assert len(list(g.triples((bob_subj, ids_ns.name, Literal("Bob"))))) == 1
    assert len(list(g.triples((bob_subj, ages_ns.age, Literal(34))))) == 1

    # Joe's details
    assert len(list(g.triples((joe_subj, ids_ns.id, Literal(2))))) == 1
    assert len(list(g.triples((joe_subj, ids_ns.name, Literal("Joe"))))) == 1
    assert len(list(g.triples((joe_subj, ages_ns.age, Literal(54))))) == 1
Code example #28
def export_collection(collection):
    g = ConjunctiveGraph()
    domain = URIRef(HOST)
    ctx = URIRef('%s/collections/%s' % (HOST, collection['id']))
    g.add((ctx, RDFS.label, Literal(collection['label']), domain))
    g.add((ctx, ALEPH.foreignId, Literal(collection['foreign_id']), domain))
    # print g.serialize(format='nquads')
    # pprint(collection)

    q = {
        'query': {
            'term': {
                'collection_id': collection['id']
            }
        },
        '_source': {
            'exclude': ['text']
        }
    }
    for row in scan(es, index=entity_index, query=q):
        entity = row['_source']
        entity['id'] = row['_id']
        export_entity(ctx, entity)
Code example #29
    def parse(self, inputsource, sink, **kwargs):
        """Parse f as an N-Triples file."""
        assert sink.store.context_aware, ("NQuadsParser must be given"
                                          " a context aware store.")
        self.sink = ConjunctiveGraph(store=sink.store)

        source = inputsource.getByteStream()

        if not hasattr(source, 'read'):
            raise ParseError("Item to parse must be a file-like object.")

        source = getreader('utf-8')(source)

        self.file = source
        self.buffer = ''
        while True:
            self.line = __line = self.readline()
            if self.line is None:
                break
            try:
                self.parseline()
            except ParseError as msg:
                raise ParseError("Invalid line (%s):\n%r" % (msg, __line))
Code example #30
File: nquads.py Project: ClassWizard/PodLockParser
    def parse(self, inputsource, sink, bnode_context=None, **kwargs):
        """
        Parse inputsource as an N-Quads file.

        :type inputsource: `rdflib.parser.InputSource`
        :param inputsource: the source of N-Quads-formatted data
        :type sink: `rdflib.graph.Graph`
        :param sink: where to send parsed triples
        :type bnode_context: `dict`, optional
        :param bnode_context: a dict mapping blank node identifiers to `~rdflib.term.BNode` instances.
                              See `.NTriplesParser.parse`
        """
        assert sink.store.context_aware, ("NQuadsParser must be given"
                                          " a context aware store.")
        self.sink = ConjunctiveGraph(store=sink.store,
                                     identifier=sink.identifier)

        source = inputsource.getCharacterStream()
        if not source:
            source = inputsource.getByteStream()
            source = getreader("utf-8")(source)

        if not hasattr(source, "read"):
            raise ParseError("Item to parse must be a file-like object.")

        self.file = source
        self.buffer = ""
        while True:
            self.line = __line = self.readline()
            if self.line is None:
                break
            try:
                self.parseline(bnode_context)
            except ParseError as msg:
                raise ParseError("Invalid line (%s):\n%r" % (msg, __line))

        return self.sink
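
Callers rarely instantiate this parser directly; rdflib's plugin registry routes Graph.parse to it, so a minimal round trip looks like this:

from rdflib import ConjunctiveGraph

g = ConjunctiveGraph()
g.parse(data='<urn:a> <urn:b> <urn:c> <urn:g> .\n', format='nquads')
assert len(g) == 1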