예제 #1
0
    def test_issue_250(self):
        """Regression test for https://github.com/RDFLib/rdflib/issues/250.

        Binding a default namespace on a ConjunctiveGraph used to make the
        TriX serializer declare xmlns="..." twice on the root element: once
        for the user's default namespace and once for the TriX namespace
        itself.  Only the TriX declaration should appear in the output.
        """
        graph = ConjunctiveGraph()
        graph.bind(None, "http://defaultnamespace")
        serialized = graph.serialize(format='trix').decode('UTF-8')
        # The user's default namespace must NOT be emitted as xmlns=...
        self.assertTrue('xmlns="http://defaultnamespace"' not in serialized,
                        serialized)
        # ...while the TriX namespace must be.
        self.assertTrue(
            'xmlns="http://www.w3.org/2004/03/trix/trix-1/' in serialized,
            serialized)
예제 #2
0
def make_rdf_graph(movies):
    """Build a ConjunctiveGraph describing movies, their directors and actors.

    Each movie gets a title, a link to its director, and one blank
    "performance" node per actor connecting the actor to the film.
    """
    graph = ConjunctiveGraph()
    graph.bind('fb', FB)
    graph.bind('dc', DC)

    for movie in movies:
        # Movie node with its title.
        film = IVA_MOVIE[movie['id']]
        graph.add((film, DC['title'], Literal(movie['title'])))

        # Director node: named and linked to the movie.
        director = IVA_PERSON[movie['director']['id']]
        graph.add((film, FB['film.film.directed_by'], director))
        graph.add((director, DC['title'], Literal(movie['director']['name'])))

        for actor in movie['actors']:
            # A performance is a blank node (no URI of its own) joining
            # the actor to the film.
            performance = BNode()
            actor_node = IVA_PERSON[actor['id']]
            graph.add((actor_node, DC['title'], Literal(actor['name'])))
            graph.add((performance, FB['film.performance.actor'], actor_node))
            # If the role name were available it could be attached here, e.g.
            # graph.add((performance, FB['film.performance.role'], Literal('Carrie Bradshaw')))
            graph.add((film, FB['film.film.performances'], performance))

    return graph
예제 #3
0
    def create_ontology(self, tr, predicate, subClass, address, booktitle):
        """Build the JFPO property ontology and the subject graph for one book.

        :param tr: term used as the subject class name.
        :param predicate: property linking the term to its subclass.
        :param subClass: subclass name, also used to build its URI.
        :param address: book source address (stored as dc:source).
        :param booktitle: book title (stored as dc:title).

        Side effects: writes 'trtst.rdf' and 'test2.owl' (both Turtle).
        """
        LDT = Namespace("http://www.JceFinalProjectOntology.com/")
        ut = Namespace("http://www.JceFinalProjectOntology.com/subject/#")
        usubClass = URIRef("http://www.JceFinalProjectOntology.com/subject/" +
                           subClass.strip() + '#')
        print(ut)
        print(usubClass)

        # Both graphs share one in-memory store so they commit together.
        store = IOMemory()

        sty = LDT[predicate]
        g = rdflib.Graph(store=store, identifier=LDT)
        t = ConjunctiveGraph(store=store, identifier=ut)
        print('Triples in graph before add: ', len(t))

        # Property ontology: one rdf:Property per known symbol.
        g.add((URIRef(LDT), RDF.type, RDFS.Class))
        g.add((URIRef(LDT), RDFS.label, Literal("JFPO")))
        g.add((URIRef(LDT), RDFS.comment, Literal('class of all properties')))
        for v in self.symbols.values():
            if self.if_compoTerm(v) == True:
                vs = self.splitTerms(v)[0]
            else:
                vs = v
            g.add((LDT[vs], RDF.type, RDF.Property))
            g.add((LDT[vs], RDFS.label, Literal('has' + vs)))
            g.add((LDT[vs], RDFS.comment, Literal(v)))
            g.add((LDT[vs], RDFS.range, OWL.Class))
            g.add((LDT[vs], RDFS.domain, Literal(vs)))
        g.bind('JFPO', LDT)
        g.serialize('trtst.rdf', format='turtle')

        # Subject graph: the term, its metadata and its subclass.
        t.add((ut[tr], RDF.type, OWL.Class))
        t.add((ut[tr], RDFS.subClassOf, OWL.Thing))
        t.add((ut[tr], RDFS.label, Literal(tr)))
        t.add((ut[tr], DC.title, Literal(booktitle)))
        t.add((ut[tr], DC.source, Literal(address)))
        # NOTE: the original added this same triple three times; RDF graphs
        # are sets, so the duplicates were no-ops and have been removed.
        t.add((ut[tr], DC[predicate], URIRef(usubClass)))
        t.add((ut[tr], LDT[predicate], RDF.Property))
        t.add((ut[tr], LDT.term(predicate), URIRef(usubClass)))

        t.add((usubClass, RDF.type, OWL.Class))
        t.add((usubClass, RDFS.subClassOf, OWL.Thing))
        t.add((usubClass, RDFS.subClassOf, URIRef(sty)))
        t.add((usubClass, RDFS.label, Literal(subClass)))

        # BUG FIX: the namespace URI had a doubled scheme
        # ("http://http://purl.org/...").
        t.bind("dc", "http://purl.org/dc/elements/1.1/")
        t.bind('JFPO', LDT)
        t.commit()

        t.serialize('test2.owl', format='turtle')
예제 #4
0
    def test_issue_250(self):
        """Regression test for https://github.com/RDFLib/rdflib/issues/250.

        A ConjunctiveGraph with a bound default namespace used to make the
        TriX serializer declare xmlns="..." twice on the root element: once
        for the user's namespace and once for TriX's own.  Only the TriX
        declaration should survive.
        """
        graph = ConjunctiveGraph()
        graph.bind(None, "http://defaultnamespace")
        serialized = graph.serialize(format="trix")
        # User namespace must not appear as a default xmlns declaration...
        self.assertTrue('xmlns="http://defaultnamespace"' not in serialized,
                        serialized)
        # ...while the TriX namespace must.
        self.assertTrue(
            'xmlns="http://www.w3.org/2004/03/trix/trix-1/' in serialized,
            serialized)
    def create_ontology(self, tr, predicate, subClass, address, booktitle):
        """Build the JFPO property ontology and the subject graph for one book.

        :param tr: term used as the subject class name.
        :param predicate: property linking the term to its subclass.
        :param subClass: subclass name, also used to build its URI.
        :param address: book source address (stored as dc:source).
        :param booktitle: book title (stored as dc:title).

        Side effects: writes 'trtst.rdf' and 'test2.owl' (both Turtle).
        """
        LDT = Namespace("http://www.JceFinalProjectOntology.com/")
        ut = Namespace("http://www.JceFinalProjectOntology.com/subject/#")
        usubClass = URIRef("http://www.JceFinalProjectOntology.com/subject/"
                           + subClass.strip() + '#')
        print(ut)
        print(usubClass)

        # One shared in-memory store backs both graphs.
        store = IOMemory()

        sty = LDT[predicate]
        g = rdflib.Graph(store=store, identifier=LDT)
        t = ConjunctiveGraph(store=store, identifier=ut)
        print('Triples in graph before add: ', len(t))

        # Property ontology: one rdf:Property per known symbol.
        g.add((URIRef(LDT), RDF.type, RDFS.Class))
        g.add((URIRef(LDT), RDFS.label, Literal("JFPO")))
        g.add((URIRef(LDT), RDFS.comment, Literal('class of all properties')))
        for v in self.symbols.values():
            if self.if_compoTerm(v) == True:
                vs = self.splitTerms(v)[0]
            else:
                vs = v
            g.add((LDT[vs], RDF.type, RDF.Property))
            g.add((LDT[vs], RDFS.label, Literal('has' + vs)))
            g.add((LDT[vs], RDFS.comment, Literal(v)))
            g.add((LDT[vs], RDFS.range, OWL.Class))
            g.add((LDT[vs], RDFS.domain, Literal(vs)))
        g.bind('JFPO', LDT)
        g.serialize('trtst.rdf', format='turtle')

        # Subject graph: the term, its metadata and its subclass.
        t.add((ut[tr], RDF.type, OWL.Class))
        t.add((ut[tr], RDFS.subClassOf, OWL.Thing))
        t.add((ut[tr], RDFS.label, Literal(tr)))
        t.add((ut[tr], DC.title, Literal(booktitle)))
        t.add((ut[tr], DC.source, Literal(address)))
        # NOTE: this triple was added three times in the original; RDF graphs
        # are sets, so the duplicates were no-ops and have been removed.
        t.add((ut[tr], DC[predicate], URIRef(usubClass)))
        t.add((ut[tr], LDT[predicate], RDF.Property))
        t.add((ut[tr], LDT.term(predicate), URIRef(usubClass)))

        t.add((usubClass, RDF.type, OWL.Class))
        t.add((usubClass, RDFS.subClassOf, OWL.Thing))
        t.add((usubClass, RDFS.subClassOf, URIRef(sty)))
        t.add((usubClass, RDFS.label, Literal(subClass)))

        # BUG FIX: the namespace URI had a doubled scheme
        # ("http://http://purl.org/...").
        t.bind("dc", "http://purl.org/dc/elements/1.1/")
        t.bind('JFPO', LDT)
        t.commit()

        t.serialize('test2.owl', format='turtle')
예제 #6
0
    def build_graph(self):
        """Construct the SIOC/FOAF RDF graph for this message and store it
        via set_graph()."""
        graph = ConjunctiveGraph()
        for prefix, namespace in (('sioc', SIOC), ('foaf', FOAF),
                                  ('rdfs', RDFS), ('dct', DCT),
                                  ('mvcb', MVCB)):
            graph.bind(prefix, namespace)

        generator = URIRef("http://swaml.berlios.de/doap#swaml")
        doc = URIRef(self.base)
        graph.add((doc, RDF.type, FOAF["Document"]))
        #FIXME: this should go out of this api
        label = ("RDF version of the message '%s' retrieved from MarkMail API"
                 % self.key)
        graph.add((doc, RDFS.label, Literal(label)))
        graph.add((doc, MVCB.generatorAgent, generator))

        message = URIRef(self.get_uri())
        graph.add((message, RDF.type, SIOC.Post))
        graph.add((message, RDF.type, SIOCT.MailMessage))
        graph.add((doc, FOAF.primaryTopic, message))

        graph.add((message, SIOC.id, Literal(self.key)))
        graph.add((message, SIOC.link,
                   URIRef("http://markmail.org/message/%s" % self.key)))
        #graph.add((message, SIOC.has_container,URIRef(self.config.get('base')+'forum')))
        #graph.add((message, SIOC.has_creator, URIRef(self.getSender().getUri())))
        graph.add((message, DCT.title, Literal(self.title)))
        #graph.add((message, DCT.created, Literal(self.getDate(), datatype=XSD[u'dateTime'])))
        graph.add((message, SIOC.content, Literal(self.content)))

        self.set_graph(graph)
예제 #7
0
    def build_graph(self):
        """Build this message's SIOC/FOAF description and hand the finished
        graph to set_graph()."""
        g = ConjunctiveGraph()
        g.bind('sioc', SIOC)
        g.bind('foaf', FOAF)
        g.bind('rdfs', RDFS)
        g.bind('dct', DCT)
        g.bind('mvcb', MVCB)

        swaml_agent = URIRef("http://swaml.berlios.de/doap#swaml")
        document = URIRef(self.base)
        g.add((document, RDF.type, FOAF["Document"]))
        #FIXME: this should go out of this api
        g.add((document, RDFS.label, Literal(
            "RDF version of the message '%s' retrieved from MarkMail API" %
            self.key)))
        g.add((document, MVCB.generatorAgent, swaml_agent))

        msg = URIRef(self.get_uri())
        for triple in ((msg, RDF.type, SIOC.Post),
                       (msg, RDF.type, SIOCT.MailMessage),
                       (document, FOAF.primaryTopic, msg),
                       (msg, SIOC.id, Literal(self.key)),
                       (msg, SIOC.link,
                        URIRef("http://markmail.org/message/%s" % self.key)),
                       (msg, DCT.title, Literal(self.title)),
                       (msg, SIOC.content, Literal(self.content))):
            g.add(triple)
        #graph.add((message, SIOC.has_container,URIRef(self.config.get('base')+'forum')))
        #graph.add((message, SIOC.has_creator, URIRef(self.getSender().getUri())))
        #graph.add((message, DCT.created, Literal(self.getDate(), datatype=XSD[u'dateTime'])))

        self.set_graph(g)
예제 #8
0
파일: sioc.py 프로젝트: haxwithaxe/couchit
class SiocWiki(object):
    """Accumulates a SIOC RDF description of a wiki site and its pages."""

    def __init__(self, uri, title=None, created=None):
        self.graph = Graph()
        for prefix, namespace in (('sioc', SIOC), ('dc', DC),
                                  ('dcterms', DCTERMS), ('rdf', RDF)):
            self.graph.bind(prefix, namespace)
        self._add_site(uri, title)

    def _add_site(self, uri, title):
        """Add the sioc:Site node for the wiki and return it."""
        site = URIRef(uri)
        self.graph.add((site, RDF.type, SIOC['Site']))
        self.graph.add((site, DC['title'], Literal(title)))
        return site

    def add_page(self, content, title, uri, updated):
        """Record one wiki page: its link, title, content and update time."""
        page = URIRef(uri)
        self.graph.add((page, RDF.type, SIOC['Wiki']))
        self.graph.add((page, SIOC['link'], URIRef(uri)))
        self.graph.add((page, DC['title'], Literal(title)))
        self.graph.add((page, DC['content'], Literal(content)))
        self.graph.add((page, DCTERMS['updated'], Literal(updated)))

    def to_str(self):
        """Serialize the accumulated graph as pretty-printed RDF/XML."""
        return self.graph.serialize(format="pretty-xml")
예제 #9
0
파일: provrdf.py 프로젝트: vreuter/prov
    def serialize(self, stream=None, rdf_format='trig', **kwargs):
        """
        Serializes a :class:`~prov.model.ProvDocument` instance to
        `Prov-O <https://www.w3.org/TR/prov-o/>`_.

        :param stream: Where to save the output.
        :param rdf_format: Any rdflib serialization format (default 'trig').
        """
        container = self.encode_document(self.document)
        newargs = kwargs.copy()
        newargs['format'] = rdf_format

        if newargs['format'] == 'trig':
            # Round-trip through N-Quads so named graphs survive, then
            # re-bind the document's namespaces on the conjunctive graph.
            gr = ConjunctiveGraph()
            gr.context_aware = True
            gr.parse(data=container.serialize(format='nquads'),
                     format='nquads')
            for namespace in container.namespaces():
                if namespace not in list(gr.namespaces()):
                    gr.bind(namespace[0], namespace[1])
            container = gr

        # NOTE(review): the original had a six.PY2 branch whose body was
        # byte-identical to the else branch; the dead duplication was merged.
        buf = io.BytesIO()
        try:
            container.serialize(buf, **newargs)
            buf.seek(0, 0)
            # rdflib writes bytes. If the object to stream to is a text
            # object it must be decoded. We assume utf-8 here which should
            # be fine for almost every case.
            if isinstance(stream, io.TextIOBase):
                stream.write(buf.read().decode('utf-8'))
            else:
                stream.write(buf.read())
        finally:
            buf.close()
예제 #10
0
파일: provrdf.py 프로젝트: cmaumet/prov
    def serialize(self, stream=None, rdf_format='trig', **kwargs):
        """
        Serializes a :class:`~prov.model.ProvDocument` instance to
        `Prov-O <https://www.w3.org/TR/prov-o/>`_.

        :param stream: Where to save the output.
        :param rdf_format: Any rdflib serialization format (default 'trig').
        """
        container = self.encode_document(self.document)
        newargs = kwargs.copy()
        newargs['format'] = rdf_format

        if newargs['format'] == 'trig':
            # Round-trip through N-Quads so named graphs survive, then
            # re-bind the document's namespaces on the conjunctive graph.
            gr = ConjunctiveGraph()
            gr.context_aware = True
            gr.parse(data=container.serialize(format='nquads'), format='nquads')
            for namespace in container.namespaces():
                if namespace not in list(gr.namespaces()):
                    gr.bind(namespace[0], namespace[1])
            container = gr

        # NOTE(review): the original's six.PY2 branch was byte-identical to
        # its else branch; the dead duplication was merged into one path.
        buf = io.BytesIO()
        try:
            container.serialize(buf, **newargs)
            buf.seek(0, 0)
            # rdflib writes bytes. If the object to stream to is a text
            # object it must be decoded. We assume utf-8 here which should
            # be fine for almost every case.
            if isinstance(stream, io.TextIOBase):
                stream.write(buf.read().decode('utf-8'))
            else:
                stream.write(buf.read())
        finally:
            buf.close()
예제 #11
0
def rdf_description(name, notation='xml'):
    """Print the RDF description of the Objecttype node titled *name*.

    :param name: title of the Objecttype node to describe.
    :param notation: serialization format; one of "xml", "n3",
        "ntriples" or "trix".
    """
    default_graph_uri = "http://gstudio.gnowledge.org/rdfstore"

    # Open a previously created Sleepycat store, or create it if it doesn't
    # exist yet.  The store is addressed by plugin name, so the explicit
    # (unused) plugin.get(...) instantiation from the original was removed.
    graph = Graph(store="Sleepycat", identifier=URIRef(default_graph_uri))
    path = mkdtemp()
    rt = graph.open(path, create=False)
    if rt == NO_STORE:
        # There is no underlying Sleepycat infrastructure; create it.
        graph.open(path, create=True)
    else:
        assert rt == VALID_STORE, "The underlying store is corrupt"

    # One triple per model field, skipping internal bookkeeping fields.
    rdflib = Namespace('http://sbox.gnowledge.org/gstudio/')  # NOTE: shadows the rdflib module inside this function
    graph.bind("gstudio", "http://gnowledge.org/")
    exclusion_fields = [
        "id", "rght", "node_ptr_id", "image", "lft", "_state",
        "_altnames_cache", "_tags_cache", "nid_ptr_id", "_mptt_cached_fields"
    ]
    node = Objecttype.objects.get(title=name)
    node_dict = node.__dict__

    subject = str(node_dict['id'])
    for key in node_dict:
        if key not in exclusion_fields:
            predicate = str(key)
            pobject = str(node_dict[predicate])
            graph.add((rdflib[subject], rdflib[predicate], Literal(pobject)))

    graph.commit()

    # BUG FIX: was a Python-2 print statement; parenthesized form works on
    # both Python 2 and 3 for a single argument.
    print(graph.serialize(format=notation))

    graph.close()
예제 #12
0
def rdf_description(name, notation='xml'):
    """Print the RDF description of the Objecttype node titled *name*.

    :param name: title of the Objecttype node to describe.
    :param notation: serialization format; one of "xml", "n3",
        "ntriples" or "trix".
    """
    default_graph_uri = "http://gstudio.gnowledge.org/rdfstore"

    # Open a previously created Sleepycat store, or create it if it doesn't
    # exist yet.  The store is addressed by plugin name, so the explicit
    # (unused) plugin.get(...) instantiation from the original was removed.
    graph = Graph(store="Sleepycat", identifier=URIRef(default_graph_uri))
    path = mkdtemp()
    rt = graph.open(path, create=False)
    if rt == NO_STORE:
        # There is no underlying Sleepycat infrastructure; create it.
        graph.open(path, create=True)
    else:
        assert rt == VALID_STORE, "The underlying store is corrupt"

    # One triple per model field, skipping internal bookkeeping fields.
    rdflib = Namespace('http://sbox.gnowledge.org/gstudio/')  # NOTE: shadows the rdflib module inside this function
    graph.bind("gstudio", "http://gnowledge.org/")
    exclusion_fields = [
        "id", "rght", "node_ptr_id", "image", "lft", "_state",
        "_altnames_cache", "_tags_cache", "nid_ptr_id", "_mptt_cached_fields"
    ]
    node = Objecttype.objects.get(title=name)
    node_dict = node.__dict__

    subject = str(node_dict['id'])
    for key in node_dict:
        if key not in exclusion_fields:
            predicate = str(key)
            pobject = str(node_dict[predicate])
            graph.add((rdflib[subject], rdflib[predicate], Literal(pobject)))

    graph.commit()

    # BUG FIX: was a Python-2 print statement; parenthesized form works on
    # both Python 2 and 3 for a single argument.
    print(graph.serialize(format=notation))

    graph.close()
예제 #13
0
    def parse(self, source, graph):
        """Parse N3 data from *source* into *graph* via an N3Processor.

        :param source: an InputSource-like object exposing getPublicId(),
            getSystemId() and getByteStream().
        :param graph: destination Graph; its store must be both
            context-aware and formula-aware.
        """
        # we're currently being handed a Graph, not a ConjunctiveGraph
        assert graph.store.context_aware # is this implied by formula_aware
        assert graph.store.formula_aware

        # Wrap the same store in a ConjunctiveGraph so contexts/formulae
        # created during parsing land in the caller's store.
        conj_graph = ConjunctiveGraph(store=graph.store)
        conj_graph.default_context = graph # TODO: CG __init__ should have a default_context arg
        # TODO: update N3Processor so that it can use conj_graph as the sink
        conj_graph.namespace_manager = graph.namespace_manager
        sink = Sink(conj_graph)
        # Disabled debug/experimental switches; left in place deliberately.
        if False:
            sink.quantify = lambda *args: True
            sink.flatten = lambda *args: True
        # Resolve relative URIs against the source's id, falling back to "".
        baseURI = graph.absolutize(source.getPublicId() or source.getSystemId() or "")
        p = N3Processor("nowhere", sink, baseURI=baseURI) # pass in "nowhere" so we can set data instead
        p.userkeys = True # bah
        p.data = source.getByteStream().read() # TODO getCharacterStream?
        p.parse()
        # Copy the prefix bindings discovered by the processor onto the graph.
        for prefix, namespace in p.bindings.items():
            conj_graph.bind(prefix, namespace)
예제 #14
0
def output_to_oac(fileid, dir, metadata, annotations):
    """Write the citation annotations for *fileid* as OAC RDF in Turtle.

    :param fileid: source text file name; ".txt" is replaced by ".ttl"
        to form the output name.
    :param dir: output directory prefix (joined by plain concatenation).
    :param metadata: dict with at least "citations" (list of dicts with
        "label" and "ctsurn") and "doi".
    :param annotations: unused; kept for interface compatibility.
    """
    # import libraries (kept function-local, as in the original example)
    from rdflib import Namespace, BNode, Literal, URIRef, RDF, RDFS
    from rdflib.graph import Graph, ConjunctiveGraph
    from rdflib.plugins.memory import IOMemory

    # declare namespaces
    oac = Namespace("http://www.w3.org/ns/oa#")
    perseus = Namespace("http://data.perseus.org/citations/")
    myanno = Namespace("http://hellespont.org/annotations/jstor")

    store = IOMemory()
    # initialise the graph and bind the namespaces
    g = ConjunctiveGraph(store=store)
    g.bind("oac", oac)
    g.bind("perseus", perseus)
    g.bind("myanno", myanno)

    # One oa:Annotation per citation, targeting the JSTOR article and
    # bodied by the Perseus CTS URN.
    for n, ann in enumerate(metadata["citations"]):
        anno1 = URIRef(myanno["#%i" % n])
        g.add((anno1, RDF.type, oac["Annotation"]))
        g.add((anno1, oac["hasTarget"],
               URIRef("%s%s" % ("http://jstor.org/stable/", metadata["doi"]))))
        g.add((anno1, RDFS.label, Literal(ann["label"])))
        g.add((anno1, oac["hasBody"], perseus[ann["ctsurn"]]))
        g.add((anno1, oac["motivatedBy"], oac["linking"]))

    fname = "%s%s" % (dir, fileid.replace(".txt", ".ttl"))
    # BUG FIX: use a context manager so the file is closed even if the
    # serialization or write raises.
    with open(fname, "w") as f:
        f.write(g.serialize(format="turtle"))
예제 #15
0
def gnis2rdf(gnisfilename, rdffilename):
    """Convert a GNIS '|'-delimited dump into an RDF file in N3 format.

    :param gnisfilename: path to the GNIS source file.
    :param rdffilename: destination path for the serialized triples.
    """
    store = ConjunctiveGraph(identifier="temp")

    # NOTE(review): opened in binary mode as in the original; that is the
    # Python-2 csv idiom -- Python 3's csv module wants text mode with
    # newline="".  Confirm the target interpreter before changing.
    # (The original's `if not gnisfile` check was unreachable -- open()
    # raises on failure instead of returning a falsy value -- so it was
    # removed, and the handle is now closed via a context manager.)
    with open(gnisfilename, "rb") as gnisfile:
        gnisreader = csv.reader(gnisfile, delimiter="|")

        # Drop first row (column headers).
        # BUG FIX: .next() is Python-2 only; next() works on both.
        next(gnisreader)

        for r in gnisreader:
            InsertGNISFeature(r, store)

    # Add prefixes to store
    store.bind("gnis", gnis)
    store.bind("geo", geo)

    print("Serializing rdf...")
    store.serialize(destination=rdffilename, format="n3")
    print("created " + str(len(store)) + " triples")
예제 #16
0
def gnis2rdf(gnisfilename, rdffilename):
    """Convert a GNIS '|'-delimited dump into an RDF file in N3 format.

    :param gnisfilename: path to the GNIS source file.
    :param rdffilename: destination path for the serialized triples.
    """
    store = ConjunctiveGraph(identifier='temp')

    # NOTE(review): opened in binary mode as in the original; that is the
    # Python-2 csv idiom -- Python 3's csv module wants text mode with
    # newline=''.  Confirm the target interpreter before changing.
    # (The original's `if not gnisfile` check was unreachable -- open()
    # raises on failure instead of returning a falsy value -- so it was
    # removed, and the handle is now closed via a context manager.)
    with open(gnisfilename, 'rb') as gnisfile:
        gnisreader = csv.reader(gnisfile, delimiter='|')

        # Drop first row (column headers).
        # BUG FIX: .next() is Python-2 only; next() works on both.
        next(gnisreader)

        for r in gnisreader:
            InsertGNISFeature(r, store)

    # Add prefixes to store
    store.bind('gnis', gnis)
    store.bind('geo', geo)

    print('Serializing rdf...')
    store.serialize(destination=rdffilename, format='n3')
    print('created ' + str(len(store)) + ' triples')
예제 #17
0
    def build_graph(self):
        """Construct the SIOC RDF graph for this thread and its messages,
        then store it via set_graph()."""
        graph = ConjunctiveGraph()
        graph.bind('sioc', SIOC)
        graph.bind('foaf', FOAF)
        graph.bind('rdfs', RDFS)
        graph.bind('dct', DCT)
        graph.bind('mvcb', MVCB)

        swaml = URIRef("http://swaml.berlios.de/doap#swaml")
        doc = URIRef("%s/thread/%s" % (self.base, self.key))
        graph.add((doc, RDF.type, FOAF["Document"]))
        graph.add(
            (doc, RDFS.label,
             Literal(
                 "RDF version of the thread '%s' retrieved from MarkMail API" %
                 self.key)))  #FIXME: this should go out of this api
        graph.add((doc, MVCB.generatorAgent, swaml))
        thread = URIRef("%s/thread/%s#thread" % (self.base, self.key))
        graph.add((thread, RDF.type, SIOC["Thread"]))
        graph.add((doc, FOAF["primaryTopic"], thread))

        graph.add((thread, SIOC.id, Literal(self.key)))
        graph.add((thread, SIOC.link, URIRef(self.homepage)))
        graph.add((thread, DCT.title, Literal(self.title)))
        # BUG FIX: Literal's second positional argument is the language tag,
        # not the datatype, and the XSD datatype is lowercase xsd:integer.
        graph.add((thread, SIOC.num_item,
                   Literal(len(self.messages), datatype=XSD.integer)))
        for message in self.messages:
            url = "%s/message/%s" % (self.base, message["id"])
            post = URIRef("%s#message" % url)
            graph.add((post, RDF.type, SIOC.Post))
            graph.add((post, RDFS.seeAlso, URIRef(url)))
            graph.add((thread, SIOC.container_of, post))
            graph.add((post, SIOC.has_container, thread))
            graph.add((post, SIOC.id, Literal(self.key)))
            graph.add((post, SIOC.link,
                       URIRef("http://markmail.org%s" % message["url"])))
            # Authors are modelled as blank-node user accounts.
            author = BNode()
            graph.add((post, SIOC.has_creator, author))
            graph.add((author, RDF.type, SIOC.UserAccount))
            graph.add((author, SIOC.name, Literal(message["from"])))
            graph.add((post, DCT.created,
                       Literal(message["date"], datatype=XSD.dateTime)))

        self.set_graph(graph)
예제 #18
0
def gatherAndExportUserData(repo_name, userId, userToken):
    """Collect a Facebook user's data into an RDF graph, write it to
    user.ttl, and push it to the given Sesame repository."""
    store = IOMemory()

    graph = ConjunctiveGraph(store=store)
    for prefix, namespace in (("av", ns), ("sc", sc), ("dbo", dbo), ("fb", fb)):
        graph.bind(prefix, namespace)

    # Populate the shared store with the user's data.
    createGraphForFBUser(store, repo_name, userId, userToken)

    serialized = graph.serialize(format="n3")
    with open("user.ttl", "w") as outfile:
        outfile.write(serialized)

    response = sesame.import_content(repo_name, serialized)
예제 #19
0
    def build_graph(self):
        """Construct the SIOC RDF graph for this thread and its messages,
        then store it via set_graph()."""
        graph = ConjunctiveGraph()
        graph.bind('sioc', SIOC)
        graph.bind('foaf', FOAF)
        graph.bind('rdfs', RDFS)
        graph.bind('dct', DCT)
        graph.bind('mvcb', MVCB)

        swaml = URIRef("http://swaml.berlios.de/doap#swaml")
        doc = URIRef("%s/thread/%s" % (self.base, self.key))
        graph.add((doc, RDF.type, FOAF["Document"]))
        #FIXME: this should go out of this api
        graph.add((doc, RDFS.label, Literal(
            "RDF version of the thread '%s' retrieved from MarkMail API" %
            self.key)))
        graph.add((doc, MVCB.generatorAgent, swaml))
        thread = URIRef("%s/thread/%s#thread" % (self.base, self.key))
        graph.add((thread, RDF.type, SIOC["Thread"]))
        graph.add((doc, FOAF["primaryTopic"], thread))

        graph.add((thread, SIOC.id, Literal(self.key)))
        graph.add((thread, SIOC.link, URIRef(self.homepage)))
        graph.add((thread, DCT.title, Literal(self.title)))
        # BUG FIX: Literal's second positional argument is the language tag,
        # not the datatype, and the XSD datatype is lowercase xsd:integer.
        graph.add((thread, SIOC.num_item,
                   Literal(len(self.messages), datatype=XSD.integer)))
        for message in self.messages:
            url = "%s/message/%s" % (self.base, message["id"])
            post = URIRef("%s#message" % url)
            graph.add((post, RDF.type, SIOC.Post))
            graph.add((post, RDFS.seeAlso, URIRef(url)))
            graph.add((thread, SIOC.container_of, post))
            graph.add((post, SIOC.has_container, thread))
            graph.add((post, SIOC.id, Literal(self.key)))
            graph.add((post, SIOC.link,
                       URIRef("http://markmail.org%s" % message["url"])))
            # Authors are modelled as blank-node user accounts.
            author = BNode()
            graph.add((post, SIOC.has_creator, author))
            graph.add((author, RDF.type, SIOC.UserAccount))
            graph.add((author, SIOC.name, Literal(message["from"])))
            graph.add((post, DCT.created,
                       Literal(message["date"], datatype=XSD.dateTime)))

        self.set_graph(graph)
예제 #20
0
def gatherAndExportGenreData(repo_name):
    """Fetch DBpedia music-genre data into an RDF graph, write it to
    genres.ttl, and push it to the given Sesame repository."""
    store = IOMemory()

    graph = ConjunctiveGraph(store=store)
    for prefix, namespace in (("av", ns), ("sc", sc), ("dbo", dbo), ("fb", fb)):
        graph.bind(prefix, namespace)

    # Pull genre names and their relations from DBpedia, then populate
    # the shared store.
    genreRelations = dbpedia.getDBpediaGenreRelations()
    genreNames = dbpedia.getDbpediaMusicGenres()
    createGraphForGenres(store, genreNames, genreRelations)

    serialized = graph.serialize(format="n3")

    with open("genres.ttl", "w") as outfile:
        outfile.write(serialized)

    response = sesame.import_content(repo_name, serialized)
예제 #21
0
def gatherAndExportGlobalData(repo_name):
    """Build the global events/venues/artists RDF graph from local Facebook
    data dumps and write it to global.ttl."""
    store = IOMemory()

    graph = ConjunctiveGraph(store=store)
    for prefix, namespace in (("av", ns), ("sc", sc), ("dbo", dbo), ("fb", fb)):
        graph.bind(prefix, namespace)

    # Load the previously scraped Facebook data from disk.
    venues = importVenuesFromFile("fb_data_stuff/venues.txt")
    events = importEventsFromDirectory("fb_data_stuff/events/")

    # Populate the shared store from the loaded data.
    createGraphForEvents(store, repo_name, events)
    createGraphForVenues(store, venues)
    createGraphForEventArtistsAndGenres(store, repo_name, events)

    serialized = graph.serialize(format="n3")

    with open("global.ttl", "w") as outfile:
        outfile.write(serialized)
예제 #22
0
파일: rdfize.py 프로젝트: edsu/muldicat
def convert(muldicat_csv):
    """Convert the MULDICAT spreadsheet (CSV) into a SKOS concept scheme.

    :param muldicat_csv: path to the UTF-8 encoded CSV export; rows are
        (Language, Label, Definition, See, See also, Source, Modified),
        optionally preceded by a legacy ID column.
    :return: a ConjunctiveGraph holding the concept scheme.
    :raises RuntimeError: on an unrecognised relation type in "See also".
    """
    g = ConjunctiveGraph()
    g.bind('skos', SKOS)
    g.bind('dct', DCT)

    # add concept scheme
    g.add((muldicat, RDF.type, SKOS.ConceptScheme))
    g.add((muldicat, DCT.title, Literal("Multilingual Dictionary of Cataloging Terms and Concepts", lang="en")))
    g.add((muldicat, DCT.description, Literal(description, datatype=XHTML)))
    g.add((muldicat, DCT.modified, Literal(datetime.date.today())))

    # work through each row of the spreadsheet, adding concepts as we go
    subject = None
    for row in unicode_csv_reader(codecs.open(muldicat_csv, encoding='utf-8')):
        # strip whitespace from row
        row = [cell.strip() for cell in row]

        # older version of the table had an unused ID column
        if len(row) == 8:
            # BUG FIX: was a Python-2 print statement.
            print("popping")
            row.pop(0)

        if row[0] == 'Language':
            continue
        elif row == [u'', u'', u'', u'', u'', u'', u'', u'']:
            continue
        else:
            lang, label, definition, see, see_also, source, modified = row
            lang = languages.get(lang, None)
            label = label.strip()
            if not lang or not label:
                continue

            # use the english label to form part of the URI for the concept
            # hopefully not too controversial?
            if lang == 'en':
                subject = make_id(label)

            g.add((subject, RDF.type, SKOS.Concept))
            g.add((subject, SKOS.prefLabel, Literal(label, lang=lang)))
            g.add((subject, SKOS.inScheme, muldicat))

            if definition:
                g.add((subject, SKOS.definition, Literal(definition, lang=lang)))

            if source:
                g.add((subject, DCT.source, Literal(source, lang=lang)))

            if modified:
                date = datetime.datetime.strptime(modified, '%Y%m%d').date()

                # only record the latest last modification date for the concept
                existing_date = g.value(subject, DCT.modified)
                if not existing_date and date:
                    g.add((subject, DCT.modified, Literal(date)))
                elif date and existing_date and date > existing_date.toPython():
                    g.remove((subject, DCT.modified, existing_date))
                    g.add((subject, DCT.modified, Literal(date)))

            for alt_label in see.split(','):
                if not alt_label:
                    continue
                alt_label = alt_label.strip()
                g.add((subject, SKOS.altLabel, Literal(alt_label, lang=lang)))

            # link up relations if we have the english label
            if lang == 'en' and see_also:
                for s in see_also.split(','):
                    s = s.strip()
                    match = re.match(r'(.*) \[(.*?)\]', s)
                    if not match:
                        continue
                    label, reltype = match.groups()
                    reltype = reltype.strip('[]')  # some are formatted wrong

                    # renamed from 'object' -- it shadowed the builtin
                    obj = make_id(label)

                    if reltype == 'BT':
                        g.add((subject, SKOS.broader, obj))
                        g.add((obj, SKOS.narrower, subject))
                    elif reltype == 'NT':
                        g.add((subject, SKOS.narrower, obj))
                        g.add((obj, SKOS.broader, subject))
                    elif reltype == 'RT':
                        g.add((subject, SKOS.related, obj))
                        g.add((obj, SKOS.related, subject))
                    else:
                        raise RuntimeError(reltype)
    return g
예제 #23
0
def locationtoturtle(ellist, meta):
    """Serialize a list of locations plus dataset metadata as Turtle.

    Prints a CGI-style ``Content-type`` header followed by the Turtle
    (n3) serialization of the generated graph to stdout.

    :param ellist: iterable of location objects exposing ``id``, ``name``,
        ``address``, ``category``, ``lat``, ``long``, ``opening``,
        ``closing`` and optional ``tel``/``note`` attributes.
    :param meta: object exposing ``creator``, ``created``, ``version``,
        ``valid`` and ``source`` attributes.
    """
    rdf = Graph()
    cs = Namespace("http://cs.unibo.it/ontology/")
    colon = Namespace("http://www.essepuntato.it/resource/")
    dcterms = Namespace("http://purl.org/dc/terms/")
    xsd = Namespace("http://www.w3.org/2001/XMLSchema#")
    this = Namespace("http://vitali.web.cs.unibo.it/twiki/pub/TechWeb12/DataSource2/posteBO2011.ttl#")
    vcard = Namespace("http://www.w3.org/2006/vcard/ns#")
    rdf.bind("vcard", vcard)
    rdf.bind("cs", cs)
    rdf.bind("", colon)
    rdf.bind("dcterms", dcterms)
    rdf.bind("xsd", xsd)
    rdf.bind("this", this)
    # Dataset-level metadata; dates are typed as xsd:date.
    rdf.add((this["metadata"], dcterms["creator"], Literal(meta.creator)))
    rdf.add((this["metadata"], dcterms["created"], Literal(meta.created, datatype=XSD.date)))
    rdf.add((this["metadata"], dcterms["description"], Literal(meta.version)))
    rdf.add((this["metadata"], dcterms["valid"], Literal(meta.valid, datatype=XSD.date)))
    rdf.add((this["metadata"], dcterms["source"], Literal(meta.source)))
    for location in ellist:
        subj = colon[location.id]
        rdf.add((subj, vcard["fn"], Literal(location.name)))
        rdf.add((subj, vcard["extended-address"], Literal(location.address)))
        rdf.add((subj, vcard["category"], Literal(location.category)))
        rdf.add((subj, vcard["latitude"], Literal(location.lat)))
        rdf.add((subj, vcard["longitude"], Literal(location.long)))
        if location.tel:  # optional contact details
            rdf.add((subj, vcard["tel"], Literal(location.tel)))
        if location.note:
            rdf.add((subj, vcard["note"], Literal(location.note)))
        rdf.add((subj, cs["opening"], Literal(location.opening)))
        rdf.add((subj, cs["closing"], Literal(location.closing)))
    print("Content-type: text/turtle; charset=UTF-8\n")
    # Was a Python-2-only print statement; the single-argument function
    # form produces identical output on both Python 2 and 3.
    print(rdf.serialize(format="n3"))
예제 #24
0
# Subject URIs for the additional sensor nodes (SNode1 is defined elsewhere).
SNode2 = URIRef("http://homesensor.com/Node2/")
SNode3 = URIRef("http://homesensor.com/Node3/")
SNode4 = URIRef("http://homesensor.com/Node4/")


mystore_graph_uri = "http://homesensor.com/mystore"
configString = "/var/tmp/mystore"

# Obtain the Sleepycat store plugin and open the existing database.
mystore = plugin.get('Sleepycat', Store)('mystore')
mystore.open("ay_folder", create=False)

# Conjunctive view over the store, with the home-sensor prefix bound.
g = ConjunctiveGraph(store=mystore)
g.bind("homesensor", hs)

# Graph backed by the same store.
gNode1 = Graph(store=mystore)

# Record the readings for sensor node 1 (one triple per property).
_node1_readings = [
    ('hasTemperature', '64'),
    ('hasLight', '50'),
    ('hasHumidity', '55'),
    ('Located', ''),
    ('StartTime', "2012-06-19T01:52:02Z"),
    ('EndTime', "2012-06-19T01:52:02Z"),
]
for _prop, _value in _node1_readings:
    gNode1.add((SNode1, hs[_prop], Literal(_value)))
예제 #25
0
def make_graph():
    """Build the InPhO RDF graph.

    Registers the namespace prefixes, asserts pairwise OWL disjointness
    between the top-level entity types, then delegates population of
    thinkers, ideas and journals to the dedicated helpers.
    """
    g = Graph()

    # Register all namespace prefixes in one table-driven pass.
    prefixes = [
        ("inpho", "http://inpho.cogs.indiana.edu/"),
        ("thinker", "http://inpho.cogs.indiana.edu/thinker/"),
        ("journal", "http://inpho.cogs.indiana.edu/journal/"),
        ("foaf", "http://xmlns.com/foaf/0.1/"),
        ("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#"),
        ("rdfs", "http://www.w3.org/TR/rdf-schema/#"),
        ("owl", "http://www.w3.org/2002/07/owl#"),
        ("idea", "http://inpho.cogs.indiana.edu/idea/"),
        ("skos", "http://www.w3.org/2004/02/skos/core#"),
        ("db", "http://dbpedia.org/"),
        ("dc", "http://purl.org/dc/elements/1.1/"),
        # user namespace currently doesn't exist?
        ("user", "http://inpho.cogs.indiana.edu/user/"),
    ]
    for prefix, uri in prefixes:
        g.bind(prefix, uri)

    # Declare every pair of top-level entity types mutually disjoint.
    disjoint_objects = ["thinker", "journal", "idea", "user"]
    for a, b in combinations(disjoint_objects, 2):
        g.add((inpho[a], owl['disjointWith'], inpho[b]))

    g = populate_thinkers(g)
    g = populate_ideas(g)
    g = populate_journals(g)

    return g
예제 #26
0
if __name__ == "__main__":

    LOVE = Namespace("http://love.com#")
    LOVERS = Namespace("http://love.com/lovers/")

    # Subject URIs, and the (identical) context identifiers used to name
    # each person's graph inside the shared store.
    mary = URIRef("http://love.com/lovers/mary")
    john = URIRef("http://love.com/lovers/john")
    cmary = URIRef("http://love.com/lovers/mary")
    cjohn = URIRef("http://love.com/lovers/john")

    store = Memory()

    # A conjunctive graph spans every named graph held in the store.
    g = ConjunctiveGraph(store=store)
    g.bind("love", LOVE)
    g.bind("lovers", LOVERS)

    # Mary's named graph: her name, plus the bare URI of the person she
    # loves (her graph does not know his cute name).
    gmary = Graph(store=store, identifier=cmary)
    gmary.add((mary, LOVE.hasName, Literal("Mary")))
    gmary.add((mary, LOVE.loves, john))

    # John's named graph carries only his cute name.
    gjohn = Graph(store=store, identifier=cjohn)
    gjohn.add((john, LOVE.hasCuteName, Literal("Johnny Boy")))

    # List the contexts (named graphs) the conjunctive graph can see.
    print("Contexts:")
예제 #27
0
        pass
    

if __name__ == "__main__":

    ns = Namespace("http://love.com#")

    # Subject URIs; cmary/cjohn double as the named-graph identifiers.
    mary = URIRef("http://love.com/lovers/mary#")
    john = URIRef("http://love.com/lovers/john#")
    cmary = URIRef("http://love.com/lovers/mary#")
    cjohn = URIRef("http://love.com/lovers/john#")

    store = IOMemory()

    # Conjunctive graph over the shared in-memory store.
    g = ConjunctiveGraph(store=store)
    g.bind("loves", ns)

    # Mary's named graph holds her name and who she loves.
    gmary = Graph(store=store, identifier=cmary)
    gmary.add((mary, ns['hasName'], Literal("Mary")))
    gmary.add((mary, ns['loves'], john))

    # John's named graph holds just his name.
    gjohn = Graph(store=store, identifier=cjohn)
    gjohn.add((john, ns['hasName'], Literal("John")))

    # Run the web services and the main loop as concurrent processes.
    p1 = Process(target=webservices)
    p2 = Process(target=mainloop)
    p1.start()
    p2.start()
예제 #28
0
class RDFAggregator(Aggregator):
    # Aggregates RDF triples from domain objects into a single
    # ConjunctiveGraph and serializes the union in several formats.
    def __init__(self, *args, **kw):
        """Initialize the RDF aggregator.

        Creates the backing ConjunctiveGraph and binds the namespace
        prefixes used by the serializations.
        """
        super(RDFAggregator, self).__init__('csv', *args, **kw)
        self.aggregator = ConjunctiveGraph()
        self.aggregator.bind(u'owl', OWL)
        self.aggregator.bind(u'lic', LIC)
        self.aggregator.bind(u'siorg', SIORG)
        self.aggregator.bind(u'siafi', SIAFI)
        self.aggregator.bind(u'geo', GEO)
        self.aggregator.bind(u'dbpedia', DBPEDIA)
        self.aggregator.bind(u'dbprop', DBPROP)
        self.aggregator.bind(u'dbo', DBONT)
        self.aggregator.bind(u'void', VOID)
        self.aggregator.bind(u'foaf', FOAF)
        self.aggregator.bind(u'vcard', VCARD)
    def add(self, obj):
        """Add the object's triples to the aggregating graph.

        If the object exposes a ``repr_rdf()`` method, its triples are
        used as-is; otherwise triples are derived heuristically from the
        object's class attributes (``__class_uri__``, ``__rdf_prop__``)
        and its exposed properties.
        """
        if getattr(obj, 'repr_rdf', None):
            # the object provides its own RDF representation method
            triplas = obj.repr_rdf()
            for t in triplas:
                self.aggregator.add(t)
        else:
            # the object has no such method; build triples by heuristics
            subject = obj.uri
            doc = obj.doc_uri
            if doc == subject:
                doc = None
            class_uri = getattr(obj.__class__, '__class_uri__', None)
            expostos = getattr(obj.__class__,self.atributo_serializar, set())
            prop_map = getattr(obj.__class__, '__rdf_prop__', {})
            g = self.aggregator
            #  class (rdf:type)
            if class_uri:
                g.add((URIRef(subject), RDF['type'], URIRef(class_uri)))
            # document describing the subject (FOAF primary topic links)
            if doc:
                g.add((URIRef(doc), RDF['type'], FOAF['Document']))
                g.add((URIRef(subject), FOAF['isPrimaryTopicOf'], URIRef(doc)))
                g.add((URIRef(doc), FOAF['primaryTopic'], URIRef(subject)))
            #  name -> rdfs:label, unless a custom mapping overrides it
            if getattr(obj, 'nome', None):
                if getattr(obj, '__rdf_prop__', None) is None or \
                        obj.__rdf_prop__.get('nome', None) is None:
                    g.add((URIRef(subject), RDFS['label'], Literal(obj.nome)))
            #  geographic location (WGS84 lat/long)
            if getattr(obj, 'geo_ponto', None):
                ponto = obj.geo_ponto
                if ponto:
                    g.add((URIRef(subject), GEO['lat'], Literal(ponto['lat'])))
                    g.add((URIRef(subject), GEO['long'], Literal(ponto['lon'])))
            #  remaining exposed properties, driven by the prop_map entry
            for atr in expostos:
                if atr in prop_map.keys():
                    if getattr(prop_map[atr], '__call__', None):
                        # the property's triples are produced by a function
                        triplas = prop_map[atr](obj)
                        if triplas:
                            for t in triplas:
                                g.add(t)
                    elif prop_map[atr].get('metodo', None):
                        # the property's triples are produced by a method
                        m = getattr(obj, prop_map[atr]['metodo'])
                        triplas = m(atr)
                        if triplas:
                            for t in triplas:
                                g.add(t)
                    elif prop_map[atr].get('pred_uri', None):
                        # the property corresponds to a single triple
                        pred_uri = prop_map[atr]['pred_uri']
                        object = getattr(obj, atr, None)
                        if object:
                            obj_uri = getattr(object, 'uri', lambda: None)()
                            obj_cls_uri = getattr(object, '__class_uri__', None)
                            # does the object have a URI of its own?
                            if obj_uri:
                                g.add((URIRef(subject), URIRef(pred_uri), URIRef(obj_uri)))
                            elif obj_cls_uri:
                                # no URI but the class has one:
                                # try to create a blank node
                                bn = BNode()
                                g.add((URIRef(subject), URIRef(pred_uri), bn))
                                g.add((bn, RDF['type'], URIRef(obj_cls_uri)))
                                g.add((bn, RDFS['comment'], Literal(unicode(obj))))
                            else:
                                # otherwise, treat the property as a literal
                                g.add((URIRef(subject), URIRef(pred_uri), Literal(unicode(object))))
    def serialize(self, format="n3"):
        """Return the serialization of the RDF aggregator (the union of
        the graphs).

        Also asserts the VoID dataset/paging triples that link the
        current, next and overall dataset documents.
        """
        format_map = {
            'xml': 'xml',
            'rdf': 'pretty-xml',
            'rdf/xml': 'pretty-xml',
            'ttl': 'n3',
            'n3': 'n3',
            'nt': 'nt',
        }
        f = format_map.get(format, 'n3')
        current_url = self.dataset_split.get('current_url', '') # URL of the current document
        dataset_url = self.dataset_split.get('dataset_url', '') # general URL of the dataset
        next_url = self.dataset_split.get('next_url', '') # URL of the next page
        # the dataset URI: the document URL with "#dataset" appended
        if current_url:
            self.aggregator.add((URIRef(current_url+"#dataset"),RDF['type'],VOID['Dataset']))
            self.aggregator.add((URIRef(current_url),RDF['type'],VOID['DatasetDescription']))
            self.aggregator.add((URIRef(current_url),FOAF['primaryTopic'],URIRef(current_url+"#dataset")))
            if next_url:
                self.aggregator.add((URIRef(current_url+"#dataset"),RDFS['seeAlso'],URIRef(next_url+"#dataset")))
        if next_url:
            self.aggregator.add((URIRef(next_url+"#dataset"),RDF['type'], VOID['Dataset']))
            self.aggregator.add((URIRef(next_url),RDF['type'],VOID['DatasetDescription']))
            self.aggregator.add((URIRef(next_url),FOAF['primaryTopic'],URIRef(next_url+"#dataset")))
        if dataset_url:
            self.aggregator.add((URIRef(dataset_url+"#dataset"),RDF['type'], VOID['Dataset']))
            self.aggregator.add((URIRef(dataset_url),RDF['type'],VOID['DatasetDescription']))
            self.aggregator.add((URIRef(dataset_url),FOAF['primaryTopic'],URIRef(dataset_url+"#dataset")))
            if current_url:
                self.aggregator.add((URIRef(dataset_url+"#dataset"),VOID['subset'],URIRef(current_url+"#dataset")))
            if next_url:
                self.aggregator.add((URIRef(dataset_url+"#dataset"),VOID['subset'],URIRef(next_url+"#dataset")))
        return self.aggregator.serialize(format=f)
예제 #29
0
    def convert_gml(self, ttl_output_file, uri_part, specific_part):
        """
        Pelagios conversion GML to TTL.

        Builds a named graph of Slovenian districts (lawd:Place resources
        with year, country code, label and WKT geometry) from the GML in
        ``self.gml_file`` and serializes it as Turtle to the output file.

        @type       ttl_output_file: string
        @param      ttl_output_file: Absolute path to TTL output file
        @type       uri_part: string
        @param      uri_part: URI for the region to be displayed (e.g. http://earkdev.ait.ac.at/earkweb/sip2aip/working_area/sip2aip/34809536-b9f8-4c51-83d1-ef365ca658f5/)
        @type       specific_part: string
        @param      specific_part: Specific part that distinguishes the URI from other URIs (e.g. 1994)
        """
        cito_ns = Namespace("http://purl.org/spar/cito")
        cnt_ns = Namespace("http://www.w3.org/2011/content#")
        dcterms_ns = Namespace("http://purl.org/dc/terms/")
        foaf_ns = Namespace("http://xmlns.com/foaf/0.1/")
        geo_ns = Namespace("http://www.w3.org/2003/01/geo/wgs84_pos#")
        geosparql_ns = Namespace("http://www.opengis.net/ont/geosparql#")
        gn_ns = Namespace("http://www.geonames.org/ontology#")
        lawd_ns = Namespace("http://lawd.info/ontology/")
        rdfs_ns = Namespace("http://www.w3.org/2000/01/rdf-schema#")
        skos_ns = Namespace("http://www.w3.org/2004/02/skos/core#")

        slovenia = URIRef("http://earkdev.ait.ac.at/earkweb/sip2aip/working_area/sip2aip/5c6f5563-7665-4719-a2b6-4356ea033c1d/#place/Slovenia")

        store = IOMemory()

        g = ConjunctiveGraph(store=store)
        g.bind("cito", cito_ns)
        g.bind("cnt", cnt_ns)
        g.bind("dcterms", dcterms_ns)
        g.bind("foaf", foaf_ns)
        g.bind("geo", geo_ns)
        g.bind("geosparql", geosparql_ns)
        g.bind("gn", gn_ns)
        g.bind("lawd", lawd_ns)
        g.bind("rdfs", rdfs_ns)
        g.bind("skos", skos_ns)

        graph_slovenian_districts = Graph(store=store, identifier=slovenia)
        gml_to_wkt = GMLtoWKT(self.gml_file)
        district_included = {}
        i = 1
        # Python-2-only print statements replaced by the single-argument
        # print() function form, which behaves identically on 2 and 3.
        print("Processing GML file: %s" % self.gml_file)
        for district_wkt in gml_to_wkt.get_wkt_linear_ring():
            # Compute the whitespace-normalized technical name once and
            # reuse it (the original recomputed it three times).
            techname = whsp_to_unsc(district_wkt["name"])
            print("District %d: %s" % (i, techname))
            if techname not in district_included:
                district = URIRef("%s#place/%s/%s" % (uri_part, techname, specific_part))
                graph_slovenian_districts.add((district, RDF.type, lawd_ns.Place))
                graph_slovenian_districts.add((district, dcterms_ns['isPartOf'], slovenia))
                graph_slovenian_districts.add((district, dcterms_ns['temporal'], Literal(str(district_wkt["year"]))))
                graph_slovenian_districts.add((district, gn_ns['countryCode'], Literal(u'SI')))
                graph_slovenian_districts.add((district, rdfs_ns['label'], Literal(district_wkt["name"], lang=u'si')))
                polygons = BNode()
                graph_slovenian_districts.add((district, geosparql_ns['hasGeometry'], polygons))
                # NOTE(review): the WKT literal is added to the default
                # conjunctive graph (g), not to the named district graph —
                # preserved as-is since g spans the whole store; confirm
                # this was intentional.
                g.add((polygons, geosparql_ns['asWKT'], Literal(district_wkt["polygon"])))
                district_included[techname] = True
            i += 1
        # The with-statement closes the file; the original's extra
        # f.close() after the block was redundant and has been dropped.
        with open(ttl_output_file, 'w') as f:
            f.write(g.serialize(format='n3'))
예제 #30
0
:Woman = foo:FemaleAdult .
:Title a rdf:Property; = dc:title .



""" # --- End of primer code

# To make round-tripping easier: the graph object already registered some
# namespaces at init time. By default the main namespace would be the URI
# of the current working directory, so rebind the default prefix (and a
# few common vocabularies) to readable values before parsing.
# NOTE(review): `primer` and `mySource` are defined earlier in the file.

myNS = Namespace(URIRef('http://www.w3.org/2000/10/swap/Primer#'))
primer.bind('', myNS)
primer.bind('owl', 'http://www.w3.org/2002/07/owl#')
primer.bind('dc', 'http://purl.org/dc/elements/1.1/')
primer.bind('swap', 'http://www.w3.org/2000/10/swap/')
sourceCode = StringInputSource(mySource, myNS)

# Parse the N3 primer source into the graph.

primer.parse(sourceCode, format='n3')


# The graph is iterable as (subject, predicate, object) triples, so it can
# be queried directly into a list:

[(x, y, z) for x, y, z in primer]

# or spit it back out (mostly) the way we created it:
예제 #31
0
    def encode_container(self, bundle, container=None, identifier=None):
        """Encode a PROV bundle's records into an RDF graph.

        Creates (or extends) a ConjunctiveGraph: binds the bundle's
        namespaces, then walks every record, emitting rdf:type triples for
        identified records, qualified-relation bnodes where needed, and
        one triple per attribute after mapping PROV attribute names to
        their RDF predicates.

        :param bundle: the PROV bundle whose records are encoded.
        :param container: optional existing graph to add triples to; a new
            ConjunctiveGraph is created when None.
        :param identifier: graph identifier used when creating a new
            container.
        :return: the populated container graph.
        """
        if container is None:
            container = ConjunctiveGraph(identifier=identifier)
            nm = container.namespace_manager
            nm.bind('prov', PROV.uri)

        for namespace in bundle.namespaces:
            container.bind(namespace.prefix, namespace.uri)

        # Records without an identifier get a generated anonymous one.
        id_generator = AnonymousIDGenerator()
        real_or_anon_id = lambda record: record._identifier.uri if \
            record._identifier else id_generator.get_anon_id(record)

        for record in bundle._records:
            rec_type = record.get_type()
            if hasattr(record, 'identifier') and record.identifier:
                identifier = URIRef(six.text_type(real_or_anon_id(record)))
                container.add((identifier, RDF.type, URIRef(rec_type.uri)))
            else:
                identifier = None
            if record.attributes:
                bnode = None
                formal_objects = []
                used_objects = []
                all_attributes = list(record.formal_attributes) + list(
                    record.attributes)
                # A relation needs qualification when it carries extra
                # attributes or formal attributes beyond subject/object.
                formal_qualifiers = False
                for attrid, (attr, value) in enumerate(
                        list(record.formal_attributes)):
                    if (identifier is not None and value is not None) or \
                            (identifier is None and value is not None and attrid > 1):
                        formal_qualifiers = True
                has_qualifiers = len(
                    record.extra_attributes) > 0 or formal_qualifiers
                for idx, (attr, value) in enumerate(all_attributes):
                    if record.is_relation():
                        # Choose the relation predicate from the PROV or
                        # PROV-ONE name map, depending on the namespace.
                        if rec_type.namespace.prefix == 'prov':
                            pred = URIRef(PROV[PROV_N_MAP[rec_type]].uri)
                        else:
                            pred = URIRef(PROVONE[PROVONE_N_MAP[rec_type]].uri)
                        # create bnode relation
                        if bnode is None:
                            valid_formal_indices = set()
                            for idx, (key, val) in enumerate(
                                    record.formal_attributes):
                                formal_objects.append(key)
                                if val:
                                    valid_formal_indices.add(idx)
                            used_objects = [record.formal_attributes[0][0]]
                            subj = None
                            if record.formal_attributes[0][1]:
                                subj = URIRef(
                                    record.formal_attributes[0][1].uri)
                            if identifier is None and subj is not None:
                                try:
                                    obj_val = record.formal_attributes[1][1]
                                    obj_attr = URIRef(
                                        record.formal_attributes[1][0].uri)
                                    # TODO: Why is obj_attr above not used anywhere?
                                except IndexError:
                                    obj_val = None
                                if obj_val and (rec_type not in {
                                        PROV_END, PROV_START, PROV_USAGE,
                                        PROV_GENERATION, PROV_DERIVATION,
                                        PROV_ASSOCIATION, PROV_INVALIDATION
                                } or (valid_formal_indices == {0, 1}
                                      and len(record.extra_attributes) == 0)):
                                    used_objects.append(
                                        record.formal_attributes[1][0])
                                    obj_val = self.encode_rdf_representation(
                                        obj_val)
                                    # prov:alternateOf is encoded in the
                                    # reverse direction.
                                    if rec_type == PROV_ALTERNATE:
                                        subj, obj_val = obj_val, subj
                                    container.add((subj, pred, obj_val))
                                    if rec_type == PROV_MENTION:
                                        if record.formal_attributes[2][1]:
                                            used_objects.append(
                                                record.formal_attributes[2][0])
                                            obj_val = self.encode_rdf_representation(
                                                record.formal_attributes[2][1])
                                            container.add(
                                                (subj,
                                                 URIRef(
                                                     PROV['asInBundle'].uri),
                                                 obj_val))
                                        has_qualifiers = False
                            if rec_type in [PROV_ALTERNATE]:
                                continue
                            if subj and (has_qualifiers or identifier):
                                # Emit the prov:qualified<Relation> node;
                                # Revision/Quotation/PrimarySource subtypes
                                # override the generic derivation type.
                                qualifier = rec_type._localpart
                                rec_uri = rec_type.uri
                                for attr_name, val in record.extra_attributes:
                                    if attr_name == PROV['type']:
                                        if PROV['Revision'] == val or \
                                              PROV['Quotation'] == val or \
                                                PROV['PrimarySource'] == val:
                                            qualifier = val._localpart
                                            rec_uri = val.uri
                                            if identifier is not None:
                                                container.remove(
                                                    (identifier, RDF.type,
                                                     URIRef(rec_type.uri)))
                                QRole = URIRef(PROV['qualified' +
                                                    qualifier].uri)
                                if identifier is not None:
                                    container.add((subj, QRole, identifier))
                                else:
                                    bnode = identifier = BNode()
                                    container.add((subj, QRole, identifier))
                                    container.add(
                                        (identifier, RDF.type, URIRef(rec_uri)
                                         ))  # reset identifier to BNode
                        if value is not None and attr not in used_objects:
                            # Map the PROV attribute name to its RDF
                            # predicate, with per-record-type overrides.
                            if attr in formal_objects:
                                pred = attr2rdf(attr)
                            elif attr == PROV['role']:
                                pred = URIRef(PROV['hadRole'].uri)
                            elif attr == PROV['plan']:
                                pred = URIRef(PROV['hadPlan'].uri)
                            elif attr == PROV['type']:
                                pred = RDF.type
                            elif attr == PROV['label']:
                                pred = RDFS.label
                            elif isinstance(attr, pm.QualifiedName):
                                pred = URIRef(attr.uri)
                            else:
                                pred = self.encode_rdf_representation(attr)
                            if PROV['plan'].uri in pred:
                                pred = URIRef(PROV['hadPlan'].uri)
                            if PROV['informant'].uri in pred:
                                pred = URIRef(PROV['activity'].uri)
                            if PROV['responsible'].uri in pred:
                                pred = URIRef(PROV['agent'].uri)
                            if rec_type == PROV_DELEGATION and \
                                            PROV['activity'].uri in pred:
                                pred = URIRef(PROV['hadActivity'].uri)
                            if (rec_type in [PROV_END, PROV_START] and
                                            PROV['trigger'].uri in pred) or\
                                (rec_type in [PROV_USAGE] and
                                         PROV['used'].uri in pred):
                                pred = URIRef(PROV['entity'].uri)
                            if rec_type in [
                                    PROV_GENERATION, PROV_END, PROV_START,
                                    PROV_USAGE, PROV_INVALIDATION
                            ]:
                                if PROV['time'].uri in pred:
                                    pred = URIRef(PROV['atTime'].uri)
                                if PROV['ender'].uri in pred:
                                    pred = URIRef(PROV['hadActivity'].uri)
                                if PROV['starter'].uri in pred:
                                    pred = URIRef(PROV['hadActivity'].uri)
                                if PROV['location'].uri in pred:
                                    pred = URIRef(PROV['atLocation'].uri)
                            if rec_type in [PROV_ACTIVITY]:
                                if PROV_ATTR_STARTTIME in pred:
                                    pred = URIRef(PROV['startedAtTime'].uri)
                                if PROV_ATTR_ENDTIME in pred:
                                    pred = URIRef(PROV['endedAtTime'].uri)
                            if rec_type == PROV_DERIVATION:
                                if PROV['activity'].uri in pred:
                                    pred = URIRef(PROV['hadActivity'].uri)
                                if PROV['generation'].uri in pred:
                                    pred = URIRef(PROV['hadGeneration'].uri)
                                if PROV['usage'].uri in pred:
                                    pred = URIRef(PROV['hadUsage'].uri)
                                if PROV['usedEntity'].uri in pred:
                                    pred = URIRef(PROV['entity'].uri)
                            container.add(
                                (identifier, pred,
                                 self.encode_rdf_representation(value)))
                        continue
                    if value is None:
                        continue
                    # Non-relation records: encode each attribute value.
                    if isinstance(value, pm.ProvRecord):
                        obj = URIRef(six.text_type(real_or_anon_id(value)))
                    else:
                        #  Assuming this is a datetime value
                        obj = self.encode_rdf_representation(value)
                    if attr == PROV['location']:
                        pred = URIRef(PROV['atLocation'].uri)
                        if False and isinstance(value,
                                                (URIRef, pm.QualifiedName)):
                            if isinstance(value, pm.QualifiedName):
                                value = URIRef(value.uri)
                            container.add((identifier, pred, value))
                        else:
                            container.add(
                                (identifier, pred,
                                 self.encode_rdf_representation(obj)))
                        continue
                    if attr == PROV['type']:
                        pred = RDF.type
                    elif attr == PROV['label']:
                        pred = RDFS.label
                    elif attr == PROV_ATTR_STARTTIME:
                        pred = URIRef(PROV['startedAtTime'].uri)
                    elif attr == PROV_ATTR_ENDTIME:
                        pred = URIRef(PROV['endedAtTime'].uri)
                    else:
                        pred = self.encode_rdf_representation(attr)
                    container.add((identifier, pred, obj))
        return container
예제 #32
0
# Subject URIs for the sensor nodes.
SNode1 = URIRef("http://homesensor.com/Node1/")
SNode2 = URIRef("http://homesensor.com/Node2/")
SNode3 = URIRef("http://homesensor.com/Node3/")
SNode4 = URIRef("http://homesensor.com/Node4/")

mystore_graph_uri = "http://homesensor.com/mystore"
configString = "/var/tmp/mystore"

# Obtain the Sleepycat store plugin and open the existing database.
mystore = plugin.get('Sleepycat', Store)('mystore')
mystore.open("ay_folder", create=False)

# Conjunctive view over the store with the home-sensor prefix bound.
g = ConjunctiveGraph(store=mystore)
g.bind("homesensor", hs)

# Graph backed by the same store.
gNode1 = Graph(store=mystore)

# Store the readings for nodes 1 and 2 (one triple per entry).
_readings = [
    (SNode1, 'hasTemperature', '64'),
    (SNode1, 'hasLight', '50'),
    (SNode1, 'hasHumidity', '55'),
    (SNode1, 'Located', ''),
    (SNode1, 'StartTime', "2012-06-19T01:52:02Z"),
    (SNode1, 'EndTime', "2012-06-19T01:52:02Z"),
    (SNode2, 'hasTemperature', '64'),
    (SNode2, 'hasLight', '56'),
]
for _subject, _prop, _value in _readings:
    gNode1.add((_subject, hs[_prop], Literal(_value)))
예제 #33
0
        geo_ns = Namespace("http://www.w3.org/2003/01/geo/wgs84_pos#")
        geosparql_ns = Namespace("http://www.opengis.net/ont/geosparql#")
        gn_ns = Namespace("http://www.geonames.org/ontology#")
        lawd_ns = Namespace("http://lawd.info/ontology/")
        rdfs_ns = Namespace("http://www.w3.org/2000/01/rdf-schema#")
        skos_ns = Namespace("http://www.w3.org/2004/02/skos/core#")

        newspaper = URIRef(
            'http://anno.onb.ac.at/anno-suche/#searchMode=complex&title=Die+Presse&resultMode=calendar&year=1875&month=11'
        )
        # newspaper = URIRef('http://anno.onb.ac.at')

        store = IOMemory()

        g = ConjunctiveGraph(store=store)
        g.bind("cito", cito_ns)
        g.bind("cnt", cnt_ns)
        g.bind("dcterms", dcterms_ns)
        g.bind("foaf", foaf_ns)
        g.bind("geo", geo_ns)
        g.bind("geosparql", geosparql_ns)
        g.bind("gn", gn_ns)
        g.bind("lawd", lawd_ns)
        g.bind("rdfs", rdfs_ns)
        g.bind("skos", skos_ns)

        graph_newspaper_geo = Graph(store=store, identifier=newspaper)

        source = etree.parse(
            "/var/data/earkweb/work/newspapers/ner/%s_tokenized.xml" %
            ausgabe).getroot()
예제 #34
0
from rdflib.plugins.memory import IOMemory

if __name__ == '__main__':
    # Namespace we will work with.
    ns = Namespace("http://love.com#"
                   )
    # Subject URIs; cmary/cjohn double as the named-graph identifiers.
    mary = URIRef(
        "http://love.com/lovers/mary#"
    )
    john = URIRef("http://love.com/lovers/john#")
    cmary = URIRef("http://love.com/lovers/mary#")
    cjohn = URIRef("http://love.com/lovers/john#")
    store = IOMemory()  # in-memory backing store for all the graphs
    # Conjunctive graph that aggregates each subject's named graph.
    g = ConjunctiveGraph(
        store=store
    )
    g.bind("love", ns)  # bind the 'love' prefix to the namespace
    # A named graph for Mary holding her properties.
    gmary = Graph(
        store=store, identifier=cmary
    )
    gmary.add((mary, ns['hasName'], Literal("Mary")))
    gmary.add((mary, ns['loves'], john))
    gjohn = Graph(store=store, identifier=cjohn)
    gjohn.add((john, ns['hasName'], Literal("John")))

    # With both graphs built, show the conjunctive graph's contents.

    print('#Contenido del grafo de Conjuntos')
    # The original used bare `print` statements, which on Python 3 are
    # no-op expressions (the script otherwise uses the print() function);
    # print() actually emits the intended blank lines.
    print()
    print()
    for c in g.contexts():
        print("-- %s " % c)
예제 #35
0
#!/usr/bin/python
import re
from rdflib import Namespace, URIRef
from rdflib.namespace import RDF
from rdflib.graph import ConjunctiveGraph 

# Vocabulary and instance namespaces for the Brazilian 2012 budget (LOA) dataset.
LOA = Namespace("http://vocab.e.gov.br/2012/08/loa2012#")
LOA_I = Namespace("http://orcamento.dados.gov.br/id/")
g = ConjunctiveGraph()
g.bind('loa', LOA)
g.bind('loa-i', LOA_I)
# Load the existing Turtle dump into the graph (the "n3" parser handles Turtle).
with open("rdf_loa2012_uri_dadosgovbr.ttl", "r") as f:
    g.load(f, format="n3")

def troca_uri(classe, padrao_uri, novo_padrao):
    """Rewrite the URIs of all instances of `classe` in the global graph `g`.

    Every subject typed rdf:type `classe` whose URI matches the regex
    `padrao_uri` gets a new URI built from `novo_padrao` (a %-format
    string receiving the first regex capture group).

    NOTE(review): this snippet appears truncated by the scrape - the last
    loop removes triples where the old URI is the object but the matching
    g.add() with the new URI is missing; confirm against the source repo.
    """
    padrao = re.compile(padrao_uri)
    for uri_antiga,p,o in g.triples((None, RDF['type'], classe)):
        match = padrao.match(uri_antiga)
        if match:
            id_instancia = match.group(1)
            uri_nova = URIRef(novo_padrao % id_instancia)
            print "Trocando %s por %s..." % (uri_antiga, uri_nova)
            g.remove((uri_antiga, RDF['type'], classe))
            g.add((uri_nova, RDF['type'], classe))
            # where the old URI appears as subject
            for s,p,o in g.triples((uri_antiga, None, None)):
                g.remove((s, p, o))
                g.add((uri_nova, p, o))
            # where the old URI appears as object
            for s,p,o in g.triples((None, None, uri_antiga)):
                g.remove((s, p, o))
예제 #36
0
File: main2.py  Project: huanjiayang/SWoT
default_graph_uri = "http://rdflib.net/rdfstore"
configString = "/var/tmp/rdfstore"  # NOTE(review): unused below - the path comes from mkdtemp()

# Get the Sleepycat plugin.
store = plugin.get('Sleepycat', Store)('rdfstore')  # NOTE(review): unused - Graph() below builds its own store from the "Sleepycat" name

# Open previously created store, or create it if it doesn't exist yet
graph = Graph(store="Sleepycat", identifier=URIRef(default_graph_uri))
path = mkdtemp()
rt = graph.open(path, create=False)
if rt == NO_STORE:
    # There is no underlying Sleepycat infrastructure, create it
    graph.open(path, create=True)
else:
    assert rt == VALID_STORE, "The underlying store is corrupt"

print "Triples in graph before add: ", len(graph)

# Now we'll add some triples to the graph & commit the changes
rdflib = Namespace('http://rdflib.net/test/')  # NOTE(review): shadows the rdflib module name
graph.bind("test", "http://rdflib.net/test/")

graph.add((rdflib['pic:1'], rdflib['name'], Literal('Jane & Bob')))
graph.add((rdflib['pic:2'], rdflib['name'], Literal('Squirrel in Tree')))
graph.commit()

print "Triples in graph after add: ", len(graph)

# display the graph in RDF/XML
print graph.serialize()
예제 #37
0
def rdf_description(notation='xml'):
    """Serialize the node titled 'student' as RDF in the given notation.

    Fetches the node by title from the model class matching its node type,
    builds an in-memory RDF graph from its non-internal fields, prints the
    serialized RDF, then commits and closes the graph.

    Args:
        notation: rdflib serialization format ('xml', 'n3', 'ntriples', 'trix').

    Raises:
        ValueError: if get_nodetype() returns an unknown node type (the
            original if/elif chain silently fell through and crashed later
            with an unbound 'node').
    """
    name = 'student'
    default_graph_uri = "http://gstudio.gnowledge.org/rdfstore"

    # Get the IOMemory plugin and open (or create) the backing store.
    store = plugin.get('IOMemory', Store)('rdfstore')
    graph = Graph(store="IOMemory", identifier=URIRef(default_graph_uri))
    path = mkdtemp()
    rt = graph.open(path, create=False)
    if rt == NO_STORE:
        # There is no underlying store yet, create it.
        graph.open(path, create=True)
    else:
        assert rt == VALID_STORE, "The underlying store is corrupt"

    graph.bind("gstudio", "http://gnowledge.org/")

    # Internal/bookkeeping model fields that must not be exported as triples.
    exclusion_fields = [
        "id", "rght", "node_ptr_id", "image", "lft", "_state",
        "_altnames_cache", "_tags_cache", "nid_ptr_id", "_mptt_cached_fields"
    ]

    # Map node-type name to the model class used to fetch the node;
    # replaces the previous twelve-branch if/elif chain.
    model_by_type = {
        'gbobject': Gbobject,
        'objecttype': Objecttype,
        'metatype': Metatype,
        'attributetype': Attributetype,
        'relationtype': Relationtype,
        'attribute': Attribute,
        'complement': Complement,
        'union': Union,
        'intersection': Intersection,
        'expression': Expression,
        'processtype': Processtype,
        'systemtype': Systemtype,
    }
    node_type = get_nodetype()
    try:
        model = model_by_type[node_type]
    except KeyError:
        raise ValueError("Unknown node type: %r" % node_type)
    node = model.objects.get(title=name)

    # Build the node's absolute URL from its first attached site.
    node_url = node.get_absolute_url()
    host_name = node.sites.all()[0].name
    url_add = 'http://' + host_name + node_url
    rdflib = Namespace(url_add)

    # Export every non-excluded model field as a literal triple.
    node_dict = node.__dict__
    subject = str(node_dict['id'])
    for key in node_dict:
        if key not in exclusion_fields:
            predicate = str(key)
            pobject = str(node_dict[predicate])
            graph.add((rdflib[subject], rdflib[predicate], Literal(pobject)))

    rdf_code = graph.serialize(format=notation)
    graph.commit()
    # print(x) with a single argument behaves identically under Python 2 and 3.
    print(rdf_code)
    graph.close()
예제 #38
0
def to_RDF(records, base_namespace, lang_codes=None,skosxl=False):
	"""Convert thesaurus records into a SKOS (or SKOS-XL) RDF graph.

	BUG(review): the boolean `skosxl` parameter is immediately shadowed by
	the `skosxl` Namespace assigned below, so every `if(skosxl)` test is
	always truthy and SKOS-XL output is produced regardless of the flag.
	NOTE(review): the snippet also appears truncated by the scrape - the
	graph `g` is built but never returned or serialized in the visible body.
	"""
	from rdflib import Namespace, BNode, Literal, URIRef,RDF,RDFS
	from rdflib.graph import Graph, ConjunctiveGraph
	from rdflib.plugins.memory import IOMemory
	print >> sys.stderr, base_namespace
	store = IOMemory()
	g = ConjunctiveGraph(store=store)
	skos = Namespace('http://www.w3.org/2004/02/skos/core#')
	skosxl = Namespace('http://www.w3.org/2008/05/skos-xl#')  # shadows the parameter (see BUG above)
	base = Namespace(base_namespace)
	g.bind('skos',skos)
	g.bind('skosxl',skosxl)
	g.bind('base',base)
	scheme_label = "schemes/1"
	thesaurus = URIRef(base[scheme_label])
	g.add((thesaurus,RDF.type, skos["ConceptScheme"]))
	for n,record in enumerate(records):
		label_counter = 1
		try:
			if(record is not None):
				uri = URIRef(base["%s/concepts/%i"%(scheme_label,int(record['id']))])
				g.add((uri, RDF.type, skos['Concept']))
				g.add((uri,skos["inScheme"],thesaurus))
				# broader/narrower links, or mark as a top concept of the scheme
				if(record['broader_id'] is not None):
					broader_uri = URIRef(base["%s/concepts/%i"%(scheme_label,int(record['broader_id']))])
					g.add((uri,skos['broader'],broader_uri)) 
					g.add((broader_uri,skos['narrower'],uri))
				else:
					g.add((uri,skos["topConceptOf"],thesaurus))
				if(record['hidden_label'] is not None):
					if(skosxl):
						label_uri = URIRef("%s#l%i"%(uri,label_counter))
						g.add((label_uri,RDF.type,skosxl["Label"]))
						g.add((label_uri,skosxl["literalForm"],Literal(record['hidden_label'])))
						g.add((uri,skosxl["hiddenLabel"],label_uri))
						label_counter += 1
					else:
						g.add((uri,skos["hiddenLabel"],Literal(record['hidden_label'])))
				if(record['labels'] is not None):
					# when transforming into SKOS-XL append the hiddenLabel to the preferredLabel@de
					# of a given term. This way it becomes possible to use the hiddenLabel to distinguish
					# between concepts with the same label but different provenance (i.e. they are found 
					# within different branches of the same thesaurus tree. 
					if(skosxl):
						label_uri = URIRef("%s#l%i"%(uri,label_counter))
						g.add((label_uri,RDF.type,skosxl["Label"]))
						g.add((label_uri,skosxl["literalForm"],Literal("%s (%s)"%(record['labels']["ger"],record['hidden_label']),lang=lang_codes["ger"])))
						g.add((uri,skosxl["prefLabel"],label_uri))
						label_counter += 1
					for lang in record['labels'].keys():
						if(skosxl):
							label_uri = URIRef("%s#l%i"%(uri,label_counter))
							g.add((label_uri,RDF.type,skosxl["Label"]))
							g.add((label_uri,skosxl["literalForm"],Literal(record['labels'][lang],lang=lang_codes[lang])))
							g.add((uri,skosxl["prefLabel"],label_uri))
							label_counter += 1
						else:
							g.add((uri,skos["prefLabel"],Literal(record['labels'][lang],lang=lang_codes[lang])))
				if(record['anon_nodes'] is not None):
					# anonymous child nodes become narrower concepts of this one
					for node_id,node in record['anon_nodes']:
						temp = URIRef(base["%s/concepts/%s"%(scheme_label,node_id)])
						print >> sys.stderr, temp
						g.add((temp,RDF.type,skos['Concept']))
						g.add((temp,skos["inScheme"],thesaurus))
						g.add((temp,skos['broader'],uri))
						if(skosxl):
							label_uri = URIRef("%s#l%i"%(temp,label_counter))
							g.add((label_uri,RDF.type,skosxl["Label"]))
							g.add((label_uri,skosxl["literalForm"],Literal(node,lang="de")))
							g.add((temp,skosxl["prefLabel"],label_uri))
							label_counter += 1
							# added extra preferredLabel@de with hiddenLabel betwen brackets 
							label_uri = URIRef("%s#l%i"%(temp,label_counter))
							g.add((label_uri,RDF.type,skosxl["Label"]))
							g.add((label_uri,skosxl["literalForm"],Literal("%s (%s)"%(node,record['hidden_label']),lang="de")))
							g.add((temp,skosxl["prefLabel"],label_uri))
							label_counter += 1
						else:
							g.add((temp,skos["prefLabel"],Literal(node,lang="de")))
				print >> sys.stderr, "Record %s converted into RDF (%i/%i)"%(record['id'],n,len(records))
		except Exception, e:
			print >> sys.stderr, "Failed converting record %s with error %s (%i/%i)"%(record['id'],str(e),n,len(records))
예제 #39
0
class TestLevelDBConjunctiveGraphCore(unittest.TestCase):
    """Exercise core ConjunctiveGraph behaviour on the LevelDB rdflib store."""

    def setUp(self):
        # Fresh LevelDB-backed graph opened at the configured filesystem path.
        store = "LevelDB"
        self.graph = ConjunctiveGraph(store=store)
        self.path = configString
        self.graph.open(self.path, create=True)

    def tearDown(self):
        self.graph.destroy(self.path)
        try:
            self.graph.close()
        except Exception:
            # Closing an already-destroyed store can fail; ignore that, but
            # (unlike the previous bare except) let SystemExit and
            # KeyboardInterrupt propagate.
            pass
        # Clean up whatever the store left behind: a directory of files,
        # a plain file, or a "host:port"-style path.
        if getattr(self, 'path', False) and self.path is not None:
            if os.path.exists(self.path):
                if os.path.isdir(self.path):
                    for f in os.listdir(self.path):
                        os.unlink(self.path + '/' + f)
                    os.rmdir(self.path)
                elif len(self.path.split(':')) == 1:
                    os.unlink(self.path)
                else:
                    os.remove(self.path)

    def test_namespaces(self):
        # Two explicit binds plus the store's default bindings -> 5 total.
        # (assert_ is a deprecated alias removed in Python 3.12; use assertTrue.)
        self.graph.bind("dc", "http://http://purl.org/dc/elements/1.1/")
        self.graph.bind("foaf", "http://xmlns.com/foaf/0.1/")
        self.assertTrue(len(list(self.graph.namespaces())) == 5)
        self.assertTrue(('foaf', rdflib.term.URIRef(u'http://xmlns.com/foaf/0.1/')
                         ) in list(self.graph.namespaces()))

    def test_readable_index(self):
        # Smoke test: ensure readable_index() renders without raising.
        print(readable_index(111))

    def test_triples_context_reset(self):
        michel = rdflib.URIRef(u'michel')
        likes = rdflib.URIRef(u'likes')
        pizza = rdflib.URIRef(u'pizza')
        cheese = rdflib.URIRef(u'cheese')
        self.graph.add((michel, likes, pizza))
        self.graph.add((michel, likes, cheese))
        self.graph.commit()
        ntriples = self.graph.triples((None, None, None),
                                      context=self.graph.store)
        self.assertTrue(len(list(ntriples)) == 2)

    def test_remove_context_reset(self):
        michel = rdflib.URIRef(u'michel')
        likes = rdflib.URIRef(u'likes')
        pizza = rdflib.URIRef(u'pizza')
        cheese = rdflib.URIRef(u'cheese')
        self.graph.add((michel, likes, pizza))
        self.graph.add((michel, likes, cheese))
        self.graph.commit()
        self.graph.store.remove((michel, likes, cheese), self.graph.store)
        self.graph.commit()
        self.assertTrue(
            len(
                list(
                    self.graph.triples((None, None, None),
                                       context=self.graph.store))) == 1)

    def test_remove_db_exception(self):
        michel = rdflib.URIRef(u'michel')
        likes = rdflib.URIRef(u'likes')
        pizza = rdflib.URIRef(u'pizza')
        cheese = rdflib.URIRef(u'cheese')
        self.graph.add((michel, likes, pizza))
        self.graph.add((michel, likes, cheese))
        self.graph.commit()
        self.graph.store.__len__(context=self.graph.store)
        self.assertTrue(
            len(
                list(
                    self.graph.triples((None, None, None),
                                       context=self.graph.store))) == 2)
예제 #40
0
def DoTheTestMemory():
    """Round-trip demo: build a two-context ConjunctiveGraph in memory,
    query it, serialize each context (and the whole graph) to XML files,
    reload them, and print everything so input and output can be compared.
    """
    ns = Namespace("http://love.com#")

    # AssertionError: ConjunctiveGraph must be backed by a context aware store.
    mary = URIRef("http://love.com/lovers/mary")
    john = URIRef("http://love.com/lovers/john")

    cmary = URIRef("http://love.com/lovers/context_mary")
    cjohn = URIRef("http://love.com/lovers/context_john")

    # my_store = Memory()
    store_input = IOMemory()

    gconjunctive = ConjunctiveGraph(store=store_input)
    gconjunctive.bind("love", ns)

    # add a graph for Mary's facts to the Conjunctive Graph
    gmary = Graph(store=store_input, identifier=cmary)
    # Mary's graph only contains the URI of the person she love, not his cute name
    gmary.add((mary, ns["hasName"], Literal("Mary")))
    gmary.add((mary, ns["loves"], john))

    # add a graph for John's facts to the Conjunctive Graph
    gjohn = Graph(store=store_input, identifier=cjohn)
    # John's graph contains his cute name
    gjohn.add((john, ns["hasCuteName"], Literal("Johnny Boy")))

    # enumerate contexts
    print("Input contexts")
    for c in gconjunctive.contexts():
        print("-- %s " % c)

    # separate graphs
    if False:  # flip to also dump the two input subgraphs
        print("===================")
        print("GJOHN")
        print(gjohn.serialize(format="n3").decode("utf-8"))
        print("===================")
        print("GMARY")
        print(gmary.serialize(format="n3").decode("utf-8"))
        print("===================")

    # full graph
    print("===================")
    print("GCONJUNCTIVE NATIVE")
    print(gconjunctive.serialize(format="n3").decode("utf-8"))

    # query the conjunction of all graphs
    # (property path: the cute name of the person Mary loves)
    xx = None
    for x in gconjunctive[mary:ns.loves / ns.hasCuteName]:
        xx = x
    print("Q: Who does Mary love?")
    print("A: Mary loves {}".format(xx))

    # Next, save a single subgraph, then reload it; the result must be the same.
    gjohn.serialize(destination='gjohn_copy.xml', format='xml')
    gmary.serialize(destination='gmary_copy.xml', format='xml')

    gjohn_copy = Graph()
    gjohn_copy.parse('gjohn_copy.xml', format='xml')
    gmary_copy = Graph()
    gmary_copy.parse('gmary_copy.xml', format='xml')

    if True:
        print("===================")
        print("GJOHN")
        print(gjohn_copy.serialize(format="n3").decode("utf-8"))
        print("===================")
        print("GMARY")
        print(gmary_copy.serialize(format="n3").decode("utf-8"))
        print("===================")

    print("===================")
    print("GCONJUNCTIVE WITH QUADS")
    print(list(gconjunctive.quads(None)))
    print("===================")

    gconjunctive.serialize(destination='gconjunctive_copy.xml', format='xml')

    gconjunctive_copy = ConjunctiveGraph()
    gconjunctive_copy.parse('gconjunctive_copy.xml', format='xml')

    print("===================")
    print("GCONJUNCTIVE AS CONJUNCTIVE")
    print(gconjunctive_copy.serialize(format="n3").decode("utf-8"))
    print("Output contexts")
    for c in gconjunctive_copy.contexts():
        print("-- %s " % c)
    print("===================")

    gconjunctive_graph_copy = Graph()
    gconjunctive_graph_copy.parse('gconjunctive_copy.xml', format='xml')

    print("===================")
    print("GCONJUNCTIVE AS GRAPH")
    print(gconjunctive_graph_copy.serialize(format="n3").decode("utf-8"))
    #print("Output contexts")
    #for c in gconjunctive_graph_copy.contexts():
    #    print("-- %s " % c)
    print("===================")
예제 #41
0
#! /usr/bin/env python3

import sys
import glob
import pandas as pd
import rdflib
from rdflib import Namespace, URIRef, BNode, Literal
from rdflib.graph import ConjunctiveGraph
from rdflib.namespace import RDF, RDFS, OWL, XSD, DCTERMS

# Conjunctive graph accumulating movie metadata; bind dcterms for serialization.
g = ConjunctiveGraph()
g.bind("dcterms", DCTERMS)

# Base URI under which per-movie resources are minted.
movie = "http://elonet.finna.fi/movie#"


def handle_one(f):
    """Add RDF triples for one film record `f` (a row exposing .elonet_id,
    .quality and .date) to the module-level graph `g`.

    NOTE(review): this snippet looks truncated by the scrape - `av`,
    `picsom_uri` and `source` are computed but never used in the visible
    body; confirm against the original source.
    """
    # Locate the film's media file under the scratch directory by id prefix.
    v = '/scratch/project_2002528/films/' + str(f.elonet_id) + '[-_]*'
    # print(f, v)
    v = glob.glob(v)
    if len(v) == 0:
        v = ['not found']
    v = v[0]
    av = 'https://a3s.fi/momaf/' + v[25:]  # object-storage URL for the file
    # Zero-padded 7-digit PicSOM label derived from the movie id.
    picsom_label = ('000000' + str(f.elonet_id))[-7:]
    picsom_uri = 'http://picsom.aalto.fi/momaf/' + picsom_label
    source = f.quality
    if source == '*':
        source = 'KAVI'  # '*' seems to mark KAVI-sourced material - TODO confirm
    uri = URIRef(movie + str(f.elonet_id))
    g.add((uri, DCTERMS.date, Literal(f.date, datatype=XSD.date)))
예제 #42
0
class InMemoryStorage(object):
    """Thin wrapper around an in-memory rdflib ConjunctiveGraph.

    Contexts (named graphs) may be addressed either by a string key into
    the module-level `graph_dict` or directly by a URIRef identifier.
    """

    def __init__(self):
        # Conjunctive graph backed by a single in-memory store; all named
        # contexts live inside this store.
        store = IOMemory()

        self.g = ConjunctiveGraph(store=store)

        # Prefixes used when serializing ('qb' and 'cube' are two prefixes
        # for the same Data Cube namespace).
        self.g.bind("lada",ns_lada)
        self.g.bind('data', ns_data)
        self.g.bind('cube', ns_cube)
        self.g.bind('qb', ns_cube)
        self.g.bind('lcd', ns_lcd)
        self.g.bind('xsd', ns_xsd)
        self.g.bind('qb4cc', ns_qb4cc)
        self.g.bind('skos', ns_skos)

        # Namespace map handed to every SPARQL query.
        self.initNs = {
            'lada': ns_lada,
            'data': ns_data,
            'qb': ns_cube,
            'lcd': ns_lcd,
            'xsd': ns_xsd,
            'qb4cc': ns_qb4cc,
            'skos': ns_skos
        }

    def _concatenate_graphs(self, graphs):
        """Merge the given named contexts into one fresh Graph copy."""
        source = Graph()
        for g in graphs:
            if g in graph_dict:
                source += self.g.get_context(graph_dict[g])
            elif type(g) is URIRef:
                source += self.g.get_context(g)
        return source

    def add_triple(self, triple, context):
        """Add one triple to a named context, or to the default graph."""
        if context:
            if type(context) is str:
                self.g.get_context(graph_dict[context]).add(triple)
            else:
                self.g.get_context(context).add(triple)
        else:
            self.g.add(triple)

    def add_graph(self, graph, context):
        """Merge a whole Graph into a named context (or the default graph)."""
        if context:
            g = None
            if type(context) is str:
                g = self.g.get_context(graph_dict[context])
            else:
                g = self.g.get_context(context)
            g += graph
        else:
            self.g += graph

    def add_file(self, file, format, context):
        """Parse an RDF file into a named context (or the default graph)."""
        if context:
            if type(context) is str:
                self.g.get_context(graph_dict[context]).parse(file, format=format)
            else:
                self.g.get_context(context).parse(file, format=format)
        else:
            self.g.parse(file, format=format)

    def query(self, queryString, contexts):
        """Run a SPARQL query against one context, a merged list of
        contexts, or the whole conjunctive graph."""
        if contexts:
            if type(contexts) is list:
                return self._concatenate_graphs(contexts).query(queryString, initNs=self.initNs)
            elif type(contexts) is str:
                return self.g.get_context(graph_dict[contexts]).query(queryString, initNs=self.initNs)
            else:
                return self.g.get_context(contexts).query(queryString, initNs=self.initNs)
        else:
            return self.g.query(queryString, initNs=self.initNs)

    def value(self, subject, predicate, context):
        """Return one matching object value (cf. rdflib Graph.value)."""
        if context:
            if type(context) is str:
                return self.g.get_context(graph_dict[context]).value(subject, predicate)
            else:
                return self.g.get_context(context).value(subject, predicate)
        else:
            return self.g.value(subject, predicate)

    def remove(self, triple_pattern, contexts):
        """Remove every triple matching the pattern from the given context(s).

        NOTE(review): when `contexts` is a list, the removal happens on the
        merged *copy* built by _concatenate_graphs, not on the store -
        preserved as-is since callers may rely on it, but worth confirming.
        """
        if contexts:
            if type(contexts) is list:
                self._concatenate_graphs(contexts).remove(triple_pattern)
            else:
                self.g.get_context(graph_dict[contexts]).remove(triple_pattern)
        else:
            self.g.remove(triple_pattern)

    def clear(self, context):
        """Drop a whole named context, or empty the default graph."""
        if context:
            if type(context) is str:
                self.g.remove_context(self.g.get_context(graph_dict[context]))
            else:
                self.g.remove_context(self.g.get_context(context))
        else:
            self.g.remove( (None, None, None) )

    def count_triples(self):
        """Count all triples in the conjunctive graph.

        Replaces the previous manual counter loop (with stray semicolons)
        by an idiomatic generator sum.
        """
        return sum(1 for _ in self.g)

    def export(self, context):
        """Serialize a named context to '<context>.ttl' in Turtle format."""
        if type(context) is str:
            self.g.get_context(graph_dict[context]).serialize(context + ".ttl", format="turtle")
예제 #43
0
#GLOBAL VARS
pub_base_uri = "http://www.diei.udl.cat"
uri_person = "person"  # URL path segment for people
uri_pub = "pub"        # URL path segment for publications
uri_sub = "subject"    # URL path segment for subjects
DC = Namespace("http://purl.org/dc/terms/")
RDFS = Namespace("http://www.w3.org/2000/01/rdf-schema#")
SWRC = Namespace("http://swrc.ontoware.org/ontology#")
AIISO = Namespace("http://purl.org/vocab/aiiso/schema#")
TEACH = Namespace("http://linkedscience.org/teach/ns#")
#END GLOBAL VARS

# Create the RDF Graph
graph = ConjunctiveGraph()
graph.bind("dc", DC)
graph.bind("rdfs", RDFS)
graph.bind("swrc", SWRC)
graph.bind("aiiso", AIISO)
graph.bind("teach", TEACH)
# End create RDF Graph

# Counts how many triples point at a given resource URI in the named graph;
# {uri} is filled in via str.format (hence the doubled braces).
query_template = """
    SELECT (COUNT(?s) AS ?n)
    FROM <http://www.diei.udl.cat/>
    WHERE {{
        ?s ?p <{uri}>
    }}
"""
sparql = SPARQLWrapper2("http://omediadis.udl.cat:8890/sparql")
sparql.setReturnFormat(JSON)
예제 #44
0
def rdf_all(notation='xml'):
    """Serialize nodes from the gstudio store as RDF.

    Reconstructed from a whitespace-mangled snippet: the original mixed
    tabs and spaces so badly it no longer parsed. Statement order is
    preserved; formatting and the long if/elif type dispatch were cleaned
    up into a lookup table.

    Args:
        notation: rdflib serialization format ('xml', 'n3', 'ntriples', 'trix').
    """
    default_graph_uri = "http://gstudio.gnowledge.org/rdfstore"

    # Get the IOMemory plugin and open (or create) the backing store.
    store = plugin.get('IOMemory', Store)('rdfstore')
    graph = Graph(store="IOMemory", identifier=URIRef(default_graph_uri))
    path = mkdtemp()
    rt = graph.open(path, create=False)
    if rt == NO_STORE:
        graph.open(path, create=True)
    else:
        assert rt == VALID_STORE, "The underlying store is corrupt"

    graph.bind("gstudio", "http://gnowledge.org/")
    # Internal/bookkeeping fields that must not be exported as triples.
    exclusion_fields = ["id", "rght", "node_ptr_id", "image", "lft", "_state",
                        "_altnames_cache", "_tags_cache", "nid_ptr_id",
                        "_mptt_cached_fields"]

    # Node types fetched through the generic Gbobject model.
    gbobject_types = ('Gbobject', 'None', 'Processes', 'System')
    # Node types fetched through their own model class.
    model_by_type = {
        'Objecttype': Objecttype,
        'Attributetype': Attributetype,
        'Complement': Complement,
        'Union': Union,
        'Intersection': Intersection,
        'Expression': Expression,
        'Processtype': Processtype,
        'Systemtype': Systemtype,
        'AttributeSpecification': AttributeSpecification,
        'RelationSpecification': RelationSpecification,
    }
    sbox = 'http://sbox.gnowledge.org/gstudio/'

    for node in NID.objects.all():
        node_dict = node.ref.__dict__
        node_type = node.reftype
        try:
            if node_type in gbobject_types:
                node = Gbobject.objects.get(title=node)
            elif node_type in model_by_type:
                node = model_by_type[node_type].objects.get(title=node)
            # Per-node namespace plus a dump of the node's own URL.
            rdflib = link(node)
            url_addr = link1(node)
            a = fstore_dump(url_addr)
            # These three types use the shared sbox namespace instead.
            if node_type == 'Attribute':
                node = Attribute.objects.get(title=node)
                rdflib = Namespace(sbox)
                url_addr = sbox
                a = fstore_dump(url_addr)
            elif node_type == 'Relationtype':
                node = Relationtype.objects.get(title=node)
                rdflib = Namespace(sbox)
                url_addr = sbox
                a = fstore_dump(url_addr)
            elif node_type == 'Metatype':
                node = Metatype.objects.get(title=node)
                rdflib = Namespace(sbox)
                url_addr = sbox
                a = fstore_dump(url_addr)
        except Exception:
            # On lookup failure, fall back to the sbox namespace for the
            # types that have one (as in the original except branch).
            if node_type in ('Attribute', 'Relationtype', 'Metatype'):
                rdflib = Namespace(sbox)

    # NOTE(review): as in the original, only the *last* node's fields are
    # exported below - this block sits outside the loop above.
    subject = str(node_dict['id'])
    for key in node_dict:
        if key not in exclusion_fields:
            predicate = str(key)
            pobject = str(node_dict[predicate])
            graph.add((rdflib[subject], rdflib[predicate], Literal(pobject)))

    rdf_code = graph.serialize(format=notation)

    graph.commit()
    graph.close()
예제 #45
0
class TestKyotoCabinetConjunctiveGraphCore(unittest.TestCase):
    """Exercise core ConjunctiveGraph behaviour on the KyotoCabinet store."""

    def setUp(self):
        # Fresh KyotoCabinet-backed graph opened at the configured path.
        store = "KyotoCabinet"
        self.graph = ConjunctiveGraph(store=store)
        self.path = configString
        self.graph.open(self.path, create=True)

    def tearDown(self):
        self.graph.destroy(self.path)
        try:
            self.graph.close()
        except Exception:
            # Closing an already-destroyed store can fail; ignore that, but
            # (unlike the previous bare except) let SystemExit and
            # KeyboardInterrupt propagate.
            pass
        # Clean up whatever the store left behind: a directory of files,
        # a plain file, or a "host:port"-style path.
        if getattr(self, "path", False) and self.path is not None:
            if os.path.exists(self.path):
                if os.path.isdir(self.path):
                    for f in os.listdir(self.path):
                        os.unlink(self.path + "/" + f)
                    os.rmdir(self.path)
                elif len(self.path.split(":")) == 1:
                    os.unlink(self.path)
                else:
                    os.remove(self.path)

    def test_namespaces(self):
        # Two explicit binds plus the store's default bindings -> 5 total.
        # (assert_ is a deprecated alias removed in Python 3.12; use assertTrue.)
        self.graph.bind("dc", "http://http://purl.org/dc/elements/1.1/")
        self.graph.bind("foaf", "http://xmlns.com/foaf/0.1/")
        self.assertTrue(len(list(self.graph.namespaces())) == 5)
        self.assertTrue(("foaf", rdflib.term.URIRef(u"http://xmlns.com/foaf/0.1/")) in list(self.graph.namespaces()))

    def test_play_journal(self):
        # Journal playback is not implemented for this store.
        self.assertRaises(NotImplementedError, self.graph.store.play_journal, {"graph": self.graph})

    def test_readable_index(self):
        # Smoke test: ensure readable_index() renders without raising.
        print(readable_index(111))

    def test_triples_context_reset(self):
        michel = rdflib.URIRef(u"michel")
        likes = rdflib.URIRef(u"likes")
        pizza = rdflib.URIRef(u"pizza")
        cheese = rdflib.URIRef(u"cheese")
        self.graph.add((michel, likes, pizza))
        self.graph.add((michel, likes, cheese))
        self.graph.commit()
        ntriples = self.graph.triples((None, None, None), context=self.graph.store)
        self.assertTrue(len(list(ntriples)) == 2)

    def test_remove_context_reset(self):
        michel = rdflib.URIRef(u"michel")
        likes = rdflib.URIRef(u"likes")
        pizza = rdflib.URIRef(u"pizza")
        cheese = rdflib.URIRef(u"cheese")
        self.graph.add((michel, likes, pizza))
        self.graph.add((michel, likes, cheese))
        self.graph.commit()
        self.graph.store.remove((michel, likes, cheese), self.graph.store)
        self.graph.commit()
        self.assertTrue(len(list(self.graph.triples((None, None, None), context=self.graph.store))) == 1)

    def test_remove_db_exception(self):
        michel = rdflib.URIRef(u"michel")
        likes = rdflib.URIRef(u"likes")
        pizza = rdflib.URIRef(u"pizza")
        cheese = rdflib.URIRef(u"cheese")
        self.graph.add((michel, likes, pizza))
        self.graph.add((michel, likes, cheese))
        self.graph.commit()
        self.graph.store.__len__(context=self.graph.store)
        self.assertTrue(len(list(self.graph.triples((None, None, None), context=self.graph.store))) == 2)
예제 #46
0
File: main2.py  Project: huanjiayang/SWoT
configString = "/var/tmp/rdfstore"  # NOTE(review): unused below - the path comes from mkdtemp()

# Get the Sleepycat plugin.
store = plugin.get('Sleepycat', Store)('rdfstore')  # NOTE(review): unused - Graph() below builds its own store from the "Sleepycat" name

# Open previously created store, or create it if it doesn't exist yet
graph = Graph(store="Sleepycat",
              identifier = URIRef(default_graph_uri))
path = mkdtemp()
rt = graph.open(path, create=False)
if rt == NO_STORE:
    # There is no underlying Sleepycat infrastructure, create it
    graph.open(path, create=True)
else:
    assert rt == VALID_STORE, "The underlying store is corrupt"

print "Triples in graph before add: ", len(graph)

# Now we'll add some triples to the graph & commit the changes
rdflib = Namespace('http://rdflib.net/test/')  # NOTE(review): shadows the rdflib module name
graph.bind("test", "http://rdflib.net/test/")

graph.add((rdflib['pic:1'], rdflib['name'], Literal('Jane & Bob')))
graph.add((rdflib['pic:2'], rdflib['name'], Literal('Squirrel in Tree')))
graph.commit()

print "Triples in graph after add: ", len(graph)

# display the graph in RDF/XML
print graph.serialize()
예제 #47
0
from rdflib.plugins.memory import IOMemory

if __name__ == '__main__':

    # Namespace for the demo vocabulary.
    love = Namespace("http://love.com#")

    # Subjects we will describe.
    mary = URIRef("http://love.com/lovers/mary#")
    john = URIRef("http://love.com/lovers/john#")

    # Context identifiers for the per-person graphs (same URIs here).
    cmary = URIRef("http://love.com/lovers/mary#")
    cjohn = URIRef("http://love.com/lovers/john#")

    # One shared in-memory store backs the conjunctive graph and both
    # per-person subgraphs.
    backing = IOMemory()
    g = ConjunctiveGraph(store=backing)
    g.bind("love", love)

    # Mary's facts live in her own context.
    gmary = Graph(store=backing, identifier=cmary)
    for fact in ((mary, love['hasName'], Literal("Mary")),
                 (mary, love['loves'], john)):
        gmary.add(fact)

    # John's facts live in his.
    gjohn = Graph(store=backing, identifier=cjohn)
    gjohn.add((john, love['hasName'], Literal("John")))

    #enumerate contexts
    for ctx in g.contexts():
        print("-- %s " % ctx)

    #separate graphs
    print(gjohn.serialize(format='n3'))
예제 #48
0
def generaterdf(outputfile, groupName):
    """Fetch the items of a named group and write them out as RDF.

    Looks up the first group whose name matches ``groupName``, fetches the
    items it contains, builds one named graph per item in a shared store,
    and writes the combined serialization to ``outputfile``.

    :param outputfile: path of the RDF file to write.
    :param groupName: name of the group to search for.
    """
    # Find the groups based on the name of the group; take the first hit's id.
    myList = groupSearch(httpurl, groupName)
    groupId = myList['results'][0]['id']
    print(groupId)

    # Find all items in one group.
    myList = groupContent(httpurl, groupId)

    # Paging information returned with the result set.
    # (Python 3 print calls; the original Python 2 print statements are
    # syntax errors under Python 3.)
    print("total records : %s" % myList['total'])
    print("start record  : %s" % myList['start'])
    print("num record  : %s" % myList['num'])
    print("next start record  : %s" % myList['nextStart'])

    #
    # The following fields are returned.
    #itemType, culture, owner, guid, screenshots, id, size, appCategories, access, avgRating, title, numRatings, numComments, snippet,
    #listed, largeThumbnail, type, thumbnail, uploaded, industries, description, tags, typeKeywords, extent, banner, properties, name,
    #licenseInfo, languages, url, lastModified, documentation, modified, spatialReference, item, numViews, accessInformation

    # (Removed an unused `graph = Graph()` local from the original.)
    store = IOMemory()

    dmNs = Namespace('http://' + httpurl + "/rdfschema.rdf#")
    dctNs = Namespace("http://purl.org/dc/terms/")
    rdfsNs = Namespace("http://www.w3.org/2000/01/rdf-schema#")

    g = ConjunctiveGraph(store=store)
    g.bind("dm", dmNs)
    g.bind("dct", dctNs)
    g.bind("rdfs", rdfsNs)

    for obj in myList['results']:
        print(obj['title'] + ' -> ' + obj['id'])

        # Each item becomes its own named graph, keyed by its item page URL.
        subject = URIRef(
            'http://' + httpurl + '/home/item.html?id=' + obj['id']
        )
        tmpgraph = Graph(store=store, identifier=subject)
        tmpgraph.add((subject, dctNs['id'], Literal(obj['id'])))
        tmpgraph.add((subject, rdfsNs['label'], Literal(obj['title'])))
        tmpgraph.add((subject, dmNs['type'], Literal(obj['itemType'])))

        if 'url' in obj and obj['url']:
            tmpgraph.add((subject, dmNs['serviceURL'], URIRef(obj['url'])))

        if 'description' in obj and obj['description']:
            # Strip markup, collapse whitespace and normalize curly quotes.
            description = ' '.join(striphtml(obj['description']).split())
            description = re.sub(u"(\u2018|\u2019)", "'", description)
            tmpgraph.add((subject, dctNs['description'], Literal(description)))

        if obj.get("uploaded", "") != "":
            # Remove the millisecond suffix from the epoch timestamp.
            timestamp = int(str(obj['uploaded'])[:-3])
            date = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d')
            tmpgraph.add((subject, dctNs['created'], Literal(date)))
            tmpgraph.add((subject, dctNs['issued'], Literal(date)))

        if obj.get("title", "") != "":
            tmpgraph.add((subject, dctNs['title'], Literal(obj['title'])))

        if obj.get("owner", "") != "":
            tmpgraph.add((subject, dctNs['creator'], Literal(obj['owner'])))

        if obj.get("tags", "") != "":
            for keyword in obj['tags']:
                tmpgraph.add((subject, dctNs['subject'], Literal(keyword.strip())))

        #also "Comments", "Subject", "Category", "Credits"

    # Write the combined serialization; the context manager guarantees the
    # handle is closed even if serialization fails (the original leaked it).
    outRDF = os.path.join(outputfile)
    with open(outRDF, "w") as rdf_file:
        rdf_file.write(g.serialize())
예제 #49
0
파일: rdf.py 프로젝트: we1l1n/inpho
def make_graph():
    """Return a new Graph with all InPhO prefix bindings in place.

    Bug fix: the ``rdfs`` prefix was bound to the spec's document URL
    ("http://www.w3.org/TR/rdf-schema/#") rather than the actual RDFS
    namespace URI, so serialized ``rdfs:`` terms would not resolve.

    :returns: a ``Graph`` pre-bound with the project's namespace prefixes.
    """
    g = Graph()

    # Prefix -> namespace URI table; dict order (3.7+) preserves the
    # original binding order.
    prefixes = {
        "inpho": "http://inpho.cogs.indiana.edu/",
        "thinker": "http://inpho.cogs.indiana.edu/thinker/",
        "user": "http://inpho.cogs.indiana.edu/user/",
        "entity": "http://inpho.cogs.indiana.edu/entity/",
        "foaf": "http://xmlns.com/foaf/0.1/",
        "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
        # Correct RDFS namespace (was "http://www.w3.org/TR/rdf-schema/#").
        "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
        "owl": "http://www.w3.org/2002/07/owl#",
        "idea": "http://inpho.cogs.indiana.edu/idea/",
        "skos": "http://www.w3.org/2004/02/skos/core#",
        "db": "http://dbpedia.org/",
        "dc": "http://purl.org/dc/elements/1.1/",
    }
    for prefix, uri in prefixes.items():
        g.bind(prefix, uri)

    return g
예제 #50
0
def rdf_description(name, notation='xml'):
    """Print the RDF description of the node titled ``name``.

    Serializes every non-excluded model field of the node as a triple and
    prints the result in the requested notation.

    :param name: title of the node to describe.
    :param notation: rdflib serialization format; one of "xml", "n3",
        "ntriples" or "trix".
    """
    default_graph_uri = "http://gstudio.gnowledge.org/rdfstore"

    # Get the IOMemory plugin.
    store = plugin.get('IOMemory', Store)('rdfstore')

    # Open previously created store, or create it if it doesn't exist yet.
    graph = Graph(store="IOMemory", identifier=URIRef(default_graph_uri))
    path = mkdtemp()
    rt = graph.open(path, create=False)
    if rt == NO_STORE:
        graph.open(path, create=True)
    else:
        assert rt == VALID_STORE, "The underlying store is corrupt"

    graph.bind("gstudio", "http://gnowledge.org/")
    # Internal/bookkeeping model fields that must not be serialized.
    exclusion_fields = [
        "id", "rght", "node_ptr_id", "image", "lft", "_state",
        "_altnames_cache", "_tags_cache", "nid_ptr_id", "_mptt_cached_fields"
    ]

    # Resolve the concrete model from the node's reftype.  The original
    # if/elif ladder is replaced by two dispatch tables with identical
    # behavior: most types re-fetch through their own model and derive the
    # namespace via link(); a few use the fixed gstudio namespace instead.
    node = NID.objects.get(title=name)
    node_type = node.reftype

    linked_models = {
        'Gbobject': Gbobject,
        'None': Gbobject,
        'Processes': Gbobject,
        'System': Gbobject,
        'Objecttype': Objecttype,
        'Attributetype': Attributetype,
        'Complement': Complement,
        'Union': Union,
        'Intersection': Intersection,
        'Expression': Expression,
        'Processtype': Processtype,
        'Systemtype': Systemtype,
        'AttributeSpecification': AttributeSpecification,
        'RelationSpecification': RelationSpecification,
    }
    namespace_models = {
        'Attribute': Attribute,
        'Relationtype': Relationtype,
        'Metatype': Metatype,
    }

    if node_type in linked_models:
        node = linked_models[node_type].objects.get(title=name)
        rdflib = link(node)
    elif node_type in namespace_models:
        node = namespace_models[node_type].objects.get(title=name)
        rdflib = Namespace('http://sbox.gnowledge.org/gstudio/')
    else:
        # Unknown reftype: keep the NID node and use the default namespace.
        rdflib = Namespace('http://sbox.gnowledge.org/gstudio/')

    node_dict = node.__dict__

    # One triple per serializable model field, keyed by the node's id.
    subject = str(node_dict['id'])
    for key in node_dict:
        if key not in exclusion_fields:
            predicate = str(key)
            pobject = str(node_dict[predicate])
            graph.add((rdflib[subject], rdflib[predicate], Literal(pobject)))

    rdf_code = graph.serialize(format=notation)

    graph.commit()
    # Python 3 print call (the original Python 2 print statement is a
    # syntax error under Python 3).
    print(rdf_code)
    graph.close()
예제 #51
0
    def encode_container(self, bundle, container=None, identifier=None):
        """Encode a PROV bundle's records into an RDF graph.

        :param bundle: PROV bundle providing namespaces and records.
        :param container: target graph; when None a fresh ConjunctiveGraph
            with the ``prov`` prefix bound is created.
        :param identifier: graph identifier for a freshly created container.
            NOTE: the same local name is reused below as the per-record
            subject, so the parameter only matters when ``container`` is
            None.
        :return: the populated ``container`` graph.
        """
        if container is None:
            container = ConjunctiveGraph(identifier=identifier)
            nm = container.namespace_manager
            nm.bind('prov', PROV.uri)

        # Mirror the bundle's prefix declarations onto the graph.
        for namespace in bundle.namespaces:
            container.bind(namespace.prefix, namespace.uri)

        # Records without an explicit identifier get a stable anonymous id.
        id_generator = AnonymousIDGenerator()
        real_or_anon_id = lambda record: record._identifier.uri if \
            record._identifier else id_generator.get_anon_id(record)

        for record in bundle._records:
            rec_type = record.get_type()
            # Identified records get an explicit rdf:type assertion;
            # `identifier` doubles as the subject for attribute triples.
            if hasattr(record, 'identifier') and record.identifier:
                identifier = URIRef(text_type(real_or_anon_id(record)))
                container.add((identifier, RDF.type, URIRef(rec_type.uri)))
            else:
                identifier = None
            if record.attributes:
                bnode = None
                formal_objects = []
                used_objects = []
                all_attributes = list(record.formal_attributes) + list(record.attributes)
                # The qualified (n-ary) PROV pattern is needed when an
                # identified record has any formal value, or an anonymous
                # one has formal values beyond the first two positions.
                formal_qualifiers = False
                for attrid, (attr, value) in enumerate(list(record.formal_attributes)):
                    if (identifier is not None and value is not None) or \
                            (identifier is None and value is not None and attrid > 1):
                        formal_qualifiers = True
                has_qualifiers = len(record.extra_attributes) > 0 or formal_qualifiers
                for idx, (attr, value) in enumerate(all_attributes):
                    if record.is_relation():
                        pred = URIRef(PROV[PROV_N_MAP[rec_type]].uri)
                        # create bnode relation
                        if bnode is None:
                            # First pass only: emit the binary form of the
                            # relation and, when qualified, the qualifier
                            # node linked via prov:qualified<Type>.
                            valid_formal_indices = set()
                            for idx, (key, val) in enumerate(record.formal_attributes):
                                formal_objects.append(key)
                                if val:
                                    valid_formal_indices.add(idx)
                            used_objects = [record.formal_attributes[0][0]]
                            subj = None
                            if record.formal_attributes[0][1]:
                                subj = URIRef(record.formal_attributes[0][1].uri)
                            if identifier is None and subj is not None:
                                try:
                                    obj_val = record.formal_attributes[1][1]
                                    obj_attr = URIRef(record.formal_attributes[1][0].uri)
                                except IndexError:
                                    obj_val = None
                                if obj_val and (rec_type not in [PROV_END,
                                                                PROV_START,
                                                                PROV_USAGE,
                                                                PROV_GENERATION,
                                                                PROV_DERIVATION,
                                                                PROV_INVALIDATION] or
                                                (valid_formal_indices == {0, 1} and
                                                 len(record.extra_attributes) == 0)):
                                    used_objects.append(record.formal_attributes[1][0])
                                    obj_val = self.encode_rdf_representation(obj_val)
                                    # prov:alternateOf is emitted in the
                                    # reverse direction.
                                    if rec_type == PROV_ALTERNATE:
                                        subj, obj_val = obj_val, subj
                                    container.add((subj, pred, obj_val))
                                    if rec_type == PROV_MENTION:
                                        if record.formal_attributes[2][1]:
                                            used_objects.append(record.formal_attributes[2][0])
                                            obj_val = self.encode_rdf_representation(record.formal_attributes[2][1])
                                            container.add((subj, URIRef(PROV['asInBundle'].uri), obj_val))
                                        has_qualifiers = False
                            if rec_type in [PROV_ALTERNATE]: #, PROV_ASSOCIATION]:
                                continue
                            if subj and (has_qualifiers or identifier):  #and (len(record.extra_attributes) > 0 or                                                            identifier):
                                qualifier = rec_type._localpart
                                rec_uri = rec_type.uri
                                # Revision/Quotation/PrimarySource subtypes
                                # override the derivation's qualified class.
                                for attr_name, val in record.extra_attributes:
                                    if attr_name == PROV['type']:
                                        if PROV['Revision'] == val or \
                                              PROV['Quotation'] == val or \
                                                PROV['PrimarySource'] == val:
                                            qualifier = val._localpart
                                            rec_uri = val.uri
                                            if identifier is not None:
                                                container.remove((identifier,
                                                                  RDF.type,
                                                                  URIRef(rec_type.uri)))
                                QRole = URIRef(PROV['qualified' + qualifier].uri)
                                if identifier is not None:
                                    container.add((subj, QRole, identifier))
                                else:
                                    bnode = identifier = BNode()
                                    container.add((subj, QRole, identifier))
                                    container.add((identifier, RDF.type,
                                                   URIRef(rec_uri)))
                                               # reset identifier to BNode
                        # Attach remaining relation attributes to the
                        # qualifier node, mapping PROV-N attribute names to
                        # the corresponding PROV-O predicates.
                        if value is not None and attr not in used_objects:
                            if attr in formal_objects:
                                pred = attr2rdf(attr)
                            elif attr == PROV['role']:
                                pred = URIRef(PROV['hadRole'].uri)
                            elif attr == PROV['plan']:
                                pred = URIRef(PROV['hadPlan'].uri)
                            elif attr == PROV['type']:
                                pred = RDF.type
                            elif attr == PROV['label']:
                                pred = RDFS.label
                            elif isinstance(attr, QualifiedName):
                                pred = URIRef(attr.uri)
                            else:
                                pred = self.encode_rdf_representation(attr)
                            # Record-type-specific predicate rewrites.
                            if PROV['plan'].uri in pred:
                                pred = URIRef(PROV['hadPlan'].uri)
                            if PROV['informant'].uri in pred:
                                pred = URIRef(PROV['activity'].uri)
                            if PROV['responsible'].uri in pred:
                                pred = URIRef(PROV['agent'].uri)
                            if rec_type == PROV_DELEGATION and PROV['activity'].uri in pred:
                                pred = URIRef(PROV['hadActivity'].uri)
                            if (rec_type in [PROV_END, PROV_START] and PROV['trigger'].uri in pred) or\
                                (rec_type in [PROV_USAGE] and PROV['used'].uri in pred):
                                pred = URIRef(PROV['entity'].uri)
                            if rec_type in [PROV_GENERATION, PROV_END,
                                            PROV_START, PROV_USAGE,
                                            PROV_INVALIDATION]:
                                if PROV['time'].uri in pred:
                                    pred = URIRef(PROV['atTime'].uri)
                                if PROV['ender'].uri in pred:
                                    pred = URIRef(PROV['hadActivity'].uri)
                                if PROV['starter'].uri in pred:
                                    pred = URIRef(PROV['hadActivity'].uri)
                                if PROV['location'].uri in pred:
                                    pred = URIRef(PROV['atLocation'].uri)
                            if rec_type in [PROV_ACTIVITY]:
                                if PROV_ATTR_STARTTIME in pred:
                                    pred = URIRef(PROV['startedAtTime'].uri)
                                if PROV_ATTR_ENDTIME in pred:
                                    pred = URIRef(PROV['endedAtTime'].uri)
                            if rec_type == PROV_DERIVATION:
                                if PROV['activity'].uri in pred:
                                    pred = URIRef(PROV['hadActivity'].uri)
                                if PROV['generation'].uri in pred:
                                    pred = URIRef(PROV['hadGeneration'].uri)
                                if PROV['usage'].uri in pred:
                                    pred = URIRef(PROV['hadUsage'].uri)
                                if PROV['usedEntity'].uri in pred:
                                    pred = URIRef(PROV['entity'].uri)
                            container.add((identifier, pred,
                                           self.encode_rdf_representation(value)))
                        continue
                    # Non-relation records: one triple per attribute.
                    if value is None:
                        continue
                    if isinstance(value, ProvRecord):
                        obj = URIRef(text_type(real_or_anon_id(value)))
                    else:
                        #  Assuming this is a datetime value
                        obj = self.encode_rdf_representation(value)
                    if attr == PROV['location']:
                        pred = URIRef(PROV['atLocation'].uri)
                        # NOTE(review): the `if False and ...` branch is
                        # deliberately dead in the original; kept as-is.
                        if False and isinstance(value, (URIRef, QualifiedName)):
                            if isinstance(value, QualifiedName):
                                value = URIRef(value.uri)
                            container.add((identifier, pred, value))
                        else:
                            container.add((identifier, pred,
                                           self.encode_rdf_representation(obj)))
                        continue
                    if attr == PROV['type']:
                        pred = RDF.type
                    elif attr == PROV['label']:
                        pred = RDFS.label
                    elif attr == PROV_ATTR_STARTTIME:
                        pred = URIRef(PROV['startedAtTime'].uri)
                    elif attr == PROV_ATTR_ENDTIME:
                        pred = URIRef(PROV['endedAtTime'].uri)
                    else:
                        pred = self.encode_rdf_representation(attr)
                    container.add((identifier, pred, obj))
        return container
예제 #52
0
from rdflib import Namespace, BNode, Literal, URIRef
from rdflib.graph import Graph, ConjunctiveGraph
from rdflib.plugins.memory import IOMemory

# Vocabulary namespace and resource identifiers for the demo data.
ns = Namespace("http://love.com#")

mary = URIRef("http://love.com/lovers/mary#")
john = URIRef("http://love.com/lovers/john#")

# Context identifiers for the two named graphs (same URIs as the resources).
cmary = URIRef("http://love.com/lovers/mary#")
cjohn = URIRef("http://love.com/lovers/john#")

store = IOMemory()

# A conjunctive view over the shared store, with the love prefix bound.
g = ConjunctiveGraph(store=store)
g.bind("love", ns)

# Mary's facts go into her own named graph.
gmary = Graph(store=store, identifier=cmary)
for fact in ((mary, ns["hasName"], Literal("Mary")),
             (mary, ns["loves"], john)):
    gmary.add(fact)

# John's fact goes into his.
gjohn = Graph(store=store, identifier=cjohn)
gjohn.add((john, ns["hasName"], Literal("John")))

# List every context the conjunctive graph can see.
for context in g.contexts():
    print("-- %s " % context)

# Serialize just John's graph.
print(gjohn.serialize(format="n3"))
예제 #53
0
:Woman = foo:FemaleAdult .
:Title a rdf:Property; = dc:title .



""" # --- End of primer code

# Binding our own prefixes makes the graph easier to spit back out.
# Technically a namespace was already created by the object init (which
# added some default namespaces as well); by default the main namespace is
# the URI of the current working directory, so make that simpler:

myNS = Namespace(URIRef('http://www.w3.org/2000/10/swap/Primer#'))
primer.bind('', myNS)
primer.bind('owl', 'http://www.w3.org/2002/07/owl#')
primer.bind('dc', 'http://purl.org/dc/elements/1.1/')
primer.bind('swap', 'http://www.w3.org/2000/10/swap/')
sourceCode = StringInputSource(mySource, myNS)

# Parse the N3 source (defined above) into the graph.

primer.parse(sourceCode, format='n3')

# Query directly into a list of (subject, predicate, object) tuples:

[(x, y, z) for x, y, z in primer]

# or spit it back out (mostly) the way we created it:
예제 #54
0
# Command-line flags: --debug plus one or more input XML files.
parser.add_argument('--debug', action='store_true',
                    help='show debug output instead of RDF')
parser.add_argument('files', nargs='+', help='file.XML ...')
args = parser.parse_args()
# When neither category flag is given, process both movies and persons.
if not (args.movies or args.persons):
    args.movies = args.persons = True

# Base URIs for Elonet movie and person resources.
elonet = "http://elonet.finna.fi/"
movie = elonet + "movie#"
person = elonet + "person#"

g = ConjunctiveGraph()

# Wikidata namespaces used by the output graph, bound in the same order
# as before.
WD = Namespace("http://www.wikidata.org/entity/")
WDT = Namespace("http://www.wikidata.org/prop/direct/")
P = Namespace("http://www.wikidata.org/prop/")
PS = Namespace("http://www.wikidata.org/prop/statement/")
PQ = Namespace("http://www.wikidata.org/prop/qualifier/")
for prefix, namespace in (("wd", WD), ("wdt", WDT), ("p", P),
                          ("ps", PS), ("pq", PQ)):
    g.bind(prefix, namespace)


def clean(s):
    if s is None:
        return None
    s = ' '.join(s.split('\n'))
    while s.find('  ') != -1: