Example #1
    def crawl_graph(self):
        ret = {}
        for ontology_property,p,o in self.graph.triples((None, None, RDF.Property)):
            for s,p,domain_class in self.graph.triples((ontology_property, RDFS.domain, None)):
                domain_class = Resource(self.graph, domain_class)
                for domain_subclass in domain_class.transitive_subjects(RDFS.subClassOf):
                    if domain_subclass.identifier not in ret:
                        ret[domain_subclass.identifier] = {'down':[], 'up':[]}
                    for s,p,range_class in self.graph.triples((ontology_property, RDFS.range, None)):
                        ret[domain_subclass.identifier]['down'].append({
                            'ontology_property':ontology_property,
                            'ontology_classes':self.get_subclasses(range_class)
                        })

            for s,p,range_class in self.graph.triples((ontology_property, RDFS.range, None)):
                range_class = Resource(self.graph, range_class)
                for range_subclass in range_class.transitive_subjects(RDFS.subClassOf):
                    if range_subclass.identifier not in ret:
                        ret[range_subclass.identifier] = {'down':[], 'up':[]}
                    for s,p,o in self.graph.triples((ontology_property, RDFS.domain, None)):
                        ret[range_subclass.identifier]['up'].append({
                            'ontology_property':ontology_property,
                            'ontology_classes':self.get_subclasses(o)
                        })
        return ret
    def process(self, input, output):
        username = "******"
        password = "******"
        lmClient = LifemapperClient(username, password)
        
        # Get the JSON spec
        jsonSpecification = input.value(LM.hasJSONExperimentSpecificationURL)
        
        # Get published scenario layers
        scenarioLayerIDs = []
        for layer in input[LM.hasScenarioLayer]:
            layerID = layer.value(LM.hasLayerID)
            scenarioLayerIDs.append(str(layerID))

        # Post scenario and save ID
        lmClient.postScenario(scenarioLayerIDs)
        #scenarioID = str(234)
        
        scenarioID = lmClient.getScenarioID()
        scenarioURL = lmClient.getScenarioURL()
        
        # The URI generator
        resourceURI = ResourceURI()
        
        # Create the published scenario
        scenario = RDFLibResource(output.graph, resourceURI.getURI("scenario"))
        ScenarioClass = RDFLibResource(output.graph, LM.Scenario)
        scenario.set(RDF.type, ScenarioClass)
        scenario.set(LM.hasScenarioID, Literal(scenarioID))
        scenario.set(LM.hasScenarioURL, Literal(scenarioURL))
        scenario.set(LM.hasJSONExperimentSpecificationURL, jsonSpecification)
        
        # Add scenarioID to output
        output.set(LM.hasPublishedScenario, scenario)
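
The traversal in crawl_graph() above rests on two rdflib calls: triples() with None wildcards for pattern matching, and Resource.transitive_subjects() for walking rdfs:subClassOf. A minimal, self-contained sketch of both (toy ontology, hypothetical names):

from rdflib import Graph, Namespace, RDF, RDFS
from rdflib.resource import Resource

EX = Namespace("http://example.org/")
g = Graph()
g.add((EX.hasPart, RDF.type, RDF.Property))
g.add((EX.hasPart, RDFS.domain, EX.Machine))
g.add((EX.hasPart, RDFS.range, EX.Part))
g.add((EX.Engine, RDFS.subClassOf, EX.Machine))

# None acts as a wildcard: match every triple whose object is rdf:Property.
for s, p, o in g.triples((None, None, RDF.Property)):
    print(s)  # http://example.org/hasPart

# transitive_subjects(RDFS.subClassOf) yields the class itself plus all of
# its (transitive) subclasses, which is how crawl_graph fans out per class.
machine = Resource(g, EX.Machine)
for sub in machine.transitive_subjects(RDFS.subClassOf):
    print(sub.identifier)  # EX.Machine, then EX.Engine
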
Example #3
    def add_venue(self):
        """
        Add publication venue.
        :return: rdflib.Graph
        """
        g = Graph()

        isbn = self.isbn()
        issn = self.issn() or self.eissn()

        if isbn is not None:
            vtype = BIBO.Book
            uri = D['venue-' + isbn]
        elif issn is not None:
            vtype = BIBO.Journal
            uri = D['venue-' + issn]
        else:
            # Placeholder
            logger.info("No source/venue ISSN or ISBN found for {}.".format(self.ut()))
            vtype = BIBO.Journal
            uri = D['venue-' + self.localid]

        venue = Resource(g, uri)
        venue.set(RDF.type, vtype)
        venue.set(RDFS.label, Literal(self.venue()))
        if vtype == BIBO.Journal:
            if issn is not None:
                venue.set(BIBO.issn, Literal(issn))
        else:
            venue.set(BIBO.isbn, Literal(isbn))
        g.add((self.pub_uri, VIVO.hasPublicationVenue, uri))

        return g
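
add_venue() relies on the difference between Resource.set() and Resource.add(): set() first removes any existing values for the predicate, so it suits one-valued fields such as labels and identifiers, while add() accumulates. A small standalone illustration (toy URIs):

from rdflib import Graph, Literal, Namespace, RDFS
from rdflib.resource import Resource

EX = Namespace("http://example.org/")
g = Graph()
venue = Resource(g, EX.venue1)
venue.add(RDFS.label, Literal("Old name"))
venue.set(RDFS.label, Literal("New name"))  # replaces "Old name"
print(list(venue.objects(RDFS.label)))      # a single label remains
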
Example #4
    def __init__(self, g, uri):

        if not isinstance(uri, rdflib.term.Identifier):
            uri = rdflib.URIRef(uri)

        _g = rdflib.Graph(namespace_manager=g.namespace_manager)

        if g:
            _g += g.triples((uri, None, None))
            _g += g.triples((None, None, uri))

        Resource.__init__(self, _g, uri)
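
A rough usage sketch for the constructor above (hypothetical data): it copies every triple that mentions the node, in subject or object position, into a fresh graph, so the resulting Resource carries its own small neighborhood of the source graph:

import rdflib

EX = rdflib.Namespace("http://example.org/")
g = rdflib.Graph()
g.add((EX.a, EX.knows, EX.b))
g.add((EX.b, EX.knows, EX.c))
g.add((EX.c, EX.knows, EX.d))

_g = rdflib.Graph()
_g += g.triples((EX.b, None, None))  # outgoing edges of b
_g += g.triples((None, None, EX.b))  # incoming edges of b
print(len(_g))  # 2: only the triples touching EX.b
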
 def testProcess(self):
     inputGraph = Graph()
     inputGraph.parse("http://ontology.cybershare.utep.edu/ELSEWeb/linked-data/lifemapper/layers/tiff-dataset.owl")
     inputURI = URIRef("http://visko.cybershare.utep.edu/linked-data/elseweb/sadi/tiffDataset")
     inputResource = RDFLibResource(inputGraph, inputURI)
     
     outputGraph = Graph()
     outputURI = URIRef("http://visko.cybershare.utep.edu/linked-data/elseweb/sadi/tiffDataset")
     outputResource = RDFLibResource(outputGraph, outputURI)
     
     self.process(inputResource, outputResource)
     
     publishedTIFFLayer = outputResource.value(LM.hasPublishedLayer)
     fileManifestation = publishedTIFFLayer.value(DATA.hasManifestation)
     fileURL = fileManifestation.value(DATA.hasFileDownloadURL)
     layerID = publishedTIFFLayer.value(LM.hasLayerID)
     
     print(fileURL)
     print(publishedTIFFLayer)
     print(layerID)
 def convert_calendar_dates(self, csv_filename):
     read_dates = self.__open_file(csv_filename)
     for row in read_dates:
         service = self.get_service(str.strip(row["service_id"]))
         calendar_date = Resource(self.graph, URIRef(self.uri + str.strip(row["service_id"]) + "_cal" + "_" + str.strip(row["date"])))
         service.add(self.GTFS.serviceRule, calendar_date)
         calendar_date.set(RDF.type, self.GTFS.CalendarDateRule)
         calendar_date.add(DCTERMS.date, self.get_date_literal(str.strip(row["date"])))
         exception_type = str.strip(row["exception_type"])
         if exception_type == "2":
             exception_type = "0"
         calendar_date.add(self.GTFS.dateAddition, Literal(exception_type, datatype=XSD.boolean))
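
The exception_type handling above maps GTFS calendar_dates semantics onto xsd:boolean: 1 (service added) keeps its lexical form "1" (true), while 2 (service removed) is rewritten to "0" (false). Both are valid xsd:boolean lexical forms, as a quick standalone check shows:

from rdflib import Literal, XSD

added = Literal("1", datatype=XSD.boolean)
removed = Literal("0", datatype=XSD.boolean)
print(added.toPython(), removed.toPython())  # True False
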
    def process(self, input, output):
        username = "******"
        password = "******"
        lmClient = LifemapperClient(username, password)
        
        # Get scenario id
        scenario = input.value(LM.specifiesModellingScenario)
        scenarioID = scenario.value(LM.hasScenarioID)
        
        # Extract Algorithm
        algorithm = input.value(LM.specifiesModellingAlgorithm)
        algorithmCode = algorithm.value(LM.hasAlgorithmCode)

        # Extract Parameter Bindings
        params = algorithm.value(MD.behaviorControlledBy)
        
        bindings = {}
        for param in params[MD.hasParameterMember]:
            name = param.value(MD.hasParameterName)
            value = param.value(MD.boundToValue)
            bindings[name] = value
        

        # Extract OccurrenceSetID
        occurrenceSet = input.value(LM.specifiesOccurrenceSet)
        occurrenceSetID = occurrenceSet.value(LM.hasOccurrenceSetID)

        lmClient.postExperiment(algorithmCode, bindings, occurrenceSetID, scenarioID)
        #resultURL = "http://somedomain.com/testURL"
        
        #get experimentURL and id
        experimentURL = lmClient.getExperimentURL()
        experimentID = lmClient.getExperimentID()
        
        print "experiment result URL %s " % experimentURL

        # The URI generator
        resourceURI = ResourceURI()

        # Create the experiment result
        experimentResult = RDFLibResource(output.graph, resourceURI.getURI("experimentResult"))
        experimentResultClass = RDFLibResource(output.graph, LM.ExperimentResult)
        experimentResult.set(RDF.type, experimentResultClass)
        experimentResult.set(LM.hasExperimentResultURL, Literal(experimentURL))
        experimentResult.set(LM.hasExperimentResultID, Literal(experimentID))

        # Add the experiment result to the executed specification
        output.set(LM.hasExperimentResult, experimentResult)        
Example #8
    def to_rdf(self):
        g = Graph()
        p = Resource(g, self.uri)
        p.add(RDF.type, FOAF.Person)
        p.set(RDFS.label, Literal(self._label()))
        p.set(CONVERIS.converisId, Literal(self.cid))
        if hasattr(self, 'cfresint'):
            p.set(VIVO.researchOverview, Literal(self.cfresint))
        if hasattr(self, 'orcid'):
            p.set(VIVO.orcidId, self.orcid_uri)
            # Confirm the orcid
            g.add((self.orcid_uri, RDF.type, OWL.Thing))
            # Todo - review if we want to confirm all orcids
            g.add((self.orcid_uri, VIVO.confirmedOrcidId, self.uri))

        # Vcard individual
        vci_uri = URIRef(self.vcard_uri)
        p.set(OBO['ARG_2000028'], vci_uri)
        g.add((vci_uri, RDF.type, VCARD.Individual))

        # Vcard Name
        g += self._vcard_name()
        g.add((vci_uri, VCARD.hasName, URIRef(self.vcard_name_uri)))

        # Vcard title
        vtg = self._vcard_title()
        if vtg is not None:
            g += vtg
            g.add((vci_uri, VCARD.hasTitle, URIRef(self.vcard_title_uri)))

        # Vcard email
        vte = self._vcard_email()
        if vte is not None:
            g += vte
            g.add((vci_uri, VCARD.hasEmail, URIRef(self.vcard_email_uri)))

        # positions
        g += self.get_positions()

        return g
Example #9
 def _vcard_name(self):
     g = Graph()
     vc = Resource(g, URIRef(self.vcard_name_uri))
     vc.set(RDF.type, VCARD.Name)
     vc.set(RDFS.label, Literal(self._label()))
     vc.set(VCARD.familyName, Literal(self.cffamilynames))
     vc.set(VCARD.givenName, Literal(self.cffirstnames))
     if hasattr(self, 'middlename'):
         vc.set(VIVO.middleName, Literal(self.middlename))
     return g
 def get_stop(self, stop_id):
     stop = Resource(self.graph, URIRef(self.uri + "stop_" + stop_id))
     stop.set(DCTERMS.identifier, Literal(stop_id, datatype=XSD.string))
     return stop
 def get_zone(self, zone_id):
     the_zone = Resource(self.graph, URIRef(self.uri + "zone_" + zone_id))
     the_zone.set(RDF.type, self.GTFS.Zone)
     the_zone.set(DCTERMS.identifier, Literal(zone_id, datatype=XSD.string))
     return the_zone
Example #12
    def __to_resource(self, consumer):

        if six.PY2:
            consumer.secret = b64encode(consumer.secret.encode("utf-8"))

        if six.PY3:
            consumer.secret = b64encode(consumer.secret)

        resource = Resource(self._graph, BNode())
        resource.add(RDF.type, OAUTH.Consumer)
        resource.add(OAUTH.consumerName, Literal(consumer.name))
        resource.add(OAUTH.consumerKey, Literal(consumer.key))
        resource.add(OAUTH.consumerSecret, Literal(consumer.secret))
        resource.add(OAUTH.provisional,
                     Literal(consumer.provisional, datatype=XSD.boolean))
        resource.add(OAUTH.trusted,
                     Literal(consumer.trusted, datatype=XSD.boolean))

        return resource
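
A hedged sketch of the blank-node pattern in __to_resource (the OAUTH namespace below is a stand-in): each consumer is an anonymous node, so every call mints a fresh BNode, and callers reach the data through the returned Resource or by serializing the graph:

from rdflib import BNode, Graph, Literal, Namespace, RDF
from rdflib.resource import Resource

OAUTH = Namespace("http://example.org/oauth#")
g = Graph()
resource = Resource(g, BNode())
resource.add(RDF.type, OAUTH.Consumer)
resource.add(OAUTH.consumerName, Literal("my-app"))
print(g.serialize(format="turtle"))
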
 def convert_feed(self, csv_filename):
     read_feed = self.__open_file(csv_filename)
     for row in read_feed:
         feed = Resource(self.graph, URIRef(str.strip(row["publisher"])))
         feed.set(RDF.type, self.GTFS.Feed)
         feed.add(DCTERMS.publisher, Literal(str.strip(row["publisher"]), datatype=XSD.string))
         feed.add(DCTERMS.title, Literal(str.strip(row["feed_publisher_name"]), datatype=XSD.string))
         feed.add(DCTERMS.language, Literal(str.strip(row["feed_lang"]), datatype=XSD.string))
         if "feed_version" in row and str.strip(row["feed_version"]) != "":
             feed.add(self.SCHEMA.version, Literal(row["feed_version"], datatype=XSD.string))
         if "feed_start_date" in row and str.strip(row["feed_start_date"]) != "" and "feed_end_date" in row and str.strip(row["feed_end_date"]) != "":
             temporal = Resource(self.graph, URIRef(feed.identifier + "_temporal"))
             temporal.set(RDF.type, DCTERMS.temporal)
             temporal.add(self.SCHEMA.startDate, self.get_date_literal(str.strip(row["feed_start_date"])))
             temporal.add(self.SCHEMA.endDate, self.get_date_literal(str.strip(row["feed_end_date"])))
Example #14
    def __iter__(self):
        """missing docstring."""
        for item in self.previous:
            # check if we have a dataset

            if item['_type'] not in ('org.bccvl.content.dataset',
                                     'org.bccvl.content.remotedataset'):
                # not a dataset
                yield item
                continue

            pathkey = self.pathkey(*item.keys())[0]
            # no path .. can't do anything
            if not pathkey:
                yield item
                continue

            path = item[pathkey]
            # Skip the Plone site object itself
            if not path:
                yield item
                continue

            obj = self.context.unrestrictedTraverse(path.encode().lstrip('/'),
                                                    None)

            # FIXME: this is really not a great way to check where to find provenance data
            # check if we are inside an experiment (means we import result)
            if IExperiment.providedBy(self.context.__parent__):
                # result import
                context = self.context
            else:
                # dataset import?
                context = obj

            # TODO: do some sanity checks
            provdata = IProvenanceData(context)
            PROV = Namespace(u"http://www.w3.org/ns/prov#")
            BCCVL = Namespace(u"http://ns.bccvl.org.au/")
            LOCAL = Namespace(u"urn:bccvl:")
            graph = Graph()
            graph.parse(data=provdata.data or '', format='turtle')
            activity = Resource(graph, LOCAL['activity'])
            # FIXME: shouldn't I use uuid instead of id?
            entity = Resource(graph, LOCAL[obj.id])
            # create this dataset as new entity -> output of activity
            entity.add(RDF['type'], PROV['Entity'])
            # generated by
            entity.add(PROV['wasGeneratedBy'], activity)
            # PROV['prov:wasAttributedTo'] to user and software?
            # File metadata
            entity.add(DCTERMS['creator'], Literal(obj.Creator()))
            entity.add(DCTERMS['title'], Literal(obj.title))
            entity.add(DCTERMS['description'], Literal(obj.description))
            entity.add(DCTERMS['rights'], Literal(obj.rights))
            if obj.portal_type == 'org.bccvl.content.dataset':
                entity.add(DCTERMS['format'], Literal(obj.file.contentType))
            else:
                # FIXME: this doesn't seem to do the right thing
                entity.add(DCTERMS['format'], Literal(obj.format))
            # TODO: add metadata about file?
            #    genre, layers, emsc, gcm, year

            # set activities end time
            #   first one wins
            if activity.value(PROV['endedAtTime']) is None:
                activity.add(
                    PROV['endedAtTime'],
                    Literal(datetime.now().replace(microsecond=0).isoformat(),
                            datatype=XSD['dateTime']))

            # TODO: extend activity metadata with execution environment data
            #       (logfile import?, pstats import) .. and script + params.json file
            # ALA import url
            pd = item.get('_ala_provenance', {})
            if pd:
                entity.add(BCCVL['download_url'], Literal(pd['url']))

            # store prov data
            provdata.data = graph.serialize(format="turtle")

            yield item
Example #15
    def get_resource_shape(cls, item_type: str, url: str, url_sp: str):
        if not cls.__instance:
            cls()

        cls.__init_graph()

        resource_shape = Resource(cls.__graph, URIRef(url))
        resource_shape.add(RDF.type, OSLC.ResourceShape)

        rs = cls.__get_resource_shape(item_type, url_sp, cls.__source_base_uri)

        if rs:
            for subject in rs.subjects(RDF.type, OSLC.Property):

                prop = Resource(cls.__graph, subject)
                prop.add(RDF.type, OSLC.Property)

                for p, o in rs.predicate_objects(subject):
                    prop.add(p, o)

                resource_shape.add(OSLC.property, prop)

            return resource_shape
        else:
            return False
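
The copy step in get_resource_shape() builds on Graph.predicate_objects(), which enumerates all (predicate, object) pairs for a subject; that makes it easy to clone one graph's description of a node into a Resource in another graph. A standalone sketch with made-up terms:

from rdflib import Graph, Literal, Namespace
from rdflib.resource import Resource

EX = Namespace("http://example.org/")
src = Graph()
src.add((EX.prop1, EX.name, Literal("title")))
src.add((EX.prop1, EX.occurs, Literal("exactly-one")))

dst = Graph()
prop = Resource(dst, EX.prop1)
for p, o in src.predicate_objects(EX.prop1):
    prop.add(p, o)
print(len(dst))  # 2
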
    def _createProvenance(self, result):
        provdata = IProvenanceData(result)
        from rdflib import URIRef, Literal, Namespace, Graph
        from rdflib.namespace import RDF, RDFS, FOAF, DCTERMS, XSD
        from rdflib.resource import Resource
        PROV = Namespace(u"http://www.w3.org/ns/prov#")
        BCCVL = Namespace(u"http://ns.bccvl.org.au/")
        LOCAL = Namespace(u"urn:bccvl:")
        graph = Graph()
        # the user is our agent

        member = api.user.get_current()
        username = member.getProperty('fullname') or member.getId()
        user = Resource(graph, LOCAL['user'])
        user.add(RDF['type'], PROV['Agent'])
        user.add(RDF['type'], FOAF['Person'])
        user.add(FOAF['name'], Literal(username))
        user.add(FOAF['mbox'],
                 URIRef('mailto:{}'.format(member.getProperty('email'))))
        # add software as agent
        software = Resource(graph, LOCAL['software'])
        software.add(RDF['type'], PROV['Agent'])
        software.add(RDF['type'], PROV['SoftwareAgent'])
        software.add(FOAF['name'], Literal('BCCVL ALA Importer'))
        # script content is stored somewhere on result and will be exported with zip?
        #   ... or store along with pstats.json ? hidden from user

        # -> execenvironment after import -> log output?
        # -> source code ... maybe some link expression? stored on result ? separate entity?
        activity = Resource(graph, LOCAL['activity'])
        activity.add(RDF['type'], PROV['Activity'])
        # TODO: this is rather queued or created time for this activity ... could capture real start time on running status update (or start transfer)
        now = datetime.now().replace(microsecond=0)
        activity.add(PROV['startedAtTime'],
                     Literal(now.isoformat(), datatype=XSD['dateTime']))
        activity.add(PROV['hasAssociationWith'], user)
        activity.add(PROV['hasAssociationWith'], software)
        # add job parameters to activity

        provdata.data = graph.serialize(format="turtle")
    def _createProvenance(self, result):
        provdata = IProvenanceData(result)
        from rdflib import URIRef, Literal, Namespace, Graph
        from rdflib.namespace import RDF, RDFS, FOAF, DCTERMS, XSD
        from rdflib.resource import Resource
        PROV = Namespace(u"http://www.w3.org/ns/prov#")
        BCCVL = Namespace(u"http://ns.bccvl.org.au/")
        LOCAL = Namespace(u"urn:bccvl:")
        graph = Graph()
        # the user is our agent

        member = api.user.get_current()
        username = member.getProperty('fullname') or member.getId()
        user = Resource(graph, LOCAL['user'])
        user.add(RDF['type'], PROV['Agent'])
        user.add(RDF['type'], FOAF['Person'])
        user.add(FOAF['name'], Literal(username))
        user.add(FOAF['mbox'],
                 URIRef('mailto:{}'.format(member.getProperty('email'))))
        # add software as agent
        software = Resource(graph, LOCAL['software'])
        software.add(RDF['type'], PROV['Agent'])
        software.add(RDF['type'], PROV['SoftwareAgent'])
        software.add(FOAF['name'], Literal('BCCVL Job Script'))
        # script content is stored somewhere on result and will be exported with zip?
        #   ... or store along with pstats.json ? hidden from user

        # -> execenvironment after import -> log output?
        # -> source code ... maybe some link expression? stored on result ? separate entity?
        activity = Resource(graph, LOCAL['activity'])
        activity.add(RDF['type'], PROV['Activity'])
        # TODO: this is rather queued or created time for this activity ... could capture real start time on running status update (or start transfer)
        now = datetime.now().replace(microsecond=0)
        activity.add(PROV['startedAtTime'],
                     Literal(now.isoformat(), datatype=XSD['dateTime']))
        activity.add(PROV['hasAssociationWith'], user)
        activity.add(PROV['hasAssociationWith'], software)
        # add job parameters to activity
        for idx, (key, value) in enumerate(result.job_params.items()):
            param = Resource(graph, LOCAL[u'param_{}'.format(idx)])
            activity.add(BCCVL['algoparam'], param)
            param.add(BCCVL['name'], Literal(key))
            # We have only dataset references as parameters
            if key in ('data_table',):
                param.add(BCCVL['value'], LOCAL[dsuuid])
            else:
                param.add(BCCVL['value'], Literal(value))
        # iterate over all input datasets and add them as entities
        for key in ('data_table',):
            dsbrain = uuidToCatalogBrain(result.job_params[key])
            if not dsbrain:
                continue
            ds = dsbrain.getObject()
            dsprov = Resource(graph, LOCAL[result.job_params[key]])
            dsprov.add(RDF['type'], PROV['Entity'])
            #dsprov.add(PROV['..'], Literal(''))
            dsprov.add(DCTERMS['creator'], Literal(ds.Creator()))
            dsprov.add(DCTERMS['title'], Literal(ds.title))
            dsprov.add(DCTERMS['description'], Literal(ds.description))
            dsprov.add(DCTERMS['rights'], Literal(ds.rights))  # ds.rightsstatement
            dsprov.add(DCTERMS['format'], Literal(ds.file.contentType))
            # location / source
            # graph.add(uri, DCTERMS['source'], Literal(''))
            # TODO: genre ...
            # TODO: resolution
            # species metadata
            md = IBCCVLMetadata(ds)
            # dsprov.add(BCCVL['scientificName'], Literal(md['species']['scientificName']))
            # dsprov.add(BCCVL['taxonID'], URIRef(md['species']['taxonID']))

            # ... species data, ... species id
            for layer in md.get('layers_used',()):
                dsprov.add(BCCVL['layer'], LOCAL[layer])

            # link with activity
            activity.add(PROV['used'], dsprov)

        provdata.data = graph.serialize(format="turtle")
 def convert_agency(self, csv_filename):
     read_agency = self.__open_file(csv_filename)
     for row in read_agency:
         if "agency_id" in row:
             agency = self.get_agency(str.strip(row["agency_id"]))
         else:
             agency = Resource(self.graph, URIRef(row['agency_url']))
             agency.add(RDF.type, self.GTFS.Agency)
         name = Literal(row['agency_name'], datatype=XSD.string)
         agency.add(FOAF.name, name)
         timezone = Literal(row['agency_timezone'], datatype=XSD.string)
         agency.add(self.GTFS.timeZone, timezone)
         if 'agency_lang' in row and str.strip(row["agency_lang"]) != "":
             agency.add(DCTERMS.language, Literal(row['agency_lang'], datatype=XSD.string))
         if 'agency_phone' in row and str.strip(row["agency_phone"]) != "":
             agency.add(FOAF.phone, Literal(row['agency_phone'], datatype=XSD.string))
         if 'agency_fare_url' in row and str.strip(row["agency_fare_url"]) != "":
             agency.add(self.GTFS.fareUrl, URIRef(row['agency_fare_url']))
 def get_fare(self, fare_id):
     fare = Resource(self.graph, URIRef(self.uri + "fare_" + fare_id))
     fare.set(RDF.type, self.GTFS.FareClass)
     fare.set(DCTERMS.identifier, Literal(fare_id, datatype=XSD.string))
     return fare
 def get_shape(self, shape_id):
     shape = Resource(self.graph, URIRef(self.uri + "shape_" + shape_id))
     shape.set(RDF.type, self.GTFS.Shape)
     shape.set(DCTERMS.identifier, Literal(shape_id, datatype=XSD.string))
     return shape
 def get_service(self, service_id):
     service = Resource(self.graph, URIRef(self.uri + "service_" + service_id))
     service.set(RDF.type, self.GTFS.Service)
     service.set(DCTERMS.identifier, Literal(service_id, datatype=XSD.string))
     return service
 def get_agency(self, agency_id):
     agency = Resource(self.graph, URIRef(self.uri + "agency_" + agency_id))
     agency.add(RDF.type, self.GTFS.Agency)
     agency.set(DCTERMS.identifier, Literal(agency_id, datatype=XSD.string))
     return agency
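
The get_* helpers above all follow one pattern: derive a deterministic URI from the GTFS id, then set the rdf:type and dcterms:identifier. Because a graph stores identical triples only once, calling such a helper repeatedly for the same id cannot duplicate data. Sketched here with a made-up base URI and a hypothetical get_route:

from rdflib import Graph, Literal, Namespace, URIRef, RDF, XSD
from rdflib.namespace import DCTERMS
from rdflib.resource import Resource

GTFS = Namespace("http://vocab.gtfs.org/terms#")
uri = "http://example.org/gtfs/"
graph = Graph()

def get_route(route_id):
    route = Resource(graph, URIRef(uri + "route_" + route_id))
    route.set(RDF.type, GTFS.Route)
    route.set(DCTERMS.identifier, Literal(route_id, datatype=XSD.string))
    return route

get_route("R1")
get_route("R1")    # idempotent: the graph is unchanged
print(len(graph))  # 2 triples
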
def generate_rdf(data):
    DCO = Namespace("http://info.deepcarbon.net/schema#")
    VIVO = Namespace("http://vivoweb.org/ontology/core#")
    SKOS = Namespace("http://www.w3.org/2004/02/skos/core#")
    BIBO = Namespace("http://purl.org/ontology/bibo/")

    g = Graph()
    g.bind("dco", DCO)
    g.bind("bibo", BIBO)
    g.bind("vivo", VIVO)

    for (uri, record) in data:
        try:
            publication = Publication(record)
            pub = Resource(g, URIRef(uri))

            if publication.issue is not None:
                pub.add(BIBO.issue, Literal(publication.issue))

            if publication.issued is not None and publication.issued["year"] is not None:
                pub.add(DCO.yearOfPublicationYear, Literal(publication.issued["year"], datatype=XSD.gYear))

            if publication.volume is not None:
                pub.add(BIBO.volume, Literal(publication.volume))

            if publication.issn is not None:
                pub.add(BIBO.issn, Literal(publication.issn))

            if publication.pages is not None:
                if "-" in publication.pages:
                    pageStart = publication.pages[:publication.pages.find("-")]
                    pageEnd = publication.pages[publication.pages.find("-") + 1:]
                    if pageStart != "n/a":
                        pub.add(BIBO.pageStart, Literal(pageStart))
                    if pageEnd != "n/a":
                        pub.add(BIBO.pageEnd, Literal(pageEnd))
                else:
                    #pub.add(BIBO.pages, Literal(publication.pages))
                    pass

        except ValueError as err:
            #print((uri,str(err)))
            pass

    with open("pub-info.ttl", "w") as out:
        out.write(g.serialize(format="turtle", encoding="UTF-8").decode(encoding="UTF-8"))
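
A note on the write step, sketched below: in rdflib versions before 6, Graph.serialize() returns bytes (hence the encode/decode dance above), while from rdflib 6 on it returns str by default and can also write straight to a file via destination=.

from rdflib import Graph

g = Graph()
data = g.serialize(format="turtle")  # str on rdflib >= 6, bytes before
g.serialize(destination="pub-info.ttl", format="turtle")  # write directly
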
Example #24
    def __get_query_capability(cls, item_type_name: str,
                               item_type_name_url: str, uri: str) -> Resource:

        qc = Resource(cls.__graph, BNode())
        qc.add(RDF.type, OSLC.QueryCapability)
        qc.add(DCTERMS.title,
               Literal(f'Query Capability for ItemType: {item_type_name}'))
        qc.add(OSLC.queryBase, URIRef(uri))
        qc.add(OSLC.resourceType, URIRef(ARAS.term(item_type_name_url)))
        qc.add(OSLC.resourceShape, URIRef(uri + '/resourceShape'))

        return qc
Example #25
 def to_rdf(self):
     uri = self._uri()   
     g = Graph()
     jr = Resource(g, uri)
     jr.set(RDF.type, BIBO.Journal)
     jr.set(RDFS.label, Literal(self.title))
     jr.set(LOCAL.identifier, Literal(self.wosid))
     if self.issn is not None:
         jr.set(BIBO.issn, Literal(self.issn))
     if self.eissn is not None:
         jr.set(BIBO.eissn, Literal(self.eissn))
     if self.wikidata is not None:
         jr.set(OWL.sameAs, URIRef(self.wikidata))
     return g
Example #26
    def get_components(cls,
                       item_type: str,
                       url: str,
                       paging: bool = False,
                       page_size: int = 0,
                       page_no: int = 0):
        if not cls.__instance:
            cls()

        cls.__init_graph()

        if not cls.__item_types:
            cls.__item_types = cls.__get_item_types()

        if re.sub('\\.', ' ', item_type) in cls.__item_types.values():
            item_type = unquote(item_type)
            item_type = re.sub(' ', '.', item_type)
            url = unquote(url)
            url = re.sub(' ', '.', url)

            container = Resource(cls.__graph, URIRef(url))
            container.add(RDF.type, LDP.BasicContainer)

            config_ids = load_items(cls.__source_base_uri, item_type,
                                    page_size, page_no)

            ri, config_ids = cls.__get_paging(item_type, config_ids, url,
                                              paging, page_size, page_no)
            if ri:
                container.add(OSLC.responseInfo, ri)

            for config_id in config_ids:
                member_url = url + f'/{config_id}'
                member = Resource(cls.__graph, URIRef(member_url))
                member.add(RDF.type, OSLC_CONFIG.Component)
                member.add(DCTERMS.title,
                           Literal(config_ids[config_id]['keyed_name']))

                container.add(LDP.contains, member)

            return container

        else:
            return False
 def get_trip(self, trip_id):
     trip = Resource(self.graph, URIRef(self.uri + "trip_" + trip_id))
     trip.set(RDF.type, self.GTFS.Trip)
     trip.set(DCTERMS.identifier, Literal(trip_id, datatype=XSD.string))
     return trip
    ?structure dbpedia-owl:architect ?architect .
    ?architect rdfs:label ?architect_name .
    bind (str(?architect_name) as ?stripped_architect_name)
}
bind (str(?structure_name) as ?stripped_structure_name)
}

LIMIT 10000

""")


sparql.setReturnFormat(RDF)
results = sparql.query().convert()
#print results.serialize()

structureList = []

for stmt in results.subjects(URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"), URIRef("http://dbpedia.org/ontology/Building")):

    resource = Resource(results, stmt)
    print(resource.value(RDFS.label))
    print("    " + str(resource.value(URIRef("http://www.w3.org/2003/01/geo/wgs84_pos#long"))))
    print("    " + str(resource.value(URIRef("http://www.w3.org/2003/01/geo/wgs84_pos#lat"))))
    for architect in resource[URIRef("http://dbpedia.org/ontology/architect")]:
        print("    " + str(architect.value(RDFS.label)))
    def process(self, input, output):
        username = "******"
        password = "******"
        lmClient = LifemapperClient(username, password)
        
        # Get file download URL of the TIFF layer
        manif = input.value(DATA.hasManifestation)
        tiffFileDownloadURL = manif.value(DATA.hasFileDownloadURL)

        # Get the typeCode and layerUnits
        typeCode = input.value(LM.hasTypeCode)
        layerUnits = input.value(LM.hasLayerUnits)

        # Get the JSON spec
        jsonSpecification = input.value(LM.hasJSONExperimentSpecificationURL)

        lmClient.postLayer(tiffFileDownloadURL, layerUnits, typeCode)
        #layerID = str(12)
        
        layerID = lmClient.getLayerID()
        layerURL = lmClient.getLayerURL()
        
        print "posted layer ID %s " % layerID

        # The URI generator
        resourceURI = ResourceURI()
        
        # Create new manifestation
        fileManifestation = RDFLibResource(output.graph, resourceURI.getURI("manifestation"))
        FileManifestationClass = RDFLibResource(output.graph, DATA.FileManifestation)
        fileManifestation.set(RDF.type, FileManifestationClass)
        fileManifestation.set(DATA.hasLandingPageURL, Literal(layerURL))
        
        # Create the ScenarioLayer
        layer = RDFLibResource(output.graph, resourceURI.getURI("layer"))
        ScenarioLayerClass = RDFLibResource(output.graph, LM.ScenarioLayer)
        layer.set(RDF.type, ScenarioLayerClass)
        layer.set(DATA.hasManifestation, fileManifestation)
        layer.set(LM.hasLayerID, Literal(layerID))
        layer.set(LM.hasLayerURL, Literal(layerURL))
        layer.set(LM.hasJSONExperimentSpecificationURL, jsonSpecification)
        
        output.set(LM.hasPublishedLayer, layer)
Example #32
 def authorships(self):
     g = Graph()
     aus = self.authors()
     for au in aus:
         aship_uri = self.aship_uri(au['rank'])
         r = Resource(g, aship_uri)
         r.set(RDFS.label, Literal(au["display_name"]))
         r.set(RDF.type, VIVO.Authorship)
         r.set(VIVO.rank, Literal(au['rank']))
         data_props = [
             ('rank', VIVO.rank),
             ('full_name', WOS.fullName),
             ('display_name', WOS.displayName),
             ('wos_standard', WOS.standardName),
             ('first', WOS.firstName),
             ('last', WOS.lastName),
             ('email', WOS.email),
             ('dais_ng', WOS.daisNg),
             ('reprint', WOS.reprint),
         ]
         for key, prop in data_props:
             value = au.get(key)
             if value is not None:
                 r.set(prop, Literal(value))
         # relations
         r.add(VIVO.relates, self.uri)
         # relate to addresses too
         # address nums are a space separated list of numbers
         addr_nums = au["address"]
         if addr_nums is None:
             continue
         else:
             for anum in addr_nums.split():
                 addr_uris = self.addr_uris_from_number(anum)
                 for auri in addr_uris:
                     r.add(VIVO.relates, auri)
     return g
Example #33
class Ldpr(metaclass=ABCMeta):
    '''LDPR (LDP Resource).

    Definition: https://www.w3.org/TR/ldp/#ldpr-resource

    This class and related subclasses contain the implementation pieces of
    the vanilla LDP specifications. This is extended by the
    `lakesuperior.fcrepo.Resource` class.

    Inheritance graph: https://www.w3.org/TR/ldp/#fig-ldpc-types

    Note: Even though LdpNr (which is a subclass of Ldpr) handles binary files,
    it still has an RDF representation in the triplestore. Hence, some of the
    RDF-related methods are defined in this class rather than in the LdpRs
    class.

    Convention notes:

    All the methods in this class handle internal UUIDs (URN). Public-facing
    URIs are converted from URNs and passed by these methods to the methods
    handling HTTP negotiation.

    The data passed to the store layout for processing should be in a graph.
    All conversion from request payload strings is done here.
    '''

    EMBED_CHILD_RES_URI = nsc['fcrepo'].EmbedResources
    FCREPO_PTREE_TYPE = nsc['fcrepo'].Pairtree
    INS_CNT_REL_URI = nsc['ldp'].insertedContentRelation
    MBR_RSRC_URI = nsc['ldp'].membershipResource
    MBR_REL_URI = nsc['ldp'].hasMemberRelation
    RETURN_CHILD_RES_URI = nsc['fcrepo'].Children
    RETURN_INBOUND_REF_URI = nsc['fcrepo'].InboundReferences
    RETURN_SRV_MGD_RES_URI = nsc['fcrepo'].ServerManaged

    # Workflow type. Inbound means that the resource is being written to the
    # store, outbound is being retrieved for output.
    WRKF_INBOUND = '_workflow:inbound_'
    WRKF_OUTBOUND = '_workflow:outbound_'

    # Default user to be used for the `createdBy` and `lastUpdatedBy` if a user
    # is not provided.
    DEFAULT_USER = Literal('BypassAdmin')

    # RDF Types that populate a new resource.
    base_types = {
        nsc['fcrepo'].Resource,
        nsc['ldp'].Resource,
        nsc['ldp'].RDFSource,
    }

    # Predicates that do not get removed when a resource is replaced.
    protected_pred = (
        nsc['fcrepo'].created,
        nsc['fcrepo'].createdBy,
        nsc['ldp'].contains,
    )

    # Server-managed RDF types ignored in the RDF payload if the resource is
    # being created. N.B. These still raise an error if the resource exists.
    smt_allow_on_create = {
        nsc['ldp'].DirectContainer,
        nsc['ldp'].IndirectContainer,
    }

    ## MAGIC METHODS ##

    def __init__(self, uid, repr_opts={}, provided_imr=None, **kwargs):
        '''Instantiate an in-memory LDP resource that can be loaded from and
        persisted to storage.

        @param uid (string) uid of the resource. If None (must be explicitly
        set) it refers to the root node. It can also be the full URI or URN,
        in which case it will be converted.
        @param repr_opts (dict) Options used to retrieve the IMR. See
        `parse_rfc7240` for format details.
        @param provided_imr (string) RDF data provided by the client in
        operations such as `PUT` or `POST`, serialized as a string. This sets
        the `provided_imr` property.
        '''
        self.uid = (rdfly.uri_to_uid(uid) if isinstance(uid, URIRef) else uid)
        self.uri = nsc['fcres'][uid]
        # @FIXME Not ideal, should separate app-context dependent functions in
        # a different toolbox.
        self.tbox = Toolbox()

        self.provided_imr = provided_imr

    @property
    def rsrc(self):
        '''
        The RDFLib resource representing this LDPR. This is a live
        representation of the stored data if present.

        @return rdflib.resource.Resource
        '''
        if not hasattr(self, '_rsrc'):
            self._rsrc = rdfly.ds.resource(self.uri)

        return self._rsrc

    @property
    def imr(self):
        '''
        Extract an in-memory resource from the graph store.

        If the resource is not stored (yet), a `ResourceNotExistsError` is
        raised.

        @return rdflib.resource.Resource
        '''
        if not hasattr(self, '_imr'):
            if hasattr(self, '_imr_options'):
                logger.debug(
                    'Getting RDF representation for resource {}'.format(
                        self.uid))
                #logger.debug('IMR options:{}'.format(self._imr_options))
                imr_options = self._imr_options
            else:
                imr_options = {}
            options = dict(imr_options, strict=True)
            self._imr = rdfly.extract_imr(self.uid, **options)

        return self._imr

    @imr.setter
    def imr(self, v):
        '''
        Replace in-memory buffered resource.

        @param v (set | rdflib.Graph) New set of triples to populate the IMR
        with.
        '''
        if isinstance(v, Resource):
            v = v.graph
        self._imr = Resource(Graph(), self.uri)
        gr = self._imr.graph
        gr += v

    @imr.deleter
    def imr(self):
        '''
        Delete in-memory buffered resource.
        '''
        delattr(self, '_imr')

    @property
    def metadata(self):
        '''
        Get resource metadata.
        '''
        if not hasattr(self, '_metadata'):
            if hasattr(self, '_imr'):
                logger.info('Metadata is IMR.')
                self._metadata = self._imr
            else:
                logger.info('Getting metadata for resource {}'.format(
                    self.uid))
                self._metadata = rdfly.get_metadata(self.uid)

        return self._metadata

    @metadata.setter
    def metadata(self, rsrc):
        '''
        Set resource metadata.
        '''
        if not isinstance(rsrc, Resource):
            raise TypeError('Provided metadata is not a Resource object.')
        self._metadata = rsrc

    @property
    def stored_or_new_imr(self):
        '''
        Extract an in-memory resource for harmless manipulation and output.

        If the resource is not stored (yet), initialize a new IMR with basic
        triples.

        @return rdflib.resource.Resource
        '''
        if not hasattr(self, '_imr'):
            if hasattr(self, '_imr_options'):
                #logger.debug('IMR options:{}'.format(self._imr_options))
                imr_options = self._imr_options
            else:
                imr_options = {}
            options = dict(imr_options, strict=True)
            try:
                self._imr = rdfly.extract_imr(self.uid, **options)
            except ResourceNotExistsError:
                self._imr = Resource(Graph(), self.uri)
                for t in self.base_types:
                    self.imr.add(RDF.type, t)

        return self._imr

    @property
    def out_graph(self):
        '''
        Return a graph of the resource's IMR formatted for output.
        '''
        out_gr = Graph(identifier=self.uri)

        for t in self.imr.graph:
            if (
                    # Exclude digest hash and version information.
                    t[1] not in {
                        nsc['premis'].hasMessageDigest,
                        nsc['fcrepo'].hasVersion,
                    }) and (
                        # Only include server managed triples if requested.
                        self._imr_options.get('incl_srv_mgd', True)
                        or not self._is_trp_managed(t)):
                out_gr.add(t)

        return out_gr

    @property
    def version_info(self):
        '''
        Return version metadata (`fcr:versions`).
        '''
        if not hasattr(self, '_version_info'):
            try:
                #@ TODO get_version_info should return a graph.
                self._version_info = rdfly.get_version_info(self.uid).graph
            except ResourceNotExistsError as e:
                self._version_info = Graph(identifier=self.uri)

        return self._version_info

    @property
    def version_uids(self):
        '''
        Return the set of version UIDs (relative to their parent resource).
        '''
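        # rdflib slice syntax: graph[subject:path:] with a `/` property path
        # walks fcrepo:hasVersion and then fcrepo:hasVersionLabel in one step.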
        gen = self.version_info[self.uri:nsc['fcrepo'].hasVersion /
                                nsc['fcrepo'].hasVersionLabel:]

        return {str(uid) for uid in gen}

    @property
    def is_stored(self):
        if not hasattr(self, '_is_stored'):
            if hasattr(self, '_imr'):
                self._is_stored = len(self.imr.graph) > 0
            else:
                self._is_stored = rdfly.ask_rsrc_exists(self.uid)

        return self._is_stored

    @property
    def types(self):
        '''All RDF types.

        @return set(rdflib.term.URIRef)
        '''
        if not hasattr(self, '_types'):
            if len(self.metadata.graph):
                metadata = self.metadata
            elif getattr(self, 'provided_imr', None) and \
                    len(self.provided_imr.graph):
                metadata = self.provided_imr
            else:
                return set()

            self._types = set(metadata.graph[self.uri:RDF.type])

        return self._types

    @property
    def ldp_types(self):
        '''The LDP types.

        @return set(rdflib.term.URIRef)
        '''
        if not hasattr(self, '_ldp_types'):
            self._ldp_types = {t for t in self.types if nsc['ldp'] in t}

        return self._ldp_types

    ## LDP METHODS ##

    def head(self):
        '''
        Return values for the headers.
        '''
        out_headers = defaultdict(list)

        digest = self.metadata.value(nsc['premis'].hasMessageDigest)
        if digest:
            etag = digest.identifier.split(':')[-1]
            out_headers['ETag'] = 'W/"{}"'.format(etag)

        last_updated_term = self.metadata.value(nsc['fcrepo'].lastModified)
        if last_updated_term:
            out_headers['Last-Modified'] = arrow.get(last_updated_term)\
                .format('ddd, D MMM YYYY HH:mm:ss Z')

        for t in self.ldp_types:
            out_headers['Link'].append('{};rel="type"'.format(t.n3()))

        return out_headers

    def get_version(self, ver_uid, **kwargs):
        '''
        Get a version by label.
        '''
        return rdfly.extract_imr(self.uid, ver_uid, **kwargs).graph

    def create_or_replace(self, create_only=False):
        '''
        Create or update a resource. PUT and POST methods, which are almost
        identical, are wrappers for this method.

        @param create_only (boolean) Whether this is a create-only operation.
        '''
        create = create_only or not self.is_stored
        ev_type = RES_CREATED if create else RES_UPDATED

        self._add_srv_mgd_triples(create)
        ref_int = rdfly.config['referential_integrity']
        if ref_int:
            self._check_ref_int(ref_int)

        # Delete existing triples if replacing.
        if not create:
            rdfly.truncate_rsrc(self.uid)

        remove_trp = {
            (self.uri, nsc['fcrepo'].lastModified, None),
            (self.uri, nsc['fcrepo'].lastModifiedBy, None),
        }
        add_trp = set(self.provided_imr.graph) | self._containment_rel(create)

        self._modify_rsrc(ev_type, remove_trp, add_trp)
        new_gr = Graph()
        for trp in add_trp:
            new_gr.add(trp)

        self.imr = new_gr.resource(self.uri)

        return ev_type

    def put(self):
        '''
        https://www.w3.org/TR/ldp/#ldpr-HTTP_PUT
        '''
        return self.create_or_replace()

    def patch(self, update_str):
        '''
        Update an existing resource by applying a SPARQL-UPDATE query.

        @param update_str (string) SPARQL-Update statements.
        '''
        self.handling = 'lenient'  # FCREPO does that and Hyrax requires it.

        return self._sparql_update(update_str)

    def bury_rsrc(self, inbound, tstone_pointer=None):
        '''
        Delete a single resource and create a tombstone.

        @param inbound (boolean) Whether to delete the inbound relationships.
        @param tstone_pointer (URIRef) If set to a URN, this creates a pointer
        to the tombstone of the resource that used to contain the deleted
        resource. Otherwise the deleted resource becomes a tombstone.
        '''
        logger.info('Burying resource {}'.format(self.uid))
        # Create a backup snapshot for resurrection purposes.
        self.create_rsrc_snapshot(uuid4())

        remove_trp = {
            trp
            for trp in self.imr.graph if trp[1] != nsc['fcrepo'].hasVersion
        }

        if tstone_pointer:
            add_trp = {(self.uri, nsc['fcsystem'].tombstone, tstone_pointer)}
        else:
            add_trp = {
                (self.uri, RDF.type, nsc['fcsystem'].Tombstone),
                (self.uri, nsc['fcrepo'].created, env.timestamp_term),
            }

        self._modify_rsrc(RES_DELETED, remove_trp, add_trp)

        if inbound:
            for ib_rsrc_uri in self.imr.graph.subjects(None, self.uri):
                remove_trp = {(ib_rsrc_uri, None, self.uri)}
                ib_rsrc = Ldpr(ib_rsrc_uri)
                # To preserve inbound links in history, create a snapshot
                ib_rsrc.create_rsrc_snapshot(uuid4())
                ib_rsrc._modify_rsrc(RES_UPDATED, remove_trp)

        return RES_DELETED

    def forget_rsrc(self, inbound=True):
        '''
        Remove all traces of a resource and versions.
        '''
        logger.info('Purging resource {}'.format(self.uid))
        refint = env.config['store']['ldp_rs']['referential_integrity']
        inbound = True if refint else inbound
        rdfly.forget_rsrc(self.uid, inbound)

        # @TODO This could be a different event type.
        return RES_DELETED

    def create_rsrc_snapshot(self, ver_uid):
        '''
        Perform version creation and return the version UID.
        '''
        # Create version resource from copying the current state.
        logger.info('Creating version snapshot {} for resource {}.'.format(
            ver_uid, self.uid))
        ver_add_gr = set()
        vers_uid = '{}/{}'.format(self.uid, VERS_CONT_LABEL)
        ver_uid = '{}/{}'.format(vers_uid, ver_uid)
        ver_uri = nsc['fcres'][ver_uid]
        ver_add_gr.add((ver_uri, RDF.type, nsc['fcrepo'].Version))
        for t in self.imr.graph:
            if (t[1] == RDF.type and t[2] in {
                    nsc['fcrepo'].Binary,
                    nsc['fcrepo'].Container,
                    nsc['fcrepo'].Resource,
            }) or (t[1] in {
                    nsc['fcrepo'].hasParent,
                    nsc['fcrepo'].hasVersions,
                    nsc['fcrepo'].hasVersion,
                    nsc['premis'].hasMessageDigest,
            }):
                pass
            else:
                ver_add_gr.add(
                    (self.tbox.replace_term_domain(t[0], self.uri,
                                                   ver_uri), t[1], t[2]))

        rdfly.modify_rsrc(ver_uid, add_trp=ver_add_gr)

        # Update resource admin data.
        rsrc_add_gr = {
            (self.uri, nsc['fcrepo'].hasVersion, ver_uri),
            (self.uri, nsc['fcrepo'].hasVersions, nsc['fcres'][vers_uid]),
        }
        self._modify_rsrc(RES_UPDATED, add_trp=rsrc_add_gr, notify=False)

        return ver_uid

    def resurrect_rsrc(self):
        '''
        Resurrect a resource from a tombstone.

        @EXPERIMENTAL
        '''
        tstone_trp = set(rdfly.extract_imr(self.uid, strict=False).graph)

        ver_rsp = self.version_info.graph.query('''
        SELECT ?uid {
          ?latest fcrepo:hasVersionLabel ?uid ;
            fcrepo:created ?ts .
        }
        ORDER BY DESC(?ts)
        LIMIT 1
        ''')
        ver_uid = str(ver_rsp.bindings[0]['uid'])
        ver_trp = set(rdfly.get_metadata(self.uid, ver_uid).graph)

        laz_gr = Graph()
        for t in ver_trp:
            if t[1] != RDF.type or t[2] not in {
                    nsc['fcrepo'].Version,
            }:
                laz_gr.add((self.uri, t[1], t[2]))
        laz_gr.add((self.uri, RDF.type, nsc['fcrepo'].Resource))
        if nsc['ldp'].NonRdfSource in laz_gr[:RDF.type:]:
            laz_gr.add((self.uri, RDF.type, nsc['fcrepo'].Binary))
        elif nsc['ldp'].Container in laz_gr[:RDF.type:]:
            laz_gr.add((self.uri, RDF.type, nsc['fcrepo'].Container))

        laz_set = set(laz_gr) | self._containment_rel()
        self._modify_rsrc(RES_CREATED, tstone_trp, laz_set)

        return self.uri

    def create_version(self, ver_uid=None):
        '''
        Create a new version of the resource.

        NOTE: This creates an event only for the resource being updated (due
        to the added `hasVersion` triple and possibly to the `hasVersions` one)
        but not for the version being created.

        @param ver_uid Version ver_uid. If already existing, an exception is
        raised.
        '''
        if not ver_uid or ver_uid in self.version_uids:
            ver_uid = str(uuid4())

        return self.create_rsrc_snapshot(ver_uid)

    def revert_to_version(self, ver_uid, backup=True):
        '''
        Revert to a previous version.

        @param ver_uid (string) Version UID.
        @param backup (boolean) Whether to create a backup snapshot. Default is
        true.
        '''
        # Create a backup snapshot.
        if backup:
            self.create_version()

        ver_gr = rdfly.extract_imr(self.uid,
                                   ver_uid=ver_uid,
                                   incl_children=False)
        self.provided_imr = Resource(Graph(), self.uri)

        for t in ver_gr.graph:
            if not self._is_trp_managed(t):
                self.provided_imr.add(t[1], t[2])
            # @TODO Check individual objects: if they are repo-managed URIs
            # and not existing or tombstones, they are not added.

        return self.create_or_replace(create_only=False)

    ## PROTECTED METHODS ##

    def _is_trp_managed(self, t):
        '''
        Whether a triple is server-managed.

        @return boolean
        '''
        return t[1] in srv_mgd_predicates or (t[1] == RDF.type
                                              and t[2] in srv_mgd_types)

    def _modify_rsrc(self,
                     ev_type,
                     remove_trp=set(),
                     add_trp=set(),
                     notify=True):
        '''
        Low-level method to modify a graph for a single resource.

        This is a crucial point for messaging. Any write operation on the RDF
        store that needs to be notified should be performed by invoking this
        method.

        @param ev_type (string) The type of event (create, update, delete).
        @param remove_trp (set) Triples to be removed.
        @param add_trp (set) Triples to be added.
        @param notify (boolean) Whether to send a message about the change.
        '''
        rdfly.modify_rsrc(self.uid, remove_trp, add_trp)

        if notify and env.config['application'].get('messaging'):
            logger.debug('Enqueuing message for {}'.format(self.uid))
            self._enqueue_msg(ev_type, remove_trp, add_trp)

    def _enqueue_msg(self, ev_type, remove_trp=frozenset(),
                     add_trp=frozenset()):
        '''
        Compose a message about a resource change.

        The message is enqueued for asynchronous processing.

        @param ev_type (string) The event type. See global constants.
        @param remove_trp (set) Triples removed.
        @param add_trp (set) Triples added. Also scanned for the resource
        type and actor when the resource metadata cannot be retrieved.
        '''
        try:
            rsrc_type = tuple(str(t) for t in self.types)
            actor = self.metadata.value(nsc['fcrepo'].createdBy)
        except (ResourceNotExistsError, TombstoneError):
            rsrc_type = set()
            actor = None
            for t in add_trp:
                if t[1] == RDF.type:
                    rsrc_type.add(t[2])
                elif actor is None and t[1] == nsc['fcrepo'].createdBy:
                    actor = t[2]

        env.app_globals.changelog.append((set(remove_trp), set(add_trp), {
            'ev_type': ev_type,
            'timestamp': env.timestamp.format(),
            'rsrc_type': rsrc_type,
            'actor': actor,
        }))

    def _check_ref_int(self, config):
        gr = self.provided_imr.graph

        for o in gr.objects():
            if isinstance(o, URIRef) and str(o).startswith(nsc['fcres']):
                obj_uid = rdfly.uri_to_uid(o)
                if not rdfly.ask_rsrc_exists(obj_uid):
                    if config == 'strict':
                        raise RefIntViolationError(obj_uid)
                    else:
                        logger.info(
                            'Removing link to non-existent repo resource: {}'.
                            format(obj_uid))
                        gr.remove((None, None, o))

    def _check_mgd_terms(self, gr):
        '''
        Check whether server-managed terms are in an RDF payload.

        @param gr (rdflib.Graph) The graph to validate.
        '''
        offending_subjects = set(gr.subjects()) & srv_mgd_subjects
        if offending_subjects:
            if self.handling == 'strict':
                raise ServerManagedTermError(offending_subjects, 's')
            else:
                for s in offending_subjects:
                    logger.info('Removing offending subj: {}'.format(s))
                    gr.remove((s, None, None))

        offending_predicates = set(gr.predicates()) & srv_mgd_predicates
        # Note: unlike types below, no predicate exemptions apply on create.
        if offending_predicates:
            if self.handling == 'strict':
                raise ServerManagedTermError(offending_predicates, 'p')
            else:
                for p in offending_predicates:
                    logger.info('Removing offending pred: {}'.format(p))
                    gr.remove((None, p, None))

        offending_types = set(gr.objects(predicate=RDF.type)) & srv_mgd_types
        if not self.is_stored:
            offending_types -= self.smt_allow_on_create
        if offending_types:
            if self.handling == 'strict':
                raise ServerManagedTermError(offending_types, 't')
            else:
                for t in offending_types:
                    logger.info('Removing offending type: {}'.format(t))
                    gr.remove((None, RDF.type, t))

        #logger.debug('Sanitized graph: {}'.format(gr.serialize(
        #    format='turtle').decode('utf-8')))
        return gr
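
    # E.g. under 'lenient' handling, a client-supplied triple such as
    # (s, fcrepo:created, ...) is silently dropped above (assuming
    # fcrepo:created is among srv_mgd_predicates), while 'strict' handling
    # raises ServerManagedTermError for the same payload.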

    def _add_srv_mgd_triples(self, create=False):
        '''
        Add server-managed triples to a provided IMR.

        @param create (boolean) Whether the resource is being created.
        '''
        # Base LDP types.
        for t in self.base_types:
            self.provided_imr.add(RDF.type, t)

        # Message digest.
        cksum = self.tbox.rdf_cksum(self.provided_imr.graph)
        self.provided_imr.set(nsc['premis'].hasMessageDigest,
                              URIRef('urn:sha1:{}'.format(cksum)))

        # Create and modify timestamp.
        if create:
            self.provided_imr.set(nsc['fcrepo'].created, env.timestamp_term)
            self.provided_imr.set(nsc['fcrepo'].createdBy, self.DEFAULT_USER)
        else:
            self.provided_imr.set(nsc['fcrepo'].created,
                                  self.metadata.value(nsc['fcrepo'].created))
            self.provided_imr.set(nsc['fcrepo'].createdBy,
                                  self.metadata.value(nsc['fcrepo'].createdBy))

        self.provided_imr.set(nsc['fcrepo'].lastModified, env.timestamp_term)
        self.provided_imr.set(nsc['fcrepo'].lastModifiedBy, self.DEFAULT_USER)

    def _containment_rel(self, create=True):
        '''Find the closest parent in the path indicated by the uid and
        establish a containment triple.

        Check the path-wise parent of the new resource. If it exists, add the
        containment relationship with this UID. Otherwise, create a container
        resource as the parent.
        This function may recurse up the path tree until an existing container
        is found.

        E.g. if only fcres:/a exists:
        - If fcres:/a/b/c/d is being created, a becomes container of
          fcres:/a/b/c/d. Also, containers are created for fcres:/a/b and
          fcres:/a/b/c.
        - If fcres:/e is being created, the root node becomes container of
          fcres:/e.

        @param create (bool) Whether the resource is being created. If false,
        the parent container is not updated.
        '''
        from lakesuperior.model.ldp_factory import LdpFactory

        if '/' in self.uid.lstrip('/'):
            # Traverse up the hierarchy to find the parent.
            path_components = self.uid.lstrip('/').split('/')
            cnd_parent_uid = '/' + '/'.join(path_components[:-1])
            if rdfly.ask_rsrc_exists(cnd_parent_uid):
                parent_rsrc = LdpFactory.from_stored(cnd_parent_uid)
                if nsc['ldp'].Container not in parent_rsrc.types:
                    raise InvalidResourceError(
                        cnd_parent_uid, 'Parent {} is not a container.')

                parent_uid = cnd_parent_uid
            else:
                parent_rsrc = LdpFactory.new_container(cnd_parent_uid)
                # This will trigger this method again and recurse until an
                # existing container or the root node is reached.
                parent_rsrc.create_or_replace()
                parent_uid = parent_rsrc.uid
        else:
            parent_uid = ROOT_UID

        parent_rsrc = LdpFactory.from_stored(
            parent_uid, repr_opts={'incl_children': False}, handling='none')

        # Only update parent if the resource is new.
        if create:
            add_gr = Graph()
            add_gr.add(
                (nsc['fcres'][parent_uid], nsc['ldp'].contains, self.uri))
            parent_rsrc._modify_rsrc(RES_UPDATED, add_trp=add_gr)

        # Direct or indirect container relationship.
        return self._add_ldp_dc_ic_rel(parent_rsrc)
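
    # E.g. uid = '/a/b/c' yields path_components = ['a', 'b', 'c'] and a
    # candidate parent of '/a/b'; a top-level uid such as '/e' contains no
    # inner slash and falls through to parent_uid = ROOT_UID.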

    def _dedup_deltas(self, remove_gr, add_gr):
        '''
        Remove duplicate triples from add and remove delta graphs, which would
        otherwise contain unnecessary statements that annul each other.

        @return tuple 2 "clean" sets of respectively remove statements and
        add statements.
        '''
        return (remove_gr - add_gr, add_gr - remove_gr)
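
    # E.g. with remove_gr = {t1, t2} and add_gr = {t2, t3}, t2 would be
    # removed and immediately re-added, so the deduped deltas are
    # ({t1}, {t3}).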

    def _add_ldp_dc_ic_rel(self, cont_rsrc):
        '''
        Add relationship triples from a parent direct or indirect container.

        @param cont_rsrc (rdflib.resource.Resource) The container resource.
        '''
        cont_p = set(cont_rsrc.metadata.graph.predicates())

        logger.info('Checking direct or indirect containment.')
        logger.debug('Parent predicates: {}'.format(cont_p))

        add_trp = {(self.uri, nsc['fcrepo'].hasParent, cont_rsrc.uri)}

        if self.MBR_RSRC_URI in cont_p and self.MBR_REL_URI in cont_p:
            from lakesuperior.model.ldp_factory import LdpFactory

            s = cont_rsrc.metadata.value(self.MBR_RSRC_URI).identifier
            p = cont_rsrc.metadata.value(self.MBR_REL_URI).identifier
            o = None

            if cont_rsrc.metadata[RDF.type:nsc['ldp'].DirectContainer]:
                logger.info('Parent is a direct container.')

                logger.debug('Creating DC triples.')
                o = self.uri

            elif (cont_rsrc.metadata[RDF.type:nsc['ldp'].IndirectContainer]
                  and self.INS_CNT_REL_URI in cont_p):
                logger.info('Parent is an indirect container.')
                cont_rel_uri = cont_rsrc.metadata.value(
                    self.INS_CNT_REL_URI).identifier
                o = self.provided_imr.value(cont_rel_uri).identifier
                logger.debug('Target URI: {}'.format(o))
                logger.debug('Creating IC triples.')

            # Only update the target if a membership object was determined.
            if o is not None:
                target_rsrc = LdpFactory.from_stored(rdfly.uri_to_uid(s))
                target_rsrc._modify_rsrc(RES_UPDATED, add_trp={(s, p, o)})

        return add_trp

    def _sparql_update(self, update_str, notify=True):
        '''
        Apply a SPARQL update to a resource.

        @param update_str (string) SPARQL-Update string. All URIs are local.
        @param notify (boolean) Whether to send a message about the change.

        @return None. The computed delta is applied via `_modify_rsrc`.
        '''
        self.handling = 'lenient'  # FCREPO does that and Hyrax requires it.
        delta = self._sparql_delta(update_str)

        return self._modify_rsrc(RES_UPDATED, *delta, notify=notify)

    def _sparql_delta(self, q):
        '''
        Calculate the delta obtained by a SPARQL Update operation.

        This is a critical component of the SPARQL update process and does a
        couple of things:

        1. It ensures that no resources outside of the subject of the request
        are modified (e.g. by variable subjects)
        2. It verifies that none of the terms being modified is server managed.

        This method extracts an in-memory copy of the resource and performs the
        query on that once it has checked if any of the server managed terms is
        in the delta. If it is, it raises an exception.

        NOTE: This only checks if a server-managed term is effectively being
        modified. If a server-managed term is present in the query but does not
        cause any change in the updated resource, no error is raised.

        @return tuple(rdflib.Graph) Remove and add graphs. These can be used
        with `BaseStoreLayout.update_resource` and/or recorded as separate
        events in a provenance tracking system.
        '''
        logger.debug('Provided SPARQL query: {}'.format(q))
        pre_gr = self.imr.graph

        post_gr = pre_gr | Graph()
        post_gr.update(q)

        remove_gr, add_gr = self._dedup_deltas(pre_gr, post_gr)

        #logger.debug('Removing: {}'.format(
        #    remove_gr.serialize(format='turtle').decode('utf8')))
        #logger.debug('Adding: {}'.format(
        #    add_gr.serialize(format='turtle').decode('utf8')))

        remove_gr = self._check_mgd_terms(remove_gr)
        add_gr = self._check_mgd_terms(add_gr)

        return set(remove_gr), set(add_gr)
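
The delta computation above can be exercised in isolation with plain rdflib:
copy the stored graph, run the update against the copy, and diff the two
triple sets. A minimal sketch of that technique (the resource URI and update
string are illustrative, and the managed-term check is omitted):

from rdflib import Graph, Literal, URIRef
from rdflib.namespace import RDFS

uri = URIRef('http://example.org/rsrc/1')  # illustrative resource URI

# State of the resource before the update.
pre_gr = Graph()
pre_gr.add((uri, RDFS.label, Literal('old label')))

# Work on a copy so pre_gr stays untouched; Graph | Graph unions triples.
post_gr = pre_gr | Graph()
post_gr.update('''
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    DELETE { ?s rdfs:label ?o }
    INSERT { ?s rdfs:label "new label" }
    WHERE  { ?s rdfs:label ?o }
''')

# Same dedup as _dedup_deltas: triples present on both sides annul each other.
remove_trp = set(pre_gr) - set(post_gr)  # the 'old label' triple
add_trp = set(post_gr) - set(pre_gr)     # the 'new label' triple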
Example #34
0
 def addressships(self):
     g = Graph()
     addresses = self.addresses()
     for addr in addresses:
         addr_uri = self.addr_uri(addr["full_address"], addr["number"])
         org = addr["organization"]
         r = Resource(g, addr_uri)
         r.set(RDF.type, WOS.Address)
         r.set(RDFS.label, Literal(addr['full_address']))
         r.set(WOS.organizationName, Literal(org))
         r.set(WOS.sequenceNumber, Literal(addr['number']))
         # relation to author set by authorship
         # relate to pub
         r.set(VIVO.relates, self.uri)
         # sub orgs
         for idx, suborg in enumerate(addr["sub_organizations"]):
             label = "{}, {}".format(suborg, org)
             so_uri = self.sub_org_uri(label)
             r.add(VIVO.relates, so_uri)
         # relate unified orgs
         for uorg in addr["unified_orgs"]:
             uo_uri = waan_uri(uorg)
             r.add(VIVO.relates, uo_uri)
     return g
Example #35
0
 def to_rdf(self):
     g = Graph()
     o = Resource(g, self.uri)
     o.set(RDF.type, FOAF.Organization)
     o.set(RDFS.label, Literal(self.cfname))
     o.set(CONVERIS.converisId, Literal(self.cid))
     if hasattr(self, 'cfresact'):
         o.set(VIVO.overview, Literal(self.cfresact))
     for child in self.get_children():
         # Has sub-organization; add() keeps every child, where set() would
         # overwrite and leave only the last one.
         o.add(OBO['BFO_0000051'], child)
     return g
Example #36
0
 def add_pub_date(self):
     """
     Publication dates in VIVO's expected format.
     """
     g = Graph()
     value = self.pub_date()
     if value is None:
         return g
     date_uri = self.make_date_uri(self.ut, value)
     date = Resource(g, date_uri)
     date.set(RDF.type, VIVO.DateTimeValue)
     date.set(VIVO.dateTimePrecision, VIVO.yearMonthDayPrecision)
     date.add(VIVO.dateTime,
              Literal("%sT00:00:00" % (value), datatype=XSD.dateTime))
     date.add(RDFS.label, Literal(value))
     g.add((self.uri, VIVO.dateTimeValue, date_uri))
     return g
Example #37
0
    def from_provided(uid, mimetype, stream=None, **kwargs):
        '''
        Determine LDP type from request content.

        @param uid (string) UID of the resource to be created or updated.
        @param mimetype (string) The provided content MIME type.
        @param stream (IOStream | None) The provided data stream. This can be
        RDF or non-RDF content, or None. In the latter case, an empty container
        is created.
        '''
        uri = nsc['fcres'][uid]

        if not stream:
            # Create empty LDPC.
            logger.info('No data received in request. '
                        'Creating empty container.')
            inst = Ldpc(uid, provided_imr=Resource(Graph(), uri), **kwargs)

        elif __class__.is_rdf_parsable(mimetype):
            # Create container and populate it with provided RDF data.
            input_rdf = stream.read()
            gr = Graph().parse(data=input_rdf, format=mimetype, publicID=uri)
            #logger.debug('Provided graph: {}'.format(
            #        pformat(set(provided_gr))))
            provided_imr = Resource(gr, uri)

            # Determine whether it is a basic, direct or indirect container.
            if Ldpr.MBR_RSRC_URI in gr.predicates() and \
                    Ldpr.MBR_REL_URI in gr.predicates():
                if Ldpr.INS_CNT_REL_URI in gr.predicates():
                    cls = LdpIc
                else:
                    cls = LdpDc
            else:
                cls = Ldpc

            inst = cls(uid, provided_imr=provided_imr, **kwargs)

            # Make sure we are not updating an LDP-RS with an LDP-NR.
            if inst.is_stored and LDP_NR_TYPE in inst.ldp_types:
                raise IncompatibleLdpTypeError(uid, mimetype)

            if kwargs.get('handling', 'strict') != 'none':
                inst._check_mgd_terms(inst.provided_imr.graph)

        else:
            # Create a LDP-NR and equip it with the binary file provided.
            provided_imr = Resource(Graph(), uri)
            inst = LdpNr(uid,
                         stream=stream,
                         mimetype=mimetype,
                         provided_imr=provided_imr,
                         **kwargs)

            # Make sure we are not updating an LDP-NR with an LDP-RS.
            if inst.is_stored and LDP_RS_TYPE in inst.ldp_types:
                raise IncompatibleLdpTypeError(uid, mimetype)

        logger.info('Creating resource of type: {}'.format(
            inst.__class__.__name__))

        try:
            types = inst.types
        except (TombstoneError, ResourceNotExistsError):
            types = set()

        return inst
Example #38
0
    def add_date(self):
        """
        Add vivo:DateTimeValue for publication.
        :return: rdflib.Graph
        """
        g = Graph()
        date_uri = D['date-' + self.localid]
        de = Resource(g, date_uri)
        de.set(RDF.type, VIVO.DateTimeValue)
        year, month, month_num = self.pub_date()
        # Add year and month if possible.
        if month_num is not None:
            de.set(RDFS.label, Literal("{}, {}".format(month, year)))
            de.set(
                VIVO.dateTime,
                Literal("{}-{}".format(year, month_num),
                        datatype=XSD.gYearMonth)
            )
            de.set(VIVO.dateTimePrecision, VIVO.yearMonthPrecision)
        else:
            de.set(RDFS.label, Literal(year))
            de.set(
                VIVO.dateTime,
                Literal("{}".format(year), datatype=XSD.gYear)
            )
            de.set(VIVO.dateTimePrecision, VIVO.yearPrecision)

        g.add((self.pub_uri, VIVO.dateTimeValue, date_uri))
        return g
Example #39
0
 def get_subclasses(self, ontology_class):
     if ontology_class not in self.subclass_cache:
         ontology_class_resource = Resource(self.graph, ontology_class)
         self.subclass_cache[ontology_class] = [
             subclass.identifier for subclass
             in ontology_class_resource.transitive_subjects(RDFS.subClassOf)]
     return self.subclass_cache[ontology_class]
 def convert_transfers(self, csv_filename):
     read_transfers = self.__open_file(csv_filename)
     for row in read_transfers:
         from_stop = str.strip(row["from_stop_id"])
         to_stop = str.strip(row["to_stop_id"])
         transfers = Resource(self.graph, URIRef(self.uri + "_" + from_stop + "_" + to_stop))
         transfers.set(RDF.type, self.GTFS.TransferRule)
         transfers.add(self.GTFS.originStop, self.get_stop(from_stop))
         transfers.add(self.GTFS.destinationStop, self.get_stop(to_stop))
         transfers.add(self.GTFS.transferType, self.get_transfer_type(str.strip(row["transfer_type"])))
         if "min_transfer_time" in row and str.strip(row["min_transfer_time"]):
             transfers.add(self.GTFS.minimumTransferTime, Literal(str.strip(row["min_transfer_time"]), datatype=XSD.nonNegativeInteger))
print(sparql.query())
results = sparql.query().convert()
#print results.serialize()

# fixedgraph = rdflib.Graph()
# fixedgraph += [ sanitize_triple([s,p,o]) for s,p,o in results ]


nProjects = 0

for stmt in results.subjects(URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"), URIRef("http://dbpedia.org/ontology/Building")):
    
    if nProjects % 500 == 0:
        print(nProjects)

    resource = Resource(results, stmt)


    p = Project()
    p.owner = User.objects.get(username = "******")
        
    p.name = resource.value(RDFS.label)
    p.longitude = resource.value(URIRef("http://www.w3.org/2003/01/geo/wgs84_pos#long"))
    p.latitude = resource.value(URIRef("http://www.w3.org/2003/01/geo/wgs84_pos#lat"))
    p.wikipedia_page_id = resource.value(URIRef("http://dbpedia.org/ontology/wikiPageID"))

    # Retrieve and concatenate all architects names

    architects = [architect.value(RDFS.label) for architect in resource[URIRef("http://dbpedia.org/ontology/architect")]]
    #.encode('ascii', 'ignore'))
    if architects != []:
Example #42
0
    def to_rdf(self):
        """
        Convert the API publication object to VIVO RDF.

        :return: rdflib.Graph
        """
        g = Graph()
        pub = Resource(g, self.pub_uri)
        pub.set(RDF.type, self.vivo_type())
        pub.set(RDFS.label, Literal(self.title()))
        # WoS UT. Map to VIVO.identifier for now.
        # ToDo: this should be a more specific property
        pub.set(VIVO.identifier, Literal(self.ut()))
        # DOI
        doi = self.doi()
        if doi is not None:
            pub.set(BIBO.doi, Literal(doi))
        # Volume
        volume = self.volume()
        if volume is not None:
            pub.set(BIBO.volume, Literal(volume))
        # Issue
        issue = self.issue()
        if issue is not None:
            pub.set(BIBO.issue, Literal(issue))
        # Pages
        pages = self.pages()
        if pages is not None:
            start, end = pages.split('-')
            pub.set(BIBO.start, Literal(start))
            pub.set(BIBO.end, Literal(end))

        # publication venue
        g += self.add_venue()
        # date
        g += self.add_date()

        # authorship and vcards
        g += self.authorship()

        # links
        web_link, linkg = self.add_vcard_weblink()
        g += linkg
        # relate web link and publication
        g.add((self.pub_uri, OBO['ARG_2000028'], web_link))

        return g
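
The method above assembles the final description by merging independently
built subgraphs; this works because rdflib's Graph implements `+=` as
triple-set union. A minimal sketch of the pattern (the URI and helper are
hypothetical):

from rdflib import Graph, Literal, URIRef
from rdflib.namespace import RDFS

def label_graph(subject, label):
    # Helper returning a small, self-contained subgraph, analogous to
    # add_venue() or add_date() above.
    g = Graph()
    g.add((subject, RDFS.label, Literal(label)))
    return g

pub_uri = URIRef('http://example.org/pub-1')  # hypothetical publication URI
g = Graph()
g += label_graph(pub_uri, 'A sample title')   # merge the subgraph in place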
Example #43
0
    def to_rdf(self, graph):

        graph.bind('dc', DCTERMS)
        graph.bind('jd', JAZZ_DISCOVERY)

        if self.identifier and self.identifier in self.about:
            uri = self.about
        elif self.identifier:
            uri = '{}/{}'.format(self.about, self.identifier)
        else:
            uri = ''

        rs = Resource(graph, URIRef(uri))

        if self.title:
            rs.add(DCTERMS.title, Literal(self.title))

        if self.description:
            rs.add(DCTERMS.description, Literal(self.description))

        if self.publisher:
            publisher_url = url_for('oslc.adapter_configuration_publisher',
                                    _external=True)
            rs.add(OSLC.publisher, URIRef(publisher_url))

        graph.bind('oslc_am', OSLC_AM)
        graph.bind('oslc_rm', OSLC_RM_JAZZ, override=True)
        graph.bind('oslc_cm', OSLC_CM_JAZZ, override=True)
        graph.bind('oslc_config', OSLC_CONFIG)
        graph.bind('jfs', JFS)
        graph.bind('trs', OSLC_TRS)
        graph.bind('pyoslc', PYOSLC)

        spc_url = url_for('oslc.adapter_service_provider_catalog',
                          _external=True)
        spc_config_url = url_for('oslc.adapter_configuration_catalog',
                                 _external=True)

        rs.add(OSLC_RM_JAZZ.rmServiceProviders, URIRef(spc_url))
        rs.add(OSLC_CONFIG.cmServiceProviders, URIRef(spc_config_url))

        rs.add(JFS.oauthRealmName, Literal("PyOSLC"))
        rs.add(JFS.oauthDomain, Literal(url_for('oslc.doc', _external=True)))
        rs.add(JFS.oauthRequestConsumerKeyUrl,
               URIRef(url_for('consumer.register', _external=True)))
        rs.add(JFS.oauthApprovalModuleUrl,
               URIRef(url_for('consumer.approve', _external=True)))
        rs.add(JFS.oauthRequestTokenUrl,
               URIRef(url_for('oauth.issue_token', _external=True)))
        rs.add(JFS.oauthUserAuthorizationUrl,
               URIRef(url_for('oauth.issue_token', _external=True)))
        rs.add(JFS.oauthAccessTokenUrl,
               URIRef(url_for('oauth.issue_token', _external=True)))

        trs = Resource(graph, BNode())
        trs.add(RDF.type, PYOSLC.TrackedResourceSetProvider)

        trs_url = URIRef(url_for('oslc.doc', _external=True))

        tr = Resource(graph, BNode())
        tr.add(RDF.type, OSLC_TRS.TrackedResourceSet)
        tr.add(OSLC_TRS.trackedResourceSet, URIRef(trs_url))

        # # trs.add(RDF.type, OSLC_TRS.TrackedResourceSet)
        # # trs.add(DCTERMS.title, Literal('Title', datatype=XSD.Literal))
        tr.add(DCTERMS.title, Literal('Title'))
        tr.add(DCTERMS.description, Literal('Description'))

        tr.add(DCTERMS.type, OSLC_CM.uri)
        tr.add(OSLC.domain, OSLC_RM.uri)
        tr.add(OSLC.domain, OSLC_AM.uri)

        trs.add(OSLC_TRS.TrackedResourceSet, tr)
        rs.add(PYOSLC.TrackedResourceSetProvider, trs)

        return rs
Example #44
0
    def _createProvenance(self, result):
        provdata = IProvenanceData(result)
        from rdflib import URIRef, Literal, Namespace, Graph
        from rdflib.namespace import RDF, RDFS, FOAF, DCTERMS, XSD
        from rdflib.resource import Resource
        PROV = Namespace(u"http://www.w3.org/ns/prov#")
        BCCVL = Namespace(u"http://ns.bccvl.org.au/")
        LOCAL = Namespace(u"urn:bccvl:")
        graph = Graph()
        # the user is our agent

        member = api.user.get_current()
        username = member.getProperty('fullname') or member.getId()
        user = Resource(graph, LOCAL['user'])
        user.add(RDF['type'], PROV['Agent'])
        user.add(RDF['type'], FOAF['Person'])
        user.add(FOAF['name'], Literal(username))
        user.add(FOAF['mbox'],
                 URIRef('mailto:{}'.format(member.getProperty('email'))))
        # add software as agent
        software = Resource(graph, LOCAL['software'])
        software.add(RDF['type'], PROV['Agent'])
        software.add(RDF['type'], PROV['SoftwareAgent'])
        software.add(FOAF['name'], Literal('BCCVL Job Script'))
        # script content is stored somewhere on result and will be exported with zip?
        #   ... or store along with pstats.json ? hidden from user

        # -> execenvironment after import -> log output?
        # -> source code ... maybe some link expression? stored on result ? separate entity?
        activity = Resource(graph, LOCAL['activity'])
        activity.add(RDF['type'], PROV['Activity'])
        # TODO: this is rather queued or created time for this activity ... could capture real start time on running status update (or start transfer)
        now = datetime.now().replace(microsecond=0)
        activity.add(PROV['startedAtTime'],
                     Literal(now.isoformat(), datatype=XSD['dateTime']))
        activity.add(PROV['hasAssociationWith'], user)
        activity.add(PROV['hasAssociationWith'], software)
        # add job parameters to activity
        for idx, (key, value) in enumerate(result.job_params.items()):
            param = Resource(graph, LOCAL[u'param_{}'.format(idx)])
            activity.add(BCCVL['algoparam'], param)
            param.add(BCCVL['name'], Literal(key))
            # We have only dataset references as parameters; the value of a
            # dataset parameter is the dataset uuid.
            if key in ('data_table', ):
                param.add(BCCVL['value'], LOCAL[value])
            else:
                param.add(BCCVL['value'], Literal(value))
        # iterate over all input datasets and add them as entities
        for key in ('data_table', ):
            dsbrain = uuidToCatalogBrain(result.job_params[key])
            if not dsbrain:
                continue
            ds = dsbrain.getObject()
            dsprov = Resource(graph, LOCAL[result.job_params[key]])
            dsprov.add(RDF['type'], PROV['Entity'])
            #dsprov.add(PROV['..'], Literal(''))
            dsprov.add(DCTERMS['creator'], Literal(ds.Creator()))
            dsprov.add(DCTERMS['title'], Literal(ds.title))
            dsprov.add(DCTERMS['description'], Literal(ds.description))
            dsprov.add(DCTERMS['rights'],
                       Literal(ds.rights))  # ds.rightsstatement
            dsprov.add(DCTERMS['format'], Literal(ds.file.contentType))
            # location / source
            # graph.add(uri, DCTERMS['source'], Literal(''))
            # TODO: genre ...
            # TODO: resolution
            # species metadata
            md = IBCCVLMetadata(ds)
            # dsprov.add(BCCVL['scientificName'], Literal(md['species']['scientificName']))
            # dsprov.add(BCCVL['taxonID'], URIRef(md['species']['taxonID']))

            # ... species data, ... species id
            for layer in md.get('layers_used', ()):
                dsprov.add(BCCVL['layer'], LOCAL[layer])

            # link with activity
            activity.add(PROV['used'], dsprov)

        provdata.data = graph.serialize(format="turtle")
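
One way to sanity-check the provenance graph built above is to parse the
serialized Turtle back and list the activity's inputs; a minimal sketch
reusing the namespaces from the example (`provdata` as assigned above):

from rdflib import Graph, Namespace

PROV = Namespace(u"http://www.w3.org/ns/prov#")
LOCAL = Namespace(u"urn:bccvl:")

check_gr = Graph().parse(data=provdata.data, format="turtle")
for used in check_gr.objects(LOCAL['activity'], PROV['used']):
    # each input dataset entity linked to the activity via prov:used
    print(used)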
Example #45
0
def get_item_rdf(item_graph: Graph, item_type: str, source_base_url: str,
                 item_id: str, config_id: str, item_url: str, url_sp: str):
    # Decode the item_url and replace whitespace with dot
    item_url = unquote(item_url)
    item_url = re.sub(' ', '.', item_url)

    # Get the resource shape graph to extract the relationships to be queried
    g = load_resource_shape(item_type,
                            url_sp=url_sp,
                            source_base_url=source_base_url)
    if not g:
        return None

    rs = get_resource_shape(item_type, url_sp, source_base_url)
    if not rs:
        if g:
            rs = g
        else:
            return None

    oslc_base = url_sp
    oslc_resource_shape_base = oslc_base + '/' + re.sub(
        ' ', '.', item_type) + "/resourceShape#"
    oslc_config_base = oslc_base + '/config'

    # Get expanded item JSON from the API by following the resource shapes file
    item_response = query_expanded_item(source_base_url, item_type, item_id,
                                        config_id, rs)
    if not item_response:
        return None
    item_json = item_response.json()

    # equalize query responses to avoid having errors when the item_id was
    # inexistent and the config_id was used as filtering
    if 'value' in item_json.keys():
        item_json = item_json['value'][0]

    # set the item_id property if it wasn't passed in the method
    if not item_id:
        item_id = item_json['id']

    # Build RDF Graph for an item
    item_graph.bind('rdf', RDF)
    item_graph.bind('rdfs', RDFS)
    item_graph.bind('dcterms', DCTERMS)
    item_graph.bind('xsd', XSD)
    item_graph.bind('aras', ARAS)
    item_graph.bind('aras_' + re.sub(' ', '.', item_type),
                    oslc_resource_shape_base)
    item_graph.bind('oslc', OSLC)
    item_graph.bind('oslc_config', OSLC_CONFIG)

    # Create the RDF resource shape node for each item type
    item_node = Resource(item_graph, URIRef(item_url))
    item_node.add(RDF.type, URIRef(ARAS + re.sub(' ', '.', item_type)))
    item_node.add(
        OSLC_CONFIG.component,
        URIRef(oslc_config_base + '/' + re.sub(' ', '.', item_type) +
               '/component/' + config_id))
    item_node.add(OSLC.instanceShape, URIRef(oslc_resource_shape_base))

    # If the item is versionable, add version properties
    if check_if_versionable(source_base_url, item_type):
        item_node.add(RDF.type, URIRef(OSLC_CONFIG.VersionResource))
        item_node.add(OSLC_CONFIG.versionId,
                      Literal(item_json.get('id'), datatype=XSD.string))
        item_node.add(DCTERMS.isVersionOf, URIRef(item_url))

    # Query and iterate through the item type properties to build the item graph
    qres = rs.query("""
        SELECT ?prop ?type ?def
            WHERE {
                ?s oslc:name ?prop.
                ?s oslc:occurs ?occurs.
                FILTER(?occurs IN (oslc:Exactly-one, oslc:Zero-or-one))
                OPTIONAL {
                    ?s ?type ?def.
                    FILTER(?type IN (oslc:valueType, oslc:propertyDefinition))
                }
            }
        """)

    # Iterate through the properties and insert them into the item graph
    for row in qres:
        # Extract the SPARQL results as the property name, data type, and property definitions
        prop = row['prop']
        data_type = row['type']
        resource_def = row['def']

        # iterate through the properties of the resource shapes file and get their value from the JSON response
        prop_val = None
        if item_json.get(str(prop)):
            prop_val = item_json.get(str(prop))

        if type(prop_val) is dict:
            prop_val = prop_val['config_id']['id']

        # Associate the property to the graph according to their data type/relationship
        if data_type == OSLC.propertyDefinition:
            prop_item_search = re.search('(.*?)api/oslc/(.*?)/resourceShape',
                                         resource_def, re.IGNORECASE)
            if prop_item_search:
                prop_item_type = prop_item_search.group(2)
                if prop_val:
                    logger.debug(f'property: {str(prop)}')
                    unquoted_pre = oslc_resource_shape_base + re.sub(
                        ' ', '.', prop)
                    unquoted_obj = oslc_base + '/' + re.sub(
                        ' ', '.', prop_item_type) + '/' + prop_val
                    unquoted_obj += '?oslc_config.context='
                    unquoted_obj += quote(oslc_config_base + '/' +
                                          re.sub(' ', '.', prop_item_type) +
                                          '/component/' + prop_val +
                                          '/stream/' +
                                          item_json[str(prop)]['id'])
                    item_node.add(URIRef(unquoted_pre), URIRef(unquoted_obj))
        elif data_type == OSLC.valueType:
            if prop_val:
                item_node.add(
                    URIRef(oslc_resource_shape_base + re.sub(' ', '.', prop)),
                    Literal(prop_val, datatype=resource_def))

    # Iterate through the Zero-or-many properties and query for the external relationships
    qres = rs.query("""SELECT ?prop ?def
           WHERE {
              ?s oslc:name ?prop.
              ?s oslc:occurs oslc:Zero-or-many.
              ?s oslc:propertyDefinition ?def.
           }""")

    # Iterate through the properties and query the API to create a list of instances and insert into the
    #  item graph
    for row in qres:
        # Associate the SPARQL query results to variables
        rel_prop = str(row['prop'])
        rel_def = row['def']

        if str(rel_prop) not in ('oslc_component', 'oslc_version_id',
                                 'dcterms_is_version_of'):
            rel_item_response = query_relation_properties(
                source_base_url, rel_prop, item_id)
            rel_item_json = None
            if rel_item_response:
                rel_item_json = rel_item_response.json()

            if rel_item_json is not None and rel_item_json.get('value'):
                for rel_item in rel_item_json.get('value'):
                    rel_prop_val = rel_item['config_id']['id']
                    prop_item_search = re.search(
                        '(.*?)api/oslc/(.*?)/resourceShape', rel_def,
                        re.IGNORECASE)
                    if prop_item_search:
                        prop_item_type = prop_item_search.group(2)
                        if rel_prop_val:
                            unquoted_pre = oslc_resource_shape_base + re.sub(
                                ' ', '.', rel_prop)
                            unquoted_obj = oslc_base + '/' + re.sub(
                                ' ', '.', prop_item_type) + '/' + rel_prop_val
                            unquoted_obj += '?oslc_config.context='
                            unquoted_obj += quote(
                                oslc_config_base + '/' +
                                re.sub(' ', '.', prop_item_type) +
                                '/component/' + rel_prop_val + '/stream/' +
                                rel_item['id'])
                            item_node.add(URIRef(unquoted_pre),
                                          URIRef(unquoted_obj))

    return item_node
Example #46
0
    def _createProvenance(self, result):
        provdata = IProvenanceData(result)
        from rdflib import URIRef, Literal, Namespace, Graph
        from rdflib.namespace import RDF, RDFS, FOAF, DCTERMS, XSD
        from rdflib.resource import Resource
        PROV = Namespace(u"http://www.w3.org/ns/prov#")
        BCCVL = Namespace(u"http://ns.bccvl.org.au/")
        LOCAL = Namespace(u"urn:bccvl:")
        graph = Graph()
        # the user is our agent

        member = api.user.get_current()
        username = member.getProperty('fullname') or member.getId()
        user = Resource(graph, LOCAL['user'])
        user.add(RDF['type'], PROV['Agent'])
        user.add(RDF['type'], FOAF['Person'])
        user.add(FOAF['name'], Literal(username))
        user.add(FOAF['mbox'],
                 URIRef('mailto:{}'.format(member.getProperty('email'))))
        # add software as agent
        software = Resource(graph, LOCAL['software'])
        software.add(RDF['type'], PROV['Agent'])
        software.add(RDF['type'], PROV['SoftwareAgent'])
        software.add(FOAF['name'], Literal('BCCVL ALA Importer'))
        # script content is stored somewhere on result and will be exported with zip?
        #   ... or store along with pstats.json ? hidden from user

        # -> execenvironment after import -> log output?
        # -> source code ... maybe some link expression? stored on result ? separate entity?
        activity = Resource(graph, LOCAL['activity'])
        activity.add(RDF['type'], PROV['Activity'])
        # TODO: this is rather queued or created time for this activity ... could capture real start time on running status update (or start transfer)
        now = datetime.now().replace(microsecond=0)
        activity.add(PROV['startedAtTime'],
                     Literal(now.isoformat(), datatype=XSD['dateTime']))
        activity.add(PROV['hasAssociationWith'], user)
        activity.add(PROV['hasAssociationWith'], software)
        # add job parameters to activity

        provdata.data = graph.serialize(format="turtle")
Example #47
0
    def find_manifestation(self, cellarid, celexid):
        cellarurl = "http://publications.europa.eu/resource/cellar/%s?language=%s" % (cellarid, self.languages[0])
        graph = self.get_treenotice_graph(cellarurl, celexid)
        if graph is None:
            return None, None, None, None
        
        # find the root URI -- it might be on the form
        # "http://publications.europa.eu/resource/celex/%s", but can
        # also take other forms (at least for legislation)
        # At the same time, find all expressions of this work (ie language versions).
        CDM = Namespace("http://publications.europa.eu/ontology/cdm#")
        CMR = Namespace("http://publications.europa.eu/ontology/cdm/cmr#")
        root = None
        candidateexpressions = {}
        for expression, work in graph.subject_objects(CDM.expression_belongs_to_work):
            assert root is None or work == root
            root = work
            expression = Resource(graph, expression)
            lang = expression.value(CDM.expression_uses_language)
            lang = str(lang.identifier).rsplit("/", 1)[1].lower()
            if lang in self.config.languages:
                candidateexpressions[lang] = expression

        if not candidateexpressions:
            self.log.warning("%s: Found no suitable languages" % celexid)
            self.dump_graph(celexid, graph)
            return None, None, None, None

        for lang in self.config.languages:
            if lang in candidateexpressions:
                expression = candidateexpressions[lang]
                candidateitem = {}
                # we'd like to order the manifestations in some preference order -- fmx4 > xhtml > html > pdf
                for manifestation in expression.objects(CDM.expression_manifested_by_manifestation):
                    manifestationtype = str(manifestation.value(CDM.type))
                    # there might be multiple equivalent
                    # manifestations, eg
                    # ...celex/62001CJ0101.SWE.fmx4,
                    # ...ecli/ECLI%3AEU%3AC%3A2003%3A596.SWE.fmx4 and
                    # ...cellar/bcc476ae-43f8-4668-8404-09fad89c202a.0011.01. Try
                    # to find out if that is the case, and get the "root" manifestation
                    rootmanifestations = list(manifestation.subjects(OWL.sameAs))
                    if rootmanifestations:
                        manifestation = rootmanifestations[0]
                    items = list(manifestation.subjects(CDM.item_belongs_to_manifestation))
                    if len(items) == 1: 
                        candidateitem[manifestationtype] = items[0]
                    elif len(items) == 2:
                        # NOTE: for at least 32016L0680, there can be
                        # two items of the fmx4 manifestation, where
                        # one (DOC_1) is bad (eg only a reference to
                        # the pdf file) and the other (DOC_2) is
                        # good. The heuristic for choosing the good
                        # one: if the owl:sameAs property ends in .xml
                        # but not .doc.xml...
                        for item in items:
                            # this picks a random object if there are
                            # two or more owl:sameAs triples, but the
                            # heuristic seems to work with all
                            # owl:sameAs objects
                            sameas = str(item.value(OWL.sameAs).identifier)
                            if sameas.endswith(".xml") and not sameas.endswith(".doc.xml"):
                                candidateitem[manifestationtype] = item
                                break

                if candidateitem:
                    for t in ("fmx4", "xhtml", "html", "pdf", "pdfa1a"):
                        if t in candidateitem:
                            item = candidateitem[t]
                            mimetype = str(item.value(CMR.manifestationMimeType))
                            self.log.info("%s: Has manifestation %s (%s) in language %s" % (celexid, t,mimetype, lang))
                            # we might need this even outside of
                            # debugging (eg when downloading
                            # eurlexcaselaw, the main document lacks
                            # keywords, classifications, instruments
                            # cited etc.
                            self.dump_graph(celexid, graph) 
                            return lang, t, mimetype, str(item.identifier)
                else:
                    self.log.warning("%s: Language %s had no suitable manifestations" %
                                     (celexid, lang))
        self.log.warning("%s: No language (tried %s) had any suitable manifestations" % (celexid, ", ".join(candidateexpressions.keys())))
        self.dump_graph(celexid, graph)
        return None, None, None, None
 def get_route(self, route_id):
     route = Resource(self.graph, URIRef(self.uri + "route_" + route_id))
     route.set(RDF.type, self.GTFS.Route)
     route.set(DCTERMS.identifier, Literal(route_id, datatype=XSD.string))
     return route