コード例 #1
0
ファイル: case.py プロジェクト: vikhari/CASE-CLI-Wrapper
# © 2020 The MITRE Corporation
#This software (or technical data) was produced for the U. S. Government under contract SB-1341-14-CQ-0010, and is subject to the Rights in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#Released under MITRE PRE #18-4297.


#====================================================
# CASE API
#!/usr/bin/env python

import datetime
import uuid

import rdflib
from rdflib import RDF

# Default CASE ontology namespace.
# NOTE(review): this is an example.org placeholder IRI — confirm the real core namespace.
CASE = rdflib.Namespace('http://case.example.org/core#')


#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT

class Document(object):

    def __init__(self, graph=None):
        """
        Initializes the CASE document.
        Args:
            graph: The graph to populate (instance of rdflib.Graph)
                   If not provided, a graph in memory will be used.
        """
コード例 #2
0
# Build nanopub graphs for each step of a protocols.io protocol description.
# NOTE(review): `json` and `rdflib` are not imported in this fragment — presumably
# imported earlier in the file; confirm.
with open('io.json') as infile:
    data = json.load(infile)

    # Loop through every annotation entry in the json
    entry = data['protocol']

    # Get whatever information you want for your manual step description
    doi = entry['doi']
    steps = entry['steps']
    creator = entry['creator']['username']

    # print(f'url={url}\nprefix={prefix}\nexact={exact}\nsuffix={suffix}\ntitle={title}')

    for step in steps:
        # Some standard ontologies used for nanopubs and describing workflows.
        # NOTE(review): these Namespace objects are loop-invariant and could be
        # hoisted above the loop.
        NP = rdflib.Namespace("http://www.nanopub.org/nschema#")
        PPLAN = rdflib.Namespace("http://purl.org/net/p-plan#")
        PROV = rdflib.Namespace("http://www.w3.org/ns/prov#")
        DUL = rdflib.Namespace(
            "http://ontologydesignpatterns.org/wiki/Ontology:DOLCE+DnS_Ultralite/"
        )
        BPMN = rdflib.Namespace("https://www.omg.org/spec/BPMN/")
        PWO = rdflib.Namespace("http://purl.org/spar/pwo/")
        PAV = rdflib.Namespace("http://purl.org/pav/")
        RESEARCHER = rdflib.Namespace("https://www.protocols.io/researchers/")

        # Now make assertion graph for the nanopub out of this
        this_np = rdflib.Namespace(doi + '#')
        this_step = rdflib.term.URIRef(doi + '#step')
        np_rdf = rdflib.ConjunctiveGraph()
        # Named sub-graph for the nanopub head; the fragment continues past
        # this excerpt (assertion/provenance/pubInfo graphs presumably follow).
        head = rdflib.Graph(np_rdf.store, this_np.Head)
コード例 #3
0
 def __init__(self):
     """Prepare an empty SHACL parsing state: a fresh graph, shape
     registries, and the RDF/SHACL vocabulary namespaces."""
     self.g = rdflib.Graph()
     self.nodeShapes = {}
     self.propertyShapes = {}
     # Vocabulary namespaces used when walking the shapes graph.
     self.rdf = rdflib.Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
     self.sh = rdflib.Namespace('http://www.w3.org/ns/shacl#')
コード例 #4
0
import csv
import os
import rdflib
from iribaker import to_iri

# Path to the namespace-prefix declarations shipped next to this script.
# NOTE(review): not used in this excerpt — presumably read elsewhere in the file.
YAML_NAMESPACE_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                   'namespaces.yaml')

# Vocabulary namespaces for the conversion.
FOAF = rdflib.Namespace('http://xmlns.com/foaf/0.1/')
LODE = rdflib.Namespace('http://linkedevents.org/ontology/')
IDS = rdflib.Namespace('http://data.socialhistory.org/resource/ids/')
# NOTE(review): the W3C time namespace is conventionally http://, not https:// — confirm.
TIME = rdflib.Namespace('https://www.w3.org/2006/time#')
OWLTIME = rdflib.Namespace("http://www.w3.org/TR/owl-time#")

rdf_graph = rdflib.Graph()

# Register prefix bindings used on serialization.
# NOTE(review): TIME is never bound here (only OWLTIME is) — confirm intended.
rdf_graph.bind('foaf', FOAF)
rdf_graph.bind('ids', IDS)
rdf_graph.bind('lode', LODE)
rdf_graph.bind('owltime', OWLTIME)
# Read the sample individuals CSV and pull out the per-row fields.
with open("indiv_001smpl.csv") as infile:
    reader = csv.reader(infile)
    # Fixed: `reader.next()` is Python-2-only; the builtin next() skips the
    # header row portably (works on Python 2.6+ and 3).
    header = next(reader)
    for row in reader:
        personid = row[2]
        # Zero-pad date parts so "1-2-3" becomes a sortable "03-02-01" form.
        day = str(row[9]).zfill(2)
        mnth = str(row[10]).zfill(2)
        year = str(row[11]).zfill(2)
        date = "{}-{}-{}".format(year, mnth, day)
        predicate = row[4]
コード例 #5
0
def specgen(template, language):
    """Fill the HTML *template* with the spec generated from the module graph.

    Reads the module-level ``o_graph`` (plus ``spec_pre``, ``trans_dict``,
    ``l_index`` and several helper functions) and mutates the module-level
    globals declared below, which the helpers read — so statement order here
    matters.

    Args:
        template: HTML template string with ``{_header_}``/``{_azlist_}``/
            ``{_terms_}``/``{_deprecated_}`` format slots and optional
            ``importRDFExample[label][file]`` markers.
        language: unused in this function — TODO confirm it is needed by callers.

    Returns:
        The completed HTML document as a string.
    """
    global spec_url
    global spec_ns
    global ns_list
    global namespace_dict

    # getting all namespaces from o_graph
    all_ns = [n for n in o_graph.namespace_manager.namespaces()]

    # creating a dictionary of the names spaces - {identifier:uri}
    namespace_dict = {key: value for (key, value) in all_ns}
    # The spec's own namespace: the one bound to spec_pre, else the default ('') prefix.
    if spec_pre in namespace_dict:
        spec_url = namespace_dict[spec_pre]
    else:
        spec_url = namespace_dict['']

    spec_ns = rdflib.Namespace(spec_url)
    # print(get_high_lvl_nodes())
    # for x in get_high_lvl_nodes():
    #     print(x)
    deprecated_html = create_deprecated_html(o_graph)

    # Gets sorted classes & property labels
    # (subjects of any predicate whose object is OWL.Class / OWL.ObjectProperty)
    class_list = [
        get_uri_term((x)) for x in sorted(o_graph.subjects(None, OWL.Class))
    ]
    prop_list = [
        get_uri_term((x))
        for x in sorted(o_graph.subjects(None, OWL.ObjectProperty))
    ]

    global domain_dict
    global range_dict
    domain_dict, range_dict = get_domain_range_dict()

    # Dict_list in specgen
    # skos_concepts = [get_uri_term(s) for s, p, o in sorted(o_graph.triples((None, RDF.type, SKOS.ConceptScheme)))]
    # Classes that also have at least one instance asserted in the graph.
    classes_i = sorted([
        x for x in class_list if (None, RDF.type, get_full_uri(x)) in o_graph
    ])
    instance_list = get_instances(class_list)

    # Build HTML list of terms.
    # Section headings, localized via trans_dict / l_index.
    dict_str = trans_dict["dicts"][l_index] + ":"
    props_str = trans_dict["props"][l_index] + ":"

    az_dict = {
        "Classes:": class_list,
        props_str: prop_list,
        "Instances:": instance_list,
        dict_str: classes_i,
    }
    temp_list = [dict_str, "Classes:", props_str, "Instances:"]

    # create global cross reference
    azlist_html = get_azlist_html(az_dict, temp_list)

    # Creating rest of html
    dict_html = create_dictionary_html(classes_i)
    classes_html = "<h3 id='classes'>Classes</h3>" + create_terms_html(
        class_list, "Class")
    # props_str[:-1] strips the trailing ':' added above.
    prop_html = "<h3 id='properties'>%s</h3>" % props_str[:
                                                          -1] + create_terms_html(
                                                              prop_list,
                                                              "Property")
    instance_html = "<h3 id='instances'>Instances</h3>" + create_terms_html(
        instance_list, "Instance")

    terms_html = dict_html + classes_html + prop_html + instance_html

    template = template.format(_header_=get_header_html(),
                               _azlist_=azlist_html,
                               _terms_=terms_html,
                               _deprecated_=deprecated_html)
    # Expand each importRDFExample[label][file] marker in turn; the search is
    # re-run after every replacement because the replacement changes offsets.
    pattern = re.compile(
        r'importRDFExample\[(?P<label>.*?)\]\[(?P<file>.*?)\]')
    locate = pattern.search(template)
    totalExamples = 1
    while locate:
        print("Adding example #", totalExamples, " with title ",
              locate.groupdict()['label'], ".")
        template = template.replace(
            locate.group(),
            createExamplar(totalExamples,
                           locate.groupdict()['label'],
                           locate.groupdict()['file']))
        totalExamples = totalExamples + 1
        locate = pattern.search(template)

    return template
コード例 #6
0
def write_rdf(schema, graph_elements, egg, configG, start_point):
    """Serialize a gmark graph plus per-snapshot property values as TriG.

    Reads ``../<schema>_output/<schema>-graph.txt`` (one ``s:x p:y o:z``
    triple per line), adds quads into per-snapshot named graphs, then appends
    the TriG serialization to ``../<schema>_output/<schema>-output.trig``.

    Args:
        schema: base name of the gmark schema (used to build both file paths).
        graph_elements: dict mapping element kind -> iterable of element ids.
        egg: temporal-validity dict; ``egg[x]["v"]`` is a per-snapshot "T"/"F" list.
        configG: configuration dict with "ListDynP", "nodes" and "edges" keys.
        start_point: offset added to snapshot indices when naming graphs.
    """
    predicate = rdflib.Namespace("http://egg/predicate/")
    named_graph = rdflib.Namespace("http://egg/ng/")

    g = rdflib.ConjunctiveGraph()

    # ---- write gmark and valid tuples in rdf ----
    with open("../" + schema + "_output/" + schema + '-graph.txt', 'r') as f:
        for line in f.readlines():
            match = re.match(r"(^(\w+):(\w+) (\w+):(\w+) (\w+):(\w+))", line)
            if not match:
                continue

            s = rdflib.URIRef(match.group(2) + ":" + match.group(3))
            o = rdflib.URIRef(match.group(6) + ":" + match.group(7))

            if configG["ListDynP"]:  # write gmark in rdf only if there are properties
                p = rdflib.URIRef(match.group(4) + ":" + match.group(5))
                n = rdflib.URIRef(named_graph + "gmark")
                g.add((s, p, o, n))

            p = rdflib.URIRef(predicate + match.group(4))

            # Add the tuple to every snapshot graph in which it is valid ("T").
            for snap in range(len(egg[match.group(5)]["v"])):
                if egg[match.group(5)]["v"][snap] == "T":
                    n = rdflib.URIRef(named_graph + "snapshot" +
                                      str(start_point + snap))
                    g.add((s, p, o, n))

    logging.info("1 rdf end")

    # ---- write value of properties of graph elements in rdf ----
    i = 0
    for key in graph_elements:
        # Nodes and edges were handled by two identical copy-pasted branches;
        # merged here by just selecting the matching property list.
        if key in configG["nodes"]:
            props = configG["nodes"][key]
        elif key in configG["edges"]:
            props = configG["edges"][key]
        else:
            continue

        for element in graph_elements[key]:
            s = rdflib.URIRef(key + ":" + str(element))
            p = rdflib.URIRef(predicate.hasProperty)

            for prop in props:
                o = rdflib.URIRef("Property:" + prop)
                n = rdflib.URIRef(named_graph + "G" + str(i))
                g.add((s, p, o, n))

                # NOTE(review): `match` is left over from the *last line* of
                # the file read above — the snapshot count probably should
                # come from `egg[element]` instead, and this raises NameError
                # if the file contained no matching line. Preserved as-is;
                # confirm intent before changing.
                for snapshot in range(len(egg[match.group(5)]["v"])):
                    s1 = rdflib.URIRef(named_graph + "G" + str(i))
                    p1 = rdflib.URIRef(predicate.value)
                    o1 = rdflib.Literal(str(egg[element][prop][snapshot]))
                    n1 = rdflib.URIRef(named_graph + "snap" +
                                       str(start_point + snapshot))
                    g.add((s1, p1, o1, n1))

                i = i + 1

    logging.info("2 rdf end")

    # ---- serialize rdf graph in trig format ----
    with open("../" + schema + "_output/" + schema + "-output.trig", "a") as f:
        f.write(g.serialize(format='trig'))

    logging.info("rdf end")
コード例 #7
0
from bs4 import BeautifulSoup
import os
from os import listdir
import sys
import csv
# NOTE(review): `sys` is imported twice (here and above) — harmless duplicate.
import rdflib, sys
from rdflib import *

# Vocabulary namespaces for the CWRC bibliographic extraction.
CWRC = rdflib.Namespace( "http://sparql.cwrc.ca/ontologies/cwrc#")
BF = rdflib.Namespace( "http://id.loc.gov/ontologies/bibframe/")
XML = rdflib.Namespace("http://www.w3.org/XML/1998/namespace")
MARCREL = rdflib.Namespace("http://id.loc.gov/vocabulary/relators/")
DATA = rdflib.Namespace("http://cwrc.ca/cwrcdata/")
GENRE = rdflib.Namespace("http://sparql.cwrc.ca/ontologies/genre#")
SCHEMA = rdflib.Namespace("http://schema.org/")

# Maps source citation levels to work types used in the output.
TYPE_MAPPING = {
    "monographic": "standaloneWork",
    "analytic": "embeddedWork",
    "journal": "periodical",
    "series": "series",
    "unpublished": "unpublished",

}

def csv_matches(csv_file):
    col_map = []
    mapping = {}
    with open(csv_file) as f:
        reader = csv.DictReader(f)
        for row in reader:
コード例 #8
0
def get_rdf_news(name,
                 in_files,
                 top_entries=None,
                 extra_entries=None,
                 dev_dist=None):
    """Build NEWS entries for a project from DOAP / doap-changeset RDF files.

    Args:
        name: project name, used in item summaries and warnings.
        in_files: iterable of N3/Turtle files describing the project.
        top_entries: optional dict filled in-place, per file-release, with
            summary items of the form "name: item".
        extra_entries: optional dict of per-dist dicts with an 'items' list
            to merge into each release entry.
        dev_dist: fallback file-release URI for unreleased (dev) versions.

    Returns:
        Dict mapping (date, revision) -> entry dict, or None on parse error.
    """
    import rdflib
    from time import strptime

    doap = rdflib.Namespace('http://usefulinc.com/ns/doap#')
    dcs = rdflib.Namespace('http://ontologi.es/doap-changeset#')
    rdfs = rdflib.Namespace('http://www.w3.org/2000/01/rdf-schema#')
    foaf = rdflib.Namespace('http://xmlns.com/foaf/0.1/')
    rdf = rdflib.Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
    m = rdflib.ConjunctiveGraph()

    try:
        for i in in_files:
            m.parse(i, format='n3')
    except Exception:
        Logs.warn('Error parsing data, unable to generate NEWS')
        return

    proj = m.value(None, rdf.type, doap.Project)
    # Pull in any additional .ttl files the project description points at.
    for f in m.triples([proj, rdfs.seeAlso, None]):
        if f[2].endswith('.ttl'):
            m.parse(f[2], format='n3')

    entries = {}
    for r in m.triples([proj, doap.release, None]):
        release = r[2]
        revision = m.value(release, doap.revision, None)
        date = m.value(release, doap.created, None)
        blamee = m.value(release, dcs.blame, None)
        changeset = m.value(release, dcs.changeset, None)
        dist = m.value(release, doap['file-release'], None)

        if not dist:
            Logs.warn('No file release for %s %s' % (proj, revision))
            dist = dev_dist

        if revision and date and blamee and changeset:
            entry = {}
            entry['name'] = str(name)
            entry['revision'] = str(revision)
            entry['date'] = strptime(str(date), '%Y-%m-%d')
            entry['status'] = 'stable' if dist != dev_dist else 'unstable'
            entry['dist'] = str(dist)
            entry['items'] = []

            for i in m.triples([changeset, dcs.item, None]):
                item = str(m.value(i[2], rdfs.label, None))
                entry['items'] += [item]
                if dist and top_entries is not None:
                    if not str(dist) in top_entries:
                        top_entries[str(dist)] = {'items': []}
                    top_entries[str(dist)]['items'] += [
                        '%s: %s' % (name, item)
                    ]

            # Fixed: the original iterated over the *keys* of
            # extra_entries[str(dist)] and appended the same 'items' list once
            # per key (with an unused loop variable), and raised KeyError when
            # str(dist) was absent. Merge the extra items exactly once.
            if extra_entries and dist and str(dist) in extra_entries:
                entry['items'] += extra_entries[str(dist)]['items']

            entry['blamee_name'] = str(m.value(blamee, foaf.name, None))
            entry['blamee_mbox'] = str(m.value(blamee, foaf.mbox, None))

            entries[(str(date), str(revision))] = entry
        else:
            Logs.warn('Ignored incomplete %s release description' % name)

    return entries
コード例 #9
0
ファイル: simpleProperty.py プロジェクト: ssghost/PyOpenWorm
 def on_mapper_add_class(cls, mapper):
     """Derive the class's RDF type and namespace from its Python name when
     the class is registered with a mapper; returns the class unchanged."""
     rdf_type = cls.base_namespace[cls.__name__]
     cls.rdf_type = rdf_type
     cls.rdf_namespace = R.Namespace(rdf_type + "/")
     return cls
コード例 #10
0
ファイル: merge.py プロジェクト: VTaim/sema_A1

def localish(s):
    """Return the (almost) local name part of a URI: everything from the last
    '/' or '#' onward (separator included), e.g. http://www.ex.org/foo -> /foo.
    Strings containing neither separator are returned unchanged."""
    cut = max(s.rfind('/'), s.rfind('#'))
    return s if cut == -1 else s[cut:]


# The simplest & safest way to merge is to parse graph from several files:
g = rdflib.Graph()
g.parse("part1.ttl", format="turtle")
g.parse("part2.ttl", format="turtle")

# Namespace of the merged example data.
n1 = rdflib.Namespace(
    "http://www.tut.fi/example.org/fi-tut-math-sm-calvin-and-hobbes/")

# NOTE(review): the bare `print ...` statements below are Python 2 syntax;
# this snippet will not run under Python 3 as written.
print "This has been asserted about Calvin:"

for p, o in g.predicate_objects(n1.Calvin):
    print "  ", localish(p), localish(o)

print "And in n3:"

# Copy only the triples about Calvin into a fresh graph and serialize it.
g2 = rdflib.Graph()
for s, p, o in g.triples((n1.Calvin, None, None)):
    g2.add((s, p, o))
print(g2.serialize(format="n3"))
コード例 #11
0
ファイル: huvizQuads.py プロジェクト: cwrc/RDF-extraction
import argparse
from rdflib import *
import rdflib
from SPARQLWrapper import SPARQLWrapper, JSON, XML, N3, RDFXML
import json
import sys

CWRC = rdflib.Namespace("http://sparql.cwrc.ca/ontologies/cwrc#")


class HuvizQuads:

    contextGraph = None

    conjunctiveGraph = ConjunctiveGraph()

    def __init__(self, instanceUri, graph):
        """Capture the target instance URI and source graph, derive the
        context graph, and immediately run the huviz extraction.

        Args:
            instanceUri: string URI of the instance to extract around.
            graph: source graph the quads are extracted from.
        """
        self.instanceUri = instanceUri
        self.graph = graph
        self.contextGraph = self.get_context_graph()

        # Do the extraction
        self.construct_huviz_graph()

    def get_context_graph(self):
        """Get Context Graph
        
        Arguments:
            instanceUri {str} -- A string representation of the instance uri
        
        Returns:
コード例 #12
0
    # Loop through every annotation entry in the json
    for entry in data:

        # Get whatever information you want for your manual step description
        url = entry['url']
        user = entry['user']
        exact = entry['exact']
        created = entry['created']
        id = entry['id']
        title = entry['title']

        # print(f'url={url}\nprefix={prefix}\nexact={exact}\nsuffix={suffix}\ntitle={title}')


        # Some standard ontologies used for nanopubs and describing workflows.
        NP = rdflib.Namespace("http://www.nanopub.org/nschema#")
        PPLAN = rdflib.Namespace("http://purl.org/net/p-plan#")
        PROV = rdflib.Namespace("http://www.w3.org/ns/prov#")
        DUL = rdflib.Namespace("http://ontologydesignpatterns.org/wiki/Ontology:DOLCE+DnS_Ultralite/")
        BPMN = rdflib.Namespace("https://www.omg.org/spec/BPMN/")
        PWO = rdflib.Namespace("http://purl.org/spar/pwo/")
        AUT = rdflib.Namespace("https://hypothes.is/a/")

        # Now make assertion graph for the nanopub out of this
        this_np = rdflib.Namespace(url+'#')
        this_step = rdflib.term.URIRef(url + '#step')
        np_rdf = rdflib.ConjunctiveGraph()
        head = rdflib.Graph(np_rdf.store, this_np.Head)
        assertion = rdflib.Graph(np_rdf.store, this_np.assertion)
        provenance = rdflib.Graph(np_rdf.store, this_np.provenance)
        pubInfo = rdflib.Graph(np_rdf.store, this_np.pubInfo)
コード例 #13
0
ファイル: platypus.py プロジェクト: ghukill/platypus
#!/usr/bin/env python

# fedora 4 test client
# NOTE: this is experimental / spike code!!
# (written to get a sense of  the fedora 4 LDP API)
import requests
import rdflib

# Vocabulary namespaces used by the Fedora 4 test client.
DC = rdflib.Namespace('http://purl.org/dc/elements/1.1/')
# Fixed: the IRI read 'http:////www.w3.org/...' (four slashes), which is not
# the standard XML Schema namespace and would mangle every XMLSchema term.
XMLSchema = rdflib.Namespace('http://www.w3.org/2001/XMLSchema#')
LDP = rdflib.Namespace('http://www.w3.org/ns/ldp#')

FEDORA4 = rdflib.Namespace('http://fedora.info/definitions/v4/repository#')

# rdf descriptors for easy access to get/set rdf values


class Value(object):
    """A data descriptor that gets a rdf single value.
    """

    # break out into types by xmlschema type, like eulxml?
    # StringField, DateField, etc. then conversion is easy...
    # but do we always know what a field will contain?

    def __init__(self, predicate, datatype=None, normalize=False):
        """Record which predicate this descriptor reads and how its values
        are typed and normalized on access."""
        self.normalize = normalize
        self.datatype = datatype
        self.predicate = predicate

    def __get__(self, obj, objtype=None):
コード例 #14
0
def main(data, lang):
    """Split NIF string literals into per-sentence RDF resources.

    Processes at most int(data) N-Triples files from ``Files/Input<lang>/``,
    tokenizes every literal attached via a nif:isString predicate, and writes
    one Turtle file per input into ``Files/Sentence/`` describing each
    sentence's begin/end offsets, anchor text and reference context.

    Args:
        data: maximum number of input files to process (coercible to int).
        lang: suffix selecting the ``Files/Input<lang>/`` folder.
    """
    print("Pattern")
    nif = rdflib.Namespace(
        "http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#")
    count = 0
    limit = int(data)  # hoisted: was re-parsed on every loop iteration

    for filename in os.listdir('Files/Input' + lang + '/'):
        if count >= limit:
            break  # remaining files were skipped anyway; stop scanning
        val = 0
        graph2 = rdflib.Graph()
        graph2.parse('Files/Input' + lang + '/' + filename, format='nt')
        g = Graph()
        name = filename.split(".")[0]
        # Common URI prefix for every sentence/context resource of this file
        # (was rebuilt inline five times per sentence in the original).
        base = "http://dbpedia.org/resource/" + name + "?dbpv=2016-10&nif="
        for s, p, o in graph2:
            # Exact-type check kept from the original (Literal subclasses excluded).
            if type(o) == rdflib.term.Literal and nif.isString in p:
                for sentence in tokenize(o):
                    try:
                        # Offsets assume one separator character between
                        # sentences in the source text.
                        # (The original's encode().decode('utf-8') round-trips
                        # are identities for valid text and were removed.)
                        begin = val
                        val = val + len(sentence) + 1
                        end = begin + len(sentence)
                        subj = rdflib.term.URIRef(
                            base + "sentence_" + str(begin) + "_" + str(end))
                        g.add([subj, RDF.type, nif.Sentence])
                        g.add([subj, nif.beginIndex,
                               rdflib.term.Literal(str(begin))])
                        g.add([subj, nif.endIndex,
                               rdflib.term.Literal(str(end))])
                        g.add([subj, nif.anchorOf,
                               rdflib.term.Literal(str(sentence))])
                        g.add([subj, nif.referenceContext,
                               rdflib.term.URIRef(base + "context")])
                    except Exception:
                        # Best-effort per sentence, as in the original bare
                        # except; narrowed so Ctrl-C/SystemExit still propagate.
                        pass
        g.bind("nif", nif)
        g.serialize(destination='Files/Sentence/' + filename,
                    format="turtle")
        count = count + 1
    print("Your Output is stored in Sentence Folder")
コード例 #15
0
ファイル: core.py プロジェクト: oda-hub/literature-to-facts
def workflows_by_input(nthreads=1, input_types=None, max_inputs=None):
    """Collect inputs from generator workflows, run the matching workflows
    over them in a thread pool, and return the resulting facts as an RDF
    graph serialized in n3.

    Args:
        nthreads: size of the thread pool used to process collected inputs.
        input_types: collection of accepted input types.
            NOTE(review): the default None crashes at the membership test
            below as soon as a workflow returns a list type — callers must
            pass a collection; confirm intended.
        max_inputs: optional cap; input collection stops once exceeded.

    Returns:
        The n3 serialization of the fact graph as a str.
    """
    logger.info("searching for input list...")

    collected_inputs = []

    t0 = time.time()

    # Phase 1: find workflows whose return annotation is a list of an
    # accepted input type, and run them to collect concrete inputs.
    for w in workflow_context:
        logger.debug(f"{Fore.BLUE} {w['name']} {Style.RESET_ALL}")
        logger.debug(f"   has " + " ".join([
            f"{k}:" + getattr(v, "__name__", "?")
            for k, v in w['signature'].items()
        ]))
        r = w['signature'].get('return', None)
        largs = typing.get_args(r)

        if len(largs) == 0:
            continue

        larg = largs[0]
        logger.debug(f"{Fore.GREEN} {larg} {Style.RESET_ALL}")

        if larg not in input_types:
            continue

        logger.info(
            f"{Fore.YELLOW} valid input generator for {Fore.MAGENTA} {larg.__name__} : {w['function']} {Style.RESET_ALL} {Style.RESET_ALL}"
        )

        for i, arg in enumerate(w['function']()):
            collected_inputs.append(dict(arg_type=larg, arg=arg))
            logger.debug(
                f"{Fore.BLUE} input: {Fore.MAGENTA} {str(arg):.100s} {Style.RESET_ALL} {Style.RESET_ALL}"
            )

        logger.info("collected %d arguments", len(collected_inputs))

        if max_inputs is not None and len(collected_inputs) > max_inputs:
            # NOTE: f-string carries no placeholder; %s is filled lazily by logging.
            logger.warning(f"selecting only %s", max_inputs)
            break

    logger.info(f"inputs search done in in {time.time()-t0}")

    # Phase 2: fan the collected inputs out over a thread pool.
    Ex = futures.ThreadPoolExecutor

    r = []

    with Ex(max_workers=nthreads) as ex:
        for c_id, d in ex.map(workflows_for_input, collected_inputs):
            logger.debug(f"{c_id} gives: {len(d)}")
            r.append(d)

    # Flatten the per-input fact lists.
    facts = []
    for d in r:
        for s in d:
            facts.append(s)

    logger.info("updating graph..")

    G = rdflib.Graph()
    G.bind('paper', rdflib.Namespace('http://odahub.io/ontology/paper#'))

    # NOTE(review): dead branch (`if False`) — its error path also logs the
    # stale loop variable `s` rather than the failing statement. Kept as-is.
    if False:
        D = 'INSERT DATA { ' + " .\n".join(facts) + '}'
        logger.debug(D)
        try:
            G.update(D)
        except Exception as e:
            logger.error(f"problem {e}  adding \"{s}\"")
            raise Exception()
    else:
        # Insert facts one at a time so a single bad fact is identifiable.
        for fact in facts:
            D = f'INSERT DATA {{ {fact} }}'
            logger.info(D)
            try:
                G.update(D)
            except Exception as e:
                logger.error(f"problem {e}  adding \"{D}\"")
                raise Exception(f"problem {e}  adding \"{D}\"")

    r = G.serialize(format='n3')

    # rdflib changed serialize() to return str in newer versions; handle both.
    if isinstance(r, bytes):
        return r.decode()
    else:
        return r
コード例 #16
0
ファイル: simpleProperty.py プロジェクト: ssghost/PyOpenWorm
class RealSimpleProperty(
        with_metaclass(ContextMappedPropertyClass, DataUser,
                       Contextualizable)):
    """A single RDF property attached to an owner object.

    Values are held as Statements in ``self._v``; most accessors filter them
    by the property's current ``context``.
    """

    # When False, set() replaces any existing values instead of appending.
    multiple = False
    link = R.URIRef("property")
    linkName = "property"
    base_namespace = R.Namespace("http://openworm.org/entities/")

    def __init__(self, owner, **kwargs):
        super(RealSimpleProperty, self).__init__(**kwargs)
        self._v = []        # Statements asserted through this property
        self.owner = owner
        self._hdf = None    # cache for has_defined_value() (True only)

    def contextualize_augment(self, context):
        """Return this property contextualized to *context*; drops the cache."""
        self._hdf = None
        return contextualize_helper(context, self)

    def has_value(self):
        """True if any value has been set in the current context."""
        for x in self._v:
            if x.context == self.context:
                return True
        return False

    def has_defined_value(self):
        """True if a value in the current context is defined (result cached)."""
        if self._hdf is not None:
            return self._hdf
        for x in self._v:
            if x.context == self.context and x.object.defined:
                self._hdf = True
                return True
        return False

    def set(self, v):
        """Set *v* as a value, wrapping non-graph-object values in
        PropertyValue; returns the Statement, also added to the context."""
        if v is None:
            raise ValueError(
                'It is not permitted to declare a property to have value the None'
            )
        if not hasattr(v, 'idl'):
            v = PropertyValue(v)

        if not self.multiple:
            self.clear()

        stmt = self._insert_value(v)
        if self.context is not None:
            self.context.add_statement(stmt)
        return stmt

    def clear(self):
        """ Clears values set *in all contexts* """
        self._hdf = None
        # Fixed: removing from self._v while iterating it skipped every other
        # statement, leaving half the values behind; iterate over a snapshot.
        for x in list(self._v):
            assert self in x.object.owner_properties
            x.object.owner_properties.remove(self)
            self._v.remove(x)

    @property
    def defined_values(self):
        """Objects of defined statements in the current context."""
        return tuple(x.object for x in self._v
                     if x.object.defined and x.context == self.context)

    @property
    def values(self):
        """Objects of all statements in the current context."""
        return tuple(x.object for x in self._v if x.context == self.context)

    @property
    def rdf(self):
        # Prefer the context's graph; fall back to the configured one.
        if self.context is not None:
            return self.context.rdf_graph()
        else:
            return self.conf['rdf.graph']

    @property
    def identifier(self):
        return self.link

    def get(self):
        """Retrieve this property's values from the RDF graph.

        Uses the owner's predicate-object cache when the owner is defined;
        otherwise issues a graph-object query with a temporary variable.
        """
        if self.rdf is None:
            return ()
        results = None
        owner = self.owner
        if owner.defined:
            self._ensure_fresh_po_cache()
            results = set()
            for pred, obj in owner.po_cache.cache:
                if pred == self.link:
                    results.add(obj)
        else:
            v = Variable("var" + str(id(self)))
            self._insert_value(v)
            results = GraphObjectQuerier(v, self.rdf, parallel=False)()
            self._remove_value(v)
        return results

    def _insert_value(self, v):
        """Record *v* as a Statement and back-link this property on it."""
        stmt = Statement(self.owner, self, v, self.context)
        self._hdf = None
        self._v.append(stmt)
        if self not in v.owner_properties:
            v.owner_properties.append(self)
        return stmt

    def _remove_value(self, v):
        """Remove the Statement recorded for *v* and its back-link."""
        assert self in v.owner_properties
        self._hdf = None
        v.owner_properties.remove(self)
        self._v.remove(Statement(self.owner, self, v, self.context))

    def _ensure_fresh_po_cache(self):
        """Rebuild the owner's predicate-object cache if the graph changed."""
        owner = self.owner
        ident = owner.identifier
        graph_index = self.conf.get('rdf.graph.change_counter', None)

        if graph_index is None or owner.po_cache is None or owner.po_cache.cache_index != graph_index:
            owner.po_cache = POCache(
                graph_index, frozenset(self.rdf.predicate_objects(ident)))

    def unset(self, v):
        self._remove_value(v)

    def __call__(self, *args, **kwargs):
        # Combined getter/setter protocol shared by property types.
        return _get_or_set(self, *args, **kwargs)

    def __repr__(self):
        fcn = FCN(type(self))
        return '{}(owner={})'.format(fcn, repr(self.owner))

    def one(self):
        """Return one retrieved value, or None if there are none."""
        return next(iter(self.get()), None)

    def onedef(self):
        """Return one locally-set defined value in this context, or None."""
        for x in self._v:
            if x.object.defined and x.context == self.context:
                return x.object
        return None

    @classmethod
    def on_mapper_add_class(cls, mapper):
        """Derive rdf_type/rdf_namespace from the class name at mapping time."""
        cls.rdf_type = cls.base_namespace[cls.__name__]
        cls.rdf_namespace = R.Namespace(cls.rdf_type + "/")
        return cls

    @property
    def defined_statements(self):
        """Statements (any context) whose subject and object are both defined."""
        return tuple(x for x in self._v
                     if x.object.defined and x.subject.defined)

    @property
    def statements(self):
        """Quads in the graph matching (owner, link, *, *)."""
        return self.rdf.quads((self.owner.idl, self.link, None, None))
コード例 #17
0
ファイル: rdflib.py プロジェクト: Zeigar/percolation-1
class NS:
    """Namespace registry: one rdflib Namespace per vocabulary used here."""

    # Social participation vocabularies
    test = r.Namespace("http://purl.org/socialparticipation/test/")
    cm = r.Namespace("http://purl.org/socialparticipation/cm/")  # "magic box"
    obs = r.Namespace("http://purl.org/socialparticipation/obs/")  # ontology of the social library
    aa = r.Namespace("http://purl.org/socialparticipation/aa/")  # algorithmic autoregulation
    vbs = r.Namespace("http://purl.org/socialparticipation/vbs/")  # vocabulary of the social library
    opa = r.Namespace("http://purl.org/socialparticipation/opa/")  # participabr
    ops = r.Namespace("http://purl.org/socialparticipation/ops/")  # social participation ontology
    ocd = r.Namespace("http://purl.org/socialparticipation/ocd/")  # "democratic city"
    ore = r.Namespace("http://purl.org/socialparticipation/ore/")  # research ontology (ongoing works, an RDF AA)
    ot = r.Namespace("http://purl.org/socialparticipation/ot/")  # thesis ontology (academic concepts)
    po = r.Namespace("http://purl.org/socialparticipation/po/")  # the participation ontology (this framework)
    per = r.Namespace("http://purl.org/socialparticipation/per/")  # percolation (this framework)
    social = r.Namespace("http://purl.org/socialparticipation/social/")  # percolation (this framework)
    participation = r.Namespace("http://purl.org/socialparticipation/participation/")  # percolation (this framework)
    facebook = r.Namespace("http://purl.org/socialparticipation/facebook/")  # facebook
    tw = r.Namespace("http://purl.org/socialparticipation/tw/")  # twitter
    irc = r.Namespace("http://purl.org/socialparticipation/irc/")  # irc
    gmane = r.Namespace("http://purl.org/socialparticipation/gmane/")  # gmane
    ld = r.Namespace("http://purl.org/socialparticipation/ld/")  # linkedin

    # Audiovisual analytics vocabularies
    aavo = r.Namespace("http://purl.org/audiovisualanalytics/")
    aav = r.Namespace("http://purl.org/audiovisualanalytics/vocabulary/")
    aao = r.Namespace("http://purl.org/audiovisualanalytics/ontology/")
    voeiia = r.Namespace("http://purl.org/audiovisualanalytics/voeiia/")

    # External resources and the well-known vocabularies bundled with rdflib
    dbp = r.Namespace("http://dbpedia.org/resource/")
    rdf = r.namespace.RDF
    rdfs = r.namespace.RDFS
    owl = r.namespace.OWL
    xsd = r.namespace.XSD
    dc = r.namespace.DC
    dct = r.namespace.DCTERMS
    foaf = r.namespace.FOAF
    doap = r.namespace.DOAP
    void = r.namespace.VOID
    skos = r.namespace.SKOS
    U = r.URIRef  # convenience alias for building URI references
コード例 #18
0
ファイル: csv2rdf.py プロジェクト: jmmalaca/Thesis-Ontology
    def convert(self, csvreader):
        """
        Convert CSV rows from `csvreader` into RDF triples.

        Emits triples via self.triple() (which writes to self.OUT), using:
          * self.BASE / self.PROPBASE -- namespaces for row URIs / properties
          * self.IDENT -- identifier column indices, or "auto" (row number)
          * self.COLUMNS / self.PROPS -- per-column converters / overrides
          * self.CLASS / self.DEFINECLASS / self.LABEL -- typing and labels

        Progress and a final summary are reported on stderr.
        """
        start = time.time()

        if self.OUT:
            sys.stderr.write("Output to %s\n" % self.OUT.name)

        # Normalize IDENT to a tuple of column indices unless auto-numbering.
        if self.IDENT != "auto" and not isinstance(self.IDENT, tuple):
            self.IDENT = (self.IDENT,)

        if not self.BASE:
            warnings.warn("No base given, using http://example.org/instances/")
            self.BASE = rdflib.Namespace("http://example.org/instances/")

        if not self.PROPBASE:
            warnings.warn(
                "No property base given, using http://example.org/property/")
            self.PROPBASE = rdflib.Namespace("http://example.org/props/")

        # Skip configured leading lines.
        for x in range(self.SKIP):
            next(csvreader)

        # Read the header line and derive one property URI per column.
        header_labels = list(next(csvreader))
        headers = dict(
            enumerate([self.PROPBASE[toProperty(x)] for x in header_labels]))
        # Override header properties where explicit ones were given.
        for k, v in self.PROPS.items():
            headers[k] = v
            # BUGFIX: split_uri is a function; the old code subscripted it
            # directly (`split_uri[1]`), which raises TypeError.  Use the
            # local name of the overriding property URI as the label.
            header_labels[k] = split_uri(v)[1]

        if self.DEFINECLASS:
            # Output class/property definitions.
            self.triple(self.CLASS, RDF.type, RDFS.Class)
            for i in range(len(headers)):
                h, l = headers[i], header_labels[i]
                if h == "" or l == "":
                    continue
                if self.COLUMNS.get(i) == "ignore":
                    continue
                self.triple(h, RDF.type, RDF.Property)
                self.triple(h, RDFS.label, rdflib.Literal(toPropertyLabel(l)))
                self.triple(h, RDFS.domain, self.CLASS)
                self.triple(h, RDFS.range,
                            self.COLUMNS.get(i, default_node_make).range())

        rows = 0
        for row in csvreader:
            try:
                if self.IDENT == 'auto':
                    uri = self.BASE["%d" % rows]
                else:
                    # BUGFIX: replace spaces *before* encoding; calling
                    # bytes.replace(" ", "_") with str arguments raises
                    # TypeError on Python 3.
                    uri = self.BASE["_".join([urllib.parse.quote(
                        x.replace(" ", "_").encode("utf8"), safe="")
                        for x in index(row, self.IDENT)])]

                if self.LABEL:
                    self.triple(uri, RDFS.label, rdflib.Literal(
                        " ".join(index(row, self.LABEL))))

                if self.CLASS:
                    # Type triple for the row resource.
                    self.triple(uri, RDF.type, self.CLASS)

                for i, x in enumerate(row):
                    x = x.strip()
                    if x != '':
                        if self.COLUMNS.get(i) == "ignore":
                            continue
                        try:
                            o = self.COLUMNS.get(i, rdflib.Literal)(x)
                            if isinstance(o, list):
                                for _o in o:
                                    self.triple(uri, headers[i], _o)
                            else:
                                self.triple(uri, headers[i], o)

                        except Exception as e:
                            # BUGFIX: Exception.message does not exist on
                            # Python 3; format the exception itself.
                            warnings.warn(
                                "Could not process value for column " +
                                "%d:%s in row %d, ignoring: %s " % (
                                    i, headers[i], rows, e))

                rows += 1
                if rows % 100000 == 0:
                    sys.stderr.write(
                        "%d rows, %d triples, elapsed %.2fs.\n" % (
                            rows, self.triples, time.time() - start))
            except Exception:
                # Any other failure is fatal: report the row and re-raise.
                sys.stderr.write("Error processing line: %d\n" % rows)
                raise

        # Output types/labels for generated URIs.
        # NOTE(review): `uris` is presumably a module-level registry filled
        # by the column node-makers -- confirm it is defined in this module.
        classes = set()
        for label, x in uris.items():
            u, c = x
            self.triple(u, RDFS.label, rdflib.Literal(label))
            if c:
                c = rdflib.URIRef(c)
                classes.add(c)
                self.triple(u, RDF.type, c)

        for c in classes:
            self.triple(c, RDF.type, RDFS.Class)

        self.OUT.close()
        sys.stderr.write(
            "Converted %d rows into %d triples.\n" % (rows, self.triples))
        sys.stderr.write("Took %.2f seconds.\n" % (time.time() - start))
コード例 #19
0
                    newGraphSize = len(rdfGraph)
                    logging.info(f'\t{newGraphSize - graphSize} statements read from {srcFile.name}')
                    graphSize = newGraphSize
            case _:
                raise NotImplementedError(f'source {src} not yet implemented!')
        logging.info(f'data graph now has {len(rdfGraph)} statements')

        #   check data
        if config.getboolean('WORK', 'checkData'):
            for (subj, pred, obj) in rdfGraph:
                print(subj, pred, obj)

        #   set bindings
        logging.debug('Namespaces\n\t{:s}'.format('\n\t'.join(map(str, rdfGraph.namespaces()))))
        nsDict = dict(rdfGraph.namespaces())
        mo = rdflib.Namespace(nsDict['mo'])

        #   read data
        if config.getboolean('WORK', 'checkData'):
            for (subj, pred, obj) in rdfGraph:
                print(subj, pred, obj)
            # uriRef = rdflib.term.URIRef(value="https://www.mathematik-olympiaden.de/aufgaben/rdf/Problem#MO-511341")
            # print(uriRef)
            # for (pred, obj) in rdfGraph.predicate_objects(subject=uriRef):
            #     print(rdfGraph.label(pred), obj)

        #  query data: MO
        if config.getboolean('WORK', 'queryData'):
            queryStr = 'SELECT $nr $pdfA $oly $rnd $okl \
                        WHERE { \
                            $problem a mo:Problem . \
コード例 #20
0
ファイル: csv2rdf.py プロジェクト: jmmalaca/Thesis-Ontology
def main():
    """
    Command-line entry point for the CSV-to-RDF converter.

    Parses getopt-style options, optionally loads an ini-style config file
    ("-f"), configures a CSV2RDF instance, and runs the conversion over
    the remaining file arguments.  Command-line options are applied after
    the config file and therefore take precedence.
    """
    csv2rdf = CSV2RDF()

    opts, files = getopt.getopt(
        sys.argv[1:],
        "hc:b:p:i:o:Cf:l:s:d:",
        ["out=", "base=", "delim=", "propbase=", "class=",
         "ident=", "label=", "skip=", "defineclass", "help"])
    opts = dict(opts)

    if "-h" in opts or "--help" in opts:
        print(HELP)
        sys.exit(-1)

    if "-f" in opts:
        config = configparser.ConfigParser()
        # BUGFIX: ConfigParser.readfp() is deprecated (removed in Python
        # 3.12) and the file handle was never closed; use read_file()
        # inside a `with` block.
        with open(opts["-f"]) as config_fh:
            config.read_file(config_fh)
        for k, v in config.items("csv2rdf"):
            if k == "out":
                csv2rdf.OUT = codecs.open(v, "w", "utf-8")
            elif k == "base":
                csv2rdf.BASE = rdflib.Namespace(v)
            elif k == "propbase":
                csv2rdf.PROPBASE = rdflib.Namespace(v)
            elif k == "class":
                csv2rdf.CLASS = rdflib.URIRef(v)
            elif k == "defineclass":
                csv2rdf.DEFINECLASS = bool(v)
            elif k == "ident":
                # NOTE(security): eval() executes arbitrary code from the
                # config file; only run this tool on trusted configs.
                csv2rdf.IDENT = eval(v)
            elif k == "label":
                csv2rdf.LABEL = eval(v)
            elif k == "delim":
                csv2rdf.DELIM = v
            elif k == "skip":
                csv2rdf.SKIP = int(v)
            elif k.startswith("col"):
                csv2rdf.COLUMNS[int(k[3:])] = column(v)
            elif k.startswith("prop"):
                csv2rdf.PROPS[int(k[4:])] = rdflib.URIRef(v)

    # Command-line options (short and long forms) override config values.
    if "-o" in opts:
        csv2rdf.OUT = codecs.open(opts["-o"], "w", "utf-8")
    if "--out" in opts:
        csv2rdf.OUT = codecs.open(opts["--out"], "w", "utf-8")

    if "-b" in opts:
        csv2rdf.BASE = rdflib.Namespace(opts["-b"])
    if "--base" in opts:
        csv2rdf.BASE = rdflib.Namespace(opts["--base"])

    if "-d" in opts:
        csv2rdf.DELIM = opts["-d"]
    if "--delim" in opts:
        csv2rdf.DELIM = opts["--delim"]

    if "-p" in opts:
        csv2rdf.PROPBASE = rdflib.Namespace(opts["-p"])
    if "--propbase" in opts:
        csv2rdf.PROPBASE = rdflib.Namespace(opts["--propbase"])

    # NOTE(security): eval() of command-line values executes arbitrary
    # code; these are expected to be literal tuples/ints from the user.
    if "-l" in opts:
        csv2rdf.LABEL = eval(opts["-l"])
    if "--label" in opts:
        csv2rdf.LABEL = eval(opts["--label"])

    if "-i" in opts:
        csv2rdf.IDENT = eval(opts["-i"])
    if "--ident" in opts:
        csv2rdf.IDENT = eval(opts["--ident"])

    if "-s" in opts:
        csv2rdf.SKIP = int(opts["-s"])
    if "--skip" in opts:
        csv2rdf.SKIP = int(opts["--skip"])

    if "-c" in opts:
        csv2rdf.CLASS = rdflib.URIRef(opts["-c"])
    if "--class" in opts:
        csv2rdf.CLASS = rdflib.URIRef(opts["--class"])

    for k, v in opts.items():
        if k.startswith("--col"):
            csv2rdf.COLUMNS[int(k[5:])] = column(v)
        elif k.startswith("--prop"):
            csv2rdf.PROPS[int(k[6:])] = rdflib.URIRef(v)

    if csv2rdf.CLASS and ("-C" in opts or "--defineclass" in opts):
        csv2rdf.DEFINECLASS = True

    csv2rdf.convert(
        csv_reader(fileinput.input(files), delimiter=csv2rdf.DELIM))
コード例 #21
0
from versa import VERSA_BASEIRI
from versa.reader.md import from_markdown

#Represent the blank node RDF-ism
class mock_bnode(I):
    '''
    Must be initialized with a string. Just use ''

    Stands in for an RDF blank node: each instantiation fabricates a fresh
    pseudo-identifier; the constructor argument is ignored.
    '''
    def __new__(cls, _):
        # Fabricate an ID from the hash of the current timestamp.
        # NOTE(review): str hashes are salted per process, so these IDs are
        # only unique/stable within a single run -- presumably intended.
        new_id = '__VERSABLANKNODE__' + str(hash(datetime.datetime.now().isoformat())) #str(id(object())) #Fake an ID
        self = super(mock_bnode, cls).__new__(cls, new_id)
        return self


# Core Versa constants and the RDF namespace URIs used during conversion.
VERSA_TYPE = I(VERSA_BASEIRI + 'type')
VNS = rdflib.Namespace(VERSA_BASEIRI)

RDF_NS = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
RDFS_NS = 'http://www.w3.org/2000/01/rdf-schema#'

RDF_TYPE = I(RDF_NS + 'type')


def prep(s, p, o):
    '''
    Prepare a triple for rdflib
    '''
    def bnode_check(r):
        return isinstance(r, mock_bnode) or r.startswith('VERSABLANKNODE_')

    s = BNode() if bnode_check(s) else URIRef(s)
コード例 #22
0
"""Grab Collections Graphs from Fedora, Iterate, and load into local Fuseki."""
import rdflib
from rdflib.namespace import *
from argparse import ArgumentParser
import sys

# Root of the Fedora repository REST endpoint being harvested.
fcrepo_base = 'http://hydraedit-dev.library.cornell.edu:8080/fedora/rest/dev/'
# Namespaces used when traversing the collection graphs.
EDM = rdflib.Namespace('http://www.europeana.eu/schemas/edm/')
PCDM = rdflib.Namespace('http://pcdm.org/models#')
FEDORA = rdflib.Namespace('http://fedora.info/definitions/v4/repository#')
ONS = rdflib.Namespace('http://opaquenamespace.org/hydra/')
VIVO = rdflib.Namespace('http://vivoweb.org/ontology/core#')
HW = rdflib.Namespace('http://projecthydra.org/works/models#')
MARCREL = rdflib.Namespace('http://id.loc.gov/vocabulary/relators/')
FCONFIG = rdflib.Namespace('http://fedora.info/definitions/v4/config#')
LDP = rdflib.Namespace('http://www.w3.org/ns/ldp#')


def grabCollContainers(cont, fcrepo):
    """Query fedora for all resources of the selected collection."""
    print("Generating graph.")
    coll_graph = fcrepo.parse(cont)
    print("Parsing graph objects.")
    processed = 0
    for member in coll_graph.objects(cont, LDP.contains):
        # Recursively pull each contained resource into the same graph
        # (Works, Parts, Filesets).
        coll_graph.parse(rdflib.term.URIRef(member))
        if processed % 100 == 0 and processed != 0:
            print("%d URIs processed" % processed)
        processed += 1
    return coll_graph
コード例 #23
0
from pygments.formatters import HtmlFormatter
# temp log library for debugging
# from log import *
# log = Log("log/docgen")
# log.test_name("Debugging Document Generator")

# Module-level state, populated later by the document generator.
spec_url = None
spec_ns = None
spec_pre = None
lang = None
bibo_nodes = None
o_graph = None
deprecated_uris = None

# Important namespaces
RDF = rdflib.Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
RDFS = rdflib.Namespace("http://www.w3.org/2000/01/rdf-schema#")
SKOS = rdflib.Namespace("http://www.w3.org/2004/02/skos/core#")
OWL = rdflib.Namespace("http://www.w3.org/2002/07/owl#")
FOAF = rdflib.Namespace("http://xmlns.com/foaf/0.1/")
VANN = rdflib.Namespace("http://purl.org/vocab/vann/")
PROV = rdflib.Namespace("http://www.w3.org/ns/prov#")
DCTERMS = rdflib.Namespace("http://purl.org/dc/terms/")

trans_dict = {
    "specification": ["Specification", "Spécifications"],
    "draft": ["Working Draft", "Brouillon de travail"],
    "previous_ver": ["Previous Version", "Ancienne version"],
    "current_ver": ["This Version", "Version courante"],
    "latest_ver": ["Latest Version", "Version à jour"],
    "last_ver": ["Last update", "Dernière version"],
コード例 #24
0
ファイル: constants.py プロジェクト: Benedict-Carling/pySBOL2
# Ontology terms and constants used as property values across the SBOL API.
# SBO, SYSBIO_URI and OM_URI are defined earlier in this module.

# A BioPax term and possible value for ComponentDefinition::type property
BIOPAX_COMPLEX = "http://www.biopax.org/release/biopax-level3.owl#Complex"

# EDAM ontology is used for Model.languages
# An EDAM ontology term and option for Model::language
EDAM_SBML = "http://identifiers.org/edam/format_2585"
# An EDAM ontology term and option for Model::language
EDAM_CELLML = "http://identifiers.org/edam/format_3240"
# An EDAM ontology term and option for Model::language
EDAM_BIOPAX = "http://identifiers.org/edam/format_3156"

# Model.frameworks
# SBO term and option for Model::framework
SBO_CONTINUOUS = SBO + "0000062"
# SBO term and option for Model::framework
SBO_DISCRETE = SBO + "0000063"

# URIs for SBOL extension objects
SYSBIO_DESIGN = SYSBIO_URI + "#Design"
SYSBIO_BUILD = SYSBIO_URI + "#Build"
SYSBIO_TEST = SYSBIO_URI + "#Test"
SYSBIO_ANALYSIS = SYSBIO_URI + "#Analysis"
SYSBIO_SAMPLE_ROSTER = SYSBIO_URI + "#SampleRoster"

IGEM_URI = "http://wiki.synbiohub.org/wiki/Terms/igem"

# Ontology of units of Measure (OM) terms, exposed as plain strings.
OM_NS = rdflib.Namespace(OM_URI)
OM_MEASURE: str = OM_NS.Measure.toPython()
OM_HAS_UNIT: str = OM_NS.hasUnit.toPython()
OM_HAS_NUMERICAL_VALUE: str = OM_NS.hasNumericalValue.toPython()
コード例 #25
0
ファイル: main.py プロジェクト: jerry-lei/graphene
    def configure_database(self):
        """
        Database configuration should be set here

        Sets up the namespace registry, the RDF database engine, the
        vocabulary graph, and the linked-data resource APIs (roles, users,
        nanopublications) plus Flask-Security integration.
        """
        # Registry of commonly used namespaces, addressable as attributes.
        self.NS = NamespaceContainer()
        self.NS.RDFS = rdflib.RDFS
        self.NS.RDF = rdflib.RDF
        # NOTE(review): wrapping the RDFS/RDF namespace objects in
        # Namespace() again looks redundant -- confirm it is intentional.
        self.NS.rdfs = rdflib.Namespace(rdflib.RDFS)
        self.NS.rdf = rdflib.Namespace(rdflib.RDF)
        self.NS.owl = rdflib.OWL
        self.NS.xsd   = rdflib.Namespace("http://www.w3.org/2001/XMLSchema#")
        self.NS.dc    = rdflib.Namespace("http://purl.org/dc/terms/")
        self.NS.dcelements    = rdflib.Namespace("http://purl.org/dc/elements/1.1/")
        self.NS.auth  = rdflib.Namespace("http://vocab.rpi.edu/auth/")
        self.NS.foaf  = rdflib.Namespace("http://xmlns.com/foaf/0.1/")
        self.NS.prov  = rdflib.Namespace("http://www.w3.org/ns/prov#")
        self.NS.skos = rdflib.Namespace("http://www.w3.org/2004/02/skos/core#")
        self.NS.cmo = rdflib.Namespace("http://purl.org/twc/ontologies/cmo.owl#")
        self.NS.sio = rdflib.Namespace("http://semanticscience.org/resource/")
        self.NS.sioc_types = rdflib.Namespace("http://rdfs.org/sioc/types#")
        self.NS.sioc = rdflib.Namespace("http://rdfs.org/sioc/ns#")
        self.NS.np = rdflib.Namespace("http://www.nanopub.org/nschema#")
        self.NS.graphene = rdflib.Namespace("http://vocab.rpi.edu/graphene/")
        # Deployment-local namespace derived from the configured LOD prefix.
        self.NS.local = rdflib.Namespace(self.config['lod_prefix']+'/')
        self.NS.ov = rdflib.Namespace("http://open.vocab.org/terms/")


        # Create the RDF database engine from "db_"-prefixed config keys.
        self.db = database.engine_from_config(self.config, "db_")
        load_namespaces(self.db,locals())
        Resource.db = self.db

        # Load the application vocabulary (Turtle) into its own graph.
        # NOTE(review): Graph.load() is deprecated/removed in newer rdflib
        # versions, and the file handle is not closed -- confirm the pinned
        # rdflib version still supports this.
        self.vocab = Graph()
        #print URIRef(self.config['vocab_file'])
        self.vocab.load(open(self.config['vocab_file']), format="turtle")

        # Linked-data resource APIs; `.alchemy` exposes the mapped class.
        self.role_api = ld.LocalResource(self.NS.prov.Role,"role", self.db.store, self.vocab, self.config['lod_prefix'], RoleMixin)
        self.Role = self.role_api.alchemy

        self.user_api = ld.LocalResource(self.NS.prov.Agent,"user", self.db.store, self.vocab, self.config['lod_prefix'], UserMixin)
        self.User = self.user_api.alchemy

        self.nanopub_api = ld.LocalResource(self.NS.np.Nanopublication,"pub", self.db.store, self.vocab, self.config['lod_prefix'], name="Graph")
        self.Nanopub = self.nanopub_api.alchemy

        # Wire roles/users into Flask-Security with a custom register form.
        self.classes = mapper(self.Role, self.User)
        self.datastore = RDFAlchemyUserDatastore(self.db, self.classes, self.User, self.Role)
        self.security = Security(self, self.datastore,
                                 register_form=ExtendedRegisterForm)
コード例 #26
0
from csvw_validation_test_cases import implementation_report

__author__ = 'sebastian'
import rdflib
from rdflib.namespace import FOAF
# W3C EARL (Evaluation and Report Language) vocabulary.
EARL = rdflib.Namespace("http://www.w3.org/ns/earl#")


class ImplementationReport():
    """
    Builds a CSVW EARL implementation report.

    Loads the DOAP/FOAF description from test/doap.ttl and uses the first
    foaf:Person found as the assertor and the first earl:TestSubject as
    the subject of the report.
    """
    def __init__(self):
        self.g = rdflib.Graph()
        self.g.parse(location='test/doap.ttl', format='turtle')
        # Take the first (arbitrary) matching node for each role.
        for person in self.g.subjects(rdflib.RDF.type, FOAF.Person):
            self.assertor = person
            break
        for subj in self.g.subjects(rdflib.RDF.type, EARL.TestSubject):
            self.subject = subj
            break

    def run_validation_test(self):
        # Delegates to the csvw test harness, which records EARL
        # assertions into self.g.
        implementation_report(self.g, self.subject, self.assertor)

    def getResult(self):
        """Return the report graph serialized as Turtle."""
        return self.g.serialize(format='turtle')


if __name__ == '__main__':
    # Build the report, run the validation suite, and print the Turtle
    # serialization to stdout.
    rep = ImplementationReport()
    rep.run_validation_test()
    res = rep.getResult()
    # BUGFIX: Python 2 print statement -> Python 3 print() call
    # (the rest of this file already uses Python 3 syntax).
    print(res)
コード例 #27
0
class ShapeParserTests(unittest.TestCase):
    """Unit tests for ShapeParser against W3C SHACL samples and local fixtures."""

    # Directory holding the W3C SHACL example files.
    w3c_test_files = 'tests/_files/w3c'
    # Namespace shared by the example shapes.
    ex = rdflib.Namespace('http://www.example.org/')

    def setUp(self):
        """Create a fresh parser and resolve the fixtures directory."""
        self.parser = ShapeParser()
        self.dir = path.abspath('tests/_files')

    def tearDown(self):
        """Drop per-test state."""
        self.parser = None
        self.dir = None

    def testParsingW3CFiles(self):
        """Test if all W3C samples are parsed without throwing an error."""
        for root, dirs, files in os.walk(self.w3c_test_files):
            for f in files:
                shapes = ShapeParser().parseShape(
                    path.join(self.w3c_test_files, f))
                self.assertTrue(isinstance(shapes, dict))

    def testAddressShape(self):
        """Test Adress Shape example."""
        nodeShapesDict = ShapeParser().parseShape(
            path.join(self.w3c_test_files, 'AddressShape.ttl'))
        addressShape = nodeShapesDict[str(self.ex.AddressShape)]
        personShape = nodeShapesDict[str(self.ex.PersonShape)]
        postalCodeShape = addressShape.properties[0]
        addressPropertyShape = personShape.properties[0]

        self.assertEqual(len(nodeShapesDict), 2)
        self.assertEqual(len(addressShape.properties), 1)
        self.assertEqual(len(personShape.properties), 1)

        self.assertEqual(addressShape.uri, str(self.ex.AddressShape))

        self.assertEqual(personShape.uri, str(self.ex.PersonShape))

        self.assertEqual(postalCodeShape.path, str(self.ex.postalCode))
        self.assertEqual(postalCodeShape.datatype, str(XSD.string))
        self.assertEqual(postalCodeShape.maxCount, 1)

        self.assertEqual(addressPropertyShape.path, str(self.ex.address))
        self.assertEqual(addressPropertyShape.minCount, 1)
        self.assertEqual(addressPropertyShape.nodes[0],
                         str(self.ex.AddressShape))

    def testClassExampleShape(self):
        """Test Class example."""
        nodeShapesDict = ShapeParser().parseShape(
            path.join(self.w3c_test_files, 'ClassExampleShape.ttl'))
        nodeShape = nodeShapesDict[str(self.ex.ClassExampleShape)]
        propertyShape = nodeShape.properties[0]

        self.assertEqual(nodeShape.uri, str(self.ex.ClassExampleShape))
        self.assertTrue(str(self.ex.Alice) in nodeShape.targetNode)
        self.assertTrue(str(self.ex.Bob) in nodeShape.targetNode)
        self.assertTrue(str(self.ex.Carol) in nodeShape.targetNode)
        self.assertTrue(len(nodeShape.targetNode), 3)
        self.assertTrue(len(nodeShapesDict), 1)

        self.assertEqual(propertyShape.path, str(self.ex.address))
        self.assertEqual(propertyShape.classes[0], str(self.ex.PostalAddress))
        self.assertTrue(len(propertyShape.classes), 1)

    def testHand(self):
        """Test qualifiedValueShapes of Hand example."""
        nodeShapesDict = ShapeParser().parseShape(
            path.join(self.w3c_test_files, 'HandShape.ttl'))
        self.assertEqual(len(nodeShapesDict), 1)
        for id, nodeShape in nodeShapesDict.items():
            # Expected qualified min/max counts per digit class.
            values = {
                'http://www.example.org/Finger': 4,
                'http://www.example.org/Thumb': 1
            }

            for propertyShape in nodeShape.properties:
                if propertyShape.isSet['qualifiedValueShape']:
                    # One thumb and four fingers
                    self.assertTrue(
                        propertyShape.isSet['qualifiedValueShapesDisjoint'])
                    self.assertEqual(propertyShape.path, str(self.ex.digit))
                    self.assertEqual(
                        values[propertyShape.qualifiedValueShape.classes[0]],
                        propertyShape.qualifiedMinCount)
                    self.assertEqual(
                        values[propertyShape.qualifiedValueShape.classes[0]],
                        propertyShape.qualifiedMaxCount)
                else:
                    # Hand
                    self.assertEqual(propertyShape.path, str(self.ex.digit))
                    self.assertFalse(
                        propertyShape.isSet['qualifiedValueShapesDisjoint'])
                    self.assertEqual(propertyShape.maxCount, 5)

    def testMinMaxLogic(self):
        """Check that valid min/max count pairs are parsed correctly."""
        # not part of the shacl constraints, can be reimplemented though
        # with self.assertRaises(Exception):
        #     ShapeParser().parseShape(path.join(self.dir, 'minGreaterMax.ttl'))

        # with self.assertRaises(Exception):
        #     ShapeParser().parseShape(path.join(self.dir, 'maxLowerMin.ttl'))

        # with self.assertRaises(Exception):
        #     ShapeParser().parseShape(path.join(self.dir, 'multipleMinCounts.ttl'))

        # with self.assertRaises(Exception):
        #     ShapeParser().parseShape(path.join(self.dir, 'multipleMaxCounts.ttl'))

        shapes = ShapeParser().parseShape(
            path.join(self.dir, 'minLowerMax.ttl'))
        self.assertEqual(
            shapes['http://www.example.org/ExampleShape'].properties[0].
            minCount, 1)
        self.assertEqual(
            shapes['http://www.example.org/ExampleShape'].properties[0].
            maxCount, 2)

    def testPositiveNodeShapeParse(self):
        """Check all node-shape level attributes of a well-formed shape."""
        nodeShapes = self.parser.parseShape(
            self.dir + '/positiveNodeShapeParserExample1.ttl')
        nodeShape = nodeShapes[('http://www.example.org/exampleShape')]
        self.assertEqual(str(nodeShape.uri),
                         'http://www.example.org/exampleShape')

        targetClasses = [
            'http://www.example.org/Animal', 'http://www.example.org/Person'
        ]
        self.assertEqual(sorted(nodeShape.targetClass), targetClasses)

        targetNodes = [
            'http://www.example.org/Alice', 'http://www.example.org/Bob'
        ]
        self.assertEqual(sorted(nodeShape.targetNode), targetNodes)

        relationships = [
            'http://www.example.org/friendship',
            'http://www.example.org/relationship'
        ]
        self.assertEqual(sorted(nodeShape.targetObjectsOf), relationships)
        self.assertEqual(sorted(nodeShape.targetSubjectsOf), relationships)
        self.assertEqual(str(nodeShape.nodeKind),
                         'http://www.w3.org/ns/shacl#IRI')
        self.assertEqual(nodeShape.closed, True)

        ignoredProperties = [
            'http://www.example.org/A', 'http://www.example.org/B',
            'http://www.example.org/C'
        ]
        self.assertEqual(sorted(nodeShape.ignoredProperties),
                         ignoredProperties)
        self.assertEqual(str(nodeShape.message['default']), "C")
        self.assertEqual(str(nodeShape.message['en']), "A")
        self.assertEqual(str(nodeShape.message['de']), "B")
        self.assertEqual(len(nodeShape.properties), 2)

    def testPositiveNodeShapePropertiesParse(self):
        """Check all property-shape level attributes of a well-formed shape."""
        nodeShapes = self.parser.parseShape(
            self.dir + '/positivePropertyShapeParserExample.ttl')
        nodeShape = nodeShapes['http://www.example.org/exampleShape']

        # Pick out the two property shapes by their paths.
        for shape in nodeShape.properties:
            if str(shape.path) == 'http://www.example.org/PathA':
                propertyShapeA = shape
            else:
                propertyShapeB = shape
        classes = ['http://www.example.org/A', 'http://www.example.org/B']
        self.assertEqual(sorted(propertyShapeA.classes), classes)
        self.assertEqual(propertyShapeA.datatype,
                         'http://www.w3.org/2001/XMLSchema#integer')
        self.assertEqual(int(propertyShapeA.minCount), 1)
        self.assertEqual(int(propertyShapeA.maxCount), 2)
        self.assertEqual(int(propertyShapeA.minExclusive), 1)
        self.assertEqual(int(propertyShapeA.maxExclusive), 1)
        self.assertEqual(int(propertyShapeA.minInclusive), 1)
        self.assertEqual(int(propertyShapeA.maxInclusive), 1)
        self.assertEqual(int(propertyShapeA.minLength), 1)
        self.assertEqual(int(propertyShapeA.maxLength), 2)
        self.assertEqual(str(propertyShapeA.pattern), '[abc]')
        self.assertEqual(str(propertyShapeA.flags), 'i')

        languageIn = ['de', 'en']
        self.assertEqual(sorted(propertyShapeA.languageIn), languageIn)
        self.assertEqual(propertyShapeA.uniqueLang, True)

        equals = [
            'http://www.example.org/PathB', 'http://www.example.org/PathC'
        ]
        self.assertEqual(sorted(propertyShapeA.equals), equals)

        disjoint = [
            'http://www.example.org/PathB', 'http://www.example.org/PathC'
        ]
        self.assertEqual(sorted(propertyShapeA.disjoint), disjoint)

        lessThan = ['http://www.example.org/A', 'http://www.example.org/B']
        self.assertEqual(sorted(propertyShapeA.lessThan), lessThan)

        lessThanOrEquals = [
            'http://www.example.org/A', 'http://www.example.org/B'
        ]
        self.assertEqual(sorted(propertyShapeA.lessThanOrEquals),
                         lessThanOrEquals)

        nodes = [
            'http://www.example.org/propertyShapeA',
            'http://www.example.org/propertyShapeB'
        ]
        self.assertEqual(sorted(propertyShapeA.nodes), nodes)

        # NOTE(review): this list is never used by the assertions below --
        # it looks like leftover scaffolding.
        qualifiedValueShape = [
            'http://www.example.org/friendship',
            'http://www.example.org/relationship'
        ]
        self.assertEqual(str(propertyShapeA.qualifiedValueShape.path),
                         'http://www.example.org/PathC')
        self.assertEqual(propertyShapeA.qualifiedValueShapesDisjoint, True)
        self.assertEqual(int(propertyShapeA.qualifiedMinCount), 1)
        self.assertEqual(int(propertyShapeA.qualifiedMaxCount), 2)
        self.assertEqual(str(propertyShapeA.name['default']), "C")
        self.assertEqual(str(propertyShapeA.name['en']), "A")
        self.assertEqual(str(propertyShapeA.name['de']), "B")
        self.assertEqual(str(propertyShapeA.description['default']), "C")
        self.assertEqual(str(propertyShapeA.description['en']), "A")
        self.assertEqual(str(propertyShapeA.description['de']), "B")
        self.assertEqual(str(propertyShapeB.path),
                         'http://www.example.org/PathB')

    def testUnusedPropertyShapeParse(self):
        """A node shape with embedded property shapes yields one top-level shape."""
        wellFormedShapes = self.parser.parseShape(
            self.dir + '/w3c/ExampleNodeShapeWithPropertyShapes.ttl')
        self.assertEqual(len(wellFormedShapes), 1)

    def testUsedPropertyShapeParse(self):
        """Property shapes referenced by a node shape are attached to it."""
        wellFormedShapes = self.parser.parseShape(
            self.dir + '/positiveUsedPropertyShape.ttl')
        self.assertEqual(
            len(wellFormedShapes[str(
                self.ex.PositiveUsedPropertyShape)].properties), 2)
コード例 #28
0
* system content
* registers that are just containers for other registers
'''

import rdflib
from rdflib.namespace import RDF, RDFS
import uuid
import logging

from ckan.common import OrderedDict
from ckanext.harvest.harvesters.base import HarvesterBase
import ckan.plugins as p

from ckanext.dgu.lib.formats import Formats

# Namespaces used by the registry harvester.
REG = rdflib.Namespace('http://purl.org/linked-data/registry#')
SKOS = rdflib.Namespace('http://www.w3.org/2004/02/skos/core#')
OWL = rdflib.Namespace('http://www.w3.org/2002/07/owl#')
LDP = rdflib.Namespace('http://www.w3.org/ns/ldp#')
DCT = rdflib.Namespace('http://purl.org/dc/terms/')

# DGU_type -- human-readable dataset type labels
DGU_TYPE__CODE_LIST = 'Code list'
DGU_TYPE__ONTOLOGY = 'Ontology'
DGU_TYPE__CONTROLLED_LIST = 'Controlled list'

# Query-string fragments for requesting Turtle (with metadata) from the registry.
TTL_PARAM = '?_format=ttl'
METADATA_PARAM = '&_view=with_metadata'


def printable_uri(uri):
コード例 #29
0
import rdflib
import os
import flask_ld as ld
import collections
import requests

import tempfile

from depot.io.utils import FileIntent
from depot.manager import DepotManager

from datetime import datetime
import pytz

# Namespaces for nanopublication structure and provenance.
np = rdflib.Namespace("http://www.nanopub.org/nschema#")
prov = rdflib.Namespace("http://www.w3.org/ns/prov#")
dc = rdflib.Namespace("http://purl.org/dc/terms/")
frbr = rdflib.Namespace("http://purl.org/vocab/frbr/core#")
from uuid import uuid4


class Nanopublication(rdflib.ConjunctiveGraph):

    _nanopub_resource = None

    @property
    def nanopub_resource(self):
        if self._nanopub_resource is None:
            self._nanopub_resource = self.resource(self.identifier)
            if not self._nanopub_resource[rdflib.RDF.type:np.Nanopublication]:
                self._nanopub_resource.add(rdflib.RDF.type, np.Nanopublication)
コード例 #30
0
ファイル: __init__.py プロジェクト: cosmicraga/mollyproject
import rdflib
import os.path

# Public API of this package.
__all__ = ['licenses']

# Namespaces used to interpret the bundled license descriptions.
FOAF = rdflib.Namespace('http://xmlns.com/foaf/0.1/')
DCTERMS = rdflib.Namespace('http://purl.org/dc/terms/')
DCTYPE = rdflib.Namespace('http://purl.org/dc/dcmitype/')
RDF = rdflib.Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')


class _License(object):
    def __init__(self, graph, uri):
        # Bind the license to the RDF graph it was parsed from and its URI.
        self._graph, self._uri = graph, uri

    def get(self, p):
        """Return the first value of predicate `p` as text, or None."""
        # NOTE: Python 2 code -- uses the builtin `unicode`.
        for value in self._graph.objects(self._uri, p):
            return unicode(value)
        return None

    def __unicode__(self):
        # Display name of the license (Python 2 text protocol).
        return self.get(DCTERMS['title'])

    @property
    def logo(self):
        """URL of the license's logo image, or None if not declared."""
        return self.get(FOAF['logo'])

    def simplify_for_render(self, simplify_value, simplify_model):
        simplified = {}
        for predicate, object in self._graph.predicate_objects(self._uri):
            # Disregard type