Code Example #1
def resource(request, ref=None):
    saved = False # set saved to false until post is successful
    uri = Namespace("http://data.library.oregonstate.edu/person/")[ref]
    data = ''
    # deal with updates if this is a form submit
    if request.method == 'POST':
        # get the existing data so processForm knows what to delete
        try:
            resource = queryManager.describe(uri)
        except Exception:
            resource = {}
        
        # get form data and put it in a dict
        resForm = ResourceForm(request.POST)
        VariantFormSet = formset_factory(VariantForm)
        varForm = VariantFormSet(request.POST)
        if resForm.is_valid() and varForm.is_valid():
            data = {
                'res': resForm.cleaned_data,
                'var': varForm.cleaned_data
                }
            saved = processForm(uri, data, resource)

    # if the uri exists, try to describe it and build an edit form 
    # retrieving data again if we just processed a form

    resource = queryManager.describe(uri)
    if resource:
        forms = buildForm(resource)
    else:
        #TODO: return a useful error
        return HttpResponse("resource not found", status=404) 

    return render_to_response("resource.tpl", {
        'uri': str(uri),
        'short': uri.split('/')[-1],
        'res': resource,
        'form': forms['form'],
        'variants': forms['variants'],
        'saved': saved,
        'data': resource
    })
Code Example #2
    def create_ontology(self,tr,predicate,subClass,address,booktitle):
        LDT= Namespace("http://www.JceFinalProjectOntology.com/")
        ut=Namespace("http://www.JceFinalProjectOntology.com/subject/#")
        usubClass=URIRef("http://www.JceFinalProjectOntology.com/subject/"+subClass.strip()+'#')
        #LDT.subClass=LDT[subClass]
        print(ut)
        print(usubClass)

        store=IOMemory()

        sty=LDT[predicate]
        g = rdflib.Graph(store=store,identifier=LDT)
        t = ConjunctiveGraph(store=store,identifier=ut)
        print ('Triples in graph before add: ', len(t))
        #g.add((LDT,RDF.type,RDFS.Class))
        g.add((URIRef(LDT),RDF.type,RDFS.Class))
        g.add((URIRef(LDT),RDFS.label,Literal("JFPO")))
        g.add((URIRef(LDT),RDFS.comment,Literal('class of all properties')))
        for  v in self.symbols.values():
            if self.if_compoTerm(v)==True:
                vs=self.splitTerms(v)[0]
            else:
                vs =v
            g.add((LDT[vs],RDF.type,RDF.Property))
            g.add((LDT[vs],RDFS.label,Literal('has'+vs)))
            g.add((LDT[vs],RDFS.comment,Literal(v)))
            g.add((LDT[vs],RDFS.range,OWL.Class))
            g.add((LDT[vs],RDFS.domain,Literal(vs)))
        g.bind('JFPO',LDT)
        #g.commit()
        g.serialize('trtst.rdf',format='turtle')

        t.add( (ut[tr], RDF.type,OWL.Class) )
        t.add((ut[tr],RDFS.subClassOf,OWL.Thing))
        t.add((ut[tr],RDFS.label,Literal(tr)))
        t.add((ut[tr],DC.title,Literal(booktitle)))
        t.add((ut[tr],DC.source,Literal(address)))

        t.add((ut[tr],DC[predicate],URIRef(usubClass)))
        t.add((ut[tr],LDT[predicate],RDF.Property))
        t.add((ut[tr],LDT.term(predicate),URIRef(usubClass)))

        t.add( (usubClass,RDF.type,OWL.Class))
        t.add((usubClass,RDFS.subClassOf,OWL.Thing))
        t.add((usubClass,RDFS.subClassOf,URIRef(sty)))
        t.add((usubClass,RDFS.label,Literal(subClass)))

        #tc=Graph(store=store,identifier=usubClass)
        t.bind("dc", "http://purl.org/dc/elements/1.1/")
        t.bind('JFPO',LDT)
        t.commit()
                #print(t.serialize(format='pretty-xml'))

        t.serialize('test2.owl',format='turtle')
Code Example #3
def new(request, ref=None):
    uriNeeded = True
    while uriNeeded == True:
        # assign an identifier
        identifier = pynoid.mint('zeek', random.randint(0, 100000))
        uri = Namespace("http://data.library.oregonstate.edu/person/")[identifier]
        uriNeeded = queryManager.ask(uri) # if this identifier is already in use, let's get another one
    forms = buildForm()
       
    return render_to_response("resource.tpl", {
        'uri': str(uri),
        'short': uri.split('/')[-1],
        'res': {},
        'form': forms['form'],
        'variants': forms['variants']
    })
Code Example #4
File: TopbraidDiff.py  Project: AKSW/QuitDiff
    def serialize(self, add, delete):
        diff = Namespace("http://topbraid.org/diff#")

        g = ConjunctiveGraph()

        namespace_manager = NamespaceManager(g)
        namespace_manager.bind('diff', diff, override=False)
        namespace_manager.bind('owl', OWL, override=False)

        graphUris = set(delete.keys()) | set(add.keys())

        for graphUri in graphUris:
            if (graphUri in delete.keys() and len(delete[graphUri]) > 0) or (graphUri in add.keys() and len(add[graphUri]) > 0):
                changeset = Namespace("urn:diff:" + str(uuid.uuid1()))
                graphTerm = changeset.term("")
                if str(graphUri) != 'http://quitdiff.default/':
                    g.add((graphTerm, OWL.imports, graphUri, graphTerm))
                g.add((graphTerm, RDF.type, OWL.Ontology, graphTerm))
                g.add((graphTerm, OWL.imports, diff.term(""), graphTerm))
                if graphUri in delete.keys() and len(delete[graphUri]) > 0:
                    i = 0
                    for triple in delete[graphUri]:
                        deleteStatementName = BNode()
                        g.add((deleteStatementName, RDF.type, diff.DeletedTripleDiff, graphTerm))
                        g.add((deleteStatementName, RDF.subject, triple[0], graphTerm))
                        g.add((deleteStatementName, RDF.predicate, triple[1], graphTerm))
                        g.add((deleteStatementName, RDF.object, triple[2], graphTerm))
                        i += 1
                if graphUri in add.keys() and len(add[graphUri]) > 0:
                    i = 0
                    for triple in add[graphUri]:
                        insertGraphName = BNode()
                        g.add((insertGraphName, RDF.type, diff.AddedTripleDiff, graphTerm))
                        g.add((insertGraphName, RDF.subject, triple[0], graphTerm))
                        g.add((insertGraphName, RDF.predicate, triple[1], graphTerm))
                        g.add((insertGraphName, RDF.object, triple[2], graphTerm))
                        i += 1

        return g.serialize(format="trig").decode("utf-8")
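For context, serialize above expects add and delete to be dictionaries keyed by graph URI, each value an iterable of (subject, predicate, object) triples. Below is only a sketch of that input shape with invented URIs; the QuitDiff code that normally builds these dicts is not part of this excerpt, and the enclosing class is assumed to be instantiable as TopbraidDiff().

from rdflib import URIRef, Literal

# Invented example data, purely to illustrate the expected shape.
graph_uri = URIRef('http://example.org/graph/1')
add = {graph_uri: [(URIRef('http://example.org/s'),
                    URIRef('http://example.org/p'),
                    Literal('new value'))]}
delete = {graph_uri: [(URIRef('http://example.org/s'),
                       URIRef('http://example.org/p'),
                       Literal('old value'))]}

# trig_text = TopbraidDiff().serialize(add, delete)  # would return a TriG string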
Code Example #5
def merge(request, uriMerge=None, uriTarget=None):
    uriMerge = Namespace("http://data.library.oregonstate.edu/person/")[uriMerge]
    uriTarget = Namespace("http://data.library.oregonstate.edu/person/")[uriTarget]
    merge = queryManager.describe(uriMerge)
    target = queryManager.describe(uriTarget)

    if request.method == 'POST':
        sameAs = "INSERT DATA {<" + uriMerge + "> owl:sameAs <" + uriTarget + "> .}"
        queryManager.update(sameAs)
        # get form data and put it in a dict
        resForm = ResourceForm(request.POST)
        VariantFormSet = formset_factory(VariantForm)
        varForm = VariantFormSet(request.POST)
        if resForm.is_valid() and varForm.is_valid():
            data = {
                'res': resForm.cleaned_data,
                'var': varForm.cleaned_data
                }
            saved = processForm(uriTarget, data, target)

    # these labels will be merged into the target as skos:hiddenLabel
    #TODO: exclude duplicate names
    mergeLabels = [str(ns['skos']['prefLabel']), str(ns['skos']['altLabel']), str(ns['skos']['hiddenLabel']), str(ns['foaf']['name'])]
    for field in merge:
        if field in mergeLabels:
            for value in merge[field]:
                if str(ns['skos']['hiddenLabel']) in target:
                    target[str(ns['skos']['hiddenLabel'])].append(value)
                else:
                    target[str(ns['skos']['hiddenLabel'])] = [value]

    #TODO: mark old record for deletion
    # do this with a hidden form?
    
    forms = buildForm(target)
    data = target
    return render_to_response("merge.tpl", {
        'uriMerge': str(uriMerge),
        'uri': str(uriTarget),
        'mergeShort': uriMerge.split('/')[-1],
        'short': uriTarget.split('/')[-1],
        'res': target,
        'form': forms['form'],
        'variants': forms['variants']
    })
Code Example #6
def make_component_hierarchy(component_map, component_part_map):
    g = rdflib.Graph()
    indra_ns = 'http://sorger.med.harvard.edu/indra/'
    ln = Namespace(indra_ns + 'locations/')
    rn = Namespace(indra_ns + 'relations/')
    part_of = rn.term('partof')
    has_name = rn.term('hasName')
    for comp_id, comp_name in component_map.items():
        g.add((ln.term(comp_id), has_name, Literal(comp_name)))
        sups = component_part_map.get(comp_id)
        if sups is not None:
            for sup_id in sups:
                g.add((ln.term(comp_id), part_of, ln.term(sup_id)))
    return g
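A quick usage sketch for make_component_hierarchy above. The ids and names are invented; in INDRA they come from its component tables, and the rdflib imports used by the function are assumed to be in scope.

# Invented toy input: component 'C2' is declared to be part of 'C1'.
component_map = {'C1': 'nucleus', 'C2': 'nuclear membrane'}
component_part_map = {'C2': ['C1']}

graph = make_component_hierarchy(component_map, component_part_map)
print(graph.serialize(format='turtle'))  # shows the hasName and partof triples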
Code Example #7
import sys
import json
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
from indra.sources import sofia


# Note that this is just a placeholder, it doesn't resolve as a URL
sofia_ns = Namespace('http://cs.cmu.edu/sofia/')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')


def save_ontology(g, path):
    with open(path, 'wb') as out_file:
        g_bytes = g.serialize(format='nt')
        # Replace extra new lines in string and get rid of empty line at end
        g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
        # Split into rows and sort
        rows = g_bytes.split(b'\n')
        rows.sort()
        g_bytes = b'\n'.join(rows)
        out_file.write(g_bytes)


def build_ontology(ont_json, rdf_path):
    G = Graph()
    for top_key, entries in ont_json.items():
        for entry_key, examples in entries.items():
            if '/' in entry_key:
Code Example #8
turtle_folder_fao = "/home/iati/mappings/FAO/"
dbpedia_file = "/home/iati/mappings/DBPedia/dbpedia-countries-via-factbook.ttl"

if not os.path.isdir(turtle_folder_oecd):
    os.makedirs(turtle_folder_oecd)
if not os.path.isdir(turtle_folder_bfs):
    os.makedirs(turtle_folder_bfs)
if not os.path.isdir(turtle_folder_ecb):
    os.makedirs(turtle_folder_ecb)
if not os.path.isdir(turtle_folder_fao):
    os.makedirs(turtle_folder_fao)

start_time = datetime.datetime.now()

# Namespaces
Iati = Namespace("http://purl.org/collections/iati/")

countries_oecd = Graph()
countries_oecd.bind('iati-country', "http://purl.org/collections/iati/codelist/Country/")
countries_oecd.bind('owl', "http://www.w3.org/2002/07/owl#")

countries_bfs = Graph()
countries_bfs.bind('iati-country', "http://purl.org/collections/iati/codelist/Country/")
countries_bfs.bind('bfs-country', "http://bfs.270a.info/code/1.0/CL_STATES_AND_TERRITORIES/")
countries_bfs.bind('owl', "http://www.w3.org/2002/07/owl#")

countries_ecb = Graph()
countries_ecb.bind('iati-country', "http://purl.org/collections/iati/codelist/Country/")
countries_ecb.bind('ecb-country', "http://ecb.270a.info/code/1.0/CL_AREA_EE/")
countries_ecb.bind('owl', "http://www.w3.org/2002/07/owl#")
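Only the graph setup and prefix bindings appear in this excerpt; presumably the rest of the script fills countries_oecd, countries_bfs and the other graphs with links between IATI country codes and the other providers' code lists. A hedged sketch of what one such link might look like, with an invented country code and owl:sameAs chosen only because the owl prefix is bound above:

from rdflib import Namespace

OWL_NS = Namespace("http://www.w3.org/2002/07/owl#")
BfsCountry = Namespace("http://bfs.270a.info/code/1.0/CL_STATES_AND_TERRITORIES/")

# Hypothetical link: the IATI country 'CH' is the same resource as the BFS code 'CH'.
countries_bfs.add((Iati['codelist/Country/CH'], OWL_NS.sameAs, BfsCountry['CH']))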
Code Example #9
all_store = None

# print(len(all_store['problem_domain']))

stop_words = set(stopwords.words('english'))
pos_list = [
    'CC', 'CD', 'DT', 'EX', 'JJR', 'JJS', 'PRP', 'PRP$', '$', 'RB', 'RBR',
    'RBS', 'RP', 'WDT', 'WP$', 'WP', 'WRB', 'MD'
]
word_list = [
    'definition', 'definitions', 'update', 'updates', 'sequence', 'sequences',
    'associated', 'involved', 'accessed', 'invokes', 'invoke', 'are', 'list'
]

#Defining the prefixes
symbol = Namespace("http://smartKT/ns/symbol#")
comment = Namespace("http://smartKT/ns/comment#")
prop = Namespace("http://smartKT/ns/properties#")

g.bind("symbol", symbol)
g.bind("comment", comment)
g.bind("prop", prop)


def get_id(nodeId):

    while nodeId and nodeId != mapping_extra_id[nodeId]:
        nodeId = mapping_extra_id[nodeId]

    return nodeId
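get_id above chases mapping_extra_id (a dict that does not appear in this excerpt) until it reaches an id that maps to itself, i.e. it resolves alias node ids to a canonical id. A toy illustration with an invented mapping:

# Invented alias table: 'n3' -> 'n2' -> 'n1', and 'n1' maps to itself.
mapping_extra_id = {'n1': 'n1', 'n2': 'n1', 'n3': 'n2'}

assert get_id('n3') == 'n1'  # the chain is followed down to the canonical id
assert get_id('n1') == 'n1'  # a canonical id resolves to itself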
Code Example #10
"""

#%% md

## Constructing an ontology from Scratch

#%%

from rdflib import Namespace, Literal, Graph
from funowl import SubObjectPropertyOf, InverseObjectProperties, FunctionalObjectProperty, \
    InverseFunctionalObjectProperty, ObjectPropertyDomain, ObjectPropertyRange, SubClassOf
from funowl.annotations import AnnotationValue

from funowl.ontology_document import OntologyDocument, Ontology

DC = Namespace("http://purl.org/dc/elements/1.1/")
TERMS = Namespace("http://purl.org/dc/terms/")
PIZZA = Namespace("http://www.co-ode.org/ontologies/pizza#")

pizza_doc = OntologyDocument()
pizza_doc.prefixes(PIZZA, dc=DC, terms=TERMS)
pizza = Ontology("http://www.co-ode.org/ontologies/pizza",
                 "http://www.co-ode.org/ontologies/pizza/2.0.0")
pizza_doc.ontology = pizza
pizza.annotation(DC.title, "pizza")
pizza.annotation(TERMS.contributor, "Alan Rector")
pizza.annotation(TERMS.contributor, "Matthew Horridge")
pizza.annotation(TERMS.contributor, "Chris Wroe")
pizza.annotation(TERMS.contributor, "Robert Stevens")
pizza.annotation(
    DC.description,
Code Example #11
from lxml import etree

tree = etree.parse('input.xml')

root = tree.getroot()

base_uri = root.get('{http://www.w3.org/XML/1998/namespace}base')
edition_id = root.get('{http://www.w3.org/XML/1998/namespace}id')

tei = {'tei': 'http://www.tei-c.org/ns/1.0'}

from rdflib import Graph, Literal, BNode, Namespace, URIRef

from rdflib.namespace import RDF, RDFS, XSD, DCTERMS, OWL

agrelon = Namespace("https://d-nb.info/standards/elementset/agrelon#")
crm = Namespace("http://www.cidoc-crm.org/cidoc-crm/")
frbroo = Namespace("http://iflastandards.info/ns/fr/frbr/frbroo/")
pro = Namespace("http://purl.org/spar/pro/")
proles = Namespace("http://www.essepuntato.it/2013/10/politicalroles/")
prov = Namespace("http://www.w3.org/ns/prov#")
ti = Namespace("http://www.essepuntato.it/2012/04/tvc/")

g = Graph()

g.bind("agrelon", agrelon)
g.bind("crm", crm)
g.bind("frbroo", frbroo)
g.bind("dcterms", DCTERMS)
g.bind("owl", OWL)
g.bind("pro", pro)
Code Example #12
class Differential_Pressure_Proportional_Band_Setpoint(
        Proportional_Band_Setpoint):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#'
                         ).Differential_Pressure_Proportional_Band_Setpoint
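This and several later snippets in this listing (VAV_Box_Operating_Mode_Status, VFD_Fault_Status, and so on) rely on the same rdflib behaviour: attribute access on a Namespace concatenates the attribute name onto the base IRI and returns a URIRef, so rdf_type ends up holding the full Brick class IRI. A minimal standalone check of that behaviour, independent of the Brick class hierarchy (whose base classes are not shown here):

from rdflib import Namespace, URIRef

BRICK = Namespace('https://brickschema.org/schema/1.0.2/Brick#')
rdf_type = BRICK.Differential_Pressure_Proportional_Band_Setpoint

assert isinstance(rdf_type, URIRef)
assert str(rdf_type) == ('https://brickschema.org/schema/1.0.2/Brick#'
                         'Differential_Pressure_Proportional_Band_Setpoint')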
Code Example #13
import csv, os, time, shutil
from rdflib import Graph, Namespace, Literal
from rdflib.namespace import RDF, XSD
from progress import Progress


def is_specified(val):
    unspecified_indicators = ['\\N']
    return val not in unspecified_indicators


identifier = 'yearsGraph'
uri_base = 'http://www.ourmoviedb.org/'
output = 'outs/' + identifier + '.ttl.n3'

# Create graph
g = Graph(identifier=identifier)
n = Namespace(uri_base)

years = range(1870, 2050)
progress = Progress(len(years))
start = time.time()

for year in range(1870, 2050):
    year_node = n['Year/' + str(year)]
    class_node = n['Year']
    g.add((year_node, RDF.type, class_node))
    g.add((year_node, n.YearValue, Literal(year, datatype=XSD.gYear)))

    # Let's name the decades
    # I want to isolate the
    if year < 2010 and year >= 1950:
        decade_prefix = str(year)[2]
Code Example #14
File: ej-rdflib-02.py  Project: ChrisLe7/ISSBC
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 29 08:28:28 2021

@author: ChrisLe
"""

from rdflib import Graph  # Import the Graph class from rdflib
from rdflib import Namespace  # Namespace lets us store our namespace objects
from rdflib import Literal  # Literal lets us store a value as a literal (a constant value)

FOAF = Namespace(
    "http://xmlns.com/foaf/0.1/"
)  # Storing the namespace this way makes it easier to use
EX = Namespace("http://example.org/foovocab#")


def main():
    # Create the graph

    s = Graph()

    #Now assign a prefix to each of the namespaces in the graph

    s.bind('dc', EX)  #bind lets us do this
    #bind(prefix, namespace)

    s.bind('foaf', FOAF)

    #Now we read the RDF file; for that we use parse
Code Example #15
File: encoder.py  Project: IBCNServices/HeadacheDSS
import json
from rdflib import URIRef, BNode, Literal, Graph, Namespace
from rdflib.namespace import RDF, FOAF, OWL, RDFS, NamespaceManager
from rdflib.extras.infixowl import Restriction, Individual
import pandas as pd
import numpy as np
from urllib.request import urlopen, quote
import SemanticProcessor.mappings as mappings
import SemanticProcessor.concepts as concepts
#from SemanticProcessor.snomed import getDescriptionsByString

chronicals = Namespace(concepts.BASE_URL)
namespace_manager = NamespaceManager(Graph())


def add_sample(g, id, symptoms, diagnose, prev_attacks, characterisation,
               severity, location, duration):
    # Creates an URI with its ID and says it's from type chron:Headache
    headache = URIRef(concepts.BASE_URL + 'headache#' + str(id))
    g.add((headache, RDF.type, concepts.headache_type))

    # For each symptom, we create another triple
    for symptom in symptoms:
        g.add((headache, concepts.symptom_predicate,
               mappings.symptom_to_URI[symptom]))

    # Triple for our diagnose
    g.add((headache, concepts.diagnose_predicate,
           mappings.diagnoses_to_URI[diagnose]))

    # Add triple with previous attacks
Code Example #16
    def __init__(self):
        self.g = Graph()
        self.n = Namespace('http://home.eps.hw.ac.uk/~qz1/')
        self.file_abs = '/Users/silvia/Documents/code/reverseProxy/db/knowledgeBase.ttl'
        self.clearGraph()
        self.getGraph()
Code Example #17
class VAV_Box_Operating_Mode_Status(Mode_Status):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#'
                         ).VAV_Box_Operating_Mode_Status
Code Example #18
# @author Yuan Jin
# @contact [email protected]
# @created Sept. 24, 2012
# @updated Sept. 25, 2012
#

import sys
reload(sys)
sys.setdefaultencoding('UTF-8')

from nltk import load_parser
from rdflib import Namespace, URIRef

# PATTERN = raw_input('enter pattern config file: ')
PATTERN = 'searchet.fcfg'
searchet = Namespace("http://searchet.baidu.com/ontology#")


class PatternLoader:
    def __init__(self, pattern):
        self.pattern = pattern
        self.c = load_parser(PATTERN)

    def pattern_converter(self):
        '''docs'''

    def load(self):
        '''docs'''
        # print str(self.c.nbest_parse(self.pattern))
        return '_'.join(self.c.nbest_parse(self.pattern)[0].node['SEM'])
Code Example #19
class Heat_Exchanger_Outside_Air_Temperature_Low_Reset_Setpoint(
        Outside_Air_Temperature_Low_Reset_Setpoint):
    rdf_type = Namespace(
        'https://brickschema.org/schema/1.0.2/Brick#'
    ).Heat_Exchanger_Outside_Air_Temperature_Low_Reset_Setpoint
Code Example #20
if args.open:
    hostname = '0.0.0.0'
else:
    hostname = socket.gethostname()

# Directory Service Graph
dsgraph = Graph()

# Bind all the namespaces we are going to use
dsgraph.bind('acl', ACL)
dsgraph.bind('rdf', RDF)
dsgraph.bind('rdfs', RDFS)
dsgraph.bind('foaf', FOAF)
dsgraph.bind('dso', DSO)

agn = Namespace("http://www.agentes.org#")
DirectoryAgent = Agent('DirectoryAgent', agn.Directory,
                       'http://%s:%d/Register' % (hostname, port),
                       'http://%s:%d/Stop' % (hostname, port))
app = Flask(__name__)
mss_cnt = 0

cola1 = Queue()  # Queue for inter-process communication


@app.route("/Register")
def register():
    """
    Entry point for the agent that receives registration messages.
    The response is sent when the function returns;
    there is no need to send the message explicitly
Code Example #21
import os
import re
from typing import Optional, Callable

from rdflib import Graph, Namespace, OWL

from biolinkml import METAMODEL_NAMESPACE
from biolinkml.utils.generator import Generator
from tests import DEFAULT_LOG_LEVEL
from tests.utils.test_environment import TestEnvironment, TestEnvironmentTestCase

BIOLINK_NS = Namespace("https://w3id.org/biolink/vocab/")


class GeneratorTestCase(TestEnvironmentTestCase):
    model_name: str = None  # yaml name (sans '.yaml')

    @staticmethod
    def _print_triples(g: Graph):
        """ Pretty print the contents of g, removing the prefix declarations """
        g.bind('BIOLINK', BIOLINK_NS)
        g.bind('meta', METAMODEL_NAMESPACE)
        g.bind('owl', OWL)
        g_text = re.sub(r'@prefix.*\n', '',
                        g.serialize(format="turtle").decode())
        print(g_text)

    def single_file_generator(self,
                              suffix: str,
                              gen: type(Generator),
                              *,
Code Example #22
from rdflib import RDF, RDFS, OWL, Namespace, Graph
import brickschema
from .util import make_readable
import sys

sys.path.append("..")
from bricksrc.namespaces import BRICK, TAG, A, SKOS  # noqa: E402

BLDG = Namespace("https://brickschema.org/schema/ExampleBuilding#")

g = Graph()
g.parse("Brick.ttl", format="turtle")

# Instances
g.add((BLDG.Coil_1, A, BRICK.Heating_Coil))

g.add((BLDG.Coil_2, BRICK.hasTag, TAG.Equipment))
g.add((BLDG.Coil_2, BRICK.hasTag, TAG.Coil))
g.add((BLDG.Coil_2, BRICK.hasTag, TAG.Heat))

g.add((BLDG.AHU1, A, BRICK.AHU))
g.add((BLDG.VAV1, A, BRICK.VAV))
g.add((BLDG.AHU1, BRICK.feedsAir, BLDG.VAV1))
g.add((BLDG.CH1, A, BRICK.Chiller))

# This gets inferred as an air temperature sensor
g.add((BLDG.TS1, A, BRICK.Temperature_Sensor))
g.add((BLDG.TS1, BRICK.measures, BRICK.Air))
g.add((BLDG.TS2, A, BRICK.Air_Temperature_Sensor))

# add air flow sensor
Code Example #23
class Heat_Exchanger_Valve_Command(Valve_Command):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Heat_Exchanger_Valve_Command
Code Example #24
File: test_support.py  Project: arcangelo7/oc_ocdm
class TestSupport(unittest.TestCase):
    resp_agent = 'http://resp_agent.test/'
    pro = Namespace("http://purl.org/spar/pro/")

    def setUp(self):
        self.graph_set = GraphSet("http://test/", "./info_dir/", "", False)
        self.prov_set = ProvSet(self.graph_set, "http://test/", "./info_dir/",
                                False)

        self.br = self.graph_set.add_br(self.resp_agent)

    def _prepare_ordered_authors_list(self, list_len):
        # First of all, we must cleanup the GraphSet:
        self.br.remove_contributor()

        for ar in self.graph_set.get_ar():
            del self.graph_set.res_to_entity[ar.res]
        for ra in self.graph_set.get_ra():
            del self.graph_set.res_to_entity[ra.res]

        # Then, we initialize a new well-formed linked list of authors:
        ar_ordered_list = []

        for i in range(list_len):
            ra = self.graph_set.add_ra(self.resp_agent)

            ar = self.graph_set.add_ar(self.resp_agent)
            ar.create_author()
            ar.is_held_by(ra)

            self.br.has_contributor(ar)
            ar_ordered_list.append(ar)

        # Here each node of the list gets linked to the next one:
        for i in range(list_len - 1):
            ar_ordered_list[i].has_next(ar_ordered_list[i + 1])

        return ar_ordered_list

    @staticmethod
    def _extract_ra_list(ar_list):
        # Here the RA list is built and returned:
        ra_list = []
        for i in range(len(ar_list)):
            ra = ar_list[i].get_is_held_by()
            if ra is not None:
                ra_list.append(ra)

        return ra_list

    def test_get_ordered_contributors_from_br(self):
        list_len = 100
        with self.subTest("Empty linked list"):
            result = get_ordered_contributors_from_br(self.br, self.pro.author)

            self.assertIsNotNone(result)
            self.assertListEqual([], result)

        with self.subTest("Well-formed linked list"):
            correct_list = self._prepare_ordered_authors_list(list_len)
            result = get_ordered_contributors_from_br(self.br, self.pro.author)

            self.assertIsNotNone(result)
            ar_list = self._extract_ra_list(correct_list)
            self.assertListEqual(ar_list, result)

        with self.subTest("Linked list with a loop"):
            correct_list = self._prepare_ordered_authors_list(list_len)
            # Here we corrupt the well-formed linked list introducing a loop:
            correct_list[80].has_next(correct_list[50])

            self.assertRaises(ValueError, get_ordered_contributors_from_br,
                              self.br, self.pro.author)

        with self.subTest("Linked list split in two sub-lists"):
            correct_list = self._prepare_ordered_authors_list(list_len)
            # Here we corrupt the well-formed linked list by splitting it into two sub-lists:
            correct_list[64].remove_next()

            self.assertRaises(ValueError, get_ordered_contributors_from_br,
                              self.br, self.pro.author)
Code Example #25
class AHU_Supply_Air_Temperature_Sensor(Discharge_Air_Temperature_Sensor,
                                        Supply_Air_Temperature_Sensor):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#'
                         ).AHU_Supply_Air_Temperature_Sensor
Code Example #26
    def __init__(self,
                 graph: Graph = None,
                 thesaurus_path="data/agrovoc.rdf",
                 skos_xl_labels=True,
                 lang="fr"):

        if graph is None:
            self.graph = Graph()
        else:
            self.graph = graph
        logger.info("Loading thesaurus... [{}]".format(thesaurus_path))

        self.graph.load(thesaurus_path)

        string_entries = []

        if skos_xl_labels:
            query = f"""SELECT ?x ?lf WHERE {{
                ?x a skos:Concept;
                skosxl:prefLabel ?l.
                ?l skosxl:literalForm ?lf.
                FILTER(lang(?lf)='{lang}')
            }}
            """
            pref_labels = self.graph.query(
                query,
                initNs={
                    'skos': Namespace("http://www.w3.org/2004/02/skos/core#"),
                    'skosxl': Namespace("http://www.w3.org/2008/05/skos-xl#")
                })
        else:
            query = f"""SELECT ?x ?lf WHERE {{
                 ?x a skos:Concept;
                 skos:prefLabel ?lf.
                 FILTER(lang(?lf)='{lang}')
             }}
             """
            pref_labels = self.graph.query(
                query,
                initNs=dict(
                    skos=Namespace("http://www.w3.org/2004/02/skos/core#")))

        for result in pref_labels:
            string_entries.append((str(result[0]), str(result[1])))

        if skos_xl_labels:
            query = f"""SELECT ?x ?lf WHERE {{
                ?x a skos:Concept;
                skosxl:prefLabel ?l.
                ?l skosxl:literalForm ?lf.
                FILTER(lang(?lf)='{lang}')
            }}
        """
            alt_labels = self.graph.query(
                query,
                initNs=dict(
                    skos=Namespace("http://www.w3.org/2004/02/skos/core#"),
                    skosxl=Namespace("http://www.w3.org/2008/05/skos-xl#")))
        else:
            query = f"""SELECT ?x ?lf WHERE {{
            ?x a skos:Concept;
            skos:altLabel ?lf.
            FILTER(lang(?lf)='{lang}')
        }}
        """
            alt_labels = self.graph.query(
                query,
                initNs=dict(
                    skos=Namespace("http://www.w3.org/2004/02/skos/core#")))

        for result in alt_labels:
            string_entries.append((str(result[0]), str(result[1])))
        dictionary_loader = StringDictionaryLoader(string_entries)
        dictionary_loader.load()

        if lang == 'fr':
            self.concept_recognizer = IntersStemConceptRecognizer(
                dictionary_loader, "data/stopwordsfr.txt",
                "data/termination_termsfr.txt")
        else:
            self.concept_recognizer = IntersStemConceptRecognizer(
                dictionary_loader, "data/stopwordsen.txt",
                "data/termination_termsen.txt")

        self.concept_recognizer.initialize()
Code Example #27
from rdflib import Graph, Namespace
from rdflib.serializer import Serializer
from rdflib.namespace import RDF, RDFS, XSD
import requests
import csv
import re
import hashlib
import docker
import time

client = docker.from_env()
container_it = client.containers.get("c9e896401036")
container_en = client.containers.get("233d97c53c0a")

g = Graph()

cro = Namespace("http://www.cityrank.org/ontology/")
geo = Namespace("https://schema.org/GeoCoordinates")
plc = Namespace("https://schema.org/Place")
schema = Namespace("https://schema.org")
owl = Namespace("http://www.w3.org/2002/07/owl#")
addr = Namespace("https://schema.org/PostalAddress")
rdfs = Namespace("http://www.w3.org/2000/01/rdf-schema#")
base = "http://www.cityrank.org/resource/"
dbpediaurl = "http://dbpedia.org/resource/"
not_found = {}
g.bind("cro", cro)
g.bind("geo", geo)
g.bind("schema", schema)
g.bind("addr", addr)
g.bind("owl", owl)
g.bind("rdfs", rdfs)
Code Example #28
class Water_System_Water_Meter(Water_Meter):
    rdf_type = Namespace(
        'https://brickschema.org/schema/1.0.2/Brick#').Water_System_Water_Meter
Code Example #29
import rdflib
from rdflib	import URIRef
from rdflib	import Literal
from rdflib	import BNode
from rdflib	import Namespace
if rdflib.__version__ >= "3.0.0" :
	from rdflib	import RDF  as ns_rdf
	from rdflib	import RDFS as ns_rdfs
	from rdflib	import Graph
else :
	from rdflib.RDFS  import RDFSNS as ns_rdfs
	from rdflib.RDF	  import RDFNS  as ns_rdf
	from rdflib.Graph import Graph

# Namespace, in the RDFLib sense, for the rdfa vocabulary
ns_rdfa		= Namespace("http://www.w3.org/ns/rdfa#")

from .extras.httpheader   import acceptable_content_type, content_type
from .transform.prototype import handle_prototypes

# Vocabulary terms for vocab reporting
RDFA_VOCAB  = ns_rdfa["usesVocabulary"]

# Namespace, in the RDFLib sense, for the XSD Datatypes
ns_xsd		= Namespace('http://www.w3.org/2001/XMLSchema#')

# Namespace, in the RDFLib sense, for the distiller vocabulary, used as part of the processor graph
ns_distill	= Namespace("http://www.w3.org/2007/08/pyRdfa/vocab#")

debug = False
Code Example #30
def main():
    indra_ns = 'http://sorger.med.harvard.edu/indra/'
    rn = Namespace(indra_ns + 'relations/')
    en = Namespace(indra_ns + 'entities/')
    g = Graph()

    isa = rn.term('isa')

    g.add((en.term('phosphorylation'), isa, en.term('modification')))
    g.add((en.term('ubiquitination'), isa, en.term('modification')))
    g.add((en.term('sumoylation'), isa, en.term('modification')))
    g.add((en.term('acetylation'), isa, en.term('modification')))
    g.add((en.term('hydroxylation'), isa, en.term('modification')))

    save_hierarchy(g, hierarchy_path)
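save_hierarchy and hierarchy_path are not defined in this excerpt, and the rdflib imports are likewise assumed. A hedged stand-in that would make the snippet runnable is sketched below; INDRA's actual helper may differ (the save_ontology function in Code Example #7 above, which also sorts the serialized lines, is probably closer to the real thing).

# Hypothetical definitions, for illustration only.
hierarchy_path = 'modification_hierarchy.rdf'

def save_hierarchy(g, path):
    data = g.serialize(format='nt')
    if isinstance(data, str):  # rdflib >= 6 returns str, older versions return bytes
        data = data.encode('utf-8')
    with open(path, 'wb') as out_file:
        out_file.write(data)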
Code Example #31
File: redfoot-0.9.1.py  Project: eikeon/redfoot
try:
    import rdflib
except:
    show_install_message("rdflib not found")
else:
    version = tuple([int(x) for x in rdflib.__version__.split(".", 2)])    
    REQUIRED = (2, 1, 0)
    if version < REQUIRED:
        show_install_message("rdflib %s or greater required. Found rdflib version %s" % ("%s.%s.%s" % REQUIRED, rdflib.__version__))        

logging.info("rdflib version: %s" % rdflib.__version__)

from rdflib import RDF, RDFS, Graph, URIRef, BNode, Namespace

REDFOOT = Namespace("http://redfoot.net/2005/redfoot#")
REDFOOT.Globals = REDFOOT["Globals"]
REDFOOT.program = REDFOOT["program"]
REDFOOT.code = REDFOOT["code"]
REDFOOT.Defaults = REDFOOT["Defaults"]


class BootLoader(Graph):

    def __init__(self, backend):
        backend = backend or "Sleepycat"
        super(BootLoader, self).__init__(backend)
        self.__log = None        
        self.__config = None

    def __get_log(self):
Code Example #32
class VFD_Fault_Status(VFD_Status,Fault_Status):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').VFD_Fault_Status
Code Example #33
class CRAC_Low_Humidity_Alarm(Low_Humidity_Alarm):
    rdf_type = Namespace(
        'https://brickschema.org/schema/1.0.2/Brick#').CRAC_Low_Humidity_Alarm
Code Example #34
#    You should have received a copy of the GNU Lesser General Public License
#    along with RDF-REST.  If not, see <http://www.gnu.org/licenses/>.

from time import sleep

from nose.tools import assert_raises
from rdflib import BNode, Graph, Literal, Namespace, RDF, RDFS, XSD

import example2  # can not import do_tests directly, nose tries to run it...
from example2 import EXAMPLE, Group2Implementation, Item2Implementation, \
    make_example2_service
from rdfrest.exceptions import InvalidDataError
from rdfrest.cores.factory import unregister_service
from rdfrest.util.config import get_service_configuration

OTHER = Namespace("http://example.org/other/")

EXPECTING_LITERAL = (EXAMPLE.label, )


class TestMixins:

    #ROOT_URI = URIRef("http://localhost:11235/foo/")
    service = None
    root = None
    item = None

    def setUp(self):
        service_config = get_service_configuration()
        service_config.set('server', 'port', '11235')
        service_config.set('server', 'base-path', '/foo')
Code Example #35
class UnobservedSignal(Signal):
    rdf_type = Namespace(
        'https://brickschema.org/schema/1.0.2/BrickFrame#').UnobservedSignal
Code Example #36
import sys
from rdflib import Graph, Namespace, Literal
import csv
import urllib2

if __name__ == '__main__':
    indra_ns = 'http://sorger.med.harvard.edu/indra/'
    if len(sys.argv) > 1:
        proteins_file = sys.argv[1]
    else:
        proteins_file = '../../data/ras_pathway_proteins.csv'
    rn = Namespace(indra_ns + 'relations/')
    en = Namespace(indra_ns + 'entities/')
    g = Graph()

    has_name = rn.term('hasName')
    has_long_name = rn.term('hasLongName')
    has_synonym = rn.term('hasSynonym')
    isa = rn.term('isa')

    # Read BEL family names
    res = urllib2.urlopen('http://resource.belframework.org/belframework/'+\
        'latest-release/namespace/selventa-protein-families.belns')
    belns_text = res.read()
    start = belns_text.find('[Values]')
    lines = belns_text[start:].split('\n')
    bel_family_names = []
    for l in lines:
        if l.endswith(' Family|P'):
            family_name = l[:-2].replace(' ', '_').replace('/', '_')
            bel_family_names.append(family_name)
Code Example #37
class Medium_Temperature_Hot_Water_Differential_Pressure_Load_Shed_Status(
        Medium_Temperature_Hot_Water, Differential_Pressure_Load_Shed_Status):
    rdf_type = Namespace(
        'https://brickschema.org/schema/1.0.2/Brick#'
    ).Medium_Temperature_Hot_Water_Differential_Pressure_Load_Shed_Status
Code Example #38
File: EccrevDiff.py  Project: AKSW/QuitDiff
    def serialize(self, add, delete):

        commit = Namespace("urn:commit:" + str(uuid.uuid1()) + ":")
        eccrev = Namespace("https://vocab.eccenca.com/revision/")

        g = ConjunctiveGraph()
        namespace_manager = NamespaceManager(g)
        namespace_manager.bind('eccrev', eccrev, override=False)

        g.add((commit.term(""), RDF.type, eccrev.Commit))

        graphUris = set(delete.keys()) | set(add.keys())

        for graphUri in graphUris:
            if (graphUri in delete.keys() and len(delete[graphUri]) > 0) or (graphUri in add.keys() and len(add[graphUri]) > 0):
                revision = Namespace("urn:revision:" + str(uuid.uuid1()) + ":")
                g.add((commit.term(""), eccrev.hasRevision, revision.term("")))
                g.add((revision.term(""), RDF.type, eccrev.Revision))
                if str(graphUri) != 'http://quitdiff.default/':
                    g.add((revision.term(""), eccrev.hasRevisionGraph, graphUri))
                if graphUri in delete.keys() and len(delete[graphUri]) > 0:
                    deleteGraphName = revision.term(":delete")
                    g.add((revision.term(""), eccrev.deltaDelete, deleteGraphName))
                    for triple in delete[graphUri]:
                        g.add(triple + (deleteGraphName,))
                if graphUri in add.keys() and len(add[graphUri]) > 0:
                    insertGraphName = revision.term(":insert")
                    g.add((revision.term(""), eccrev.deltaInsert, insertGraphName))
                    for triple in add[graphUri]:
                        g.add(triple + (insertGraphName,))

        return g.serialize(format="trig").decode("utf-8")
Code Example #39
def populate_graph(articles, persons, relationships):
    """
    Adds triples to an RDF graph with the following structure:

        Person triples: we store only the wikidata id and the surface string(s) by which the person
        is referred to in the news articles;
            <wiki_URI, SKOS.altLabel, name>

        Article triples:
            <url, DC.title, title>
            <url, DC.date, date>

        Relationships as Blank Node:
            - <_rel, ns1.type, rel_type>
            - <_rel, ns1.score, rel_score>
            - <_rel, ns1.url, url>
            - g.add((_rel, ns1.ent1, URIRef(f"http://www.wikidata.org/entity/{rel.ent1}")))
            - g.add((_rel, ns1.ent2, URIRef(f"http://www.wikidata.org/entity/{rel.ent2}")))
            - g.add((_rel, ns1.ent1_str, Literal(rel.ent1_str)))
            - g.add((_rel, ns1.ent2_str, Literal(rel.ent2_str)))

    NOTE: linked-data vocabularies can be seen here: https://lov.linkeddata.es/dataset/lov/
    """
    g = Graph()
    ns1 = Namespace("http://www.politiquices.pt/")
    g.bind("politiquices", ns1)
    wiki_prop = Namespace("http://www.wikidata.org/prop/direct/")
    wiki_item = Namespace("http://www.wikidata.org/entity/")
    g.bind("wd", wiki_item)
    g.bind("wdt", wiki_prop)

    print("\nadding Persons")
    for wikidata_id, person in persons.items():

        # state that in 'politiquices' this is a human, following the same as wikidata.org
        g.add((URIRef(f"http://www.wikidata.org/entity/{wikidata_id}"),
               wiki_prop.P31, wiki_item.Q5))
        for name in person.known_as:
            g.add((
                URIRef(f"http://www.wikidata.org/entity/{wikidata_id}"),
                SKOS.altLabel,
                Literal(name, lang="pt"),
            ))

    print("adding Articles")
    for article in articles:
        g.add((URIRef(article.url), DC.title, Literal(article.title,
                                                      lang="pt")))
        g.add((URIRef(article.url), DC.date,
               Literal(article.crawled_date, datatype=XSD.date)))

    print("adding Relationships")
    for rel in relationships:
        _rel = BNode()
        g.add((_rel, ns1.type, Literal(rel.rel_type)))
        g.add((_rel, ns1.score, Literal(rel.rel_score, datatype=XSD.float)))
        g.add((_rel, ns1.url, URIRef(rel.url)))
        g.add((_rel, ns1.ent1,
               URIRef(f"http://www.wikidata.org/entity/{rel.ent1}")))
        g.add((_rel, ns1.ent2,
               URIRef(f"http://www.wikidata.org/entity/{rel.ent2}")))
        g.add((_rel, ns1.ent1_str, Literal(rel.ent1_str)))
        g.add((_rel, ns1.ent2_str, Literal(rel.ent2_str)))

    date_time = datetime.now().strftime("%Y-%m-%d_%H%M")
    f_name = f"politiquices_{date_time}.ttl"
    g.serialize(destination=f_name, format="turtle")
    print("graph has {} statements.".format(len(g)))
    print()
    print("persons      : ", len(persons))
    print("articles     : ", len(articles))
    print("relationships: ", len(relationships))
    print()
Code Example #40
def main():
    indra_ns = 'http://sorger.med.harvard.edu/indra/'
    rn = Namespace(indra_ns + 'relations/')
    act = Namespace(indra_ns + 'activities/')
    g = Graph()

    isa = rn.term('isa')

    g.add((act.term('transcription'), isa, act.term('activity')))
    g.add((act.term('catalytic'), isa, act.term('activity')))
    g.add((act.term('gtpbound'), isa, act.term('activity')))
    g.add((act.term('kinase'), isa, act.term('catalytic')))
    g.add((act.term('phosphatase'), isa, act.term('catalytic')))
    g.add((act.term('gef'), isa, act.term('catalytic')))
    g.add((act.term('gap'), isa, act.term('catalytic')))

    save_hierarchy(g, hierarchy_path)
Code Example #41
class VFD_Current_Sensor(Current_Sensor):
    rdf_type = Namespace(
        'https://brickschema.org/schema/1.0.2/Brick#').VFD_Current_Sensor
Code Example #42
# -*- coding: utf-8 -*-
from pymongo import MongoClient
from rdflib import Graph, Literal, BNode, Namespace, RDF, URIRef
from rdflib.namespace import DC, FOAF, XSD
from entity_to_rdf import EntityConvert

lobbyFacts = Namespace('https://studi.f4.htw-berlin.de/~s0539710/lobbyradar#')
lobbyOntology = Namespace('https://studi.f4.htw-berlin.de/~s0539710/lobbyradar/ontology#')
org = Namespace('http://www.w3.org/ns/org#')
rdf = Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
vcard = Namespace('http://www.w3.org/2006/vcard/ns#')

# Connect to database and create variables for the collections
client = MongoClient()
db = client.lobbyradar
entities = db.entities
relations = db.relations
# Print some statistics about our database dump
print("Collections in database: " + ", ".join(db.collection_names(include_system_collections=False)))
print("Entities: %s documents" % entities.count())
print("Relations: %s documents" % relations.count())

# Create a graph and prepare by loading ontology and binding prefixes
g = Graph()
relationGraph = Graph()
converter = EntityConvert(g)

entity = entities.find_one({'name': 'Rudolf Henke'})
all_entities = entities.find()
for i,e in enumerate(all_entities):
    if (i % (entities.count() / 20) == 0):
Code Example #43
File: generateDoc.py  Project: rapw3k/glosis
# Check the README file for full details.
#
# This programme takes as sole argument the ontology module name:
#
# python3 generateDoc.py glosis_main
#
# SPDX-License-Identifier: CC-BY-NC-SA-3.0-IGO

import os
import sys
import json
import configparser
from rdflib import Graph, Namespace
from rdflib.namespace import RDF, FOAF, RDFS, OWL

DCT = Namespace("http://purl.org/dc/terms/")
SCHEMA = Namespace("http://schema.org/")

mapPreds = {
    "thisVersionURI": OWL.versionIRI,
    "ontologyRevisionNumber": OWL.versionInfo,
    "ontologyTitle": DCT.title,
    "abstract": DCT.abstract
}

widoco_jar = "/opt/widoco/widoco-1.4.15-jar-with-dependencies.jar"
ont_file = "glosis_main"
templ_file = "template.json"

creators = []
contributors = []