import time, logging, math, os, sys, socket
from influxdb import InfluxDBClient
from rdflib import Namespace
from twisted.internet import task
from greplin import scales

log = logging.getLogger()
ROOM = Namespace('http://projects.bigasterisk.com/room/')

stats = scales.collection(
    '/export_to_influxdb',
    scales.PmfStat('exportToInflux'),
)


class RetentionPolicies(object):
    def __init__(self, influx):
        self.influx = influx
        self.createdPolicies = set() # days

    def getCreatedPolicy(self, days):
        name = 'ret_%d_day' % days
        if days not in self.createdPolicies:
            self.influx.create_retention_policy(name,
                                                duration='%dd' % days,
                                                replication='1')
            self.createdPolicies.add(days)
        return name
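
A minimal usage sketch for RetentionPolicies; the host, port, and database
name here are assumptions, not part of the original module:

# hypothetical wiring; adjust host/database to your setup
influx = InfluxDBClient('localhost', 8086, database='example_db')
policies = RetentionPolicies(influx)
name = policies.getCreatedPolicy(days=7)  # creates 'ret_7_day' once, then caches
influx.write_points([{'measurement': 'temp', 'fields': {'value': 21.5}}],
                    retention_policy=name)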

class InfluxExporter(object):
    def __init__(self, configGraph, influxHost='bang5'):
Example #2
def __init__(self, config, counter):
    self.config = config
    self.counter = counter

    self.SKOS = Namespace("http://www.w3.org/2004/02/skos/core#")
    self.nsdict = dict(skos=self.SKOS)
Example #3
import uuid, re
from django.db import transaction
from django.db.models import Q
from django.utils.http import urlencode
from rdflib import Literal, Namespace, RDF, URIRef
from rdflib.namespace import SKOS, DCTERMS
from rdflib.graph import Graph
from time import time
from arches.app.models import models
from arches.app.models.concept import Concept
from arches.app.models.system_settings import settings
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer

# define the ARCHES namespace
ARCHES = Namespace('http://www.archesproject.org/')

class SKOSReader(object):
    def __init__(self):
        self.nodes = []
        self.relations = []

    def read_file(self, path_to_file, format='xml'):
        """
        parse the skos file and extract all available data

        """

        rdf_graph = Graph()

        # bind the namespaces
Example #4
from datetime import datetime
import random

from .config import CONFIG, DATA_DIR, PROJECT_NAME, DATASET_FILEPATH, MATCHES_FILEPATH, EXPORT_DIR, PROV_AGENT, NS
from .csv import read_csv, write_csv
from .utils import source_files
from pit.prov import Provenance

from .rdf_dataset import RdfDataset
from rdflib import Namespace, RDF  # needed for the namespace definitions below

PROCESS_COUNT = 8

# KIRBY NAMESPACES
KIRBY_ENTRY = "http://kirby.diggr.link/entry"
KIRBY_DATASET = "http://kirby.diggr.link/dataset"
KIRBY_PROP = Namespace("http://kirby.diggr.link/property/")
KIRBY_MATCH_NS = Namespace("http://kirby.diggr.link/match/")

# KIRBY ENTITY TYPES
KIRBY_CORE = Namespace("http://kirby.diggr.link/")
SOURCE_DATASET = KIRBY_CORE.Dataset
SOURCE_DATASET_ROW = KIRBY_CORE.DatasetRow
KIRBY_MATCH = KIRBY_CORE.Match

# JSON-LD CONTEXT
CONTEXT = {
    "rdf": str(RDF),
    "kirby": str(KIRBY_CORE),
    "kirby_ds": KIRBY_DATASET,
    "kirby_prop": str(KIRBY_PROP)
}
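
The CONTEXT mapping above is built for JSON-LD output; a hedged sketch of
applying it (the triples here are made up for illustration):

from rdflib import Graph, Literal

g = Graph()
g.add((KIRBY_MATCH_NS['m1'], RDF.type, KIRBY_MATCH))
g.add((KIRBY_MATCH_NS['m1'], KIRBY_PROP['score'], Literal(0.9)))
# rdflib >= 6 bundles the JSON-LD serializer; older versions need rdflib-jsonld
print(g.serialize(format='json-ld', context=CONTEXT))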
Example #5
class Chilled_Water_Differential_Pressure(Chilled_Water):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#'
                         ).Chilled_Water_Differential_Pressure
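
These generated Brick classes all carry the same rdf_type pattern; a hedged
sketch of how the attribute might be used to tag an instance (the point URI
and Graph wiring are assumptions, and the base classes come from the rest of
the generated module):

from rdflib import Graph, RDF, URIRef

g = Graph()
point = URIRef('http://example.com/building#chw_dp_1')
g.add((point, RDF.type, Chilled_Water_Differential_Pressure.rdf_type))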
Example #6
"""
This filters a graph by changing every subject with a
``foaf:mbox_sha1sum`` into a new subject whose URI is based on the
``sha1sum``. This new graph might be easier to do some operations on.

An advantage of this approach over other methods for collapsing BNodes
is that I can incrementally process new FOAF documents as they come in
without having to access my ever-growing archive. Even if another
``65b983bb397fb71849da910996741752ace8369b`` document comes in next
year, I would still give it the same stable subject URI that merges
with my existing data.
"""

from rdflib import Graph, Namespace
from rdflib.namespace import FOAF

STABLE = Namespace("http://example.com/person/mbox_sha1sum/")

if __name__ == "__main__":
    g = Graph()
    g.parse("smushingdemo.n3", format="n3")

    newURI = {}  # old subject : stable uri
    for s, p, o in g.triples((None, FOAF["mbox_sha1sum"], None)):
        newURI[s] = STABLE[o]

    out = Graph()
    out.bind("foaf", FOAF)

    for s, p, o in g:
        s = newURI.get(s, s)
        o = newURI.get(o, o)  # might be linked to another person
        out.add((s, p, o))

    print(out.serialize(format="n3"))
Example #7
# assumed context: the original module imports rdflib and project namespaces
from rdflib import Graph, Literal, Namespace, RDF, RDFS, OWL, XSD
# NSUtil, PRIVVULN and PRIVVULNV2 are project-specific helpers/namespaces
g1 = Graph()  # assumed to be created earlier in the original module

SBUILDING = NSUtil.get_namespase_domain_smart_building()

g1.bind('rdf', RDF)
g1.bind('rdfs', RDFS)
g1.bind('owl', OWL)
g1.bind('xsd', XSD)

# custom namespace
g1.bind('privvuln', PRIVVULN)

g1.bind('privvulnv2', PRIVVULNV2)

g1.bind('sbuilding', SBUILDING)

# model namespace
M = Namespace(
    'https://ontology.hviidnet.com/2020/01/03/privacyvunl-model.ttl#')
g1.bind('m', M)

floor3_room1 = M['Teaching_Room_1']
g1.add((floor3_room1, RDF.type, SBUILDING.Teaching_Room))

scheduleActivities = M['ScheduleActivities']
g1.add((scheduleActivities, RDF.type, SBUILDING.ScheduleActivities))
g1.add((scheduleActivities, RDF.type, PRIVVULN.External))
g1.add((floor3_room1, PRIVVULNV2.has, scheduleActivities))

roomCountingLinesStream = M['CountingLinesStream']
g1.add((roomCountingLinesStream, RDF.type, SBUILDING.CountingLine))
g1.add((roomCountingLinesStream, RDF.type, PRIVVULN.TimeSeries))
g1.add((roomCountingLinesStream, PRIVVULNV2.TemporalResolution,
        Literal("60", datatype=XSD.double)))
Example #8
class CWS_Chilled_Water_Differential_Pressure_Setpoint(Chilled_Water_Differential_Pressure_Setpoint):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').CWS_Chilled_Water_Differential_Pressure_Setpoint
Example #9
class Medium_Temperature_Hot_Water_Differential_Pressure_Sensor(
        Differential_Temperature_Sensor,
        Hot_Water_Differential_Pressure_Sensor, Medium_Temperature_Hot_Water):
    rdf_type = Namespace(
        'https://brickschema.org/schema/1.0.2/Brick#'
    ).Medium_Temperature_Hot_Water_Differential_Pressure_Sensor
Example #10

def __init__(self):
    self.MODELS = Namespace(
        'https://ontology.hviidnet.com/2020/01/03/privacyvunl-model.ttl#')
    super().__init__(self.__DOMAINNAMESPACE__)
Example #11
import random
from collections import OrderedDict
import pdb
from operator import itemgetter

import rdflib
from rdflib import Graph, URIRef, Literal, Namespace
from rdflib import RDF, RDFS

BASE = Namespace("http://brickschema.org/example#")
BRICK = Namespace("http://brickschema.org/schema/brick#")
BF = Namespace("http://brickschema.org/schema/brickframe#")

def gen_index(ref):
    if ref:
        return ref
    else:
        return str(random.randint(0,100))

with open("Berkeley_SodaHall_MetadataParseKey", "r") as fp:
    building_dict = dict()
    for line in fp:
        line = line.replace(" ", "").replace("\t", "").replace("\n", "")
        srcid, line = line.split("=>")
        item_list = line.split(";")
        item_dict = dict()
        ref_dict = dict()
        for item in item_list:
            if item=="":
                continue
            key, val = item.split("->")
Example #12
class Cooling_Max_Supply_Air_Flow_Setpoint(Supply_Air_Flow_Setpoint,Cooling_Discharge_Air_Flow_Setpoint):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Cooling_Max_Supply_Air_Flow_Setpoint
Example #13
class Discharge_Air_Static_Pressure_Increase_Decrease_Step(
        Discharge_Air_Static_Pressure):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#'
                         ).Discharge_Air_Static_Pressure_Increase_Decrease_Step
Example #14

class Hot_Water_Pump_VFD_Speed(Hot_Water_Pump):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Hot_Water_Pump_VFD_Speed
Example #15
class Weather_Wind_Direction_Sensor(Wind_Direction_Sensor):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#'
                         ).Weather_Wind_Direction_Sensor
Example #16
'''
Created on 11/04/2011

@author: acalvo
'''
import rdflib
from rdflib import Graph, Namespace
from rdflib import plugin
FOAF = Namespace("http://xmlns.com/foaf/0.1/")
RDFS = Namespace("http://www.w3.org/2000/01/rdf-schema#")

if False:
    plugin.register('sparql', rdflib.query.Processor,
                    'rdfextras.sparql.processor', 'Processor')
    plugin.register('sparql', rdflib.query.Result, 'rdfextras.sparql.query',
                    'SPARQLQueryResult')

my_data = '''
        @prefix dc:   <http://purl.org/dc/elements/1.1/> .
        @prefix :     <http://example.org/book/> .
        @prefix ns:   <http://example.org/ns#> .
        
        :book1  dc:title  "SPARQL Tutorial" .
        :book1  ns:price  42 .
        :book2  dc:title  "The Semantic Web" .
        :book2  ns:price  23 .
     '''
g = Graph()
g.parse(data=my_data, format="n3")
print(g.serialize(format='xml'))
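
The snippet sets up the book/price data but never queries it; a hedged sketch
of a query over the data above (modern rdflib has SPARQL built in, so the
plugin registration above is only needed for old rdfextras setups):

q = '''
    PREFIX dc: <http://purl.org/dc/elements/1.1/>
    PREFIX ns: <http://example.org/ns#>
    SELECT ?title ?price WHERE {
        ?b dc:title ?title ; ns:price ?price .
        FILTER(?price < 30)
    }
'''
for row in g.query(q):
    print(row.title, row.price)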
Example #17

class Return_Air_Preheat_Valve_Command(Preheat_Valve_Command,Return_Air):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Return_Air_Preheat_Valve_Command
Example #18
# script to convert accessMessages.csv (OPAC messages) to json-ld
# requires libraries/plugins: rdflib, rdflib-jsonld

from rdflib.namespace import RDF, SKOS
from rdflib import Namespace, Graph, plugin
from rdflib.serializer import Serializer
import rdflib
import csv

f = open('../csv/accessMessages.csv')
reader = csv.DictReader(f)

nypl = Namespace('http://data.nypl.org/nypl-core/')
skos = Namespace('http://www.w3.org/2004/02/skos/core#')
# dcterms = Namespace('http://purl.org/dc/terms/')
nyplAccessMessage = rdflib.URIRef('http://data.nypl.org/accessMessages/')

g = Graph()

for r in reader:
    id = r['skos:notation']
    type = 'nypl:AccessMessage'
    preflabel = rdflib.Literal(r['skos:prefLabel'])
    altlabel = rdflib.Literal(r['skos:altLabel'])
    notation = rdflib.Literal(r['skos:notation'])
    #    locationType = r['nypl:locationType'].split(';')
    accessMessage = nyplAccessMessage + str(id)

    g.add((accessMessage, RDF.type, nypl.AccessMessage))
    g.add((accessMessage, SKOS.prefLabel, preflabel))
    g.add((accessMessage, SKOS.altLabel, altlabel))
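
The header comment says the goal is JSON-LD output; a hedged sketch of the
final step (the context mapping here is an assumption):

context = {'nypl': str(nypl), 'skos': str(skos)}
print(g.serialize(format='json-ld', context=context, indent=4))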
Example #19
class On_Timer_Sensor(Timer_Sensor):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').On_Timer_Sensor
Example #20
__author__ = 'chimezieogbuji'
import sys
from FuXi.Syntax.InfixOWL import *
from FuXi.DLP import SkolemizeExistentialClasses, \
    SKOLEMIZED_CLASS_NS, \
    LloydToporTransformation, \
    makeRule
from FuXi.Horn.HornRules import HornFromN3
from FuXi.Rete.RuleStore import SetupRuleStore
from FuXi.SPARQL.BackwardChainingStore import TopDownSPARQLEntailingStore
from rdflib.Graph import Graph
from cStringIO import StringIO
from rdflib import plugin, RDF, RDFS, OWL, URIRef, Literal, Variable, BNode, Namespace

LIST_NS = Namespace('http://www.w3.org/2000/10/swap/list#')
KOR_NS = Namespace('http://korrekt.org/')
EX_NS = Namespace('http://example.com/')
EX_CL = ClassNamespaceFactory(EX_NS)

derivedPredicates = [
    LIST_NS['in'], KOR_NS.subPropertyOf, RDFS.subClassOf, OWL.onProperty,
    OWL.someValuesFrom
]

hybridPredicates = [RDFS.subClassOf, OWL.onProperty, OWL.someValuesFrom]

CONDITIONAL_THING_RULE = \
"""
@prefix kor:    <http://korrekt.org/>.
@prefix owl:    <http://www.w3.org/2002/07/owl#>.
Example #21
# assumed imports for this snippet:
import argparse
from datetime import datetime
import pandas as pd
from rdflib import Graph, Namespace

parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file')

args = parser.parse_args()

if args.file:
    filename = args.file
else:
    filename = input('Enter filename (including \'.csv\'): ')

dt = datetime.now().strftime('%Y-%m-%d_%H.%M.%S')

df_1 = pd.read_csv(filename, header=0)
fast_ids = df_1.fast_id.to_list()

skos = Namespace('http://www.w3.org/2004/02/skos/core#')
schema = Namespace('http://schema.org/')
rdfs = Namespace('http://www.w3.org/2000/01/rdf-schema#')
baseURL = 'http://id.worldcat.org/fast/'
ext = '.rdf.xml'

all_items = []
for fast_id in fast_ids:
    g = Graph()
    id = fast_id[3:].strip()
    link = baseURL + id + ext
    graph = g.parse(link, format='xml')
    objects = graph.objects(subject=None, predicate=schema.sameAs)
    tinyDict = {'fast_id': fast_id}
    for object in objects:
        label = graph.value(subject=object, predicate=rdfs.label)
Example #22

class Discharge_Air_Temperature_Reset_Differential_Setpoint(Temperature_Differential_Reset_Setpoint,Discharge_Air_Temperature_Setpoint,Supply_Air_Temperature_Setpoint):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Discharge_Air_Temperature_Reset_Differential_Setpoint
Example #23
import logging
import urllib.parse

from django.core.management.base import BaseCommand
from django.db import reset_queries
from rdflib import Namespace, ConjunctiveGraph, URIRef
try:
    import simplejson as json
except ImportError:
    import json

from core import models
from core.management.commands import configure_logging

configure_logging("openoni_link_places.config", "openoni_link_places.log")
_logger = logging.getLogger(__name__)

geo = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#')
owl = Namespace('http://www.w3.org/2002/07/owl#')
dbpedia = Namespace('http://dbpedia.org/ontology/')


class Command(BaseCommand):
    def handle(self, **options):
        _logger.debug("linking places")
        for place in models.Place.objects.filter(dbpedia__isnull=True):
            if not place.city or not place.state:
                continue

            # formulate a dbpedia place uri
            path = urllib.parse.quote(
                '%s,_%s' % (_clean(place.city), _clean(place.state)))
            url = URIRef('http://dbpedia.org/resource/%s' % path)
Example #24

# assumed imports for this snippet; BibParser is a project-specific helper.
# note: the hashlib.sha1(str(...)) calls below are Python 2 style; under
# Python 3 the strings must be encoded to bytes first.
import base64, hashlib, os
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import DCTERMS, FOAF, RDF, RDFS

def TripleMaker(file_dir):
    '''
    Walks a directory tree, parses each .nxml.gz file with BibParser, and
    populates a graph with bibliographic triples. The graph is then
    serialized to Turtle in a sibling 'metadata_triples' directory.
    :param file_dir: path to a directory containing .nxml.gz files
    :return: nothing is returned
    '''
    g = Graph()  # create graph to store triples

    #add namespaces
    ccp_ext = Namespace("http://ccp.ucdenver.edu/obo/ext/")
    bibo = Namespace("http://purl.org/ontology/bibo/")
    obo = Namespace("http://purl.obolibrary.org/obo/")
    edam = Namespace("http://edamontology.org/")
    swo = Namespace("http://www.ebi.ac.uk/swo/")

    # iterate over folders within the input directory
    for subdir, dirs, files in os.walk(file_dir):
        for sub_file in files:
            item = os.path.join(subdir, sub_file)
            if item.endswith('.nxml.gz'):
                c_inf = BibParser(item)

                #create triples - article class
                g.add((URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0])),
                       RDFS.subClassOf, URIRef(str(obo) + 'IAO_0000311')))
                g.add((URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0])),
                       URIRef(str(bibo) + 'pmid'), Literal(str(c_inf[1]))))
                g.add((URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0])),
                       URIRef(str(ccp_ext) + 'BIBO_EXT_0000001'),
                       Literal('PMC' + str(c_inf[0]))))
                g.add((URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0])),
                       DCTERMS.title, Literal(str(c_inf[2]))))
                g.add((URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0])),
                       URIRef(str(bibo) + 'doi'), Literal(str(c_inf[3]))))
                g.add((URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0])),
                       DCTERMS.published, Literal(str(c_inf[4]))))
                g.add((URIRef(
                    str(ccp_ext) + 'J_' +
                    base64.b64encode(hashlib.sha1(str(c_inf[5])).digest())),
                       DCTERMS.title, Literal(str(c_inf[5]))))
                g.add((URIRef(
                    str(ccp_ext) + 'J_' +
                    base64.b64encode(hashlib.sha1(str(c_inf[5])).digest())),
                       RDF.type, URIRef(str(bibo) + 'Journal')))
                g.add((
                    URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0])),
                    URIRef(str(obo) + 'BFO_0000050'),
                    URIRef(
                        str(ccp_ext) + 'J_' +
                        base64.b64encode(hashlib.sha1(str(c_inf[5])).digest()))
                ))

                for author in c_inf[6]:
                    a_hash = base64.b64encode(
                        hashlib.sha1(str(author[1]) + str(author[0])).digest())
                    g.add((URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0])),
                           DCTERMS.creator,
                           URIRef(str(ccp_ext) + 'A_' + str(a_hash))))
                    g.add((URIRef(str(ccp_ext) + 'A_' + str(a_hash)), RDF.type,
                           FOAF.person))
                    g.add((URIRef(str(ccp_ext) + 'A_' + str(a_hash)),
                           FOAF.familyName, Literal(str(author[0]))))
                    g.add((URIRef(str(ccp_ext) + 'A_' + str(a_hash)),
                           FOAF.givenName, Literal(str(author[1]))))

                #create triples - article instances
                format_conversion_uri = base64.b64encode(
                    hashlib.sha1(
                        str(ccp_ext) + 'P_PMC_' + str(c_inf[0]) + '_XML' +
                        str(ccp_ext) + 'P_PMC_' + str(c_inf[0]) +
                        '_TXT').digest())
                g.add(
                    (URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0]) + '_XML'),
                     RDF.type,
                     URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0]))))
                g.add(
                    (URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0]) + '_TXT'),
                     RDF.type,
                     URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0]))))
                g.add(
                    (URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0]) + '_XML'),
                     URIRef(str(swo) + 'SWO_0004002'),
                     URIRef(str(edam) + 'format_2332')))
                g.add(
                    (URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0]) + '_XML'),
                     URIRef(str(swo) + 'SWO_0000046'),
                     Literal(
                         str('/'.join(item.split('/')[:-1])) + '/PMC' +
                         str(c_inf[0]) + '.nxml.gz')))
                g.add(
                    (URIRef(str(ccp_ext) + 'FC_' + str(format_conversion_uri)),
                     URIRef(str(swo) + 'SWO_0000086'),
                     URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0]) + '_XML')))
                g.add(
                    (URIRef(str(ccp_ext) + 'FC_' + str(format_conversion_uri)),
                     RDF.type, URIRef(str(edam) + 'operation_0335')))
                g.add(
                    (URIRef(str(ccp_ext) + 'FC_' + str(format_conversion_uri)),
                     URIRef(str(swo) + 'SWO_0000087'),
                     URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0]) + '_TXT')))
                g.add(
                    (URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0]) + '_TXT'),
                     URIRef(str(swo) + 'SWO_0004002'),
                     URIRef(str(swo) + 'SWO_3000043')))
                g.add(
                    (URIRef(str(ccp_ext) + 'P_PMC_' + str(c_inf[0]) + '_TXT'),
                     URIRef(str(swo) + 'SWO_0000046'),
                     Literal(
                         str('/'.join(item.split('/')[:-1])) + '/PMC' +
                         str(c_inf[0]) + '.nxml.gz.txt.gz')))

    file_name = file_dir.split('/')[-1]
    out = str('/'.join(file_dir.split('/')[:-2])) + '/metadata_triples'
    g.serialize(destination=str(out) + '/PMC_metadata_' + str(file_name) +
                '.ttl',
                format='turtle')

    print('Completed ' + str(file_dir))
Example #25
class FCU_Discharge_Air_Temperature_Sensor(Discharge_Air_Temperature_Sensor,
                                           Supply_Air_Temperature_Sensor):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#'
                         ).FCU_Discharge_Air_Temperature_Sensor
Example #26
# assumed imports for this snippet; return_distance is a project-specific helper
import sys
import folium
from flask import request
from geopy.geocoders import Nominatim
from geopy.distance import geodesic
from rdflib import Graph, Namespace, RDF
from rdflib.plugins.sparql import prepareQuery
from SPARQLWrapper import SPARQLWrapper, JSON

def sparql_search():
    street = request.form.get("street")
    postalCode = request.form.get("postalCode")

    #We have to separate the latitude and the longitude
    if street:
        latitude = str(street).split(',')[0]
        longitude = str(street).split(',')[1]
    else:
        geolocator = Nominatim(user_agent="Geoparking")
        location_user = geolocator.geocode({"postalcode": postalCode})
        latitude, longitude = location_user.latitude, location_user.longitude

    actual_dist = -1
    parking = ''
    totalslots = -1
    addressid = ''
    schedule = ''
    province = ''
    city = ''
    district = ''
    neighborhood = ''
    wikistreet = ''
    wikiprovince = ''
    wikicity = ''
    wikidistrict = ''
    wikineighborhood = ''
    image = ''
    g = Graph()
    github_storage = "https://raw.githubusercontent.com/adrix1341/Curso2020-2021/master/HandsOn/Group02/rdf/output" \
                     "-with-links.nt "
    g.parse(github_storage, format="nt")

    findparking = Namespace("http://findyourparking.es/lcc/ontology/Parking#")
    owl = Namespace("http://www.w3.org/2002/07/owl#")
    wikidata = Namespace("http://www.wikidata.org/prop/direct/")

    q1 = prepareQuery('''
                     SELECT DISTINCT ?x ?parkname ?y ?street ?lat ?lon ?totalslots ?addressid ?schedule ?wikistreet ?province ?city ?district ?neighborhood ?wikiprovince ?wikicity ?wikidistrict ?wikineighborhood
                     WHERE {
                      ?x rdf:type findparking:Parking.
                      ?x findparking:hasName ?parkname.
                      ?x findparking:hasTotalSlots ?totalslots.
                      ?x findparking:hasSchedule ?schedule.
                      ?y rdf:type findparking:Address.
                      ?x findparking:hasProvince ?p.
                      ?x findparking:hasCity ?c.
                      ?x findparking:hasDistrict ?d.
                      ?x findparking:hasNeighborhood ?n.
                      ?p findparking:hasProvince ?province.
                      ?p owl:sameAs ?wikiprovince.
                      ?c owl:sameAs ?wikicity.
                      ?d owl:sameAs ?wikidistrict.
                      ?n owl:sameAs ?wikineighborhood.
                      ?c findparking:hasCity ?city.
                      ?d findparking:hasDistrict ?district.
                      ?n findparking:hasNeighborhood ?neighborhood.
                      ?x findparking:hasAddress ?y.
                      ?y findparking:hasAddressId ?addressid.
                      ?y findparking:hasStreet ?street.
                      ?y owl:sameAs ?wikistreet. 
                      ?x findparking:hasLatitude ?lat.
                      ?x findparking:hasLongitude ?lon
                     }
                     ORDER BY ASC(?street)
                     ''',
                      initNs={
                          "findparking": findparking,
                          "rdf": RDF,
                          "owl": owl,
                          "wikidata": wikidata
                      })

    for st in g.query(q1):

        parkini, streeti, latitudi, longitudi, totalsloti, addressidi, scheduli = st.parkname, st.street, st.lat, st.lon, st.totalslots, st.addressid, st.schedule
        wikistreeti, wikiprovinci, wikiciti, wikidistricti, wikineighborhoodi = st.wikistreet, st.wikiprovince, st.wikicity, st.wikidistrict, st.wikineighborhood
        provinci, citi, districti, neighborhoodi = st.province, st.city, st.district, st.neighborhood
        coordlist = [latitudi, longitudi]

        if street:
            km = return_distance(latitude, longitude, tuple(coordlist))
        else:
            auxList = [location_user.latitude, location_user.longitude]
            km = geodesic(tuple(auxList), tuple(coordlist)).kilometers

        if actual_dist == -1:
            actual_dist = km
            parking = parkini
            park_street = streeti
            totalslots = totalsloti
            addressid = addressidi
            schedule = scheduli
            province = provinci
            city = citi
            district = districti
            neighborhood = neighborhoodi
            wikistreet = wikistreeti
            wikiprovince = wikiprovinci
            wikicity = wikiciti
            wikidistrict = wikidistricti
            wikineighborhood = wikineighborhoodi
            pk_lat = latitudi
            pk_long = longitudi
        elif km < actual_dist:
            actual_dist = km
            parking = parkini
            park_street = streeti
            totalslots = totalsloti
            addressid = addressidi
            schedule = scheduli
            province = provinci
            city = citi
            district = districti
            neighborhood = neighborhoodi
            wikistreet = wikistreeti
            wikiprovince = wikiprovinci
            wikicity = wikiciti
            wikidistrict = wikidistricti
            wikineighborhood = wikineighborhoodi
            pk_lat = latitudi
            pk_long = longitudi

    # the public WDQS endpoint predefines the wd: prefix used in query2 below
    wd = Namespace("https://www.wikidata.org/wiki#")

    wiki1 = wikistreet.split('Q')[1]
    wiki1 = 'Q' + wiki1
    query2 = str('''
                         PREFIX wikidata: <http://www.wikidata.org/prop/direct/>
                         SELECT DISTINCT ?image
                         WHERE {
                          wd:''' + wiki1 + ''' wikidata:P18 ?image
                          
                         }
                         ''')

    endpoint_url = "https://query.wikidata.org/sparql"

    def get_results(endpoint_url, query2):
        user_agent = "WDQS-example Python/%s.%s" % (sys.version_info[0],
                                                    sys.version_info[1])
        # TODO adjust user agent; see https://w.wiki/CX6
        sparql = SPARQLWrapper(endpoint_url, agent=user_agent)
        sparql.setQuery(query2)
        sparql.setReturnFormat(JSON)
        return sparql.query().convert()

    results = get_results(endpoint_url, query2)
    imaget = ''
    for result in results["results"]["bindings"]:
        imaget = str(result)
    # Splitting the object to obtain only the link
    imaget = imaget.split('h')[1] + 'h' + imaget.split('h')[2]
    imaget = 'h' + imaget  #Inserting the h for http://
    imaget = imaget.split("'")[0]

    map = folium.Map(location=[pk_lat, pk_long], zoom_start=12)
    folium.Marker(
        location=[pk_lat, pk_long],
        popup=("Your nearest parking is: " + '<b>' + parking + '</b>' +
               ", at " + str(actual_dist) + " km from your current position." +
               '<br>' + '<b>Parking Info:</b>' + '<br>' + "  -Address: " +
               ' <a href=' + wikistreet + '>' + addressid + '</a>' + '<br>' +
               "  -Province: " + '<a href=' + wikiprovince + '>' + province +
               '</a><br>' + "  -City: " + ' <a href=' + wikicity + '>' + city +
               '</a>' + '<br>' + "  -District: " + ' <a href=' + wikidistrict +
               '>' + district + '</a>' + '<br>' + "  -Neighborhood: " +
               ' <a href=' + wikineighborhood + '>' + neighborhood + '</a>' +
               '<br>' + "  -TotalSlots: " + totalslots + '<br>' +
               "  -Schedule: " + schedule + '<br>' +
               '<style>img{width: 100%;height: auto;}</style>' + '<img src=' +
               imaget + ' alt="Image of the parking street">'),
        tooltip="Click here for more info.",
        icon=folium.Icon(color='red', icon='star')).add_to(map)
    folium.Marker(location=[latitude, longitude],
                  popup="<b>You are here.</b>",
                  tooltip="Click Here!").add_to(map)

    return map._repr_html_()
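
For context, sparql_search reads as a Flask view; a hedged wiring sketch
(the route path and app name are assumptions):

from flask import Flask

app = Flask(__name__)
app.add_url_rule('/search', 'sparql_search', sparql_search, methods=['POST'])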
Example #27
import sys

# assumed imports for this snippet:
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import OWL, RDF, RDFS
from xlrd import open_workbook


def addANode(graph, subject, object):
    subjectURI = URIRef(DBfN[subject])
    objectURI = URIRef(DBfN[object])
    if (subjectURI, RDFS.subClassOf, objectURI) in graph:
        print(subject, "already added")
    else:
        graph.add((subjectURI, RDF.type, OWL.Class))
        graph.add((subjectURI, RDFS.subClassOf, objectURI))
        graph.add((subjectURI, RDFS.label, Literal(subject)))
        graph.add((subjectURI, RDFS.comment, Literal(subject)))


DBfN = Namespace(
    "http://maven.renci.org/ontologies/databridgeforneuroscience/")
graph = Graph()
#Clinical = URIRef("Clinical")
graph.add((DBfN.Clinical, RDF.type, OWL.Class))
graph.add((DBfN.Clinical, RDFS.subClassOf, OWL.Thing))
graph.add((DBfN.Clinical, RDFS.label, Literal("Clinical")))
graph.add((DBfN.Clinical, RDFS.comment, Literal("Highest level of ontology")))
workbook = sys.argv[1]
outFile = sys.argv[2]
lastRow = int(sys.argv[3])
sheet = sys.argv[4]

wb = open_workbook(workbook)
for s in wb.sheets():
    # if (s.name == 'SCZ Clin Model Groups'):
    if (s.name == sheet):
Example #28

class Power_System_Power_Meter(Power_Meter):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Power_System_Power_Meter
Example #29
from jsonpath_ng import jsonpath, parse
import json
from urllib.parse import urlparse
from rdflib import Graph, Literal, RDF, URIRef, Namespace, BNode
from rdflib.namespace import RDF, RDFS, XSD, DC, DCTERMS, FOAF
import dateutil.parser
import hashlib
from requests.utils import requote_uri
from cpe import CPE

CORE = Namespace("http://ontologies.ti-semantics.com/core#")
CTI = Namespace("http://ontologies.ti-semantics.com/cti#")
PLATFORM = Namespace("http://ontologies.ti-semantics.com/platform#")
SCORE = Namespace("http://ontologies.ti-semantics.com/score#")
VULN = Namespace("http://ontologies.ti-semantics.com/vulnerability#")
NVD = Namespace("https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-2020/")
REFS = Namespace("https://csrc.nist.gov/publications/references/")
CPEMATCH = Namespace("https://csrc.nist.gov/schema/cpematch/feed/1.0/")


def all(q, o, fn=None):
    a = [m.value for m in parse(q).find(o)]
    if fn:
        for i in a:
            fn(i)
    else:
        return a


def allDateTime(p, q, o):
    return [(p, Literal(dt, datatype=XSD.dateTime)) for dt in all(q, o)]
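
A small hedged example of the all() helper above (the JSON shape is made up):

doc = {"cve": {"references": [{"url": "https://example.com/a"}]}}
urls = all("cve.references[*].url", doc)  # -> ['https://example.com/a']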
Example #30

#
#  Resources:
#
#   - http://spdx.org/about-spdx/faqs
#   - http://wiki.spdx.org/view/Technical_Team/Best_Practices
#

import os
import sys
import re
import datetime
import sha  # Python 2 only; use hashlib.sha1 on Python 3
import rdflib
from rdflib import URIRef, BNode, Literal, Namespace

RDF = Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
RDFS = Namespace('http://www.w3.org/2000/01/rdf-schema#')
XSD = Namespace('http://www.w3.org/2001/XMLSchema#')
SPDX = Namespace('http://spdx.org/rdf/terms#')
DOAP = Namespace('http://usefulinc.com/ns/doap#')
DUKTAPE = Namespace('http://duktape.org/rdf/terms#')

def checksumFile(g, filename):
	f = open(filename, 'rb')
	d = f.read()
	f.close()
	shasum = sha.sha(d).digest().encode('hex').lower()

	csum_node = BNode()
	g.add((csum_node, RDF.type, SPDX.Checksum))
	g.add((csum_node, SPDX.algorithm, SPDX.checksumAlgorithm_sha1))
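
The checksum helper above is Python 2 (the sha module and .encode('hex') are
gone in Python 3); a minimal hedged Python 3 equivalent, reusing the RDF and
SPDX namespaces defined above:

import hashlib

def checksum_file_py3(g, filename):
    # hash the file contents and attach an spdx:Checksum node to the graph
    with open(filename, 'rb') as f:
        shasum = hashlib.sha1(f.read()).hexdigest().lower()
    csum_node = BNode()
    g.add((csum_node, RDF.type, SPDX.Checksum))
    g.add((csum_node, SPDX.algorithm, SPDX.checksumAlgorithm_sha1))
    g.add((csum_node, SPDX.checksumValue, Literal(shasum)))
    return csum_node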