heightOcc = M['height_Occ']
g1.add((heightOcc, RDF.type, SBUILDING.Height))
g1.add((heightOcc, RDF.type, PRIVVULN.Metadata))
g1.add((occ, PRIVVULNV2.has, heightOcc))

shoulderCircumferenceOcc = M['shoulderCircumference_Occ']
g1.add((shoulderCircumferenceOcc, RDF.type, SBUILDING.ShoulderCircumference))
g1.add((shoulderCircumferenceOcc, RDF.type, PRIVVULN.Metadata))
g1.add((occ, PRIVVULNV2.has, shoulderCircumferenceOcc))

weightOcc = M['weight_Occ']
g1.add((weightOcc, RDF.type, SBUILDING.Weight))
g1.add((weightOcc, RDF.type, PRIVVULN.Metadata))
g1.add((occ, PRIVVULNV2.has, weightOcc))

genderOcc = M['gender_Occ']
g1.add((genderOcc, RDF.type, SBUILDING.Gender))
g1.add((genderOcc, RDF.type, PRIVVULN.Metadata))
g1.add((occ, PRIVVULNV2.has, genderOcc))

driver = Driver(debug_mode=True)
print("graph has %s statements." % len(g1))

folder = "output/paper/TORS/"
outputName = "10.1145-3408308.3427612"

g1 = driver.run(g1, folder + outputName)

print("graph has %s statements." % len(g1))

g1.serialize(folder+outputName+".rdf")
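Note that this excerpt assumes the graph g1, the model namespace M, and the occupant node occ were created earlier in the script (imports for the project's NSUtil and Driver classes are likewise omitted). A minimal sketch of that assumed prelude, reusing the NSUtil helpers shown in Example #4 below, could look like this; the model namespace URI and the Occupant_Occ name are placeholders:

# Hypothetical prelude for the excerpt above; the URI and node name are placeholders.
from rdflib import Graph, Namespace

g1 = Graph()

# Ontology namespaces resolved through the project's NSUtil helper
# (the same calls appear in Example #4 below).
RDF = NSUtil.get_namespase_rdf()
PRIVVULN = NSUtil.get_namespase_base_ontology()
PRIVVULNV2 = NSUtil.get_namespase_extrantion_ontology()
SBUILDING = NSUtil.get_namespase_domain_smart_building()

# Model namespace for the individuals in this example (placeholder URI).
M = Namespace("http://example.org/model#")
g1.bind('m', M)

# Occupant individual to which the metadata nodes above are attached;
# the original script would also give it an appropriate rdf:type.
occ = M['Occupant_Occ']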
Example #2
g1.add((teaching_Room, PRIVVULNV2.has, humidity))

hallway_Room = M['Hallway_Room_sub']
g1.add((hallway_Room, RDF.type, SBUILDING.Hallway_Room))
g1.add((hallway_Room, PRIVVULN.star, occupies[1]))

building = M['Building_sub']
g1.add((building, RDF.type, SBUILDING.Building))
g1.add((building, PRIVVULN.star, teaching_Room))
g1.add((building, PRIVVULN.star, hallway_Room))

contextLocation = M['ContextLocation']
g1.add((contextLocation, RDF.type, SBUILDING.ContextLocation))
g1.add((contextLocation, RDF.type, PRIVVULN.Metadata))
g1.add((building, PRIVVULNV2.has, contextLocation))

outdoorTemperature = M['OutdoorTemperature']
g1.add((outdoorTemperature, RDF.type, SBUILDING.OutdoorTemperature))
g1.add((outdoorTemperature, RDF.type, PRIVVULN.TimeSeries))
g1.add((outdoorTemperature, PRIVVULNV2.TemporalResolution, Literal("300", datatype=XSD.double)))
g1.add((building, PRIVVULNV2.has, outdoorTemperature))

driver = Driver(domain_NS=SBUILDING, debug_mode=True)
folder = "output/Datasets/"
outputName = "Gao et al. 2020"

g1 = driver.run(g1, folder + outputName)

print("graph has %s statements." % len(g1))

g1.serialize(folder+outputName+".rdf")
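As an illustrative read-back that is not part of the original script, the temporal resolution recorded for the outdoor temperature series can be fetched from g1 with rdflib's value() helper:

# Illustrative check, not in the original listing: read back the temporal
# resolution literal attached to the OutdoorTemperature time series.
resolution = g1.value(outdoorTemperature, PRIVVULNV2.TemporalResolution)
print("OutdoorTemperature temporal resolution: %s" % resolution)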
Example #3
room = M['Room_GCH']
g1.add((room, RDF.type, SBUILDING.Shared_Office_Room))
g1.add((room, PRIVVULN.star, sensors[0]))
g1.add((room, PRIVVULN.star, sensors[1]))
g1.add((room, PRIVVULN.star, zones[0]))
g1.add((room, PRIVVULN.star, zones[1]))
g1.add((room, PRIVVULN.star, zones[2]))

floor = M['Floor_GCH']
g1.add((floor, RDF.type, SBUILDING.Floor))
g1.add((floor, PRIVVULN.star, room))
g1.add((floor, PRIVVULN.star, sensors[2]))
g1.add((floor, PRIVVULN.star, sensors[3]))

building = M['Building_GCH']
g1.add((building, RDF.type, SBUILDING.Building))
g1.add((building, PRIVVULN.star, floor))

driver = Driver()
print("graph has %s statements." % len(g1))

folder = "output/Datasets/"
outputName = "Office_building_2"

g1 = driver.run(g1, folder + outputName)

print("graph has %s statements." % len(g1))

g1.serialize(folder + outputName + ".rdf")
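As a quick check that is not part of the original script, the building/floor/room containment added above can be listed with rdflib's triples() pattern matching:

# Illustrative traversal, not in the original listing: print every
# privvuln:star containment edge in the GCH building model.
for parent, _, child in g1.triples((None, PRIVVULN.star, None)):
    print("%s star %s" % (parent, child))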
Example #4
# rdflib imports used by this function (the project-specific NSUtil and
# Driver helpers are assumed to be imported elsewhere in the module).
import rdflib
from rdflib import Graph, Literal, Namespace


def run_analyses(json):
    g1 = Graph()

    # source namespaces
    RDF = NSUtil.get_namespase_rdf()
    RDFS = NSUtil.get_namespase_rdfs()
    OWL = NSUtil.get_namespase_owl()
    XSD = NSUtil.get_namespase_xsd()
    PRIVVULN = NSUtil.get_namespase_base_ontology()
    PRIVVULNV2 = NSUtil.get_namespase_extrantion_ontology()
    SBUILDING = NSUtil.get_namespase_domain_smart_building()

    g1.bind('rdf', RDF)
    g1.bind('rdfs', RDFS)
    g1.bind('owl', OWL)
    g1.bind('xsd', XSD)

    # custom namespace
    g1.bind('privvuln', PRIVVULN)

    g1.bind('privvulnv2', PRIVVULNV2)

    g1.bind('sbuilding', SBUILDING)

    nodes = json["nodes"]

    links = json["links"]

    namespace = json["namespace"]
    M = Namespace(namespace)
    g1.bind('m', M)

    for node in nodes:
        if "name" not in node and "type" not in node:
            continue
        subject = M[node["name"]]
        g1.add((subject, RDF.type, rdflib.term.URIRef(node["type"])))
        if "superType" in node and node["superType"] != "" and node[
                "superType"] and node["superType"] != str(PRIVVULNV2.Context):
            g1.add((subject, RDF.type, rdflib.term.URIRef(node["superType"])))

        if "attributes" in node:
            for attribute in node["attributes"]:
                if "name" not in attribute and "value" not in attribute and "dataType" not in attribute:
                    continue

                if attribute["dataType"] == "int" or attribute[
                        "dataType"] == "double" or attribute[
                            "dataType"] == "string":
                    if attribute["value"] != '' and attribute["value"] != None:
                        g1.add(
                            (subject,
                             rdflib.term.URIRef(
                                 str(PRIVVULNV2) + attribute["name"]),
                             Literal(attribute["value"],
                                     datatype=rdflib.term.URIRef(
                                         str(XSD) + attribute["dataType"]))))

    for link in links:
        if "subject" not in link and "predicate" not in link and "object" not in link:
            continue
        g1.add((rdflib.term.URIRef(link["subject"]),
                rdflib.term.URIRef(link["predicate"]),
                rdflib.term.URIRef(link["object"])))

    driver = Driver(debug_mode=True)
    print("graph has %s statements." % len(g1))

    g1, risk_results, graph = driver.run_with_output(g1)

    print("graph has %s statements." % len(g1))

    returnJson = {
        'graph': str(graph.source),
        'privacy_report': risk_results,
    }

    return returnJson
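For reference, a minimal hypothetical payload for run_analyses, mirroring the triples built in Example #2 and using only the keys the function actually reads (namespace, nodes with name/type/optional superType and attributes, and links given as full subject/predicate/object URIs), might look like the sketch below; the model namespace and node names are placeholders.

# Hypothetical input for run_analyses; the model namespace and node names are
# placeholders, while class and predicate URIs reuse the project's NSUtil helpers.
SBUILDING = NSUtil.get_namespase_domain_smart_building()
PRIVVULN = NSUtil.get_namespase_base_ontology()
PRIVVULNV2 = NSUtil.get_namespase_extrantion_ontology()

NS = "http://example.org/model#"  # placeholder model namespace

example_payload = {
    "namespace": NS,
    "nodes": [
        {
            "name": "Building_1",
            "type": str(SBUILDING.Building),
        },
        {
            "name": "OutdoorTemperature_1",
            "type": str(SBUILDING.OutdoorTemperature),
            "superType": str(PRIVVULN.TimeSeries),
            "attributes": [
                {"name": "TemporalResolution", "value": "300", "dataType": "double"},
            ],
        },
    ],
    "links": [
        {
            "subject": NS + "Building_1",
            "predicate": str(PRIVVULNV2.has),
            "object": NS + "OutdoorTemperature_1",
        },
    ],
}

result = run_analyses(example_payload)
print(result["privacy_report"])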