Example #1
def test_rdfs_inference_subclass():
    EX = Namespace("http://example.com/building#")
    graph = Graph(load_brick=True).from_triples([(EX["a"], RDF.type,
                                                  BRICK.Temperature_Sensor)])
    graph.expand(profile="rdfs")

    res1 = graph.query(f"""SELECT ?type WHERE {{
        <{EX["a"]}> rdf:type ?type
    }}""")

    expected = [
        BRICK.Point,
        BRICK.Class,
        BRICK.Sensor,
        RDFS.Resource,
        BRICK.Temperature_Sensor,
    ]

    # filter out BNodes
    res = []
    for row in res1:
        row = tuple(filter(lambda x: not isinstance(x, BNode), row))
        res.append(row)
    res = list(filter(lambda x: len(x) > 0, res))

    assert len(res) == len(expected), f"Results were {res}"
    for expected_class in expected:
        assert (expected_class,) in res, f"{expected_class} not found in {res}"
Example #2
    def __init__(self, folder, dbfile, limit):
        if not os.path.exists(dbfile) or dbfile == ':memory:':
            ttlfiles = glob.glob(f"{folder}/*.ttl")
            csvfiles = glob.glob(f"{folder}/*/*.csv")

            conn = sqlite3.connect(dbfile)
            conn.row_factory = sqlite3.Row
            for stmt in schema.split(';'):
                stmt += ';'
                conn.execute(stmt)
            conn.commit()

            # load in ttl
            g = Graph()
            bind_prefixes(g)
            for ttl in ttlfiles:
                print(f"Loading TTL file {ttl}")
                g.load_file(ttl)
                # values = list(map(tuple, g))
                # conn.executemany("""INSERT OR IGNORE INTO triples(subject, predicate, object) \
                #                     VALUES(?, ?, ?)""", values)
                # conn.commit()
            g = BrickInferenceSession().expand(g)
            triples = list(g.g)
            conn.executemany(
                """INSERT OR IGNORE INTO triples(subject, predicate, object) \
                                VALUES(?, ?, ?)""", triples)
            conn.commit()

            # load in data
            for csvf in sorted(csvfiles)[:limit]:
                print(f"Loading CSV file {csvf}")
                rows = csv2rows(csvf)
                #with open(csvf) as f:
                #    rdr = csv.reader(f)
                #    next(rdr) # consume header
                #    vals = list(rdr)
                conn.executemany(
                    """INSERT OR IGNORE INTO data(timestamp, uuid, value) \
                                    VALUES(?, ?, ?)""", rows)
                conn.commit()
        else:
            conn = sqlite3.connect(dbfile)
            conn.row_factory = sqlite3.Row
            # rebuild the graph from triples stored in the existing database
            g = Graph()
            bind_prefixes(g)
            triples = conn.execute(
                "SELECT subject, predicate, object FROM triples")
            for t in triples:
                t = (
                    parse_uri(t[0]),
                    parse_uri(t[1]),
                    parse_uri(t[2]),
                )
                g.add(t)
        self.g = g
        self.conn = conn
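
The csv2rows and parse_uri helpers used above are project-specific and not shown here; a minimal sketch of parse_uri, assuming triples were stored as plain strings and that non-URI objects should come back as Literals, could be:

from rdflib import URIRef, Literal

def parse_uri(value):
    # hypothetical helper: rebuild an rdflib term from its stored string form
    if value.startswith(("http://", "https://", "urn:")):
        return URIRef(value)
    return Literal(value)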
Example #3
def test_haystack_inference():
    data = pkgutil.get_data(__name__, "data/carytown.json").decode()
    raw_model = json.loads(data)
    brick_model = Graph(load_brick=True).from_haystack(
        "http://example.org/carytown", raw_model)
    points = brick_model.query("""SELECT ?p WHERE {
        ?p rdf:type/rdfs:subClassOf* brick:Point
    }""")
    assert len(points) == 17

    equips = brick_model.query("""SELECT ?e WHERE {
        ?e rdf:type/rdfs:subClassOf* brick:Equipment
    }""")
    assert len(equips) == 4
Example #4
def test_tagset_inference():

    g = Graph(load_brick=False)
    data = pkgutil.get_data(__name__, "data/tags.ttl").decode()
    g.load_file(source=io.StringIO(data))
    g.expand(profile="tag")

    afs1 = g.query("SELECT ?x WHERE { ?x rdf:type brick:Air_Flow_Sensor }")
    assert len(afs1) == 1
    afsp1 = g.query("SELECT ?x WHERE { ?x rdf:type brick:Air_Flow_Setpoint }")
    assert len(afsp1) == 1
    mafs1 = g.query(
        "SELECT ?x WHERE { ?x rdf:type brick:Max_Air_Flow_Setpoint_Limit }")
    assert len(mafs1) == 1
Example #5
def test_brick_inference():
    session = BrickInferenceSession()
    assert session is not None
    g = Graph(load_brick=True)
    data = pkgutil.get_data(__name__, "data/brick_inference_test.ttl").decode()
    g.load_file(source=io.StringIO(data))
    g = session.expand(g)

    r = g.query("SELECT ?x WHERE { ?x rdf:type brick:Air_Temperature_Sensor }")
    # assert len(r) == 5
    urls = set([str(row[0]) for row in r])
    real_sensors = set([
        "http://example.com/mybuilding#sensor1",
        "http://example.com/mybuilding#sensor2",
        "http://example.com/mybuilding#sensor3",
        "http://example.com/mybuilding#sensor4",
        "http://example.com/mybuilding#sensor5",
    ])
    assert urls == real_sensors
Example #6
def test_brick_to_vbis_inference_with_owlrl():
    ALIGN = Namespace("https://brickschema.org/schema/Brick/alignments/vbis#")

    # input brick model; instances should have appropriate VBIS tags
    g = Graph(load_brick=True)
    data = pkgutil.get_data(__name__, "data/vbis_inference_test.ttl").decode()
    g.load_file(source=io.StringIO(data))
    g.expand(profile="owlrl")
    g.expand(profile="vbis")

    test_cases = [
        ("http://bldg#f1", "ME-Fa"),
        ("http://bldg#rtu1", "ME-ACU"),
    ]
    for (entity, vbistag) in test_cases:
        query = f"SELECT ?tag WHERE {{ <{entity}> <{ALIGN.hasVBISTag}> ?tag }}"
        res = list(g.query(query))
        assert len(res) == 1
        assert str(res[0][0]) == vbistag

    conforms, _, results = g.validate()
    assert conforms, results
Example #7
def test_brick_inference():
    g = Graph(load_brick=True)
    g.load_extension("shacl_tag_inference")
    data = pkgutil.get_data(__name__, "data/brick_inference_test.ttl").decode()
    g.load_file(source=io.StringIO(data))

    g.expand(profile="owlrl+shacl+owlrl+shacl")

    r = g.query("SELECT ?x WHERE { ?x rdf:type brick:Air_Temperature_Sensor }")
    # assert len(r) == 5
    urls = set([str(row[0]) for row in r])
    real_sensors = set([
        "http://example.com/mybuilding#sensor1",
        "http://example.com/mybuilding#sensor2",
        "http://example.com/mybuilding#sensor3",
        "http://example.com/mybuilding#sensor4",
        "http://example.com/mybuilding#sensor5",
    ])
    assert urls == real_sensors
Example #8
def test_orm():
    g = Graph(load_brick=True)
    data = pkgutil.get_data(__name__, "data/test.ttl").decode()
    g.load_file(source=io.StringIO(data))
    orm = SQLORM(g, connection_string="sqlite:///:memory:")

    equips = orm.session.query(Equipment).all()
    assert len(equips) == 5

    points = orm.session.query(Point).all()
    assert len(points) == 3

    locs = orm.session.query(Location).all()
    assert len(locs) == 4

    hvac_zones = (
        orm.session.query(Location).filter(Location.type == BRICK.HVAC_Zone).all()
    )
    assert len(hvac_zones) == 1

    # test relationships
    BLDG = Namespace("http://example.com/mybuilding#")
    vav2_4 = orm.session.query(Equipment).filter(Equipment.name == BLDG["VAV2-4"]).one()
    assert vav2_4.type == str(BRICK.Variable_Air_Volume_Box)
    assert len(vav2_4.points) == 2

    vav2_4_dpr = (
        orm.session.query(Equipment).filter(Equipment.name == BLDG["VAV2-4.DPR"]).one()
    )
    assert vav2_4_dpr.type == str(BRICK.Damper)
    assert len(vav2_4_dpr.points) == 1

    tstat = orm.session.query(Equipment).filter(Equipment.name == BLDG["tstat1"]).one()
    room_410 = (
        orm.session.query(Location).filter(Location.name == BLDG["Room-410"]).one()
    )
    assert tstat.location == room_410
    assert tstat in room_410.equipment
Example #9
def test_tagset_inference():
    session = TagInferenceSession(approximate=False)
    assert session is not None
    g = Graph(load_brick=False)
    data = pkgutil.get_data(__name__, "data/tags.ttl").decode()
    g.load_file(source=io.StringIO(data))
    g = session.expand(g)

    afs1 = g.query("SELECT ?x WHERE { ?x rdf:type brick:Air_Flow_Sensor }")
    assert len(afs1) == 1
    afsp1 = g.query("SELECT ?x WHERE { ?x rdf:type brick:Air_Flow_Setpoint }")
    assert len(afsp1) == 1
    mafs1 = g.query(
        "SELECT ?x WHERE { ?x rdf:type brick:Max_Air_Flow_Setpoint_Limit }")
    assert len(mafs1) == 1
Example #10
def test_owl_inference_tags():
    EX = Namespace("http://example.com/building#")
    graph = Graph(load_brick=True).from_triples([(EX["a"], RDF.type,
                                                  BRICK.Air_Flow_Setpoint)])
    graph.expand(profile="owlrl", backend="owlrl")

    res1 = graph.query(f"""SELECT ?type WHERE {{
        <{EX["a"]}> rdf:type ?type
    }}""")

    expected = [
        # RDF.Resource,
        # RDFS.Resource,
        OWL.Thing,
        BRICK.Point,
        BRICK.Class,
        BRICK.Setpoint,
        BRICK.Flow_Setpoint,
        BRICK.Air_Flow_Setpoint,
    ]
    # filter out BNodes
    res1 = filter_bnodes(res1)

    assert set(res1) == set(map(lambda x: (x, ), expected))

    res2 = graph.query(f"""SELECT ?tag WHERE {{
        <{EX["a"]}> brick:hasTag ?tag
    }}""")

    expected = [
        TAG.Point,
        TAG.Air,
        TAG.Flow,
        TAG.Setpoint,
    ]
    res2 = filter_bnodes(res2)

    assert set(res2) == set(map(lambda x: (x, ), expected))
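
The filter_bnodes helper used above is not defined in this snippet; a minimal sketch, assuming it mirrors the inline BNode filtering shown in Example #1, could look like this:

from rdflib import BNode

def filter_bnodes(results):
    # hypothetical helper: drop BNode terms from each row, then drop empty rows
    filtered = []
    for row in results:
        row = tuple(term for term in row if not isinstance(term, BNode))
        if row:
            filtered.append(row)
    return filtered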
Example #11
def graph_from_triples(triples):
    g = Graph(load_brick=True)
    # g.load_file("ttl/owl.ttl")
    g.add(*triples)
    sess = OWLRLAllegroInferenceSession()
    return sess.expand(g)
Example #12
File: quantities.py  Project: sud335/Brick
from brickschema.graph import Graph
from brickschema.inference import BrickInferenceSession
from rdflib import Literal, URIRef
from .namespaces import SKOS, OWL, RDFS, BRICK, QUDTQK, QUDTDV, QUDT, UNIT


g = Graph()
g.load_file("support/VOCAB_QUDT-QUANTITY-KINDS-ALL-v2.1.ttl")
g.load_file("support/VOCAB_QUDT-UNITS-ALL-v2.1.ttl")
g.g.bind("qudt", QUDT)
g.g.bind("qudtqk", QUDTQK)
sess = BrickInferenceSession()
g = sess.expand(g)


def get_units(brick_quantity):
    """
    Fetches the QUDT unit and symbol (as a Literal) from the QUDT ontology,
    in order to avoid having to pull the full QUDT ontology into Brick.
    """
    res = g.query(
        f"""SELECT ?unit ?symbol WHERE {{
                    <{brick_quantity}> qudt:applicableUnit ?unit .
                    ?unit qudt:symbol ?symbol .
                    FILTER(isLiteral(?symbol))
                    }}"""
    )
    for r in res:
        yield r
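
A brief usage sketch (brick:Temperature is an assumed quantity name, not taken from the file above):

# hypothetical usage: list the applicable QUDT units and their symbols
for unit, symbol in get_units(BRICK.Temperature):
    print(unit, symbol)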

Example #13
from brickschema.graph import Graph
from rdflib import Literal, URIRef
from .namespaces import SKOS, OWL, RDFS, BRICK, QUDTQK, QUDTDV, QUDT, UNIT


g = Graph()
g.load_file("support/VOCAB_QUDT-QUANTITY-KINDS-ALL-v2.1.ttl")
g.load_file("support/VOCAB_QUDT-UNITS-ALL-v2.1.ttl")
g.bind("qudt", QUDT)
g.bind("qudtqk", QUDTQK)
g.expand(profile="brick")


def get_units(brick_quantity):
    """
    Fetches the QUDT unit and symbol (as a Literal) from the QUDT ontology,
    in order to avoid having to pull the full QUDT ontology into Brick.
    """
    res = g.query(
        f"""SELECT ?unit ?symbol WHERE {{
                    <{brick_quantity}> qudt:applicableUnit ?unit .
                    ?unit qudt:symbol ?symbol .
                    FILTER(isLiteral(?symbol))
                    }}"""
    )
    for r in res:
        yield r


"""
Each is a qudt:QuantityKind
def test_simplify():
    g = Graph(load_brick=True)
    data = pkgutil.get_data(__name__, "data/test.ttl").decode()
    g.load_file(source=io.StringIO(data))

    g.expand("brick", simplify=False, backend="owlrl")
    g.serialize("/tmp/test.ttl", format="ttl")

    q = "SELECT ?type WHERE { bldg:VAV2-4.ZN_T a ?type }"
    rows = list(g.query(q))
    bnodes = [r[0] for r in rows if isinstance(r[0], rdflib.BNode)]
    assert len(bnodes) > 0

    g.simplify()

    rows = list(g.query(q))
    bnodes = [r[0] for r in rows if isinstance(r[0], rdflib.BNode)]
    assert len(bnodes) == 0