Example #1
def predicate(label, *args, **kwargs):
    """Declare an LTN predicate or fetch an already declared one by label.

    A call with only the label returns the cached predicate; redeclaring an
    existing label either raises (when "error_on_redeclare" is configured) or
    logs a warning and overwrites the cached entry.
    """
    if label in PREDICATES and args == () and kwargs == {}:
        return PREDICATES[label]
    elif label in PREDICATES and CONFIGURATION.get("error_on_redeclare"):
        logging.getLogger(__name__).error("Attempt at redeclaring existing predicate %s" % label)
        raise Exception("Attempt at redeclaring existing predicate %s" % label)
    else:
        if label in PREDICATES:
            logging.getLogger(__name__).warning("Redeclaring existing predicate %s" % label)
        PREDICATES[label] = ltn.predicate(label, *args, **kwargs)
        return PREDICATES[label]
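# A minimal usage sketch (not part of the original snippet): it assumes the
# module-level PREDICATES = {} cache and CONFIGURATION = {} dict used above,
# and the TF1-era logictensornetworks API shown in the examples below. The
# first call builds and caches the predicate; a later call with only the
# label returns the cached object instead of redeclaring it.
Friends = predicate("Friends", 2)   # creates ltn.predicate("Friends", 2) and caches it
same = predicate("Friends")         # no extra arguments: returns the cached predicate
assert same is Friends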
Example #2
import logictensornetworks as ltn
from logictensornetworks import Implies,And,Not,Forall,Exists
import tensorflow as tf
import numpy as np
a = ltn.proposition("a",value=.2)
b = ltn.proposition("b")
c = ltn.proposition("c")
w1 = ltn.proposition("w1",value=.3)
w2 = ltn.proposition("w2",value=.9)

x = ltn.variable("x",np.array([[1,2],[3,4],[5,6]]).astype(np.float32))
P = ltn.predicate("P",2)

formula = And(Implies(And(Forall(x,P(x)),a,b),Not(c)),c)
# w1 and w2 act as soft weights: a high-valued antecedent forces the
# implication's consequent to be (nearly) as true for the formula to hold
w1_formula1 = Implies(w1,Forall(x,P(x)))
w2_formula2 = Implies(w2,Exists(x,P(x)))

# maximize satisfiability by doing gradient descent on the negated truth values
sat = tf.train.GradientDescentOptimizer(0.01).minimize(-tf.concat([formula,w1_formula1,w2_formula2],axis=0))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        sess.run(sat)
        if i % 10 == 0:
            print(sess.run(formula))
    print(sess.run([a,b,c]))
    print(sess.run(And(a,P(x))))
Example #3
# variables for single rectangles

x = ltn.variable("x", 4)
y = ltn.variable("y", 4)
z = ltn.variable("z", 4)

# a rectangle and a set of rectangles used to show the results

ct = ltn.constant("ct", [.5, .5, .3, .3])
t = ltn.variable("t", tf.cast(bbst, tf.float32))

# relational predicates over pairs of rectangles (4 + 4 bounding-box features)

L = ltn.predicate("left", 8)
R = ltn.predicate("right", 8)
B = ltn.predicate("below", 8)
A = ltn.predicate("above", 8)
C = ltn.predicate("contains", 8)
I = ltn.predicate("in", 8)

P = [L, R, B, A, C, I]

# each relation in P paired with its inverse (left/right, below/above, contains/in)
inv_P = [R, L, A, B, I, C]

# constraints/axioms

constraints = [Forall(pxy[i], P[i](pxy[i])) for i in range(6)]
constraints += [Forall(npxy[i], Not(P[i](npxy[i]))) for i in range(6)]
constraints += [
Example #4
# take a random sample of 1000 close pairs (closed_data is built earlier in the full example)
closed_data = closed_data[np.random.random_integers(0, len(data), 1000)]
distant_data = np.array([
    np.concatenate([data[i], data[j]]) for i in range(len(data))
    for j in range(len(data))
    if np.sum(np.square(data[i] - data[j])) > np.square(1.)
])

# defining the language

x = ltn.variable("x", data)
y = ltn.variable("y", data)
closed_x_y = ltn.variable("closed_x_y", closed_data)
distant_x_y = ltn.variable("distant_x_y", distant_data)

C = {i: ltn.predicate("C_" + str(i), x) for i in clst_ids}  # one membership predicate per cluster

# project a pair of points onto its first / second point
first = ltn.function("first", closed_x_y, fun_definition=lambda d: d[:, :2])
second = ltn.function("second", closed_x_y, fun_definition=lambda d: d[:, 2:])

print("defining the theory T")
T = tf.reduce_mean(
    tf.concat(
        [Forall(x, Or(*[C[i](x) for i in clst_ids]))] +
        [Exists(x, C[i](x)) for i in clst_ids] + [
            Forall(closed_x_y,
                   Equiv(C[i](first(closed_x_y)), C[i](second(closed_x_y))))
            for i in clst_ids
        ] + [
            Forall(
                distant_x_y,
Example #5
data = np.random.uniform(
    [-1, -1],
    [1, 1],
    (500, 2),
).astype(np.float32)

# defining the language

x = ltn.variable("x", data)
y = ltn.variable("y", data)

a = ltn.constant("a", [0.5, 0.5])
b = ltn.constant("b", [-0.5, -0.5])

A = ltn.predicate("A", 2)
B = ltn.predicate("B", 2)
T = And(A(a), B(b), Not(A(b)), Forall(x, Implies(A(x), B(x))))

# start a tensorflow session

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
opt = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(-T)

# optimize the satisfiability of T

sess.run(init)
sat_level = sess.run(T)
while sat_level == 0.0:
Example #6
train_X = np.random.uniform(start, end, (training_size)).astype("float32")
train_Y = slope * train_X + np.random.normal(scale=var, size=len(train_X))

W = tf.Variable(np.random.randn(), name="weight")
b = tf.Variable(np.random.randn(), name="bias")


def apply_fun(X):
    return tf.add(tf.multiply(X, W), b)


c_x = [ltn.constant("x_%s" % i, [x]) for i, x in enumerate(train_X)]
c_y = [ltn.constant("y_%s" % i, [y]) for i, y in enumerate(train_Y)]

f = ltn.function("f", 1, 1, fun_definition=apply_fun)
eq = ltn.predicate("equal", 2, lambda x, y: ltnl.equal_euclidian(x, y))

facts = [eq(f(x), y) for x, y in zip(c_x, c_y)]
cost = -tf.reduce_mean(tf.stack(facts))

sess = tf.Session()
opt = tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate).minimize(cost)

init = tf.global_variables_initializer()
sess.run(init)
for i in range(epochs):
    sess.run(opt)
    if i % 10 == 0:
        # cost is the negated mean truth value, so -cost is the satisfiability level
        print(i, "sat level -----> ", -sess.run(cost))
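# A small follow-up sketch (not in the original snippet): f is built from the
# tf.Variables W and b above, so the fitted line can be read back from the
# session after training.
w_val, b_val = sess.run([W, b])
print("fitted line: y = %.3f * x + %.3f" % (w_val, b_val))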
Example #7
friends = [('a','b'),('a','e'),('a','f'),('a','g'),('b','c'),('c','d'),('e','f'),('g','h'),
           ('i','j'),('j','m'),('k','l'),('m','n')]
smokes = ['a','e','f','g','j','n']
cancer = ['a','e']


p = ltn.variable("p",tf.concat(list(g.values()),axis=0))
q = ltn.variable("q",tf.concat(list(g.values()),axis=0))

p1 = ltn.variable("p1",tf.concat(list(g1.values()),axis=0))
q1 = ltn.variable("q1",tf.concat(list(g1.values()),axis=0))

p2 = ltn.variable("p2",tf.concat(list(g2.values()),axis=0))
q2 = ltn.variable("q2",tf.concat(list(g2.values()),axis=0))

Friends = ltn.predicate('Friends',size*2)
Smokes = ltn.predicate('Smokes',size)
Cancer = ltn.predicate('Cancer',size)


# ground facts: positive friendship/smoking/cancer atoms, plus closed-world
# negative facts restricted to the two groups g1 and g2
facts = [Friends(g[x],g[y]) for (x,y) in friends]+\
        [Not(Friends(g[x],g[y])) for x in g1 for y in g1
                                 if (x,y) not in friends and x < y]+\
        [Not(Friends(g[x],g[y])) for x in g2 for y in g2
                                 if (x, y) not in friends and x < y] +\
        [Smokes(g[x]) for x in smokes]+\
        [Not(Smokes(g[x])) for x in g if x not in smokes]+\
        [Cancer(g[x]) for x in cancer]+\
        [Not(Cancer(g[x])) for x in g1 if x not in cancer] +\
        [Forall(p,Not(Friends(p,p))),
         Forall((p,q),Equiv(Friends(p,q),Friends(q,p))),
Example #8
# begin of ltn

dd = ltn.variable("double_digit", y_conv)


def get_nth_element(n):
    # build a predicate definition that simply reads the n-th column of the
    # network output
    def result(p):
        return p[:, n]

    return result


# IS1[n] reads output n of the network (first digit equals n);
# IS2[n] reads output 10 + n (second digit equals n)
IS1 = {
    n: ltn.predicate("is_equal_to_" + str(n),
                     embedding_size * 2,
                     pred_definition=get_nth_element(n))
    for n in range(10)
}

IS2 = {
    n: ltn.predicate("is_equal_to_" + str(n),
                     embedding_size * 2,
                     pred_definition=get_nth_element(10 + n))
    for n in range(10)
}

examples_of_1 = {
    n: ltn.variable(
        "examples_of_1_" + str(n),
        tf.gather(