Example No. 1
    def testSimplePredicateOptimization(self):
        nr_samples=100

        ltnw.variable("?data_A",numpy.random.uniform([0.,0.],[.1,1.],(nr_samples,2)).astype("float32"))
        ltnw.variable("?data_not_A",numpy.random.uniform([2.,0],[3.,1.],(nr_samples,2)).astype("float32"))
        
        ltnw.predicate("A",2)
        
        ltnw.axiom("forall ?data_A: A(?data_A)")
        ltnw.axiom("forall ?data_not_A: ~A(?data_not_A)")
        
        ltnw.initialize_knowledgebase(initial_sat_level_threshold=.1)
        sat_level=ltnw.train(track_sat_levels=10000,sat_level_epsilon=.99)
        
        self.assertGreater(sat_level,.8)
            
        ltnw.constant("a",[0.5,0.5])
        ltnw.constant("b",[2.5,0.5])
        
        self.assertGreater(ltnw.ask("A(a)")[0],.8)
        self.assertGreater(ltnw.ask("~A(b)")[0],.8)

        result=ltnw.ask_m(["A(a)","~A(b)"])
        
        for r in result:
            self.assertGreater(r[0],.8)
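The trained predicate also generalises to unseen points; a minimal sketch using only the ltnw calls shown above (the probe constants and points are illustrative):

for i, point in enumerate([[0.05, 0.5], [0.5, 0.5], [2.5, 0.5]]):
    ltnw.constant("probe_%d" % i, point)
    print(point, ltnw.ask("A(probe_%d)" % i)[0])  # truth degree in [0, 1]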
Example No. 2
# define the language: every training example becomes a constant
[ltnw.constant("x_%s" % i, [x]) for i, x in enumerate(train_X)]
[ltnw.constant("y_%s" % i, [y]) for i, y in enumerate(train_Y)]

# define the function f as a linear regressor
W = tf.Variable(np.random.randn(), name="weight")
b = tf.Variable(np.random.randn(), name="bias")
ltnw.function("f", 1, 1, fun_definition=lambda X: tf.add(tf.multiply(X, W), b))

# define an equality predicate based on the Euclidean distance of two vectors
ltnw.predicate("eq", 2, ltnl.equal_euclidian)

# defining the theory
for f in ["eq(f(x_%s),y_%s)" % (i, i) for i in range(len(train_X))]:
    ltnw.axiom(f)
print("\n".join(sorted(ltnw.AXIOMS.keys())))

# initializing knowledgebase and optimizing
ltnw.initialize_knowledgebase(optimizer=tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate))
ltnw.train(max_epochs=epochs)

# visualize results on training data
ltnw.variable("?x", 1)
prediction = ltnw.ask("f(?x)",
                      feed_dict={"?x": train_X.reshape(len(train_X), 1)})
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(train_X, train_Y, 'o', color="black", label='Training data')
plt.plot(train_X, prediction.squeeze(), label='Fitted function')
plt.legend()
Example No. 3
# all ordered pairs of points more than unit distance apart, concatenated
distant_data = np.array([
    np.concatenate([data[i], data[j]])
    for i in range(len(data))
    for j in range(len(data))
    if np.sum(np.square(data[i] - data[j])) > np.square(1.)
])

# defining the language
ltnw.variable("?x", data)
ltnw.variable("?y", data)
ltnw.variable("?close_x_y", close_data)
ltnw.variable("?distant_x_y", distant_data)
[ltnw.predicate("C_" + str(i), 2) for i in range(nr_of_clusters)]

ltnw.function("first", 2, fun_definition=lambda d: d[:, :2])
ltnw.function("second", 2, fun_definition=lambda d: d[:, 2:])

print("defining the theory T")
ltnw.axiom("forall ?x: %s" %
           "|".join(["C_%s(?x)" % i for i in range(nr_of_clusters)]))
for i in range(nr_of_clusters):
    ltnw.axiom("exists ?x: C_%s(?x)" % i)
    ltnw.axiom(
        "forall ?close_x_y: C_%s(first(?close_x_y)) %% C_%s(second(?close_x_y))"
        % (i, i))
    ltnw.axiom(
        "forall ?distant_x_y: ~(C_%s(first(?distant_x_y)) & C_%s(second(?distant_x_y)))"
        % (i, i))
    for j in range(i + 1, nr_of_clusters):
        ltnw.axiom("forall ?x: ~(C_%s(?x) & C_%s(?x))" % (i, j))
print("%s" % "\n".join(ltnw.AXIOMS.keys()))

# initialize the knowledgebase and train
ltnw.initialize_knowledgebase(optimizer=torch.optim.RMSprop,
                              initial_sat_level_threshold=.5)
Example No. 4
cat_horizontal = ['Left', 'Right']  # class names assumed, mirroring Vertical below
Category_Horizontal = ltnw.class_category(class_label='Horizontal',
                                          number_of_features=2 * num_of_features,
                                          names_of_classes=cat_horizontal,
                                          device=device)
cat_vertical = ['Front', 'Behind']
Category_Vertical = ltnw.class_category(class_label='Vertical',
                                        number_of_features=2 * num_of_features,
                                        names_of_classes=cat_vertical,
                                        device=device)

# Object Variables Placeholders
ltnw.variable('?obj', torch.zeros(1, num_of_features))
ltnw.variable('?obj_2', torch.zeros(1, num_of_features))
for i, feat in enumerate(obj_colors):
    ltnw.mlp_predicate(label=feat.capitalize(), class_category=Category_Color)
    ltnw.variable('?is_' + feat, torch.zeros(1, num_of_features,
                                             device=device))
    ltnw.axiom('forall ?is_' + feat + ' : ' + feat.capitalize() + '(?is_' +
               feat + ')')
    ltnw.variable('?isnot_' + feat,
                  torch.zeros(1, num_of_features, device=device))
    ltnw.axiom('forall ?isnot_' + feat + ' : ~' + feat.capitalize() +
               '(?isnot_' + feat + ')')
for i, feat in enumerate(obj_sizes):
    ltnw.mlp_predicate(label=feat.capitalize(), class_category=Category_Size)
    ltnw.variable('?is_' + feat, torch.zeros(1, num_of_features,
                                             device=device))
    ltnw.axiom('forall ?is_' + feat + ' : ' + feat.capitalize() + '(?is_' +
               feat + ')')
    ltnw.variable('?isnot_' + feat,
                  torch.zeros(1, num_of_features, device=device))
    ltnw.axiom('forall ?isnot_' + feat + ' : ~' + feat.capitalize() +
               '(?isnot_' + feat + ')')
for i, feat in enumerate(obj_shapes):
    # assumed to mirror the color/size pattern above, with a Category_Shape
    # defined like the other class categories
    ltnw.mlp_predicate(label=feat.capitalize(), class_category=Category_Shape)
    ltnw.variable('?is_' + feat, torch.zeros(1, num_of_features, device=device))
    ltnw.axiom('forall ?is_' + feat + ' : ' + feat.capitalize() + '(?is_' +
               feat + ')')
    ltnw.variable('?isnot_' + feat,
                  torch.zeros(1, num_of_features, device=device))
    ltnw.axiom('forall ?isnot_' + feat + ' : ~' + feat.capitalize() +
               '(?isnot_' + feat + ')')
Example No. 5

import numpy as np
import tensorflow as tf
import logictensornetworks_wrapper as ltnw
import logictensornetworks_library as ltnl

# generate artificial data: one Gaussian blob per cluster (the counts, means
# and covariances below are illustrative assumptions)
nr_of_clusters = 4
nr_of_points_x_cluster = 50
clusters = []
for i in range(nr_of_clusters):
    mean = np.random.uniform([-1., -1.], [1., 1.], 2)
    cov = np.array([[.01, 0.], [0., .01]])
    clusters.append(
        np.random.multivariate_normal(mean=mean,
                                      cov=cov,
                                      size=nr_of_points_x_cluster).astype(
                                          np.float32))
data = np.concatenate(clusters)

# define the language
ltnw.variable("?x", data)
ltnw.variable("?y", data)
ltnw.predicate("close", 2, ltnl.equal_euclidian)
[ltnw.predicate("C_" + str(i), 2) for i in range(nr_of_clusters)]

## define the theory
print("defining the theory T")
ltnw.axiom("forall ?x: %s" %
           "|".join(["C_%s(?x)" % i for i in range(nr_of_clusters)]))
for i in range(nr_of_clusters):
    ltnw.axiom("exists ?x: C_%s(?x)" % i)
    ltnw.axiom("forall ?x,?y: (C_%s(?x) & close(?x,?y)) -> C_%s(?y)" % (i, i))
    ltnw.axiom(
        "forall ?x,?y: (C_%s(?x) & ~close(?x,?y)) -> (%s)" %
        (i, "|".join(["C_%s(?y)" % j
                      for j in range(nr_of_clusters) if i != j])))

    for j in range(i + 1, nr_of_clusters):
        ltnw.axiom("forall ?x: ~(C_%s(?x) & C_%s(?x))" % (i, j))
print("\n".join(sorted(ltnw.AXIOMS.keys())))

## initialize and optimize
ltnw.initialize_knowledgebase(optimizer=tf.train.RMSPropOptimizer(
    learning_rate=0.1, decay=.9),
    initial_sat_level_threshold=.5)  # closing kwarg assumed, as in Example No. 3
Example No. 6

import numpy as np
import matplotlib.pyplot as plt
import logictensornetworks_wrapper as ltnw

nr_samples=500

data=np.random.uniform([0,0],[1.,1.],(nr_samples,2)).astype(np.float32)
data_A=data[np.where(np.sum(np.square(data-[.5,.5]),axis=1)<.09)]
data_not_A=data[np.where(np.sum(np.square(data-[.5,.5]),axis=1)>=.09)]

ltnw.variable("?data_A",data_A)
ltnw.variable("?data_not_A",data_not_A)
ltnw.variable("?data",data)

ltnw.predicate("A",2)

ltnw.axiom("forall ?data_A: A(?data_A)")
ltnw.axiom("forall ?data_not_A: ~A(?data_not_A)")

ltnw.initialize_knowledgebase(initial_sat_level_threshold=.1)
sat_level=ltnw.train(track_sat_levels=1000,sat_level_epsilon=.99)
    
plt.figure(figsize=(12,8))
result=ltnw.ask("A(?data)")
plt.subplot(2,2,1)
plt.scatter(data[:,0],data[:,1],c=result.squeeze())
plt.colorbar()
plt.title("A(x) - training data")

result=ltnw.ask("~A(?data)")
plt.subplot(2,2,2)
plt.scatter(data[:,0],data[:,1],c=result.squeeze())
plt.colorbar()
plt.title("~A(x) - training data")
Example No. 7

import matplotlib.pyplot as plt
import numpy as np

import logictensornetworks_wrapper as ltnw

import spatial_relations_data

# generate artificial data
nr_examples = 50 # positive and negative examples for each predicate
nr_test_examples=400

# 1) define the language and examples
ltnw.predicate("Left",8)
ltnw.variable("?left_xy",8)
ltnw.variable("?not_left_xy", 8)
ltnw.axiom("forall ?left_xy: Left(?left_xy)")
ltnw.axiom("forall ?not_left_xy: ~Left(?not_left_xy)")


ltnw.predicate("Right",8)
ltnw.variable("?right_xy",8)
ltnw.variable("?not_right_xy",8)
ltnw.axiom("forall ?right_xy: Right(?right_xy)")
ltnw.axiom("forall ?not_right_xy: ~Right(?not_right_xy)")

ltnw.predicate("Below",8)
ltnw.variable("?below_xy",8)
ltnw.variable("?not_below_xy",8)
ltnw.axiom("forall ?below_xy: Below(?below_xy)")
ltnw.axiom("forall ?not_below_xy: ~Below(?not_below_xy)")
Example No. 8
ltnw.variable('?behind_pair',
              [full_obj_set[p[0]] + full_obj_set[p[1]] for p in behind_pairs])

time_diff = time.time() - start_time
print('Time to complete : ', time_diff)
start_time = time.time()
print('******* Predicate/Axioms for Object Features ******')

# Object Features
for feat in obj_feat:
    ltnw.predicate(label=feat.capitalize(),
                   number_of_features_or_vars=num_of_features,
                   layers=num_of_layers)

for i, feat in enumerate(obj_feat):
    ltnw.axiom('forall ?is_' + feat + ' : ' + feat.capitalize() + '(?is_' +
               feat + ')')
    ltnw.axiom('forall ?isnot_' + feat + ' : ~' + feat.capitalize() +
               '(?isnot_' + feat + ')')

# Implicit axioms about object features
## objects can only be one color
for c in obj_colors:
    is_color = ''
    is_not_color = ''
    for not_c in obj_colors:
        if not_c == c: is_color = c.capitalize() + '(?obj)'
        if not_c != c: is_not_color += '~' + not_c.capitalize() + '(?obj) &'
    ltnw.axiom('forall ?obj: ' + is_color + ' -> ' + is_not_color[:-1])
    ltnw.axiom('forall ?obj: ' + is_not_color[:-1] + ' -> ' + is_color)
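# e.g. with obj_colors = ['red', 'green', 'blue'] (illustrative), the first
# iteration adds the pair of axioms
#   forall ?obj: Red(?obj) -> ~Green(?obj) & ~Blue(?obj)
#   forall ?obj: ~Green(?obj) & ~Blue(?obj) -> Red(?obj)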
## objects can only be one size
for s in obj_sizes:
    # mirrors the single-color pattern above
    is_size = ''
    is_not_size = ''
    for not_s in obj_sizes:
        if not_s == s: is_size = s.capitalize() + '(?obj)'
        if not_s != s: is_not_size += '~' + not_s.capitalize() + '(?obj) &'
    ltnw.axiom('forall ?obj: ' + is_size + ' -> ' + is_not_size[:-1])
    ltnw.axiom('forall ?obj: ' + is_not_size[:-1] + ' -> ' + is_size)
Example No. 9

import numpy as np
import matplotlib.pyplot as plt
import torch
import logictensornetworks_wrapper as ltnw

nr_samples=500

data=np.random.uniform([0,0],[1.,1.],(nr_samples,2)).astype(np.float32)
data_A=data[np.where(np.sum(np.square(data-[.5,.5]),axis=1)<.09)]
data_B=data[np.where(np.sum(np.square(data-[.5,.5]),axis=1)>=.09)]

ltnw.variable("?data",data)
ltnw.variable("?data_A",data_A)
ltnw.variable("?data_B",data_B)

ltnw.predicate("A",2)
ltnw.predicate("B",2)

ltnw.axiom("forall ?data_A: A(?data_A)")
ltnw.axiom("forall ?data_B: ~A(?data_B)")

ltnw.axiom("forall ?data_B: B(?data_B)")
ltnw.axiom("forall ?data_A: ~B(?data_A)")

ltnw.axiom("forall ?data: A(?data) -> ~B(?data)")
ltnw.axiom("forall ?data: B(?data) -> ~A(?data)")

ltnw.initialize_knowledgebase(optimizer=torch.optim.RMSprop, initial_sat_level_threshold=.99)
# The number of iterations was dramatically reduced.
sat_level=ltnw.train(track_sat_levels=1000,sat_level_epsilon=.01,max_epochs=2000)

result=ltnw.ask("A(?data)")
plt.figure(figsize=(10,8))
plt.subplot(2,2,1)
plt.scatter(data[:,0],data[:,1],c=result.squeeze())
plt.colorbar()
plt.title("A(x) - training data")
Example No. 10
ltnw.variable("p", tf.concat(list(ltnw.CONSTANTS.values()), axis=0))
ltnw.variable("q", ltnw.VARIABLES["p"])
ltnw.variable("p1", tf.concat([ltnw.CONSTANTS[l] for l in "abcdefgh"], axis=0))
ltnw.variable("q1", ltnw.VARIABLES["p1"])
ltnw.variable("p2", tf.concat([ltnw.CONSTANTS[l] for l in "ijklmn"], axis=0))
ltnw.variable("q2", ltnw.VARIABLES["p2"])

# declare the predicates
ltnw.predicate('Friends', embedding_size * 2)
ltnw.predicate('Smokes', embedding_size)
ltnw.predicate('Cancer', embedding_size)
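# Friends takes a pair of embedded constants, hence embedding_size * 2 input
# features; Smokes and Cancer are unary and take embedding_size features each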

# add the assertional knowledge we possess

ltnw.axiom("Friends(a,b)")
ltnw.axiom("~Friends(a,c)")
ltnw.axiom("~Friends(a,d)")
ltnw.axiom("Friends(a,e)")
ltnw.axiom("Friends(a,f)")
ltnw.axiom("Friends(a,g)")
ltnw.axiom("~Friends(a,h)")
ltnw.axiom("Friends(b,c)")
ltnw.axiom("~Friends(b,d)")
ltnw.axiom("~Friends(b,e)")
ltnw.axiom("~Friends(b,f)")
ltnw.axiom("~Friends(b,g)")
ltnw.axiom("~Friends(b,h)")
ltnw.axiom("Friends(c,d)")
ltnw.axiom("~Friends(c,e)")
ltnw.axiom("~Friends(c,f)")
Example No. 11
import numpy as np
import logictensornetworks_wrapper as ltnw

nr_samples = 500  # assumed, as in the sibling examples

data_A=np.random.uniform([0,0],[.25,1.],(nr_samples,2)).astype(np.float32)
data_B=np.random.uniform([.75,0],[1.,1.],(nr_samples,2)).astype(np.float32)
data=np.concatenate([data_A,data_B])

ltnw.variable("?data_A",data_A)
ltnw.variable("?data_A_2",data_A)
ltnw.variable("?data_B",data_B)
ltnw.variable("?data_B_2",data_B)
ltnw.variable("?data",data)
ltnw.variable("?data_1",data)
ltnw.variable("?data_2",data)

ltnw.predicate("A",2)
ltnw.predicate("B",2)

ltnw.axiom("forall ?data_A: A(?data_A)")
ltnw.axiom("forall ?data_B: ~A(?data_B)")

ltnw.axiom("forall ?data_B: B(?data_B)")
ltnw.axiom("forall ?data_A: ~B(?data_A)")

ltnw.predicate("R_A_A",4)
ltnw.predicate("R_B_B",4)
ltnw.predicate("R_A_B",4)

ltnw.axiom("forall ?data, ?data_2: (A(?data) & A(?data_2)) -> R_A_A(?data,?data_2)")
ltnw.axiom("forall ?data, ?data_2: R_A_A(?data,?data_2) -> (A(?data) & A(?data_2))")

ltnw.axiom("forall ?data, ?data_2: (B(?data) & B(?data_2)) -> R_B_B(?data,?data_2)")
ltnw.axiom("forall ?data, ?data_2: R_B_B(?data,?data_2) -> (B(?data) & B(?data_2))")