Example #1
    def testSimplePredicateOptimization(self):
        nr_samples=100

        # two groups of 2D samples: one that should satisfy A, one that should not
        ltnw.variable("?data_A",numpy.random.uniform([0.,0.],[.1,1.],(nr_samples,2)).astype("float32"))
        ltnw.variable("?data_not_A",numpy.random.uniform([2.,0.],[3.,1.],(nr_samples,2)).astype("float32"))

        # a learnable predicate over 2D inputs, constrained by the axioms below
        ltnw.predicate("A",2)
        
        ltnw.axiom("forall ?data_A: A(?data_A)")
        ltnw.axiom("forall ?data_not_A: ~A(?data_not_A)")
        
        ltnw.initialize_knowledgebase(initial_sat_level_threshold=.1)
        sat_level=ltnw.train(track_sat_levels=10000,sat_level_epsilon=.99)
        
        self.assertGreater(sat_level,.8)
            
        ltnw.constant("a",[0.5,0.5])
        ltnw.constant("b",[2.5,0.5])
        
        self.assertGreater(ltnw.ask("A(a)")[0],.8)
        self.assertGreater(ltnw.ask("~A(b)")[0],.8)

        result=ltnw.ask_m(["A(a)","~A(b)"])
        
        for r in result:
            self.assertGreater(r[0],.8)
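The test above assumes NumPy and the LTN wrapper are imported at module level inside a unittest.TestCase; a minimal sketch of that context (the module path logictensornetworks_wrapper and the class name are assumptions):

import unittest
import numpy
import logictensornetworks_wrapper as ltnw

class TestWrapper(unittest.TestCase):  # hypothetical class name
    # ... testSimplePredicateOptimization goes here ...
    pass

if __name__ == "__main__":
    unittest.main()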
Example #2
# define the function f as a linear regressor
W = tf.Variable(np.random.randn(), name="weight")
b = tf.Variable(np.random.randn(), name="bias")
ltnw.function("f", 1, 1, fun_definition=lambda X: tf.add(tf.multiply(X, W), b))

# defining an equal predicate based on the euclidian distance of two vectors
ltnw.predicate("eq", 2, ltnl.equal_euclidian)

# defining the theory
for f in ["eq(f(x_%s),y_%s)" % (i, i) for i in range(len(train_X))]:
    ltnw.axiom(f)
print("\n".join(sorted(ltnw.AXIOMS.keys())))

# initializing knowledgebase and optimizing
ltnw.initialize_knowledgebase(
    optimizer=tf.train.GradientDescentOptimizer(learning_rate=learning_rate))
ltnw.train(max_epochs=epochs)

# visualize results on training data
ltnw.variable("?x", 1)
prediction = ltnw.ask("f(?x)",
                      feed_dict={"?x": train_X.reshape(len(train_X), 1)})
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(train_X, train_Y, 'o', color="black", label='Training data')
plt.plot(train_X,
         ltnw.SESSION.run(W) * train_X + ltnw.SESSION.run(b),
         label='Fitted line')
plt.plot(train_X, prediction, 'o', color="red", label='prediction')
plt.legend()
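The snippet refers to train_X, train_Y, learning_rate, epochs and the constants x_i/y_i defined before this excerpt; a hedged sketch of that setup (data and hyperparameter values are illustrative, module paths are assumptions):

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import logictensornetworks_wrapper as ltnw
import logictensornetworks_library as ltnl

learning_rate = 0.01  # illustrative hyperparameters
epochs = 1000
train_X = np.random.uniform(-1., 1., 50).astype(np.float32)
train_Y = (2. * train_X + 1. + np.random.normal(0., .1, 50)).astype(np.float32)

# register each training point as LTN constants x_i / y_i, as used in the axioms
for i in range(len(train_X)):
    ltnw.constant("x_%s" % i, [train_X[i]])
    ltnw.constant("y_%s" % i, [train_Y[i]])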
Example #3
ltnw.axiom("forall ?x: %s" %
           "|".join(["C_%s(?x)" % i for i in range(nr_of_clusters)]))
for i in range(nr_of_clusters):
    ltnw.axiom("exists ?x: C_%s(?x)" % i)
    ltnw.axiom(
        "forall ?close_x_y: C_%s(first(?close_x_y)) %% C_%s(second(?close_x_y))"
        % (i, i))
    ltnw.axiom(
        "forall ?distant_x_y: ~(C_%s(first(?distant_x_y)) & C_%s(second(?distant_x_y)))"
        % (i, i))
    for j in range(i + 1, nr_of_clusters):
        ltnw.axiom("forall ?x: ~(C_%s(?x) & C_%s(?x))" % (i, j))
print("%s" % "\n".join(ltnw.AXIOMS.keys()))

# initialize the knowledgebase and train
ltnw.initialize_knowledgebase(optimizer=torch.optim.RMSprop,
                              initial_sat_level_threshold=.5)
ltnw.train(max_epochs=200)

# retrieve the truth values for all samples and all clusters, i.e. check membership
prC = [ltnw.ask("C_%s(?x)" % i) for i in range(nr_of_clusters)]
n = 2
m = (nr_of_clusters + 1) // n + 1

fig = plt.figure(figsize=(3 * 3, m * 3))

fig.add_subplot(m, n, 1)
plt.title("groundtruth")
for c in clusters:
    plt.scatter(c[:, 0], c[:, 1])
data = np.concatenate(clusters)
x0 = data[:, 0]
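The clustering snippet above presupposes sample clusters, the variables ?x, ?close_x_y and ?distant_x_y (flattened point pairs read back through the functions first and second), and one predicate C_i per cluster. A hedged sketch of that setup, with illustrative data and distance thresholds:

import numpy as np
import matplotlib.pyplot as plt

nr_of_clusters = 2  # illustrative
centers = [[0., 0.], [1., 1.]]
clusters = [np.random.normal(loc=c, scale=.1, size=(50, 2)).astype(np.float32)
            for c in centers]
data = np.concatenate(clusters)

ltnw.variable("?x", data)
# all point pairs, flattened to 4D vectors; first/second recover the two points
pairs = np.array([np.concatenate([x, y]) for x in data for y in data], dtype=np.float32)
dist = np.linalg.norm(pairs[:, :2] - pairs[:, 2:], axis=1)
ltnw.variable("?close_x_y", pairs[dist < .2])
ltnw.variable("?distant_x_y", pairs[dist > 1.])
ltnw.function("first", 4, 2, fun_definition=lambda d: d[:, :2])
ltnw.function("second", 4, 2, fun_definition=lambda d: d[:, 2:])
for i in range(nr_of_clusters):
    ltnw.predicate("C_%s" % i, 2)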
Example #4
        ltnw.variable('?front_pair',
                      torch.stack([
                          torch.cat([full_obj_set[p[0]], full_obj_set[p[1]]])
                          for p in pairs['front']
                      ]).to(device),
                      verbose=False)
        ltnw.variable('?behind_pair',
                      torch.stack([
                          torch.cat([full_obj_set[p[0]], full_obj_set[p[1]]])
                          for p in pairs['behind']
                      ]).to(device),
                      verbose=False)

        if ep + b == 0:  # Initialise LTN at very beginning of training
            print('******* Initialising LTN ******')
            sat_level = ltnw.initialize_knowledgebase(
                initial_sat_level_threshold=.5,
                device=device,
                learn_rate=learning_rate,
                perception_mode=perception_mode)
            print("Initial Satisfiability %f" % (sat_level))
            print("Initial p-Value %f" % (p_factor * (sat_level.item()**2)))
        ltnw.set_p_value(p_factor * (sat_level.item()**2))
        sat_level = ltnw.train(
            max_epochs=1,
            sat_level_epsilon=.01,
            track_values=False,
            device=device,
            show_progress=False)  #, early_stop_level=0.00001)

    dictw.writerow({
        key: value.detach().cpu().numpy()[0]
        for (key, value) in ltnw.AXIOMS.items()
    })
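The excerpt runs inside a loop over epochs (ep) and batches (b) and logs the per-axiom truth values through dictw; a minimal sketch of that logger (the file name and the use of csv.DictWriter are assumptions):

import csv

# hypothetical CSV logger for per-axiom satisfaction values
log_file = open("axiom_sat_levels.csv", "w", newline="")
dictw = csv.DictWriter(log_file, fieldnames=list(ltnw.AXIOMS.keys()))
dictw.writeheader()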
           "|".join(["C_%s(?x)" % i for i in range(nr_of_clusters)]))
for i in range(nr_of_clusters):
    ltnw.axiom("exists ?x: C_%s(?x)" % i)
    ltnw.axiom("forall ?x,?y: (C_%s(?x) & close(?x,?y)) -> C_%s(?y)" % (i, i))
    ltnw.axiom(
        "forall ?x,?y: (C_%s(?x) & ~close(?x,?y)) -> (%s)" %
        (i, "|".join(["C_%s(?y)" % j
                      for j in range(nr_of_clusters) if i != j])))

    for j in range(i + 1, nr_of_clusters):
        ltnw.axiom("forall ?x: ~(C_%s(?x) & C_%s(?x))" % (i, j))
print("\n".join(sorted(ltnw.AXIOMS.keys())))

## initialize and optimize
ltnw.initialize_knowledgebase(
    optimizer=tf.train.RMSPropOptimizer(learning_rate=0.1, decay=.9),
    initial_sat_level_threshold=.0)
ltnw.train(max_epochs=1000)

## visualize results
nr_of_clusters = len(clusters)
prC = [ltnw.ask("C_%s(?x)" % i) for i in range(nr_of_clusters)]
n = 2
m = (nr_of_clusters + 1) // n + 1

fig = plt.figure(figsize=(3 * 3, m * 3))

fig.add_subplot(m, n, 1)
plt.title("groundtruth")
for c in clusters:
    plt.scatter(c[:, 0], c[:, 1])
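This variant quantifies over two point variables and a learned close predicate rather than precomputed pairs; a hedged sketch of the setup it assumes (the closeness definition here is illustrative, not necessarily the original's):

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

clusters = [np.random.normal(loc=c, scale=.1, size=(50, 2)).astype(np.float32)
            for c in ([0., 0.], [1., 1.])]  # illustrative data
nr_of_clusters = len(clusters)
data = np.concatenate(clusters)

ltnw.variable("?x", data)
ltnw.variable("?y", data)
for i in range(nr_of_clusters):
    ltnw.predicate("C_%s" % i, 2)
# hypothetical closeness predicate: truth decays with squared distance
ltnw.predicate("close", 4,
               lambda x, y: tf.exp(-tf.reduce_sum(tf.square(x - y), axis=1, keepdims=True)))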
Example #6
nr_samples=500

data=np.random.uniform([0,0],[1.,1.],(nr_samples,2)).astype(np.float32)
data_A=data[np.where(np.sum(np.square(data-[.5,.5]),axis=1)<.09)]
data_not_A=data[np.where(np.sum(np.square(data-[.5,.5]),axis=1)>=.09)]

ltnw.variable("?data_A",data_A)
ltnw.variable("?data_not_A",data_not_A)
ltnw.variable("?data",data)

ltnw.predicate("A",2)

ltnw.axiom("forall ?data_A: A(?data_A)")
ltnw.axiom("forall ?data_not_A: ~A(?data_not_A)")

ltnw.initialize_knowledgebase(initial_sat_level_threshold=.1)
sat_level=ltnw.train(track_sat_levels=1000,sat_level_epsilon=.99)
    
plt.figure(figsize=(12,8))
result=ltnw.ask("A(?data)")
plt.subplot(2,2,1)
plt.scatter(data[:,0],data[:,1],c=result.squeeze())
plt.colorbar()
plt.title("A(x) - training data")

result=ltnw.ask("~A(?data)")
plt.subplot(2,2,2)
plt.scatter(data[:,0],data[:,1],c=result.squeeze())
plt.colorbar()
plt.title("~A(x) - training data")
            "?not_right_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.is_not_right),
            "?above_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.is_above),
            "?not_above_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.is_not_above),
            "?below_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.is_below),
            "?not_below_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.is_not_below),
            "?contains_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.contains),
            "?not_contains_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.not_contains),
            "?contained_in_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.is_in),
            "?not_contained_in_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.is_not_in),
            "?x" : spatial_relations_data.generate_rectangles(nr_examples),
            "?y" : spatial_relations_data.generate_rectangles(nr_examples),
            "?z" : spatial_relations_data.generate_rectangles(nr_examples)}

# 4) train the model
ltnw.initialize_knowledgebase(feed_dict=feed_dict,
                              optimizer=tf.train.AdamOptimizer(0.05),
                              formula_aggregator=lambda *x: tf.reduce_min(tf.concat(x,axis=0)))
ltnw.train(feed_dict=feed_dict,max_epochs=10000)

# 5) evaluate the truth of formulas not given directly to the model
for f in ["forall ?x,?y,?z: Contained_in(?x,?y) -> (Left(?y,?z)->Left(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Right(?y,?z)->Right(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Above(?y,?z)->Above(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Below(?y,?z)->Below(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Contains(?y,?z)->Contains(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Contained_in(?y,?z)->Contained_in(?x,?z))"]:
    print("%s: %s" % (f,ltnw.ask(f,feed_dict=feed_dict)))

# 6) plot some examples truth values of P(ct,t) where ct is a central rectangle, and
# t is a set of randomly generated rectangles
ltnw.constant("ct",[.5,.5,.3,.3])
Example #8
#ltnw.axiom('forall ?obj: ~Front(?obj, ?obj)')

ltnw.axiom('forall ?obj, ?obj_2: Behind(?obj, ?obj_2) -> ~Front(?obj, ?obj_2)')
#ltnw.axiom('forall ?obj, ?obj_2: Behind(?obj, ?obj_2) -> ~Behind(?obj_2, ?obj)')
ltnw.axiom('forall ?obj, ?obj_2: ~Front(?obj, ?obj_2) -> Behind(?obj, ?obj_2)')
#ltnw.axiom('forall ?obj, ?obj_2: ~Behind(?obj_2, ?obj) -> Behind(?obj, ?obj_2)')
#ltnw.axiom('forall ?obj: ~Left(?obj, ?obj)')

#####################
### Train the LTN ###
#####################
time_diff = time.time() - start_time
print('Time to complete : ', time_diff)
start_time = time.time()
print('******* Initialising LTN ******')
ltnw.initialize_knowledgebase(initial_sat_level_threshold=.5,
                              learn_rate=learning_rate)

time_diff = time.time() - start_time
print('Time to complete : ', time_diff)
start_time = time.time()
print('******* Training LTN ******')
sat_level = ltnw.train(max_epochs=max_epochs,
                       sat_level_epsilon=.005,
                       track_values=True)  #, early_stop_level=0.00001)

####################
### Test the LTN ###
####################

# ask queries about objects in image_val_00000.png
# print('\nIs object0 (large brown cylinder) in front of object3 (large purple sphere)? ', ltnw.ask('Front(object3,object0)'))
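Mirroring the commented query above, a minimal sketch of probing the trained relations (assuming object0 and object3 were registered earlier as ltnw constants):

# hypothetical queries against the trained Front/Behind predicates
print('Front(object3,object0):', ltnw.ask('Front(object3,object0)'))
print('Behind(object3,object0):', ltnw.ask('Behind(object3,object0)'))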
Example #9
# The truth values of the formulas in group 1 and group 2
# should be the same. Since in group 1 everything is known, these
# formulas have the effect of transferring the knowledge about group 1
# to group 2.

ltnw.axiom("forall p1:Cancer(p1) % forall p2:Cancer(p2)")
ltnw.axiom(
    "forall p1:(Smokes(p1) -> Cancer(p1)) % forall p2:(Smokes(p2) -> Cancer(p2))"
)
ltnw.axiom(
    "forall p1:(Cancer(p1) -> Smokes(p1)) % forall p2:(Cancer(p2) -> Smokes(p2))"
)

# initialize knowledge base

ltnw.initialize_knowledgebase(
    optimizer=tf.train.RMSPropOptimizer(learning_rate=0.01, decay=.9),
    formula_aggregator=lambda *x: 1. / tf.reduce_mean(1. / tf.concat(x, axis=0)))

# Train the KB
ltnw.train(max_epochs=10000)

# query the KB and display the results

df_smokes_cancer = pd.DataFrame(
    np.concatenate([ltnw.ask("Smokes(p)"), ltnw.ask("Cancer(p)")], axis=1),
    columns=["Smokes", "Cancer"],
    index=list('abcdefghijklmn'))

df_friends_ah = pd.DataFrame(np.squeeze(ltnw.ask("Friends(p1,q1)")),
                             index=list('abcdefgh'),
                             columns=list('abcdefgh'))
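The two dataframes can then be inspected directly; for instance:

# display the queried truth values
print(df_smokes_cancer.round(2))
print(df_friends_ah.round(2))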