Example #1
    def testSimplePredicateOptimization(self):
        nr_samples=100

        ltnw.variable("?data_A",numpy.random.uniform([0.,0.],[.1,1.],(nr_samples,2)).astype("float32"))
        ltnw.variable("?data_not_A",numpy.random.uniform([2.,0],[3.,1.],(nr_samples,2)).astype("float32"))
        
        ltnw.predicate("A",2)
        
        ltnw.axiom("forall ?data_A: A(?data_A)")
        ltnw.axiom("forall ?data_not_A: ~A(?data_not_A)")
        
        ltnw.initialize_knowledgebase(initial_sat_level_threshold=.1)
        sat_level=ltnw.train(track_sat_levels=10000,sat_level_epsilon=.99)
        
        self.assertGreater(sat_level,.8)
            
        ltnw.constant("a",[0.5,0.5])
        ltnw.constant("b",[2.5,0.5])
        
        self.assertGreater(ltnw.ask("A(a)")[0],.8)
        self.assertGreater(ltnw.ask("~A(b)")[0],.8)

        result=ltnw.ask_m(["A(a)","~A(b)"])
        
        for r in result:
            self.assertGreater(r[0],.8)
Example #2
W = tf.Variable(np.random.randn(), name="weight")
b = tf.Variable(np.random.randn(), name="bias")
ltnw.function("f", 1, 1, fun_definition=lambda X: tf.add(tf.multiply(X, W), b))

# defining an equal predicate based on the euclidean distance of two vectors
ltnw.predicate("eq", 2, ltnl.equal_euclidian)
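
# For reference, a plausible sketch of such a similarity predicate (an assumed
# stand-in, not ltnl's verbatim code): truth decays with the Euclidean distance
# between the two argument vectors and stays in (0, 1].
def equal_euclidian_sketch(x, y):
    return tf.exp(-tf.sqrt(tf.reduce_sum(tf.square(x - y), axis=1, keepdims=True)))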

# defining the theory
for f in ["eq(f(x_%s),y_%s)" % (i, i) for i in range(len(train_X))]:
    ltnw.axiom(f)
print("\n".join(sorted(ltnw.AXIOMS.keys())))

# initializing knowledgebase and optimizing
ltnw.initialize_knowledgebase(optimizer=tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate))
ltnw.train(max_epochs=epochs)

# visualize results on training data
ltnw.variable("?x", 1)
prediction = ltnw.ask("f(?x)",
                      feed_dict={"?x": train_X.reshape(len(train_X), 1)})
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(train_X, train_Y, 'o', label='Training data', color="black")
plt.plot(train_X,
         ltnw.SESSION.run(W) * train_X + ltnw.SESSION.run(b),
         label='Fitted line')
plt.plot(train_X, prediction, 'o', label='prediction', color="red")
plt.legend()

# generate test data and visualize regressor results
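# A hypothetical continuation (assumed, mirroring the training-data plot above):
test_X = np.random.uniform(train_X.min(), train_X.max(), len(train_X)).astype("float32")
prediction = ltnw.ask("f(?x)", feed_dict={"?x": test_X.reshape(len(test_X), 1)})
plt.subplot(1, 2, 2)
plt.plot(test_X, prediction, 'o', color="red", label='prediction on test data')
plt.legend()
plt.show()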
Example #3
for i in range(nr_of_clusters):
    ltnw.axiom("exists ?x: C_%s(?x)" % i)
    # close pairs belong to the same cluster ("%%" formats to the single
    # connective "%")
    ltnw.axiom(
        "forall ?close_x_y: C_%s(first(?close_x_y)) %% C_%s(second(?close_x_y))"
        % (i, i))
    # distant pairs must not both belong to the same cluster
    ltnw.axiom(
        "forall ?distant_x_y: ~(C_%s(first(?distant_x_y)) & C_%s(second(?distant_x_y)))"
        % (i, i))
    for j in range(i + 1, nr_of_clusters):
        ltnw.axiom("forall ?x: ~(C_%s(?x) & C_%s(?x))" % (i, j))
print("\n".join(ltnw.AXIOMS.keys()))

# initialize the knowledgebase and train
ltnw.initialize_knowledgebase(optimizer=torch.optim.RMSprop,
                              initial_sat_level_threshold=.5)
ltnw.train(max_epochs=200)

# retrieve the truth values for all samples and all clusters, i.e. check membership
prC = [ltnw.ask("C_%s(?x)" % i) for i in range(nr_of_clusters)]
n = 2
m = (nr_of_clusters + 1) // n + 1

fig = plt.figure(figsize=(3 * 3, m * 3))

fig.add_subplot(m, n, 1)
plt.title("groundtruth")
for c in clusters:
    plt.scatter(c[:, 0], c[:, 1])
data = np.concatenate(clusters)
x0 = data[:, 0]
x1 = data[:, 1]
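
# A hypothetical continuation (assumed): one subplot per cluster, with every
# sample coloured by its membership truth value C_i(?x).
for i in range(nr_of_clusters):
    fig.add_subplot(m, n, i + 2)
    plt.title("C_%s(?x)" % i)
    plt.scatter(x0, x1, c=prC[i].squeeze())
    plt.colorbar()
plt.show()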
Example #4
                      ]).to(device),
                      verbose=False)

        if ep + b == 0:  # Initialise LTN at very beginning of training
            print('******* Initialising LTN ******')
            sat_level = ltnw.initialize_knowledgebase(
                initial_sat_level_threshold=.5,
                device=device,
                learn_rate=learning_rate,
                perception_mode=perception_mode)
            print("Initial Satisfiability %f" % (sat_level))
            print("Initial p-Value %f" % (p_factor * (sat_level.item()**2)))
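        # assumed interpretation: p is the exponent of LTN's generalized-mean
        # aggregation, scheduled as p_factor * sat_level**2 so that it tightens
        # as satisfiability improves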
        ltnw.set_p_value(p_factor * (sat_level.item()**2))
        sat_level = ltnw.train(
            max_epochs=1,
            sat_level_epsilon=.01,
            track_values=False,
            device=device,
            show_progress=False)  #, early_stop_level=0.00001)

    dictw.writerow({
        key: value.detach().cpu().numpy()[0]
        for (key, value) in ltnw.AXIOMS.items()
    })
    if sat_level > 0.997: break
    pbar.set_description("Current Satisfiability %f" % (sat_level))
    pbar.update(1)
print("Final p-Value %f" % (p_factor * (sat_level.item()**2)))

####################
### Save the LTN ###
####################
Example #5
for i in range(nr_of_clusters):
    ltnw.axiom("exists ?x: C_%s(?x)" % i)
    ltnw.axiom("forall ?x,?y: (C_%s(?x) & close(?x,?y)) -> C_%s(?y)" % (i, i))
    ltnw.axiom(
        "forall ?x,?y: (C_%s(?x) & ~close(?x,?y)) -> (%s)" %
        (i, "|".join(["C_%s(?y)" % j
                      for j in range(nr_of_clusters) if i != j])))

    for j in range(i + 1, nr_of_clusters):
        ltnw.axiom("forall ?x: ~(C_%s(?x) & C_%s(?x))" % (i, j))
print("\n".join(sorted(ltnw.AXIOMS.keys())))

## initialize and optimize
ltnw.initialize_knowledgebase(optimizer=tf.train.RMSPropOptimizer(
    learning_rate=0.1, decay=.9),
                              initial_sat_level_threshold=.0)
ltnw.train(max_epochs=1000)

## visualize results
nr_of_clusters = len(clusters)
prC = [ltnw.ask("C_%s(?x)" % i) for i in range(nr_of_clusters)]
n = 2
m = (nr_of_clusters + 1) // n + 1

fig = plt.figure(figsize=(3 * 3, m * 3))

fig.add_subplot(m, n, 1)
plt.title("groundtruth")
for c in clusters:
    plt.scatter(c[:, 0], c[:, 1])
data = np.concatenate(clusters)
x0 = data[:, 0]
Example #6
# samples inside the circle of radius 0.3 around (.5,.5) are examples of A,
# the remaining samples are counterexamples
data=np.random.uniform([0,0],[1.,1.],(nr_samples,2)).astype(np.float32)
data_A=data[np.where(np.sum(np.square(data-[.5,.5]),axis=1)<.09)]
data_not_A=data[np.where(np.sum(np.square(data-[.5,.5]),axis=1)>=.09)]

ltnw.variable("?data_A",data_A)
ltnw.variable("?data_not_A",data_not_A)
ltnw.variable("?data",data)

ltnw.predicate("A",2)

ltnw.axiom("forall ?data_A: A(?data_A)")
ltnw.axiom("forall ?data_not_A: ~A(?data_not_A)")

ltnw.initialize_knowledgebase(initial_sat_level_threshold=.1)
sat_level=ltnw.train(track_sat_levels=1000,sat_level_epsilon=.99)
    
plt.figure(figsize=(12,8))
result=ltnw.ask("A(?data)")
plt.subplot(2,2,1)
plt.scatter(data[:,0],data[:,1],c=result.squeeze())
plt.colorbar()
plt.title("A(x) - training data")

result=ltnw.ask("~A(?data)")
plt.subplot(2,2,2)
plt.scatter(data[:,0],data[:,1],c=result.squeeze())
plt.colorbar()
plt.title("~A(x) - training data")

data_test=np.random.uniform([0,0],[1.,1.],(500,2)).astype(np.float32)
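
# A hypothetical continuation (assumed, mirroring the training plots above):
# evaluate the learned predicate on the held-out samples.
ltnw.variable("?data_test", data_test)
result = ltnw.ask("A(?data_test)")
plt.subplot(2, 2, 3)
plt.scatter(data_test[:, 0], data_test[:, 1], c=result.squeeze())
plt.colorbar()
plt.title("A(x) - test data")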
            "?not_above_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.is_not_above),
            "?below_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.is_below),
            "?not_below_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.is_not_below),
            "?contains_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.contains),
            "?not_contains_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.not_contains),
            "?contained_in_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.is_in),
            "?not_contained_in_xy" : spatial_relations_data.generate_data(nr_examples,spatial_relations_data.is_not_in),
            "?x" : spatial_relations_data.generate_rectangles(nr_examples),
            "?y" : spatial_relations_data.generate_rectangles(nr_examples),
            "?z" : spatial_relations_data.generate_rectangles(nr_examples)}

# 4) train the model
ltnw.initialize_knowledgebase(feed_dict=feed_dict,
                              optimizer=tf.train.AdamOptimizer(0.05),
                              formula_aggregator=lambda *x: tf.reduce_min(tf.concat(x,axis=0)))
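# Note: this formula_aggregator scores the knowledge base by its minimum axiom
# truth value, so optimization targets the worst-satisfied formula. A softer,
# assumed alternative: formula_aggregator=lambda *x: tf.reduce_mean(tf.concat(x, axis=0)).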
ltnw.train(feed_dict=feed_dict,max_epochs=10000)

# 5) evaluate the truth of formulas not given directly to the model
for f in ["forall ?x,?y,?z: Contained_in(?x,?y) -> (Left(?y,?z)->Left(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Right(?y,?z)->Right(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Above(?y,?z)->Above(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Below(?y,?z)->Below(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Contains(?y,?z)->Contains(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Contained_in(?y,?z)->Contained_in(?x,?z))"]:
    print("%s: %s" % (f,ltnw.ask(f,feed_dict=feed_dict)))

# 6) plot some example truth values of P(ct,?t), where ct is a central rectangle
# and ?t is a set of randomly generated rectangles
ltnw.constant("ct",[.5,.5,.3,.3])
test_data=spatial_relations_data.generate_rectangles(nr_test_examples)
ltnw.variable("?t",test_data)
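
# A hypothetical continuation (assumed): query each learned relation between the
# central rectangle ct and the random test rectangles ?t, colouring each test
# rectangle by the returned truth value (assumes the first two rectangle
# features are its x/y position).
for p in ["Left", "Right", "Above", "Below", "Contains", "Contained_in"]:
    result = ltnw.ask("%s(ct,?t)" % p, feed_dict=feed_dict)
    plt.figure()
    plt.title("%s(ct,?t)" % p)
    plt.scatter(test_data[:, 0], test_data[:, 1], c=result.squeeze())
    plt.colorbar()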
Example #8
#####################
### Train the LTN ###
#####################
time_diff = time.time() - start_time
print('Time to complete : ', time_diff)
start_time = time.time()
print('******* Initialising LTN ******')
ltnw.initialize_knowledgebase(initial_sat_level_threshold=.5,
                              learn_rate=learning_rate)

time_diff = time.time() - start_time
print('Time to complete : ', time_diff)
start_time = time.time()
print('******* Training LTN ******')
sat_level = ltnw.train(max_epochs=max_epochs,
                       sat_level_epsilon=.005,
                       track_values=True)  #, early_stop_level=0.00001)

####################
### Test the LTN ###
####################

# ask queries about objects in image_val_00000.png
# print('\nIs object0 (large brown cylinder) in front of object3 (large purple sphere)? ', ltnw.ask('Front(object3,object0)'))
# print('Is object3 (large purple sphere) not to the left of object2 (small green cylinder)? ', ltnw.ask('~Left(object2,object3)'))
# print('Is object2 (small green cylinder) to the left of object1 (large gray cube)? ', ltnw.ask('Left(object1,object2)'))
# print('Is object4 (small gray cube) to the right of object0 (large brown cylinder)? ', ltnw.ask('Right(object0, object4)'))
# print('Is object2 (small green cylinder) small? ', ltnw.ask('Small(object2)'))
# print('Is object1 (large gray cube) a sphere? ', ltnw.ask('Sphere(object1)'))
# print('Is there an object to the right of object1 (large gray cube)?', ltnw.ask('exists ?obj: Right(object1,?obj)'))
Example #9
ltnw.predicate("A",2)
ltnw.predicate("B",2)

ltnw.axiom("forall ?data_A: A(?data_A)")
ltnw.axiom("forall ?data_B: ~A(?data_B)")

ltnw.axiom("forall ?data_B: B(?data_B)")
ltnw.axiom("forall ?data_A: ~B(?data_A)")

ltnw.axiom("forall ?data: A(?data) -> ~B(?data)")
ltnw.axiom("forall ?data: B(?data) -> ~A(?data)")

ltnw.initialize_knowledgebase(optimizer=torch.optim.RMSprop, initial_sat_level_threshold=.99)
# The number of iterations was dramatically reduced.
sat_level=ltnw.train(track_sat_levels=1000,sat_level_epsilon=.01,max_epochs=2000)

result=ltnw.ask("A(?data)")
plt.figure(figsize=(10,8))
plt.subplot(2,2,1)
plt.title("A(x) - training")
plt.scatter(data[:,0],data[:,1],c=result.squeeze())
plt.colorbar()

plt.subplot(2,2,2)
result=ltnw.ask("B(?data)")
plt.title("B(x) - training")
plt.scatter(data[:,0],data[:,1],c=result.squeeze())
plt.colorbar()

data_test=np.random.uniform([0,0],[1.,1.],(nr_samples,2)).astype(np.float32)
Example #10
ltnw.predicate("R_A_A",4)
ltnw.predicate("R_B_B",4) 
ltnw.predicate("R_A_B",4)


ltnw.axiom("forall ?data, ?data_2: (A(?data) & A(?data_2)) -> R_A_A(?data,?data_2)")
ltnw.axiom("forall ?data, ?data_2: R_A_A(?data,?data_2) -> (A(?data) & A(?data_2))")

ltnw.axiom("forall ?data, ?data_2: (B(?data) & B(?data_2)) -> R_B_B(?data,?data_2)")
ltnw.axiom("forall ?data, ?data_2: R_B_B(?data,?data_2) -> (B(?data) & B(?data_2))")

ltnw.axiom("forall ?data, ?data_2: (A(?data) & B(?data_2)) -> R_A_B(?data,?data_2)")
ltnw.axiom("forall ?data, ?data_2: R_A_B(?data,?data_2) -> (A(?data) & B(?data_2))")

ltnw.initialize_knowledgebase(initial_sat_level_threshold=.1)
sat_level=ltnw.train(track_sat_levels=1000,sat_level_epsilon=.99,max_epochs=epochs)

plt.figure(figsize=(12,8))
plt.subplot(2,2,1)
plt.title("data A/B")
plt.scatter(data_A[:,0],data_A[:,1],c="red",alpha=1.,label="A")
plt.scatter(data_B[:,0],data_B[:,1],c="blue",alpha=1.,label="B")
plt.legend()

idx=2
for pred in ["R_A_A","R_A_B","R_B_B"]:
    result_A_A=ltnw.ask("%s(?data_A,?data_A_2)" % pred)
    result_A_B=ltnw.ask("%s(?data_A,?data_B)" % pred)
    result_B_B=ltnw.ask("%s(?data_B,?data_B_2)" % pred)
    plt.subplot(2,2,idx)
    idx+=1
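    # Hypothetical completion (assumed): compare the distributions of truth
    # values the relation assigns to A-A, A-B and B-B pairs.
    plt.hist(result_A_A.flatten(), alpha=.5, label="A,A")
    plt.hist(result_A_B.flatten(), alpha=.5, label="A,B")
    plt.hist(result_B_B.flatten(), alpha=.5, label="B,B")
    plt.title(pred)
    plt.legend()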
Example #11
ltnw.predicate("A", 2)
ltnw.predicate("B", 2)

ltnw.variable("?data_A", data_A)
ltnw.variable("?data_B", data_B)
ltnw.variable("?data", data)

ltnw.axiom("forall ?data_A: A(?data_A)")
ltnw.axiom("forall ?data_B: B(?data_B)")

ltnw.axiom("forall ?data: A(?data) -> ~B(?data)")
ltnw.axiom("forall ?data: ~B(?data) -> A(?data)")

ltnw.initialize_knowledgebase(initial_sat_level_threshold=.1)
sat_level = ltnw.train(max_epochs=max_epochs,
                       track_sat_levels=track_sat_levels)

plt.figure(figsize=(10, 8))
result = ltnw.ask("A(?data)")
plt.subplot(2, 2, 1)
plt.title("A(x) - training")
plt.scatter(data[:, 0], data[:, 1], c=result.squeeze())
plt.colorbar()

result = ltnw.ask("B(?data)")
plt.subplot(2, 2, 2)
plt.title("B(x) - training")
plt.scatter(data[:, 0], data[:, 1], c=result.squeeze())
plt.colorbar()

data_test = np.random.uniform([0, 0], [1., 1.],