Example #1
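# Assumed context (a sketch; this listing shows only the test methods): the
# methods in Examples #1 and #2 appear to come from a unittest.TestCase, with
# imports along these lines (the module path for ltnw is an assumption):
import unittest

import numpy
import logictensornetworks_wrapper as ltnw


class TestSimplePredicates(unittest.TestCase):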
    def testSimplePredicateOptimization(self):
        nr_samples=100

        ltnw.variable("?data_A",numpy.random.uniform([0.,0.],[.1,1.],(nr_samples,2)).astype("float32"))
        ltnw.variable("?data_not_A",numpy.random.uniform([2.,0],[3.,1.],(nr_samples,2)).astype("float32"))
        
        ltnw.predicate("A",2)
        
        ltnw.axiom("forall ?data_A: A(?data_A)")
        ltnw.axiom("forall ?data_not_A: ~A(?data_not_A)")
        
        ltnw.initialize_knowledgebase(initial_sat_level_threshold=.1)
        sat_level=ltnw.train(track_sat_levels=10000,sat_level_epsilon=.99)
        
        self.assertGreater(sat_level,.8)
            
        ltnw.constant("a",[0.5,0.5])
        ltnw.constant("b",[2.5,0.5])
        
        self.assertGreater(ltnw.ask("A(a)")[0],.8)
        self.assertGreater(ltnw.ask("~A(b)")[0],.8)

        result=ltnw.ask_m(["A(a)","~A(b)"])
        
        for r in result:
            self.assertGreater(r[0],.8)
Example #2
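# Assumed context (a sketch): a further method of the same assumed
# unittest.TestCase as in Example #1, with numpy and ltnw imported at module level.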
    def testSimplePredicate(self):
        import tensorflow
        
        nr_samples=100

        ltnw.constant("a",[2.,3.])
        ltnw.variable("?data_A",numpy.random.uniform([0.,0.],[.1,1.],(nr_samples,2)).astype("float32"))
        
        mu=tensorflow.constant([2.,3.])
        ltnw.predicate("A",2,pred_definition=lambda x: tensorflow.exp(-tensorflow.norm(tensorflow.subtract(x,mu),axis=1)));       
        
        self.assertEqual(ltnw.ask("A(a)"),1.)
        self.assertGreater(ltnw.ask("forall ?data_A: A(?data_A)"),0.)
Example #3
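# Assumed context (a sketch; the snippet starts mid-script): the LTN linear
# regression example. Hyperparameters, training data and the learnable function
# f are assumed to be set up roughly as below; exact names and values are
# assumptions inferred from their usage in the snippet, not shown on this page.
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import logictensornetworks_wrapper as ltnw
import logictensornetworks_library as ltnl

start, end = -1., 1.
training_size, testing_size = 10, 10
learning_rate, epochs = 0.01, 1000
slope, var = 1., 0.1

# linear data with gaussian noise
train_X = np.random.uniform(start, end, (training_size)).astype("float32")
train_Y = slope * train_X + np.random.normal(scale=var, size=len(train_X))

# f(x) = W*x + b with trainable parameters
W = tf.Variable(np.random.randn(), name="weight")
b = tf.Variable(np.random.randn(), name="bias")
ltnw.function("f", 1, 1, fun_definition=lambda X: X * W + b)

# one constant per training example
for i, (x, y) in enumerate(zip(train_X, train_Y)):
    ltnw.constant("x_%s" % i, [x])
    ltnw.constant("y_%s" % i, [y])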
# defining an equal predicate based on the euclidean distance of two vectors
ltnw.predicate("eq", 2, ltnl.equal_euclidian)

# defining the theory
for f in ["eq(f(x_%s),y_%s)" % (i, i) for i in range(len(train_X))]:
    ltnw.axiom(f)
print("\n".join(sorted(ltnw.AXIOMS.keys())))

# initializing knowledgebase and optimizing
ltnw.initialize_knowledgebase(optimizer=tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate))
ltnw.train(max_epochs=epochs)

# visualize results on training data
ltnw.variable("?x", 1)
prediction = ltnw.ask("f(?x)",
                      feed_dict={"?x": train_X.reshape(len(train_X), 1)})
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(train_X, train_Y, 'o', label='Training data', color="black")
plt.plot(train_X,
         ltnw.SESSION.run(W) * train_X + ltnw.SESSION.run(b),
         label='Fitted line')
plt.plot(train_X, prediction, 'o', label='prediction', color="red")
plt.legend()

# generate test data and visualize regressor results
test_X = np.random.uniform(start, end, (testing_size)).astype("float32")
prediction = ltnw.ask("f(?x)",
                      feed_dict={"?x": test_X.reshape(len(test_X), 1)})
test_Y = slope * test_X + np.random.normal(scale=var, size=len(test_X))
plt.subplot(1, 2, 2)
Example #4
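# Assumed context (a sketch; the snippet starts mid-loop): the LTN clustering
# example. 2D data points, pair variables, helper functions first/second that
# project a pair onto its components, and one predicate C_i per cluster are
# assumed to be declared earlier, roughly:
# ltnw.variable("?x", data)                    # all points
# ltnw.variable("?close_x_y", close_pairs)     # concatenated nearby pairs
# ltnw.variable("?distant_x_y", distant_pairs) # concatenated far-apart pairs
# for i in range(nr_of_clusters):
#     ltnw.predicate("C_%s" % i, 2)
# In the formulas below, "%%" escapes to "%", which appears to be the
# equivalence operator of the ltnw formula parser (an assumption).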
        "forall ?close_x_y: C_%s(first(?close_x_y)) %% C_%s(second(?close_x_y))"
        % (i, i))
    ltnw.axiom(
        "forall ?distant_x_y: C_%s(first(?distant_x_y)) %% C_%s(second(?distant_x_y))"
        % (i, i))
    for j in range(i + 1, nr_of_clusters):
        ltnw.axiom("forall ?x: ~(C_%s(?x) & C_%s(?x))" % (i, j))
print("%s" % "\n".join(ltnw.AXIOMS.keys()))

# initialize the knowledgebase and train
ltnw.initialize_knowledgebase(optimizer=torch.optim.RMSprop,
                              initial_sat_level_threshold=.5)
ltnw.train(max_epochs=200)

# retrieve the truth values for all samples and all clusters, i.e. check membership
prC = [ltnw.ask("C_%s(?x)" % i) for i in range(nr_of_clusters)]
n = 2
m = (nr_of_clusters + 1) // n + 1

fig = plt.figure(figsize=(3 * 3, m * 3))

fig.add_subplot(m, n, 1)
plt.title("groundtruth")
for c in clusters:
    plt.scatter(c[:, 0], c[:, 1])
data = np.concatenate(clusters)
x0 = data[:, 0]
x1 = data[:, 1]
for i in range(nr_of_clusters):
    fig.add_subplot(m, n, i + 2)
    plt.title("C" + str(i) + "(?x)")
    plt.scatter(x0, x1, c=prC[i].squeeze())  # (assumed: color points by membership in C_i)
data_not_A=data[np.where(np.sum(np.square(data-[.5,.5]),axis=1)>=.09)]

ltnw.variable("?data_A",data_A)
ltnw.variable("?data_not_A",data_not_A)
ltnw.variable("?data",data)

ltnw.predicate("A",2)

ltnw.axiom("forall ?data_A: A(?data_A)")
ltnw.axiom("forall ?data_not_A: ~A(?data_not_A)")

ltnw.initialize_knowledgebase(initial_sat_level_threshold=.1)
sat_level=ltnw.train(track_sat_levels=1000,sat_level_epsilon=.99)
    
plt.figure(figsize=(12,8))
result=ltnw.ask("A(?data)")
plt.subplot(2,2,1)
plt.scatter(data[:,0],data[:,1],c=result.squeeze())
plt.colorbar()
plt.title("A(x) - training data")

result=ltnw.ask("~A(?data)")
plt.subplot(2,2,2)
plt.scatter(data[:,0],data[:,1],c=result.squeeze())
plt.colorbar()
plt.title("~A(x) - training data")

data_test=np.random.uniform([0,0],[1.,1.],(500,2)).astype(np.float32)
ltnw.variable("?data_test",data_test)
result=ltnw.ask("A(?data_test)")
plt.subplot(2,2,3)
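Example #6

# Assumed context (a sketch; steps 1)-3) of the original script are not shown):
# the LTN spatial relations example. A data module spatial_relations_data,
# binary predicates Left, Right, Above, Below, Contains and Contained_in over
# rectangle pairs, axioms relating them, and the counts nr_examples and
# nr_test_examples are assumed to be declared earlier.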
            "?z" : spatial_relations_data.generate_rectangles(nr_examples)}

# 4) train the model
ltnw.initialize_knowledgebase(feed_dict=feed_dict,
                              optimizer=tf.train.AdamOptimizer(0.05),
                              formula_aggregator=lambda *x: tf.reduce_min(tf.concat(x,axis=0)))
ltnw.train(feed_dict=feed_dict,max_epochs=10000)

# 5) evaluate the truth of formulas not given directly to the model
for f in ["forall ?x,?y,?z: Contained_in(?x,?y) -> (Left(?y,?z)->Left(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Right(?y,?z)->Right(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Above(?y,?z)->Above(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Below(?y,?z)->Below(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Contains(?y,?z)->Contains(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Contained_in(?y,?z)->Contained_in(?x,?z))"]:
    print("%s: %s" % (f,ltnw.ask(f,feed_dict=feed_dict)))

# 6) plot truth values of some example formulas P(ct,?t), where ct is a central
# rectangle and ?t ranges over randomly generated rectangles
ltnw.constant("ct",[.5,.5,.3,.3])
test_data=spatial_relations_data.generate_rectangles(nr_test_examples)
ltnw.variable("?t",test_data)

fig = plt.figure(figsize=(12,8))
jet = cm = plt.get_cmap('jet')
cbbst = test_data[:,:2] + 0.5*test_data[:,2:]  # rectangle centers (assuming an [x, y, w, h] encoding)
for j,p in enumerate(["Left","Right","Above","Below","Contains","Contained_in"]):
    plt.subplot(2, 3, j + 1)
    formula="%s(ct,?t)" % p
    plt.title(formula)
    results=ltnw.ask(formula,feed_dict=feed_dict)
Example #7
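# Assumed context (a sketch; the declarations above this point are not shown):
# the classic "smokers and friends" example. Constants for persons a..n,
# variables p (all persons), p1/q1 (group a-h) and p2/q2 (group i-n),
# predicates Smokes, Cancer and Friends with the usual friendship/smoking
# axioms, and imports of numpy as np, pandas as pd, tensorflow as tf,
# matplotlib.pyplot as plt and logictensornetworks_wrapper as ltnw are all
# assumed.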

# initialize knowledge base

ltnw.initialize_knowledgebase(
    optimizer=tf.train.RMSPropOptimizer(learning_rate=0.01, decay=.9),
    # aggregate formulas via their harmonic mean
    formula_aggregator=lambda *x: 1. / tf.reduce_mean(1. / tf.concat(x, axis=0)))

# Train the KB
ltnw.train(max_epochs=10000)

# query the KB and display the results

df_smokes_cancer = pd.DataFrame(np.concatenate(
    [ltnw.ask("Smokes(p)"), ltnw.ask("Cancer(p)")], axis=1),
                                columns=["Smokes", "Cancer"],
                                index=list('abcdefghijklmn'))

df_friends_ah = pd.DataFrame(np.squeeze(ltnw.ask("Friends(p1,q1)")),
                             index=list('abcdefgh'),
                             columns=list('abcdefgh'))
df_friends_in = pd.DataFrame(np.squeeze(ltnw.ask("Friends(p2,q2)")),
                             index=list('ijklmn'),
                             columns=list('ijklmn'))
print(df_smokes_cancer)
print(df_friends_ah)
print(df_friends_in)
plt.figure(figsize=(15, 4))
plt.subplot(131)
plt_heatmap(df_smokes_cancer)
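# plt_heatmap is a helper from the original script (not shown); a minimal
# stand-in could be this sketch:
# def plt_heatmap(df):
#     plt.pcolor(df)
#     plt.yticks(np.arange(len(df.index)) + 0.5, df.index)
#     plt.xticks(np.arange(len(df.columns)) + 0.5, df.columns)
#     plt.colorbar()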
Example #8
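# Assumed context (a sketch; the snippet starts mid-script): two 2D point sets
# data_A and data_B with unary predicates A and B, binary relations R_A_A,
# R_A_B and R_B_B, and an epochs hyperparameter, assumed to be declared
# earlier, roughly:
# ltnw.variable("?data_A", data_A);  ltnw.variable("?data_A_2", data_A)
# ltnw.variable("?data_B", data_B);  ltnw.variable("?data_B_2", data_B)
# ltnw.variable("?data", data);      ltnw.variable("?data_2", data)
# ltnw.predicate("A", 2);  ltnw.predicate("B", 2)
# for r in ["R_A_A", "R_A_B", "R_B_B"]:
#     ltnw.predicate(r, 4)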
ltnw.axiom("forall ?data, ?data_2: (A(?data) & B(?data_2)) -> R_A_B(?data,?data_2)")
ltnw.axiom("forall ?data, ?data_2: R_A_B(?data,?data_2) -> (A(?data) & B(?data_2))")

ltnw.initialize_knowledgebase(initial_sat_level_threshold=.99)
sat_level=ltnw.train(track_sat_levels=1000,sat_level_epsilon=.01,max_epochs=epochs)

plt.figure(figsize=(12,8))
plt.subplot(2,2,1)
plt.title("data A/B")
plt.scatter(data_A[:,0],data_A[:,1],c="red",alpha=1.,label="A")
plt.scatter(data_B[:,0],data_B[:,1],c="blue",alpha=1.,label="B")
plt.legend()

idx=2
for pred in ["R_A_A","R_A_B","R_B_B"]:
    result_A_A=ltnw.ask("%s(?data_A,?data_A_2)" % pred)
    result_A_B=ltnw.ask("%s(?data_A,?data_B)" % pred)
    result_B_B=ltnw.ask("%s(?data_B,?data_B_2)" % pred)
    plt.subplot(2,2,idx)
    idx+=1
    plt.title(pred)
    for i1,d1 in enumerate(data_A):
        for i2,d2 in enumerate(data_A):
            plt.plot([d1[0],d2[0]],[d1[1],d2[1]],alpha=result_A_A[i1,i2,0],c="black")
    for i1,d1 in enumerate(data_A):
        for i2,d2 in enumerate(data_B):
            plt.plot([d1[0],d2[0]],[d1[1],d2[1]],alpha=result_A_B[i1,i2,0],c="black")
    for i1,d1 in enumerate(data_B):
        for i2,d2 in enumerate(data_B):
            plt.plot([d1[0],d2[0]],[d1[1],d2[1]],alpha=result_B_B[i1,i2,0],c="black")
Example #9
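# Assumed context (a sketch; the snippet comes from inside an evaluation loop
# of a PyTorch-based LTN wrapper): full_obj_set is a tensor of per-object
# encodings, front_pairs/behind_pairs are lists of index pairs, device is a
# torch.device, axioms maps each axiom string to the list of its recorded
# truth values, and pbar is a tqdm progress bar.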
    # ('?front_pair' call head reconstructed, assumed to mirror the ?behind_pair call below)
    ltnw.variable('?front_pair',
                  torch.stack([
                      torch.cat([full_obj_set[p[0]], full_obj_set[p[1]]])
                      for p in front_pairs
                  ]).to(device),
                  verbose=False)
    ltnw.variable('?behind_pair',
                  torch.stack([
                      torch.cat([full_obj_set[p[0]], full_obj_set[p[1]]])
                      for p in behind_pairs
                  ]).to(device),
                  verbose=False)

    ## Test the axioms on the freshly declared variables
    with torch.no_grad():
        for a in axioms.keys():
            axioms[a].append(ltnw.ask(a))

    axioms_mean = {k: sum(axioms[k]) / len(axioms[k]) for k in axioms.keys()}
    all_axioms_mean = np.array([axioms_mean[k] for k in axioms_mean]).sum() / len(axioms_mean)
    pbar.set_description("Current Mean : %f" % (all_axioms_mean))
    pbar.update(1)

axioms_mean = {k: sum(axioms[k]) / len(axioms[k]) for k in axioms.keys()}
axioms_min = {k: min(axioms[k]) for k in axioms.keys()}
axioms_max = {k: max(axioms[k]) for k in axioms.keys()}

all_axioms_mean = np.array([axioms_mean[k] for k in axioms_mean]).sum() / len(axioms_mean)

for k in axioms_mean: