Beispiel #1
0
    def testSimplePredicateOptimization(self):
        """Train a single predicate A on two spatially separated point sets
        and check that the optimized knowledgebase satisfies the axioms and
        simple ground queries above a 0.8 truth level."""
        nr_samples = 100

        # positive examples in [0,.1]x[0,1], negative examples in [2,3]x[0,1]
        ltnw.variable("?data_A", numpy.random.uniform([0., 0.], [.1, 1.], (nr_samples, 2)).astype("float32"))
        ltnw.variable("?data_not_A", numpy.random.uniform([2., 0], [3., 1.], (nr_samples, 2)).astype("float32"))

        ltnw.predicate("A", 2)

        ltnw.axiom("forall ?data_A: A(?data_A)")
        ltnw.axiom("forall ?data_not_A: ~A(?data_not_A)")

        ltnw.initialize_knowledgebase(initial_sat_level_threshold=.1)
        sat_level = ltnw.train(track_sat_levels=10000, sat_level_epsilon=.99)

        self.assertGreater(sat_level, .8)

        # ground query points for the trained predicate
        ltnw.constant("a", [0.5, 0.5])
        ltnw.constant("b", [2.5, 0.5])

        self.assertGreater(ltnw.ask("A(a)")[0], .8)
        self.assertGreater(ltnw.ask("~A(b)")[0], .8)

        # ask_m evaluates several formulas at once; each result must agree
        # with the individual queries above
        result = ltnw.ask_m(["A(a)", "~A(b)"])

        for r in result:
            # fixed: the original asserted the same condition twice in a row
            self.assertGreater(r[0], .8)
Beispiel #2
0
    def testBuildFormula(self):
        """Exercise _parse_formula/_build_formula on atoms, negation, binary
        connectives and (nested) quantifiers; undeclared symbols must raise.

        Fixes over the original: removed two exact duplicate assertions,
        and unwrapped an assertRaises body that was needlessly passed
        through assertIsNotNone.
        """
        data = numpy.random.uniform([-1, -1], [1, 1], (500, 2),).astype(numpy.float32)
        ltnw.constant("c", [1., 0])
        ltnw.variable("?var", data)
        ltnw.variable("?var2", data)
        ltnw.function("f", 2, fun_definition=lambda d: d[:, :2])
        ltnw.function("g", 4, fun_definition=lambda d: d)
        ltnw.predicate("P", 2)
        ltnw.predicate("B", 2)
        ltnw.predicate("REL", 4)

        # atoms over constants, variables and function terms
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("P(c)")))
        with self.assertRaises(Exception):
            ltnw._build_formula(ltnw._parse_formula("P(d)"))  # d not declared
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("P(?var)")))
        with self.assertRaises(Exception):
            ltnw._build_formula(ltnw._parse_formula("P(?vars)"))  # ?vars not declared
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("P(f(?var))")))
        with self.assertRaises(Exception):
            ltnw._build_formula(ltnw._parse_formula("P(h(?var))")) # h not declared

        # negation
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("~P(?var)")))
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("~P(f(?var))")))

        # arity-4 relation over variables and nested function terms
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("~REL(?var,?var2)")))
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("~REL(?var,f(g(?var2)))")))
        with self.assertRaises(Exception):
            ltnw._build_formula(ltnw._parse_formula("~REL(f(?var))"))

        # every binary connective, with negation on either side
        for op in ["&", "|", "->"]:
            self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("P(?var) %s ~ P(?var)" % op)))
            self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("~P(?var) %s P(?var)" % op)))
            self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("~P(?var) %s ~P(?var)" % op)))

        # random orderings of the three connectives within one formula
        for _ in range(10):
            self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("P(?var) %s ~P(?var)%s ~P(?var)%s P(?var)" % tuple(numpy.random.permutation(["&", "|", "->"])))))

        # universal quantification, including multiple bound variables
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("forall ?var: P(?var) & ~ P(?var)")))
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("forall ?var,?var2: P(?var) & ~ P(?var2)")))
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("P(c) & (forall ?var,?var2: P(?var) & ~ P(?var2))")))

        # existential quantification
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("exists ?var: P(?var) & ~ P(?var)")))
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("exists ?var,?var2: P(?var) & ~ P(?var2)")))

        # nested quantifiers, including re-binding ?var in an inner scope
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("forall ?var: (exists ?var2: P(?var) & ~ P(?var2))")))
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("(forall ?var: (exists ?var2: (P(?var) & P(?var2) & (forall ?var: P(?var)))))")))

        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("P(c) | P(?var)")))
Beispiel #3
0
    def testSimplePredicate(self):
        """A predicate with an explicit (untrained) definition:
        A(x) = exp(-||x - mu||), which evaluates to exactly 1 at x == mu."""
        import tensorflow

        nr_samples = 100

        # constant "a" is placed exactly at mu, so A(a) == exp(0) == 1
        ltnw.constant("a", [2., 3.])
        ltnw.variable("?data_A", numpy.random.uniform([0., 0.], [.1, 1.], (nr_samples, 2)).astype("float32"))

        mu = tensorflow.constant([2., 3.])
        # fixed: removed the stray trailing semicolon from the original line
        ltnw.predicate("A", 2, pred_definition=lambda x: tensorflow.exp(-tensorflow.norm(tensorflow.subtract(x, mu), axis=1)))

        self.assertEqual(ltnw.ask("A(a)"), 1.)
        # any point gives a strictly positive truth value under exp(-norm)
        self.assertGreater(ltnw.ask("forall ?data_A: A(?data_A)"), 0.)
Beispiel #4
0
    def testBuildTerm(self):
        """_build_term must resolve declared constants/variables/functions
        and raise on undeclared symbols."""
        data = numpy.random.uniform([-1, -1], [1, 1], (500, 2),).astype(numpy.float32)
        ltnw.constant("c", 2)
        ltnw.variable("?var", data)
        ltnw.function("f", 2, fun_definition=lambda d: d)
        ltnw.function("g", 4, fun_definition=lambda d: d)

        # declared symbols resolve to the stored objects
        self.assertEqual(ltnw.constant("c"), ltnw._build_term('c'))
        self.assertEqual(ltnw.variable("?var"), ltnw._build_term('?var'))

        # (nested) function application terms
        self.assertIsNotNone(ltnw._build_term(['f', ['?var']]))
        self.assertIsNotNone(ltnw._build_term(['f', [['f', ['?var']]]]))

        with self.assertRaises(Exception):
            ltnw._build_term(['h', ['?var']]) # h not declared
        with self.assertRaises(Exception):
            # fixed: the original wrapped this call in self.assertRaises(...),
            # passing the (raising) call's result instead of letting the
            # context manager catch the exception
            ltnw._build_term(['g', ['?vars']]) # ?vars not declared
Beispiel #5
0
# defining an equal predicate based on the euclidian distance of two vectors
# NOTE(review): ltnl.equal_euclidian, train_X/train_Y, learning_rate, epochs,
# W, b, start, end and testing_size are defined earlier in this file.
ltnw.predicate("eq", 2, ltnl.equal_euclidian)

# defining the theory
# one axiom eq(f(x_i), y_i) per training point -- presumably x_i/y_i are
# constants declared from train_X/train_Y above; confirm against that section
for f in ["eq(f(x_%s),y_%s)" % (i, i) for i in range(len(train_X))]:
    ltnw.axiom(f)
print("\n".join(sorted(ltnw.AXIOMS.keys())))

# initializing knowledgebase and optimizing
ltnw.initialize_knowledgebase(optimizer=tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate))
ltnw.train(max_epochs=epochs)

# visualize results on training data
ltnw.variable("?x", 1)
prediction = ltnw.ask("f(?x)",
                      feed_dict={"?x": train_X.reshape(len(train_X), 1)})
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(train_X, train_Y, 'bo', label='Training data', color="black")
# fitted line drawn from the learned session values of W and b
plt.plot(train_X,
         ltnw.SESSION.run(W) * train_X + ltnw.SESSION.run(b),
         label='Fitted line')
plt.plot(train_X, prediction, 'bo', label='prediction', color="red")
plt.legend()

# generate test data and visualize regressor results
test_X = np.random.uniform(start, end, (testing_size)).astype("float32")
prediction = ltnw.ask("f(?x)",
                      feed_dict={"?x": test_X.reshape(len(test_X), 1)})
Beispiel #6
0
data = np.concatenate(clusters)

# all pairs (i, j) with j >= i whose squared distance is below .5^2,
# stored as the concatenation of both points' features
close_data = np.array([
    np.concatenate([data[i], data[j]]) for i in range(len(data))
    for j in range(i, len(data))
    if np.sum(np.square(data[i] - data[j])) < np.square(.5)
])

# subsample 1000 close pairs.
# fixed: the original used np.random.random_integers(0, len(data), 1000),
# which is deprecated, includes the high bound, and draws indices from
# data's length while indexing close_data -- an out-of-range access risk.
close_data = close_data[np.random.randint(0, len(close_data), 1000)]
distant_data = np.array([
    np.concatenate([data[i], data[j]]) for i in range(len(data))
    for j in range(len(data))
    if np.sum(np.square(data[i] - data[j])) > np.square(1.)
])

# defining the language
ltnw.variable("?x", data)
ltnw.variable("?y", data)
ltnw.variable("?close_x_y", close_data)
ltnw.variable("?distant_x_y", distant_data)
# fixed: plain loop instead of a list comprehension run only for side effects
for i in range(nr_of_clusters):
    ltnw.predicate("C_" + str(i), 2)

# project a pair back onto its first/second point
ltnw.function("first", 2, fun_definition=lambda d: d[:, :2])
ltnw.function("second", 2, fun_definition=lambda d: d[:, 2:])

print("defining the theory T")
# every point belongs to at least one cluster
ltnw.axiom("forall ?x: %s" %
           "|".join(["C_%s(?x)" % i for i in range(nr_of_clusters)]))
for i in range(nr_of_clusters):
    ltnw.axiom("exists ?x: C_%s(?x)" % i)
    ltnw.axiom(
        "forall ?close_x_y: C_%s(first(?close_x_y)) %% C_%s(second(?close_x_y))"
Beispiel #7
0
                                        names_of_classes=cat_materials,
                                        device=device)
# classifier category for horizontal relations between object pairs
# (2 * num_of_features: both objects' features concatenated -- TODO confirm)
cat_horizontal = ['Right', 'Left']
Category_Horizontal = ltnw.class_category(class_label='Horizontal',
                                          number_of_features=2 *
                                          num_of_features,
                                          names_of_classes=cat_horizontal,
                                          device=device)
cat_vertical = ['Front', 'Behind']
Category_Vertical = ltnw.class_category(class_label='Vertical',
                                        number_of_features=2 * num_of_features,
                                        names_of_classes=cat_vertical,
                                        device=device)

# Object Variables Placeholders
ltnw.variable('?obj', torch.zeros(1, num_of_features))
ltnw.variable('?obj_2', torch.zeros(1, num_of_features))
# per color: an MLP predicate, positive (?is_) and negative (?isnot_)
# example variables, and the axioms tying them together
for i, feat in enumerate(obj_colors):
    ltnw.mlp_predicate(label=feat.capitalize(), class_category=Category_Color)
    ltnw.variable('?is_' + feat, torch.zeros(1, num_of_features,
                                             device=device))
    ltnw.axiom('forall ?is_' + feat + ' : ' + feat.capitalize() + '(?is_' +
               feat + ')')
    ltnw.variable('?isnot_' + feat,
                  torch.zeros(1, num_of_features, device=device))
    ltnw.axiom('forall ?isnot_' + feat + ' : ~' + feat.capitalize() +
               '(?isnot_' + feat + ')')
# same pattern for sizes (the loop body continues past this excerpt)
for i, feat in enumerate(obj_sizes):
    ltnw.mlp_predicate(label=feat.capitalize(), class_category=Category_Size)
    ltnw.variable('?is_' + feat, torch.zeros(1, num_of_features,
                                             device=device))
# generate data: two tight Gaussian blobs with random means in [-1,1]^2
nr_of_clusters = 2
nr_of_points_x_cluster = 50
clusters = []
for i in range(nr_of_clusters):
    mean = np.random.uniform([-1, -1], [1, 1], 2).astype(np.float32)
    cov = np.array([[.001, 0], [0, .001]])
    clusters.append(
        np.random.multivariate_normal(mean=mean,
                                      cov=cov,
                                      size=nr_of_points_x_cluster).astype(
                                          np.float32))
data = np.concatenate(clusters)

# define the language
ltnw.variable("?x", data)
ltnw.variable("?y", data)
ltnw.predicate("close", 2, ltnl.equal_euclidian)
# fixed: plain loop instead of a list comprehension run only for side effects
for i in range(nr_of_clusters):
    ltnw.predicate("C_" + str(i), 2)

## define the theory
print("defining the theory T")
# every point belongs to at least one cluster
ltnw.axiom("forall ?x: %s" %
           "|".join(["C_%s(?x)" % i for i in range(nr_of_clusters)]))
for i in range(nr_of_clusters):
    # each cluster is non-empty; close points share a cluster;
    # distant points belong to some other cluster
    ltnw.axiom("exists ?x: C_%s(?x)" % i)
    ltnw.axiom("forall ?x,?y: (C_%s(?x) & close(?x,?y)) -> C_%s(?y)" % (i, i))
    ltnw.axiom(
        "forall ?x,?y: (C_%s(?x) & ~close(?x,?y)) -> (%s)" %
        (i, "|".join(["C_%s(?y)" % j
                      for j in range(nr_of_clusters) if i != j])))
# -*- coding: utf-8 -*-
# fixed: one statement per line (the original chained the import and the
# basicConfig call with a semicolon)
import logging
logging.basicConfig(level=logging.INFO)

import numpy as np
import matplotlib.pyplot as plt

import logictensornetworks_wrapper as ltnw

nr_samples = 500

# uniform 2D points in the unit square; A = points inside the circle of
# squared radius .09 around (.5,.5), not_A = the complement
data = np.random.uniform([0, 0], [1., 1.], (nr_samples, 2)).astype(np.float32)
data_A = data[np.where(np.sum(np.square(data - [.5, .5]), axis=1) < .09)]
data_not_A = data[np.where(np.sum(np.square(data - [.5, .5]), axis=1) >= .09)]

ltnw.variable("?data_A", data_A)
ltnw.variable("?data_not_A", data_not_A)
ltnw.variable("?data", data)

ltnw.predicate("A", 2)

# positive and negative examples for A
ltnw.axiom("forall ?data_A: A(?data_A)")
ltnw.axiom("forall ?data_not_A: ~A(?data_not_A)")

ltnw.initialize_knowledgebase(initial_sat_level_threshold=.1)
sat_level = ltnw.train(track_sat_levels=1000, sat_level_epsilon=.99)

# visualize the learned predicate over all data points
plt.figure(figsize=(12, 8))
result = ltnw.ask("A(?data)")
plt.subplot(2, 2, 1)
plt.scatter(data[:, 0], data[:, 1], c=result.squeeze())
plt.colorbar()
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

import logictensornetworks_wrapper as ltnw

import spatial_relations_data

# generate artificial data
nr_examples = 50 # positive and negative examples for each predicate
nr_test_examples=400

# 1) define the language and examples
ltnw.predicate("Left",8)
ltnw.variable("?left_xy",8)
ltnw.variable("?not_left_xy", 8)
ltnw.axiom("forall ?left_xy: Left(?left_xy)")
ltnw.axiom("forall ?not_left_xy: ~Left(?not_left_xy)")


ltnw.predicate("Right",8)
ltnw.variable("?right_xy",8)
ltnw.variable("?not_right_xy",8)
ltnw.axiom("forall ?right_xy: Right(?right_xy)")
ltnw.axiom("forall ?not_right_xy: ~Right(?not_right_xy)")

ltnw.predicate("Below",8)
ltnw.variable("?below_xy",8)
ltnw.variable("?not_below_xy",8)
ltnw.axiom("forall ?below_xy: Below(?below_xy)")
Beispiel #11
0
    not_obj_attr[feat] = [x for x in full_obj_set if x[i] == 0]
    not_obj_attr[feat] = random.sample(
        not_obj_attr[feat], min(len(obj_attr[feat]), len(not_obj_attr[feat])))

##################
### Set Up LTN ###
##################
time_diff = time.time() - start_time
print('Time to complete : ', time_diff)
start_time = time.time()
print('******* Setting up LTN ******')

# Object Constants/Variables
#for i in range(len(full_obj_set)):
#    ltnw.constant('object'+str(i),full_obj_set[i])
ltnw.variable('?obj', full_obj_set)
ltnw.variable('?obj_2', full_obj_set)
# positive (?is_) and negative (?isnot_) example variables per feature,
# built from the attribute samples computed above
for i, feat in enumerate(obj_feat):
    ltnw.variable('?is_' + feat, obj_attr[feat])
    ltnw.variable('?isnot_' + feat, not_obj_attr[feat])
# pair variables: concatenated features of both objects in each
# spatial-relation pair (right/left/front/behind)
ltnw.variable('?right_pair',
              [full_obj_set[p[0]] + full_obj_set[p[1]] for p in right_pairs])
ltnw.variable('?left_pair',
              [full_obj_set[p[0]] + full_obj_set[p[1]] for p in left_pairs])
ltnw.variable('?front_pair',
              [full_obj_set[p[0]] + full_obj_set[p[1]] for p in front_pairs])
ltnw.variable('?behind_pair',
              [full_obj_set[p[0]] + full_obj_set[p[1]] for p in behind_pairs])

time_diff = time.time() - start_time
print('Time to complete : ', time_diff)
import logging
logger = logging.getLogger()
# fixed: the original assigned logging.basicConfig's return value (None) to
# logger.basicConfig; calling basicConfig once configures the root logger
logging.basicConfig(level=logging.DEBUG)
import numpy as np
import matplotlib.pyplot as plt
import torch

import logictensornetworks_wrapper as ltnw

nr_samples=500

# points in the unit square; A = inside the circle of squared radius .09
# around (.5,.5), B = the complement
data=np.random.uniform([0,0],[1.,1.],(nr_samples,2)).astype(np.float32)
data_A=data[np.where(np.sum(np.square(data-[.5,.5]),axis=1)<.09)]
data_B=data[np.where(np.sum(np.square(data-[.5,.5]),axis=1)>=.09)]

ltnw.variable("?data",data)
ltnw.variable("?data_A",data_A)
ltnw.variable("?data_B",data_B)

ltnw.predicate("A",2)
ltnw.predicate("B",2)

# positive/negative examples for both predicates
ltnw.axiom("forall ?data_A: A(?data_A)")
ltnw.axiom("forall ?data_B: ~A(?data_B)")

ltnw.axiom("forall ?data_B: B(?data_B)")
ltnw.axiom("forall ?data_A: ~B(?data_A)")

# A and B are mutually exclusive
ltnw.axiom("forall ?data: A(?data) -> ~B(?data)")
ltnw.axiom("forall ?data: B(?data) -> ~A(?data)")
Beispiel #13
0
ltn.BIAS_factor = -1e-8
ltn.set_universal_aggreg("mean")  # The truth value of forall x p(x) is
# interpretable as the percentage of
# elements in the range of x that satisfy p

embedding_size = 10  # each constant is interpreted as a vector of this size

# create one constant for each individual a,b,... i,j, ...
for l in 'abcdefghijklmn':
    ltnw.constant(l,
                  min_value=[0.] * embedding_size,
                  max_value=[1.] * embedding_size)

# create variables that range over all the individuals, and over the individuals of group 1 and group 2.

ltnw.variable("p", tf.concat(list(ltnw.CONSTANTS.values()), axis=0))
ltnw.variable("q", ltnw.VARIABLES["p"])
ltnw.variable("p1", tf.concat([ltnw.CONSTANTS[l] for l in "abcdefgh"], axis=0))
ltnw.variable("q1", ltnw.VARIABLES["p1"])
ltnw.variable("p2", tf.concat([ltnw.CONSTANTS[l] for l in "ijklmn"], axis=0))
ltnw.variable("q2", ltnw.VARIABLES["p2"])

# declare the predicates; Friends takes a pair, hence twice the embedding size
ltnw.predicate('Friends', embedding_size * 2)
ltnw.predicate('Smokes', embedding_size)
ltnw.predicate('Cancer', embedding_size)

# add the assertional knowledge in our possession

ltnw.axiom("Friends(a,b)")
ltnw.axiom("~Friends(a,c)")
Beispiel #14
0
import logging
logger = logging.getLogger()
# fixed: the original assigned logging.basicConfig's return value (None) to
# logger.basicConfig; calling basicConfig once configures the root logger
logging.basicConfig(level=logging.DEBUG)
import numpy as np
import matplotlib.pyplot as plt

import logictensornetworks_wrapper as ltnw

nr_samples=5
epochs=10000

# two disjoint vertical strips of the unit square: A on the left quarter,
# B on the right quarter
data_A=np.random.uniform([0,0],[.25,1.],(nr_samples,2)).astype(np.float32)
data_B=np.random.uniform([.75,0],[1.,1.],(nr_samples,2)).astype(np.float32)
data=np.concatenate([data_A,data_B])

ltnw.variable("?data_A",data_A)
ltnw.variable("?data_A_2",data_A)
ltnw.variable("?data_B",data_B)
ltnw.variable("?data_B_2",data_B)
ltnw.variable("?data",data)
ltnw.variable("?data_1",data)
ltnw.variable("?data_2",data)

ltnw.predicate("A",2)
ltnw.predicate("B",2)

# positive/negative examples for both predicates
ltnw.axiom("forall ?data_A: A(?data_A)")
ltnw.axiom("forall ?data_B: ~A(?data_B)")

ltnw.axiom("forall ?data_B: B(?data_B)")
ltnw.axiom("forall ?data_A: ~B(?data_A)")
Beispiel #15
0
    ##################
    # time_diff = time.time()-start_time
    # print('Time to complete : ', time_diff)
    # start_time = time.time()
    # print('******* Setting up LTN ******')

    num_of_features = len(
        full_obj_set[0]
    )  # =512 (output of resnet-32 layer3 for whole image (256) + object (256))

    # Object Constants/Variables
    #for i in range(len(full_obj_set)):
    #    ltnw.constant('object'+str(i),full_obj_set[i])

    # 'verbose' argument is used to bypass the variable redeclare warning message
    ltnw.variable('?obj', torch.stack(full_obj_set).to(device), verbose=False)
    ltnw.variable('?obj_2',
                  torch.stack(full_obj_set).to(device),
                  verbose=False)
    # positive (?is_) and negative (?isnot_) example variables per feature
    for i, feat in enumerate(obj_feat):
        ltnw.variable('?is_' + feat,
                      torch.stack(obj_attr[feat]).to(device),
                      verbose=False)
        ltnw.variable('?isnot_' + feat,
                      torch.stack(not_obj_attr[feat]).to(device),
                      verbose=False)
    ltnw.variable('?right_pair',
                  torch.stack([
                      torch.cat([full_obj_set[p[0]], full_obj_set[p[1]]])
                      for p in right_pairs
                  ]).to(device),
Beispiel #16
0
 def testVariable(self):
     """Declared variables are cached and returned on later name-only lookups."""
     first = ltnw.variable("?var1", [1, 2, 3])
     second = ltnw.variable("?var2", 2)
     self.assertEqual(ltnw.variable("?var1"), first)
     self.assertEqual(ltnw.variable("?var2"), second)