def testSimplePredicateOptimization(self):
    """Train predicate A to separate two clusters; check learned truth values."""
    nr_samples = 100
    # ?data_A: points A should accept; ?data_not_A: points A should reject
    ltnw.variable("?data_A", numpy.random.uniform([0., 0.], [.1, 1.], (nr_samples, 2)).astype("float32"))
    ltnw.variable("?data_not_A", numpy.random.uniform([2., 0], [3., 1.], (nr_samples, 2)).astype("float32"))
    ltnw.predicate("A", 2)
    ltnw.axiom("forall ?data_A: A(?data_A)")
    ltnw.axiom("forall ?data_not_A: ~A(?data_not_A)")

    ltnw.initialize_knowledgebase(initial_sat_level_threshold=.1)
    sat_level = ltnw.train(track_sat_levels=10000, sat_level_epsilon=.99)
    self.assertGreater(sat_level, .8)

    # a lies in A's cluster, b in the complement cluster
    ltnw.constant("a", [0.5, 0.5])
    ltnw.constant("b", [2.5, 0.5])
    self.assertGreater(ltnw.ask("A(a)")[0], .8)
    self.assertGreater(ltnw.ask("~A(b)")[0], .8)

    # ask_m evaluates several formulas at once; results should agree with ask()
    result = ltnw.ask_m(["A(a)", "~A(b)"])
    for r in result:
        # fixed: this assertion was duplicated verbatim in the original
        self.assertGreater(r[0], .8)
def testBuildFormula(self):
    """_build_formula accepts well-formed formulas and rejects undeclared symbols."""
    data = numpy.random.uniform([-1, -1], [1, 1], (500, 2),).astype(numpy.float32)
    ltnw.constant("c", [1., 0])
    ltnw.variable("?var", data)
    ltnw.variable("?var2", data)
    ltnw.function("f", 2, fun_definition=lambda d: d[:, :2])
    ltnw.function("g", 4, fun_definition=lambda d: d)
    ltnw.predicate("P", 2)
    ltnw.predicate("B", 2)
    ltnw.predicate("REL", 4)

    # atomic formulas over declared vs. undeclared constants/variables/functions
    self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("P(c)")))
    with self.assertRaises(Exception):
        ltnw._build_formula(ltnw._parse_formula("P(d)"))  # d not declared
    self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("P(?var)")))
    with self.assertRaises(Exception):
        ltnw._build_formula(ltnw._parse_formula("P(?vars)"))  # ?vars not declared
    self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("P(f(?var))")))
    with self.assertRaises(Exception):
        ltnw._build_formula(ltnw._parse_formula("P(h(?var))"))  # h not declared

    # negation
    self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("~P(?var)")))
    self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("~P(f(?var))")))

    # arity-4 relation, including nested function terms and a wrong-arity case
    self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("~REL(?var,?var2)")))
    self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("~REL(?var,f(g(?var2)))")))
    with self.assertRaises(Exception):
        # fixed: original wrapped this in assertIsNotNone inside assertRaises;
        # only the raising call belongs in the context
        ltnw._build_formula(ltnw._parse_formula("~REL(f(?var))"))

    # binary connectives with all negation placements
    # (fixed: the first case was asserted twice verbatim in the original)
    for op in ["&", "|", "->"]:
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("P(?var) %s ~ P(?var)" % op)))
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("~P(?var) %s P(?var)" % op)))
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("~P(?var) %s ~P(?var)" % op)))

    # random orderings of chained connectives
    for _ in range(10):
        ops = tuple(numpy.random.permutation(["&", "|", "->"]))
        self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula(
            "P(?var) %s ~P(?var)%s ~P(?var)%s P(?var)" % ops)))

    # quantifiers: single/multiple variables, nesting, and shadowing
    self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("forall ?var: P(?var) & ~ P(?var)")))
    self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("forall ?var,?var2: P(?var) & ~ P(?var2)")))
    self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("P(c) & (forall ?var,?var2: P(?var) & ~ P(?var2))")))
    self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("exists ?var: P(?var) & ~ P(?var)")))
    self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("exists ?var,?var2: P(?var) & ~ P(?var2)")))
    # fixed: the mixed forall/exists case appeared twice verbatim in the original
    self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("forall ?var: (exists ?var2: P(?var) & ~ P(?var2))")))
    self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("(forall ?var: (exists ?var2: (P(?var) & P(?var2) & (forall ?var: P(?var)))))")))
    self.assertIsNotNone(ltnw._build_formula(ltnw._parse_formula("P(c) | P(?var)")))
def testSimplePredicate(self):
    """A hand-defined predicate must be exactly true at its own centre."""
    import tensorflow
    nr_samples = 100
    ltnw.constant("a", [2., 3.])
    ltnw.variable("?data_A", numpy.random.uniform([0., 0.], [.1, 1.], (nr_samples, 2)).astype("float32"))

    # membership falls off with distance from mu: exp(-||x - mu||)
    mu = tensorflow.constant([2., 3.])
    ltnw.predicate(
        "A", 2,
        pred_definition=lambda x: tensorflow.exp(-tensorflow.norm(tensorflow.subtract(x, mu), axis=1)))

    # a coincides with mu, so the distance is 0 and the truth value is exp(0) == 1 exactly
    self.assertEqual(ltnw.ask("A(a)"), 1.)
    # the universally quantified formula should still have positive truth
    self.assertGreater(ltnw.ask("forall ?data_A: A(?data_A)"), 0.)
def testBuildTerm(self):
    """_build_term resolves constants, variables and declared functions."""
    data = numpy.random.uniform([-1, -1], [1, 1], (500, 2),).astype(numpy.float32)
    ltnw.constant("c", 2)
    ltnw.variable("?var", data)
    ltnw.function("f", 2, fun_definition=lambda d: d)
    ltnw.function("g", 4, fun_definition=lambda d: d)

    # declared symbols resolve to their registered objects
    self.assertEqual(ltnw.constant("c"), ltnw._build_term('c'))
    self.assertEqual(ltnw.variable("?var"), ltnw._build_term('?var'))
    self.assertIsNotNone(ltnw._build_term(['f', ['?var']]))
    self.assertIsNotNone(ltnw._build_term(['f', [['f', ['?var']]]]))

    with self.assertRaises(Exception):
        ltnw._build_term(['h', ['?var']])  # h not declared
    with self.assertRaises(Exception):
        # fixed: original wrote self.assertRaises(ltnw._build_term(...)) here —
        # if _build_term did NOT raise, the bogus one-argument assertRaises call
        # raised a TypeError that the context manager swallowed, so the test
        # could pass without exercising anything
        ltnw._build_term(['g', ['?vars']])  # ?vars not declared
# Hyperparameters for the toy linear-regression experiment.
start = -1
end = 1
training_size = 10
testing_size = 10
learning_rate = 0.01
slope = 1.
var = 0.1
epochs = 1000

# Noisy linear data: y = slope * x + gaussian noise.
train_X = np.random.uniform(start, end, (training_size)).astype("float32")
train_Y = slope * train_X + np.random.normal(scale=var, size=len(train_X))

# Language: one named constant per training example, for both inputs and targets.
for idx, x_val in enumerate(train_X):
    ltnw.constant("x_%s" % idx, [x_val])
for idx, y_val in enumerate(train_Y):
    ltnw.constant("y_%s" % idx, [y_val])

# f is a linear regressor with a trainable weight and bias.
W = tf.Variable(np.random.randn(), name="weight")
b = tf.Variable(np.random.randn(), name="bias")
ltnw.function("f", 1, 1, fun_definition=lambda X: tf.add(tf.multiply(X, W), b))

# An equality predicate based on the euclidian distance of two vectors.
ltnw.predicate("eq", 2, ltnl.equal_euclidian)

# Theory: f(x_i) should equal y_i for every training example.
for axiom_str in ["eq(f(x_%s),y_%s)" % (i, i) for i in range(len(train_X))]:
    ltnw.axiom(axiom_str)
print("\n".join(sorted(ltnw.AXIOMS.keys())))
# Visualize the truth values of A and ~A over the training data.
plt.scatter(data[:, 0], data[:, 1], c=result.squeeze())
plt.colorbar()
plt.title("A(x) - training data")

result = ltnw.ask("~A(?data)")
plt.subplot(2, 2, 2)
plt.scatter(data[:, 0], data[:, 1], c=result.squeeze())
plt.colorbar()
plt.title("~A(x) - training data")

# Evaluate the trained predicate on fresh, unseen samples.
data_test = np.random.uniform([0, 0], [1., 1.], (500, 2)).astype(np.float32)
ltnw.variable("?data_test", data_test)
result = ltnw.ask("A(?data_test)")
plt.subplot(2, 2, 3)
plt.scatter(data_test[:, 0], data_test[:, 1], c=result.squeeze())
plt.colorbar()
# fixed: this subplot's title was set twice with conflicting text
# ("A(x) - test" then "A(x) - test data"); keep the one matching subplot 4
plt.title("A(x) - test data")

result = ltnw.ask("~A(?data_test)")
plt.subplot(2, 2, 4)
plt.scatter(data_test[:, 0], data_test[:, 1], c=result.squeeze())
plt.title("~A(x) - test data")
plt.show()

# Query two individual points: a inside A's region, b outside it.
ltnw.constant("a", [0.25, .5])
ltnw.constant("b", [1., 1.])
print("a is in A: %s" % ltnw.ask("A(a)"))
print("b is in A: %s" % ltnw.ask("A(b)"))
# Initialize and train the knowledgebase. The formula aggregator takes the
# minimum truth value over all axioms, i.e. optimization focuses on the
# least-satisfied constraint.
ltnw.initialize_knowledgebase(feed_dict=feed_dict,
                              optimizer=tf.train.AdamOptimizer(0.05),
                              formula_aggregator=lambda *x: tf.reduce_min(tf.concat(x,axis=0)))
ltnw.train(feed_dict=feed_dict,max_epochs=10000)

# 5) evaluate the truth of formulas not given directly to the model
# (transitivity-style properties relating Contained_in to the other relations)
for f in ["forall ?x,?y,?z: Contained_in(?x,?y) -> (Left(?y,?z)->Left(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Right(?y,?z)->Right(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Above(?y,?z)->Above(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Below(?y,?z)->Below(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Contains(?y,?z)->Contains(?x,?z))",
          "forall ?x,?y,?z: Contained_in(?x,?y) -> (Contained_in(?y,?z)->Contained_in(?x,?z))"]:
    print("%s: %s" % (f,ltnw.ask(f,feed_dict=feed_dict)))

# 6) plot some examples truth values of P(ct,t) where ct is a central rectangle, and
# t is a set of randomly generated rectangles
ltnw.constant("ct",[.5,.5,.3,.3])
test_data=spatial_relations_data.generate_rectangles(nr_test_examples)
ltnw.variable("?t",test_data)
fig = plt.figure(figsize=(12,8))
# NOTE(review): jet/cm appear unused in this chunk — possibly used further
# down the file; confirm before removing
jet = cm = plt.get_cmap('jet')
# assumes rectangles are encoded as [x, y, w, h], so this computes the
# centre point of each test rectangle — TODO confirm encoding
cbbst = test_data[:,:2] + 0.5*test_data[:,2:]
# one subplot per learned spatial relation, coloured by truth value at ct
for j,p in enumerate(["Left","Right","Above","Below","Contains","Contained_in"]):
    plt.subplot(2, 3, j + 1)
    formula="%s(ct,?t)" % p
    plt.title(formula)
    results=ltnw.ask(formula,feed_dict=feed_dict)
    plt.scatter(cbbst[:,0], cbbst[:,1], c=np.squeeze(results))
    plt.colorbar()
plt.show()
# Truth values of A over the training samples (subplot/title set up earlier).
plt.scatter(data[:, 0], data[:, 1], c=result.squeeze())
plt.colorbar()

plt.subplot(2, 2, 2)
result = ltnw.ask("B(?data)")
plt.title("B(x) - training")
plt.scatter(data[:, 0], data[:, 1], c=result.squeeze())
plt.colorbar()

# Fresh samples to probe how the predicates generalize.
data_test = np.random.uniform([0, 0], [1., 1.], (nr_samples, 2)).astype(np.float32)
ltnw.variable("?data_test", data_test)

result = ltnw.ask("A(?data_test)")
plt.subplot(2, 2, 3)
plt.title("A(x) - test")
plt.scatter(data_test[:, 0], data_test[:, 1], c=result.squeeze())
plt.colorbar()

result = ltnw.ask("B(?data_test)")
plt.subplot(2, 2, 4)
plt.title("B(x) -test")
plt.scatter(data_test[:, 0], data_test[:, 1], c=result.squeeze())
plt.colorbar()
plt.show()

# Query membership of two individual points in both predicates.
ltnw.constant("a", [0.5, .5])
ltnw.constant("b", [0.75, .75])
print("a is in A: %s" % ltnw.ask("A(a)"))
print("b is in A: %s" % ltnw.ask("A(b)"))
print("a is in B: %s" % ltnw.ask("B(a)"))
print("b is in B: %s" % ltnw.ask("B(b)"))
# Label the heatmap axes with the dataframe's columns.
plt.xticks(np.arange(0.5, len(df.columns), 1), df.columns)
plt.colorbar()

ltn.LAYERS = 4
ltn.BIAS_factor = -1e-8
# With "mean" aggregation the truth value of forall x p(x) is
# interpretable as the percentage of elements in the range of x
# that satisfy p.
ltn.set_universal_aggreg("mean")

embedding_size = 10  # each constant is interpreted in a vector of this size

# One embedding constant per individual a, b, ... i, j, ...
for individual in 'abcdefghijklmn':
    ltnw.constant(individual,
                  min_value=[0.] * embedding_size,
                  max_value=[1.] * embedding_size)

# Variables ranging over all individuals (p, q), group 1 (p1, q1)
# and group 2 (p2, q2).
ltnw.variable("p", tf.concat(list(ltnw.CONSTANTS.values()), axis=0))
ltnw.variable("q", ltnw.VARIABLES["p"])
ltnw.variable("p1", tf.concat([ltnw.CONSTANTS[l] for l in "abcdefgh"], axis=0))
ltnw.variable("q1", ltnw.VARIABLES["p1"])
ltnw.variable("p2", tf.concat([ltnw.CONSTANTS[l] for l in "ijklmn"], axis=0))
ltnw.variable("q2", ltnw.VARIABLES["p2"])

# Declare the predicates: Friends is binary (two embeddings), the rest unary.
ltnw.predicate('Friends', embedding_size * 2)
ltnw.predicate('Smokes', embedding_size)
ltnw.predicate('Cancer', embedding_size)
def testConstant(self):
    """Registering a constant makes it retrievable by name later."""
    zero = ltnw.constant("zero", [0, 0])
    one = ltnw.constant("one", [1, 1])
    # a name-only lookup must return the originally registered object
    self.assertEqual(ltnw.constant("zero"), zero)
    self.assertEqual(ltnw.constant("one"), one)