def setUp(self):
    self.c1 = core.Constant([2.1, 3], trainable=False)
    self.c2 = core.Constant([4.5, 0.8], trainable=False)
    self.n_x = 10
    self.x = core.Variable('x', np.random.normal(0., 1., (self.n_x, 2)))
    self.n_y = 5
    self.y = core.Variable('y', np.random.normal(0., 4., (self.n_y, 2)))
def setUp(self):
    self.n_x = 10
    self.x = core.Variable('x', np.random.normal(0., 1., (self.n_x, 2)))
    self.n_y = 5
    self.y = core.Variable('y', np.random.normal(0., 4., (self.n_y, 2)))
    self.n_z = 3
    self.z = core.Variable('z', np.random.normal(0., 4., (self.n_z, 2)))
    self.p = core.Predicate.MLP([2, 2, 2])
def test_empty_semantics_exist(self):
    x = core.Variable('x', np.random.rand(self.n_x, 1))
    y = core.Variable('y', np.random.rand(self.n_y, 1) + 1.)  # all y are greater
    # The guard x > y is never satisfied, so the guarded domain is empty.
    mask1 = self.is_greater_than([x, y])
    take_y = np.random.randint(self.n_y)
    actual = self.Exists(x, self.p1([x, y]), mask=mask1).take("y", take_y).tensor
    desired = 0.
    self.assertEqual(actual, desired)
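# Note (illustrative, not from the original test file): with a guarded quantifier,
# an empty guard domain is mapped to a neutral truth value. The test above relies
# on Exists over an empty domain evaluating to 0; the dual convention (assumed
# here, not checked by this test) would be Forall over an empty domain evaluating to 1.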
def setUp(self):
    self.And = core.Wrapper_Connective(fuzzy_ops.And_Prod())
    self.Not = core.Wrapper_Connective(fuzzy_ops.Not_Std())
    self.c1 = core.Constant([2.1, 3], trainable=True)
    self.n_x = 10
    self.x = core.Variable('x', np.random.normal(0., 1., (self.n_x, 2)))
    self.n_y = 5
    self.y = core.Variable('y', np.random.normal(0., 4., (self.n_y, 2)))
    self.p1 = core.Predicate.MLP([2])
    self.p2 = core.Predicate.MLP([2, 2])
    self.a = core.Proposition(0., trainable=True)
def setUp(self):
    self.Forall = core.Wrapper_Quantifier(fuzzy_ops.Aggreg_pMeanError(p=2), semantics="forall")
    self.Exists = core.Wrapper_Quantifier(fuzzy_ops.Aggreg_pMean(p=5), semantics="exists")
    self.n_x = 10
    self.x = core.Variable('x', np.random.normal(0., 1., (self.n_x, 2)))
    self.n_y = 5
    self.y = core.Variable('y', np.random.normal(0., 4., (self.n_y, 2)))
    self.n_z = 5
    self.z = core.Variable('z', np.random.normal(0., 4., (self.n_z, 2)))
    self.p1 = core.Predicate.MLP([2, 2])
    self.p2 = core.Predicate.MLP([2, 2, 2])
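# Background note (not part of the original file): Aggreg_pMean approximates an
# existential quantifier (a generalized p-mean that, for larger p, gives more weight
# to the highest truth values), while Aggreg_pMeanError approximates a universal
# quantifier by penalizing low truth values (roughly 1 - pMean(1 - a)).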
def setUp(self):
    self.n_x = 10
    self.x = core.Variable('x', np.random.normal(0., 1., (self.n_x, 1)))
    self.n_y = 10
    self.y = core.Variable('y', np.random.normal(0., 1., (self.n_y, 1)))
    # Crisp predicate used as a guard: true where an x individual exceeds a y individual.
    self.is_greater_than = core.Predicate.Lambda(
        lambda inputs: inputs[0] > inputs[1])
    self.mask1 = self.is_greater_than([self.x, self.y])
    self.p1 = core.Predicate.MLP([1, 1])
    self.Forall = core.Wrapper_Quantifier(fuzzy_ops.Aggreg_pMeanError(p=2), semantics="forall")
    self.Exists = core.Wrapper_Quantifier(fuzzy_ops.Aggreg_pMean(p=5), semantics="exists")
def setUp(self):
    self.n_x = 10
    self.x = core.Variable('x', np.random.normal(0., 1., (self.n_x, 1)))
    self.n_y = 10
    self.y = core.Variable('y', np.random.normal(0., 1., (self.n_y, 1)))
    self.n_z = 10
    self.z = core.Variable('z', np.random.normal(0., 2., (self.n_z, 1)))
    self.is_greater_than = core.Predicate.Lambda(
        lambda inputs: inputs[0] > inputs[1])
    self.add = core.Function.Lambda(lambda inputs: inputs[0] + inputs[1])
    # Guards for masked quantification: x > y and (x + y) > z.
    self.mask1 = self.is_greater_than([self.x, self.y])
    self.mask2 = self.is_greater_than([self.add([self.x, self.y]), self.z])
    self.p1 = core.Predicate.MLP([1, 1])
    self.p2 = core.Predicate.MLP([1, 1, 1])
def test_init(self):
    for x_val in self.x_vals:
        label = "x"
        x = core.Variable("x", x_val)
        self.assertTrue(array_allclose(x.tensor.numpy(), x_val))
        self.assertTrue(isinstance(x.tensor, tf.Tensor))
        self.assertEqual(x.free_vars, [label])
        self.assertEqual(x.label, label)
def setUp(self):
    self.x = core.Variable('x', np.random.rand(3, 1))
    self.y = core.Variable('y', np.random.rand(4, 1))
    self.c = core.Constant([3.], trainable=False)
    self.f1 = core.Function.MLP(input_shapes=[1], output_shape=[1])
    self.f2 = core.Function.MLP(input_shapes=[1, 1], output_shape=[1])
    self.p1 = core.Predicate.MLP(input_shapes=[1])
    self.p2 = core.Predicate.MLP(input_shapes=[1, 1])
    self.q = core.Proposition(0., trainable=False)
    self.And = core.Wrapper_Connective(fuzzy_ops.And_Prod())
    self.Not = core.Wrapper_Connective(fuzzy_ops.Not_Std())
    self.Exists = core.Wrapper_Quantifier(fuzzy_ops.Aggreg_Mean(), semantics="exists")
    # Mask over the (x, y) grid: shape (3, 4), matching the 3 x-individuals and 4 y-individuals.
    self.mask = core.Formula(tf.constant([[1., 1., 0., 0.],
                                          [0., 1., 1., 0.],
                                          [0., 0., 1., 0.]]),
                             free_vars=['x', 'y'])
def test_aggreg_first_var_of_mask(self):
    x = core.Variable('x', np.random.rand(3, 1))
    y = core.Variable('y', np.random.rand(4, 1))
    mask = core.Formula(tf.constant([[1., 1., 0., 0.],
                                     [0., 1., 1., 0.],
                                     [0., 0., 1., 0.]]),
                        free_vars=['x', 'y'])
    wff = core.Formula(tf.constant([[.4, .2, .8, .0],
                                    [.9, .1, .5, .2],
                                    [.3, .0, .8, .9]]),
                       free_vars=['x', 'y'])
    # Result after mask:
    # [[.4, .2,   ,   ],
    #  [  , .1, .5,   ],
    #  [  ,   , .8,   ]]
    Exists = core.Wrapper_Quantifier(fuzzy_ops.Aggreg_Mean(), semantics="exists")
    actual = Exists(x, wff, mask=mask)
    # Mean over the unmasked entries of each column (indexed by the remaining free
    # variable y); the fully masked last column defaults to 0 under "exists" semantics.
    expected = np.array([.4, .15, .65, .0])
    self.assertTrue(array_allclose(actual.tensor, expected))
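# Illustrative cross-check (a sketch, not part of the original test suite): the
# expected values above are the per-column means of the unmasked entries, with a
# fully masked column mapped to 0 under the "exists" semantics assumed here.
def _masked_exists_mean(wff_vals, mask_vals):
    """Mean over axis 0 of the entries where mask_vals is nonzero; 0 for empty columns."""
    import numpy as np  # already imported at module level; repeated so the sketch stands alone
    keep = mask_vals > 0
    counts = keep.sum(axis=0)
    sums = np.where(keep, wff_vals, 0.).sum(axis=0)
    return np.where(counts > 0, sums / np.maximum(counts, 1), 0.)

# e.g. with the wff and mask arrays from test_aggreg_first_var_of_mask above:
# _masked_exists_mean(wff.tensor.numpy(), mask.tensor.numpy())
# -> array([0.4, 0.15, 0.65, 0.])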
def test_values_correct(self):
    take_x = np.random.randint(self.n_x)
    # Guarded Forall over y, evaluated at a single x individual.
    actual = self.Forall(self.y, self.p1([self.x, self.y]), mask=self.mask1).take('x', take_x)
    # Reference: keep only the y individuals that satisfy the guard (x > y) for that x,
    # then quantify over them without a mask.
    safe_y = core.Variable(
        'safe_y',
        tf.boolean_mask(self.y.tensor, self.x.take('x', take_x).tensor > self.y.tensor))
    desired = self.Forall(safe_y, self.p1([self.x, safe_y])).take('x', take_x)
    self.assertTrue(array_allclose(actual.tensor, desired.tensor))
def setUp(self):
    self.var_settings = {
        "x1": {"n_individuals": 10, "shape_individual": [2]},
        "x2": {"n_individuals": 5, "shape_individual": [2]},
        "x3": {"n_individuals": 7, "shape_individual": [2, 2]}
    }
    self.xs = {}
    for label, v_s in self.var_settings.items():
        x = core.Variable(
            label,
            np.random.rand(v_s["n_individuals"], *v_s["shape_individual"]))
        self.xs[label] = x