class TestArray(MLTestCase):
    """Unit tests for Array: arithmetic, dot product, norm, and indexing."""

    def setUp(self):
        # Fresh fixture vector for each test method.
        self.x = Array([1, 2, 3])

    def test__check(self):
        # Length mismatch must trip the internal consistency check.
        with self.assertRaises(AssertionError):
            self.x + Array([1, 2])

    def test_addition(self):
        total = self.x + Array([1, 1, 1])
        self.assertEqual(total, Array([2, 3, 4]))

    def test_subtraction(self):
        difference = self.x - self.x
        self.assertEqual(difference, Array([0, 0, 0]))

    def test_multiplication(self):
        # Scalar multiplication from the left and from the right.
        doubled = 2 * self.x
        self.assertEqual(doubled, Array([2, 4, 6]))
        negated = self.x * -1
        self.assertEqual(negated, Array([-1, -2, -3]))

    def test_neg(self):
        # A vector plus its negation is the zero vector.
        self.assertEqual(-self.x + self.x, Array([0, 0, 0]))

    def test_dot(self):
        expected = 1 * 2 + 2 * 4 + 3 * 6
        self.assertEqual(self.x.dot(Array([2, 4, 6])), expected)

    def test_norm(self):
        expected = (1**2 + 2**2 + 3**2) ** 0.5
        self.assertEqual(self.x.norm(), expected)

    def test_get_set_item(self):
        vec = Array([1, 1, 1])
        self.assertEqual(vec[2], 1)
        vec[2] = 8
        self.assertEqual(vec[2], 8)
 def test_no_noise(self):
     """Fitting noiseless linear data should recover the true weights."""
     features = [Array([1, j, j**2]) for j in range(10)]
     true_weights = Array([1, 0.2, 1])
     # Targets generated exactly from the weights, so no residual noise.
     targets = Array([true_weights.dot(sample) for sample in features])
     model = LinearRegression()
     model.fit(features, targets)
     self.assertArrayEqual(model.coef_, true_weights, 2)
# --- Example 3 (scraped snippet separator; original marker: "Esempio n. 3 / 0") ---
class TestFunction(MLTestCase):
    """Unit tests for Function: calling, arithmetic, gradients, minimization."""

    def setUp(self):
        # Fixture: a sample point and the norm wrapped as a Function.
        self.x = Array([1, 2, 3])
        self.f = Function(lambda v: v.norm())

    def test_call(self):
        # Calling the wrapper forwards to the underlying callable.
        self.assertEqual(self.f(self.x), self.x.norm())

    def test_mul(self):
        # Scalar multiplication from either side scales the output.
        tripled = 3 * self.f
        negated = self.f * -1
        self.assertEqual(tripled(self.x), 3 * self.x.norm())
        self.assertEqual(negated(self.x), -self.x.norm())

    def test_add(self):
        doubled = self.f + self.f
        self.assertEqual(doubled(self.x), 2 * self.x.norm())

    def test_sub(self):
        # A function minus itself is identically zero.
        self.assertEqual((self.f - self.f)(self.x), 0)

    def test_grad(self):
        # D(||x||) = x / ||x||
        gradient = Function(lambda x: x.norm()).grad(self.x)
        self.assertArrayEqual(gradient, self.x / self.x.norm())

        # D(||x||^2) = 2 * x
        gradient = Function(lambda x: x.norm()**2).grad(self.x)
        self.assertArrayEqual(gradient, 2 * self.x)

        # D(x[0]^2 - x[1] * x[2]) = (2 * x[0], -x[2], -x[1])
        gradient = Function(lambda x: x[0]**2 - x[1] * x[2]).grad(self.x)
        expected = Array([2 * self.x[0], -self.x[2], -self.x[1]])
        self.assertArrayEqual(gradient, expected)

    def test_minimize(self):
        # ||x - v||^2 is minimized when x = v
        objective = Function(
            lambda x: (x - Array(range(len(x)))).norm_squared())
        for dims in range(1, 3):
            minimum = objective.minimize(dims=dims)
            self.assertArrayEqual(minimum, Array(range(dims)), 3)
# --- Example 4 (scraped snippet separator; original marker: "Esempio n. 4 / 0") ---
    def test_grad(self):
        """Numerical gradients should match the analytic derivatives."""
        # D(||x||) = x / ||x||
        grad_norm = Function(lambda x: x.norm()).grad(self.x)
        self.assertArrayEqual(grad_norm, self.x / self.x.norm())

        # D(||x||^2) = 2 * x
        grad_sq = Function(lambda x: x.norm()**2).grad(self.x)
        self.assertArrayEqual(grad_sq, 2 * self.x)

        # D(x[0]^2 - x[1] * x[2]) = (2 * x[0], -x[2], -x[1])
        grad_mixed = Function(lambda x: x[0]**2 - x[1] * x[2]).grad(self.x)
        expected = Array([2 * self.x[0], -self.x[2], -self.x[1]])
        self.assertArrayEqual(grad_mixed, expected)
 def setUp(self):
     """Build the fixture vector shared by the tests below."""
     fixture = [1, 2, 3]
     self.x = Array(fixture)
 def test_get_set_item(self):
     """Indexing supports both reads and in-place writes."""
     vec = Array([1, 1, 1])
     self.assertEqual(vec[2], 1)
     vec[2] = 8
     self.assertEqual(vec[2], 8)
 def test_dot(self):
     """dot() returns the scalar product of the two vectors."""
     other = Array([2, 4, 6])
     expected = 1 * 2 + 2 * 4 + 3 * 6
     self.assertEqual(self.x.dot(other), expected)
 def test_neg(self):
     """Negation yields the additive inverse."""
     zero = -self.x + self.x
     self.assertEqual(zero, Array([0, 0, 0]))
 def test_multiplication(self):
     """Scalar multiplication works from the left and from the right."""
     doubled = 2 * self.x
     self.assertEqual(doubled, Array([2, 4, 6]))
     negated = self.x * -1
     self.assertEqual(negated, Array([-1, -2, -3]))
# --- Example 10 (scraped snippet separator; original marker: "Esempio n. 10 / 0") ---
 def test_addition(self):
     """Elementwise addition of two equal-length vectors."""
     total = self.x + Array([1, 1, 1])
     self.assertEqual(total, Array([2, 3, 4]))
# --- Example 11 (scraped snippet separator; original marker: "Esempio n. 11 / 0") ---
 def squared_error(w):
     """Residual magnitude of the linear model Xw against the targets y."""
     # X and y are captured from the enclosing scope.
     predictions = Array([row.dot(w) for row in X])
     residual = predictions - y
     # NOTE(review): despite the name this returns the norm, not its
     # square; the argmin is the same either way — confirm intent.
     return residual.norm()
# --- Example 12 (scraped snippet separator; original marker: "Esempio n. 12 / 0") ---
 def transform(self, X):
     """Apply the fitted model: sigmoid(coef . x) for each sample in X."""
     # NOTE(review): raising NotImplementedError for an unfitted model is
     # unconventional (RuntimeError is typical); kept as-is since callers
     # may catch this exact type.
     if self.coef_ is None:
         raise NotImplementedError("Must fit the model first")
     weights = self.coef_
     return Array([1 / (1 + exp(-weights.dot(sample))) for sample in X])
# --- Example 13 (scraped snippet separator; original marker: "Esempio n. 13 / 0") ---
 def setUp(self):
     """Fixture: a sample vector and the norm wrapped as a Function."""
     self.x = Array([1, 2, 3])
     self.f = Function(lambda v: v.norm())
# --- Example 14 (scraped snippet separator; original marker: "Esempio n. 14 / 0") ---
 def test_minimize(self):
     """minimize() locates the argmin of ||x - v||^2, which is v itself."""
     objective = Function(
         lambda x: (x - Array(range(len(x)))).norm_squared())
     for dims in range(1, 3):
         expected = Array(range(dims))
         self.assertArrayEqual(objective.minimize(dims=dims), expected, 3)
# --- Example 15 (scraped snippet separator; original marker: "Esempio n. 15 / 0") ---
 def test__check(self):
     """Adding vectors of mismatched length must raise AssertionError."""
     with self.assertRaises(AssertionError):
         shorter = Array([1, 2])
         self.x + shorter
# --- Example 16 (scraped snippet separator; original marker: "Esempio n. 16 / 0") ---
 def grad(self, x):
     """Numerical gradient at x: one partial derivative per coordinate.

     Relies on the module-level helpers `derivative` and `eps` to perturb
     each axis in turn.
     """
     partials = []
     for axis in range(len(x)):
         partials.append(derivative(self, x, eps(x, axis)))
     return Array(partials)
# --- Example 17 (scraped snippet separator; original marker: "Esempio n. 17 / 0") ---
 def test_subtraction(self):
     """A vector minus itself is the zero vector."""
     difference = self.x - self.x
     self.assertEqual(difference, Array([0, 0, 0]))