Example 1
    def test_dot_eval(self):
        x = expr.Variable('x')
        y = expr.Variable('y')
        dotprod = op.Dot(x, y)
        m, k, n = 5, 10, 3

        valuation = pl.valuation()
        nx = np.random.uniform(0, 1, (m, k)).astype(np.float32)
        ny = np.random.uniform(0, 1, (k, n)).astype(np.float32)
        valuation['x'] = nx
        valuation['y'] = ny
        gd = dotprod.evaluate(valuation)
        d = gd.get()
        self.assertTrue(np.allclose(d, np.dot(nx, ny)))
        # TODO: tests for other dimensionality cases...

        # batch cases
        batch_size = 10
        nx = np.random.uniform(0, 1, (batch_size, m, k)).astype(np.float32)
        ny = np.random.uniform(0, 1, (batch_size, k, n)).astype(np.float32)
        val2 = pl.valuation()
        val2['x'] = nx
        val2['y'] = ny
        gdotp = dotprod.evaluate(val2)
        expected = np.array([np.dot(nx[i], ny[i]) for i in range(batch_size)])
        d = gdotp.get()
        self.assertTrue(np.allclose(d, expected))
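
The per-sample loop used to build `expected` is exactly the semantics of a stacked matrix product: `np.matmul` broadcasts over the leading batch axis and gives the same result. A self-contained NumPy cross-check, with the same shapes as the test above:

    import numpy as np

    batch_size, m, k, n = 10, 5, 10, 3
    nx = np.random.uniform(0, 1, (batch_size, m, k)).astype(np.float32)
    ny = np.random.uniform(0, 1, (batch_size, k, n)).astype(np.float32)
    looped = np.array([np.dot(nx[i], ny[i]) for i in range(batch_size)])
    # np.matmul treats the first axis as a batch dimension
    assert np.allclose(looped, np.matmul(nx, ny))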
Example 2
    def test_dot_revgrad(self):
        x = expr.Variable('x')
        y = expr.Variable('y')
        dotprod = op.Dot(x, y)

        valuation = pl.valuation()
        nx = np.random.uniform(0, 1, (5, 10)).astype(np.float32)
        ny = np.random.uniform(0, 1, (10, 3)).astype(np.float32)

        D = nx.dot(ny)
        dD = np.ones_like(D)
        dX = dD.dot(ny.T)
        dY = nx.T.dot(dD)

        valuation['x'] = nx
        valuation['y'] = ny

        grad = dotprod.rev_grad(valuation)
        dx = grad['x'].get()
        dy = grad['y'].get()


        self.assertTrue(np.allclose(dx, dX))
        self.assertTrue(np.allclose(dy, dY))
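
The closed forms checked here (for D = X·Y with upstream seed dD: dX = dD·Yᵀ and dY = Xᵀ·dD) can also be validated numerically, independently of the library under test. A minimal finite-difference sketch for a single entry of X:

    import numpy as np

    nx = np.random.uniform(0, 1, (5, 10))
    ny = np.random.uniform(0, 1, (10, 3))
    dD = np.ones((5, 3))          # seed of ones, as in the test
    dX = dD.dot(ny.T)             # reverse-mode gradient w.r.t. X

    eps = 1e-6
    nx2 = nx.copy()
    nx2[0, 0] += eps
    # derivative of sum(X.dot(Y)) w.r.t. X[0, 0] by finite differences
    fd = (nx2.dot(ny).sum() - nx.dot(ny).sum()) / eps
    assert np.allclose(fd, dX[0, 0], atol=1e-4)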
Example 3
    def test(self, X, Y):
        # evaluate the error expression on the given data
        val = pl.valuation()
        val['X'] = X
        val['Y'] = Y
        # bind the model parameters into the valuation
        for param, value in self.params:
            val[param] = value
        err = self.errors.evaluate(val)
        return err
Example 4
    def test_fwd_grad(self):
        x = expr.Variable('x')
        wrt = {'x': 1}

        valuation = plat.valuation()

        e1 = op.Add(x, x)
        valuation['x'] = np.eye(2, dtype=np.float32)
        d1 = e1.fwd_grad(wrt, valuation)
        self.assertEqual(d1, 2)
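
The seed dictionary {'x': 1} asks forward-mode AD for the derivative of the expression along the tangent dx = 1; for e = x + x that is dx + dx = 2, regardless of the value of x. The same number falls out of a plain finite-difference check:

    import numpy as np

    x = np.eye(2, dtype=np.float32)
    eps = 1e-3
    # d(x + x)/dx along a perturbation of ones
    fd = (((x + eps) + (x + eps)) - (x + x)) / eps
    assert np.allclose(fd, 2.0, atol=1e-3)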
Example 5
    def test_sigmoid_eval(self):
        X = np.random.uniform(-1, 1, (10, 10)).astype(np.float32)
        vX = expr.Variable('X')
        sig = op.Sigmoid(vX)
        expected = 1.0 / (1.0 + np.exp(-X))
        val = pl.valuation()
        val['X'] = X
        gs = sig.evaluate(val)
        s = gs.get()
        self.assertTrue(np.allclose(s, expected))
Example 6
    def test_addition(self):
        x = expr.Variable("x")
        y = expr.Constant(2)

        valuation = plat.valuation()

        valuation['x'] = np.eye(2, dtype=np.float32)

        z = op.Add(x, y).evaluate(valuation)
        zz = z.get()
        self.assertEqual(zz[0,0], 3.0)
        self.assertEqual(zz[0,1], 2.0)
        self.assertEqual(zz[1,0], 2.0)
        self.assertEqual(zz[1,1], 3.0)
Example 7
    def test_dot_fwdgrad(self):
        x = expr.Variable('x')
        y = expr.Variable('y')
        dotprod = op.Dot(x, y)

        valuation = pl.valuation()
        nx = np.random.uniform(0, 1, (10, )).astype(np.float32)
        ny = np.random.uniform(0, 1, (10, )).astype(np.float32)
        valuation['x'] = nx
        valuation['y'] = ny
        # tangent seeds: perturb x along a vector of ones, hold y fixed
        xw = clarray.zeros(pl.qs[0], (10, ), dtype=np.float32) + 1.0
        yw = clarray.zeros(pl.qs[0], (10, ), dtype=np.float32)

        gddot = dotprod.fwd_grad({'x': xw, 'y': yw}, valuation)
        ddot = gddot.get()
        # by the product rule, d(x.y) along (dx=1, dy=0) is dot(1, y) = y.sum()
        self.assertTrue(np.allclose(ddot, ny.sum()))
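
The assertion added above relies on the product rule for forward mode: d(x·y) along tangents (dx, dy) is dx·y + x·dy, which with dx = 1 and dy = 0 collapses to the sum of y. A NumPy-only sketch of the same identity:

    import numpy as np

    nx = np.random.uniform(0, 1, 10)
    ny = np.random.uniform(0, 1, 10)
    eps = 1e-6
    # perturb x along a vector of ones, keep y fixed
    fd = (np.dot(nx + eps, ny) - np.dot(nx, ny)) / eps
    assert np.allclose(fd, ny.sum(), atol=1e-4)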
Example 8
    def test_conv2d_eval(self):
        img = expr.Variable('img')
        k = expr.Variable('k')
        b = expr.Variable('b')
        convolution = op.Conv2d(img, k, b, strides=(2, 2), zero_padding=(0, 0))

        valuation = pl.valuation()
        nimg = np.asarray([[[[0., 0., 0., 0., 0., 0., 0.],
                             [0., 2., 1., 0., 2., 2., 0.],
                             [0., 2., 2., 1., 1., 2., 0.],
                             [0., 2., 0., 0., 2., 1., 0.],
                             [0., 0., 2., 0., 1., 0., 0.],
                             [0., 1., 1., 0., 0., 0., 0.],
                             [0., 0., 0., 0., 0., 0., 0.]],
                            [[0., 0., 0., 0., 0., 0., 0.],
                             [0., 2., 0., 2., 0., 1., 0.],
                             [0., 2., 2., 1., 0., 2., 0.],
                             [0., 2., 1., 0., 0., 1., 0.],
                             [0., 2., 0., 1., 0., 0., 0.],
                             [0., 1., 1., 0., 2., 2., 0.],
                             [0., 0., 0., 0., 0., 0., 0.]],
                            [[0., 0., 0., 0., 0., 0., 0.],
                             [0., 2., 0., 0., 2., 1., 0.],
                             [0., 2., 2., 2., 2., 0., 0.],
                             [0., 1., 1., 2., 1., 0., 0.],
                             [0., 2., 1., 2., 1., 0., 0.],
                             [0., 1., 0., 0., 0., 2., 0.],
                             [0., 0., 0., 0., 0., 0., 0.]]]]).astype(np.float32)
        nk = np.asarray([[[[-1, 1, 1], [1, 0, 0], [1, 0, 0]],
                          [[1, -1, 1], [1, -1, -1], [1, 1, 1]],
                          [[0, 0, 0], [0, -1, 1], [0, 1, -1]]],
                         [[[0, 0, -1], [0, 1, 0], [0, 0, -1]],
                          [[-1, -1, 1], [-1, 1, -1], [1, 1, 0]],
                          [[0, 1, -1], [1, 0, 0], [0, -1, 0]]]]).astype(np.float32)
        nb = np.asarray([1, 0]).astype(np.float32)
        expected = np.asarray([[[[1, 7, 4], [5, 6, 2], [-2, -1, -2]],
                                [[2, 2, 7], [-1, -6, 1],
                                 [-2, -4, 0]]]]).astype(np.float32)

        valuation['img'] = nimg
        valuation['k'] = nk
        valuation['b'] = nb
        ret = convolution.evaluate(valuation)
        nret = ret.get()
        self.assertTrue(np.allclose(expected, nret))
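
The expected shape follows from the usual convolution size formula: out = (in + 2·pad − kernel) // stride + 1. With the 7×7 input (the zero border is already baked into the data), 3×3 kernels, stride 2 and no extra padding, each of the two filters produces a 3×3 map, i.e. an output of shape (1, 2, 3, 3). A small helper (not part of the library under test) makes the arithmetic explicit:

    def conv_out_size(n, k, stride=1, pad=0):
        # spatial output size of a strided 2D convolution
        return (n + 2 * pad - k) // stride + 1

    assert conv_out_size(7, 3, stride=2, pad=0) == 3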
Example 9
    def test_neq_eval(self):
        n = 10000
        X = np.zeros((n, ), dtype=np.float32)
        Y = np.ones((n, ), dtype=np.float32)
        varX = expr.Variable('X')
        varY = expr.Variable('Y')

        neq = op.NotEq(varX, varY)
        eq = op.NotEq(varX, varX)

        val = pl.valuation()
        val['X'] = X
        val['Y'] = Y
        gres = neq.evaluate(val)
        gres2 = eq.evaluate(val)
        self.assertTrue(gres.all())
        self.assertFalse(gres2.any())
Example 10
    def test_rev_grad(self):
        x = expr.Variable('x')
        y = expr.Variable('y')
        z = expr.Variable('z')

        valuation = plat.valuation()
        valuation['x'] = np.ones((2,2), dtype=np.float32)*5
        valuation['y'] = np.eye(2, dtype=np.float32)*2
        valuation['z'] = np.ones((2,2), dtype=np.float32)*3

        ex = op.Add(op.Mul(x, y), z)
        # for elementwise ex = x*y + z with an upstream seed of ones:
        # d(ex)/dx = y, d(ex)/dy = x, d(ex)/dz = 1
        rg = ex.rev_grad(valuation)

        # dex/dx = y
        dx = rg['x'].get()
        yy = valuation['y'].get()
        self.assertTrue(np.all(dx == yy))

        # dex/dy = x
        dy = rg['y'].get()
        xx = valuation['x'].get()
        self.assertTrue(np.all(dy == xx))

        # dex/dz = 1
        dz = rg['z'].get()
        self.assertTrue(np.all(dz == 1))
Example 11
    def test_sigmoid_grads(self):
        # TODO Test that the AD gradient of 1/(1+exp(-x)) gives approximately
        # the same result as sigmoid(x)*(1-sigmoid(x)), i.e. the sigmoid gradient.
        x = expr.Variable('x')
        valuation = plat.valuation()
        valuation['x'] = np.ones((3,3), dtype=np.float32)*2

        sigm1 = op.Div(expr.Constant(1.0), op.Add(expr.Constant(1.0), op.Exp(op.Neg(x))))
        sigm2 = op.Sigmoid(x)

        e1 = sigm1.evaluate(valuation)
        e2 = sigm2.evaluate(valuation)
        # sanity check: both formulations should produce the same forward values
        self.assertTrue(np.allclose(e1.get(), e2.get()))

        rg1 = sigm1.rev_grad(valuation)
        rg2 = sigm2.rev_grad(valuation)

        xg1 = rg1['x'].get()
        xg2 = rg2['x'].get()
        self.assertTrue(np.allclose(xg1, xg2, atol=1e-3))
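
The reference gradient named in the TODO comes from the identity sigma'(x) = sigma(x)·(1 − sigma(x)), which reverse-mode AD should reproduce through the Div/Add/Exp/Neg composition. A standalone NumPy check of the identity itself:

    import numpy as np

    x = np.ones((3, 3), dtype=np.float32) * 2
    s = 1.0 / (1.0 + np.exp(-x))
    # derivative of the sigmoid, written two equivalent ways
    g1 = np.exp(-x) / (1.0 + np.exp(-x)) ** 2
    g2 = s * (1.0 - s)
    assert np.allclose(g1, g2)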
Example 12
    def train(self, X, Y, learning_rate=0.01):
        val = pl.valuation()
        val['X'] = X
        val['Y'] = Y
        for name, value in self.params:
            val[name] = value

        grad = self.cost.rev_grad(val)

        # column of ones: dotting grad[name]^T with it sums the per-sample
        # bias gradients over the batch (row) dimension
        debatch_help_vector = clarray.zeros(pl.qs[0], (Y.shape[0], 1),
                                            dtype=np.float32) + 1
        for name, value in self.params:
            if name.startswith('b'):
                dbh = linalg.dot(pl.qs[0],
                                 grad[name],
                                 debatch_help_vector,
                                 transA=True)
                value -= learning_rate * dbh.ravel()
            else:
                value -= learning_rate * grad[name]
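
The ones-vector product in the bias branch is just a batch sum: for a gradient G of shape (batch, n), Gᵀ·1 equals G.sum(axis=0), collapsing the per-sample bias gradients into a single update. A NumPy illustration of the equivalence:

    import numpy as np

    G = np.random.uniform(0, 1, (8, 4)).astype(np.float32)
    ones = np.ones((8, 1), dtype=np.float32)
    assert np.allclose(G.T.dot(ones).ravel(), G.sum(axis=0))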
Example 13
    def train(self, X, Y, learning_rate=0.01, momentum=0.0):
        # self.do1.test = False
        val = pl.valuation()  # TODO this causes transfers
        val['X'] = X
        val['Y'] = Y
        for name, value in self.params:
            val[name] = value

        grad = self.cost.rev_grad(val)
        for name, value in self.params:
            if name.startswith('b_F') or name.startswith('b_S'):
                bgsum = misc.sum(pl.qs[0], grad[name], axis=0)
                value -= learning_rate * bgsum
            else:
                dv = learning_rate * grad[name]
                if self.prev_grad is not None and momentum > 0:
                    dv += momentum * self.prev_grad[name]
                value -= dv
        if momentum > 0:
            self.prev_grad = grad
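
Note that this variant mixes the previous raw gradient into the update; classical momentum instead carries a velocity term: v ← mu·v + lr·grad, theta ← theta − v. A minimal NumPy sketch of the classical rule, with dummy gradients standing in for the rev_grad output:

    import numpy as np

    theta = np.zeros(4, dtype=np.float32)
    v = np.zeros_like(theta)
    lr, mu = 0.01, 0.9
    for grad in [np.ones(4, dtype=np.float32)] * 3:  # dummy gradients
        v = mu * v + lr * grad                       # velocity update
        theta -= v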