Example #1
    def setUp(self):
        self.A = Tensor([[1, 2], [3, 4]], name='A')
        self.B = Tensor([[1, 2], [3, 4]], name='B')
        self.C = Tensor([[5, 6], [7, 8]], name='C')
        self.D = Constant([[2, 1], [3, 2]], name='D')

        # Define an involved computation graph with the constants and variables above
        e = TensorAddition(self.A, self.B)
        f = TensorElemMultiply(Constant(2 * np.ones(e.shape)), e)
        g = TensorNegLog(f)
        h = TensorAddition(self.A, g)
        i = TensorMultiply(h, self.C)
        j = TensorSubtraction(i, self.D)
        total = TensorSum(j)  # reduce to a scalar output

        self.bp = BackwardsPass(total)
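A minimal follow-up sketch of how this fixture might be consumed, assuming BackwardsPass exposes the execute() method used in Example #5 and returns a dict of gradients keyed by variable:

    def test_gradients_returned(self):
        grads = self.bp.execute()
        # hypothetical smoke check: a non-empty gradient dict is produced
        self.assertTrue(len(grads) > 0)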
Example #2
    def setUp(self):
        self.data = np.array([
            [[1, 2, 3], [4, 5, 6]],
            [[7, 8, 9], [10, 11, 12]],
        ])

        self.tens = Tensor(self.data)
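Given this fixture, attribute checks in the style of Example #3 would read roughly as below; this is a sketch assuming the same Tensor contract (value wraps the data, shape mirrors the underlying ndarray):

    def test_attributes(self):
        self.assertTrue(np.array_equal(self.tens.value, self.data))
        self.assertEqual(self.tens.shape, (2, 2, 3))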
Example #3
    def test_constants_attributes(self):
        """ Test tensor attributes """

        # value attr should return the wrapped data
        self.assertTrue(np.array_equal(self.const.value, self.data))
        # shape should return the shape of the wrapped data
        self.assertEqual(self.const.shape, (2, 2, 3))
        # node_uid should be a random string generated uniquely for each tensor
        self.assertIsInstance(self.const.node_uid, str)
        self.assertGreater(len(self.const.node_uid), 0)
        new_const = Tensor(self.const.value)
        self.assertNotEqual(self.const.node_uid, new_const.node_uid)
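The test pins down only the node_uid contract: a non-empty string that differs between tensors. A hypothetical scheme that would satisfy it (not necessarily what the library actually does):

    import uuid

    def _make_node_uid():
        # a random 32-character hex string, unique per tensor for practical purposes
        return uuid.uuid4().hex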
Example #4
    def test_vjps(self):
        # test case where a is of dim greater than 1 and b is of dim 1
        a = Tensor(np.random.random((3, 5, 4)))
        b = Tensor(np.random.random((4, )))
        c = self.operation(a, b)
        g = np.random.random(c.shape)
        contract_num = max(0, len(b.shape) - (len(a.shape) != 0))
        a_vjp, b_vjp = c.vector_jacobian_product()
        a_ndim = len(a.shape)
        b_ndim = len(b.shape)

        self.assertTrue(
            np.array_equal(a_vjp(g), np.tensordot(g, b.value, contract_num)))
        res = np.asarray(
            np.tensordot(
                g, a.value,
                [range(-a_ndim - b_ndim + 2, -b_ndim + 1),
                 range(a_ndim - 1)]))
        self.assertTrue(np.array_equal(b_vjp(g), res))

        # test case where a is of dim 1 and b is of dim greater than 1
        a = Tensor(np.random.random((4, )))
        b = Tensor(np.random.random((3, 4, 5)))
        c = self.operation(a, b)
        g = np.random.random(c.shape)
        contract_num = max(0, len(a.shape) - (len(b.shape) != 0))
        a_vjp, b_vjp = c.vector_jacobian_product()
        a_ndim = len(a.shape)
        b_ndim = len(b.shape)

        self.assertTrue(
            np.array_equal(
                a_vjp(g),
                np.tensordot(g, np.swapaxes(b.value, -1, -2), b_ndim - 1)))
        self.assertTrue(
            np.array_equal(
                b_vjp(g),
                np.asarray(
                    np.swapaxes(np.tensordot(g, a.value, contract_num), -1,
                                -2))))
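The tensordot expressions above can be cross-checked against plain NumPy, assuming self.operation follows np.matmul semantics; a small standalone verification of the first case:

    import numpy as np

    a = np.random.random((3, 5, 4))
    b = np.random.random((4, ))
    g = np.random.random(np.matmul(a, b).shape)  # upstream gradient, shape (3, 5)
    # VJP w.r.t. a is the outer product of g with b
    assert np.allclose(np.tensordot(g, b, 0), g[..., None] * b)
    # VJP w.r.t. b contracts g against a over the leading dims
    assert np.allclose(np.tensordot(g, a, [range(-2, 0), range(2)]),
                       np.einsum('ij,ijk->k', g, a))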
Example #5
    def optimize(self, nn, x, y, batch_size, epochs, early_stopping):
        self._print_optimization_message(nn)
        x, y = self._shuffle(x, y)

        x_train, x_test, y_train, y_test = self._train_val_split(x, y)
        n_batches = x_train.shape[0] // batch_size
        for epoch in range(epochs):
            x_train, y_train = self._shuffle(x_train, y_train)
            for batch in range(n_batches):
                start_splice = batch * batch_size
                end_splice = (batch + 1) * batch_size

                x_batch = x_train[start_splice:end_splice, ...]
                y_batch = y_train[start_splice:end_splice, ...]
                # forward pass
                y_hat = nn.forward_pass(Tensor(x_batch))
                loss = self.loss(Constant(y_batch), y_hat)

                # backwards pass; average the gradient over the batch
                grad = BackwardsPass(loss).execute()
                for var in grad:
                    grad[var] = grad[var] / batch_size

                # update weights with the optimization algorithm
                self._update(nn, grad)
                # control outputs
                self._handle_prints(epoch, batch, n_batches)
            # eval performance
            train_loss = self._eval_perf(x_train, y_train, nn)
            validation_loss = self._eval_perf(x_test, y_test, nn)

            # early stopping: halt when validation loss stops improving
            if early_stopping and epoch > 0:
                if validation_loss > last_val_loss:
                    self._handle_prints(epoch, batch, n_batches, train_loss,
                                        validation_loss)
                    break
            last_val_loss = validation_loss

            self._handle_prints(epoch, batch, n_batches, train_loss,
                                validation_loss)
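A usage sketch tying this to Example #6's fit(): the optimizer instance comes from the OPTIMIZERS registry, so a manual invocation would look roughly like this (loss_function is a hypothetical stand-in for what _build_loss_function returns):

    optimizer = OPTIMIZERS['sgd'](loss_function, 0.01)
    optimizer.optimize(nn, x, y, batch_size=32, epochs=10, early_stopping=True)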
Example #6
    def fit(self,
            x,
            y,
            batch_size=1,
            epochs=1,
            optimizer='sgd',
            loss='mse',
            learning_rate=0.01,
            l2=0,
            early_stopping=True):

        if len(y.shape) == 1:
            y = np.reshape(y, y.shape + (1, ))
        if not self.setup:
            self._setup_layers(Tensor(x))
        self.l2 = l2
        optimizer = OPTIMIZERS[optimizer](self._build_loss_function(loss),
                                          learning_rate)
        optimizer.optimize(self,
                           x,
                           y,
                           batch_size,
                           epochs,
                           early_stopping=early_stopping)
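A minimal end-to-end sketch, assuming net is an instance of the class defining fit() above (data shapes are hypothetical):

    x = np.random.random((100, 8))
    y = np.random.random((100, ))  # 1-D targets get reshaped to (100, 1)
    net.fit(x, y, batch_size=10, epochs=5, optimizer='sgd', loss='mse')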
Example #7
    def predict(self, x):
        return [self.forward_pass(Tensor(i)) for i in x]
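predict() runs one forward pass per input row and returns the outputs as a list; a usage sketch, assuming each output exposes the .value attribute seen in the other examples:

    preds = net.predict(x)
    values = [p.value for p in preds]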
Example #8
    def setUp(self):
        self.A = Tensor(np.random.random((3, 5, 3)))
        self.B = self.operation(self.A)
Example #9
    def setUp(self):
        self.A = Tensor(np.random.random((7, )))
        self.n_rows = 5
        self.B = self.operation(self.A, self.n_rows)
Example #10
    def setUp(self):
        self.A = Tensor(np.random.random((2, 2, 3)))
        self.B = Tensor(np.random.random((2, 2, 3)))

        self.C = self.operation(self.A, self.B)
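A plausible follow-up assertion for this fixture, assuming self.operation is an elementwise binary op such as TensorAddition from Example #1, so the result keeps the operands' shape:

    def test_shape(self):
        self.assertEqual(self.C.shape, self.A.shape)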
Example #11
    def test_constants_instantiation(self):
        """ Test tensor instantiation approaches """

        # Tensors accept lists on instantiation and internally convert the data to an ndarray
        new_const = Tensor(self.data.tolist())
        self.assertTrue(np.array_equal(new_const.value, self.const.value))
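A natural extra check in the same spirit, assuming .value always exposes the converted ndarray:

        # hypothetical extra assertion: the stored value is an ndarray
        self.assertIsInstance(new_const.value, np.ndarray)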
Example #12
    def _eval_perf(self, x, y, model):
        n_evals = x.shape[0]
        y_hat = model.forward_pass(Tensor(x))

        # total loss over the evaluation set, normalized to a per-example mean
        loss = self.loss(Constant(y), y_hat).value[0]
        return loss / n_evals
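The return value is the mean per-example loss: the loss node's scalar divided by the number of rows evaluated. With hypothetical numbers:

    total_loss = 12.0                  # summed loss over a 4-row evaluation set
    n_evals = 4
    mean_loss = total_loss / n_evals   # 3.0 per example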