Example #1
    def step(self, x_batch, y_batch):
        # Numerically smoothed step function: approximately the derivative
        # of ReLU (about 1 where x > 0, about 0 elsewhere).
        drelu = lambda x: kp.relu(x) / (kp.relu(x) + kp.arr(1e-12))

        # Forward pass: two-layer MLP with a ReLU hidden layer and softmax output.
        s1 = self.W1 @ x_batch
        s2 = s1 + self.b
        s3 = kp.relu(s2)
        s4 = self.W2 @ s3
        s5 = kp.softmax(s4.reshape([self.batch_size, self.o]))
        s5 = s5.reshape([self.batch_size, self.o, 1])

        # For softmax + cross-entropy, the gradient w.r.t. the logits s4 is s5 - y.
        loss = s5 - y_batch
        x_entropy = (-y_batch * kp.log(s5)).mean()

        # Backward pass via per-sample (batched) matrix products.
        dW2 = loss @ s3.reshape([self.batch_size, 1, self.h])

        r1 = loss.reshape([self.batch_size, 1, self.o]) @ self.W2
        r2 = r1.reshape([self.batch_size, self.h, 1]) * drelu(s2)

        db = r2

        dW1 = r2 @ x_batch.reshape([self.batch_size, 1, 784])

        # SGD update with gradients averaged over the batch.
        self.W1 -= self.learning_rate * dW1.mean(0)
        self.b -= self.learning_rate * db.mean(0)
        self.W2 -= self.learning_rate * dW2.mean(0)
        # x_entropy is already a scalar mean, so return it directly.
        return x_entropy
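Since only fragments of the kp API appear in these snippets, here is a minimal plain-NumPy sketch of the same forward/backward pass, for reference. The function name step_numpy and the shape conventions (x of shape (batch, i, 1), one-hot y of shape (batch, o, 1)) are assumptions, not part of the original code.

import numpy as np

def step_numpy(W1, b, W2, x, y, lr):
    # Assumed shapes: W1 (h, i), b (h, 1), W2 (o, h),
    # x (batch, i, 1), y (batch, o, 1) one-hot.
    s2 = W1 @ x + b                            # hidden pre-activation, (batch, h, 1)
    s3 = np.maximum(s2, 0)                     # ReLU
    s4 = W2 @ s3                               # logits, (batch, o, 1)
    e = np.exp(s4 - s4.max(axis=1, keepdims=True))
    s5 = e / e.sum(axis=1, keepdims=True)      # softmax over the class axis
    g = s5 - y                                 # dL/ds4 for softmax + cross-entropy
    dW2 = g @ s3.transpose(0, 2, 1)            # (batch, o, h)
    r2 = (W2.T @ g) * (s2 > 0)                 # backprop through W2 and ReLU
    dW1 = r2 @ x.transpose(0, 2, 1)            # (batch, h, i)
    W1 -= lr * dW1.mean(0)
    b -= lr * r2.mean(0)
    W2 -= lr * dW2.mean(0)
    return (-y * np.log(s5 + 1e-12)).mean()    # mean cross-entropy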
Example #2
    def accuracy(self, x_test, y_test):
        # Forward pass over the whole test set; softmax along the class axis (-2).
        predictions = kp.softmax(
            self.W2
            @ kp.relu(self.W1 @ x_test.reshape([-1, self.i, 1]) + self.b), -2)

        # A prediction is correct when the predicted class index matches
        # the one-hot target's index.
        success = predictions.numpy().argmax(-2) == y_test.argmax(-2)
        return success.mean()
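The argmax along axis -2 implies that y_test is one-hot encoded with shape (n, classes, 1). A hypothetical helper for building labels in that layout (one_hot does not appear in the snippets):

import numpy as np

def one_hot(labels, num_classes):
    # labels: integer class indices of shape (n,).
    y = np.zeros((labels.size, num_classes, 1))
    y[np.arange(labels.size), labels, 0] = 1.0
    return y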
Example #3
    def test_relu(self):
        print('test_relu')

        # Check kp.relu against a NumPy reference on random tensor shapes.
        for k in range(nb_random_checks):
            shape = rand_shape()

            ka = kp.arr('rand', shape=shape)
            na = ka.numpy()

            np.testing.assert_almost_equal(
                kp.relu(ka).numpy(),
                na * (na > 0),
                err_msg=f"shape = {shape}"
            )
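nb_random_checks and rand_shape are not defined in this snippet; plausible stand-ins so the test can run self-contained (the constant and the helper below are assumptions, not the library's own):

import numpy as np

nb_random_checks = 10

def rand_shape(max_dims=4, max_size=8):
    # Return a random tensor shape such as (3,) or (2, 7, 4).
    ndims = np.random.randint(1, max_dims + 1)
    return tuple(int(np.random.randint(1, max_size + 1)) for _ in range(ndims))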
Example #4
def f(x):
    x = kp.relu(W1 @ x + b)
    return kp.softmax(W2 @ x)
Example #5
def f(x):
    return kp.relu(x + x)
Example #6
def f(x):
    x = x @ ((d + v - w) * y / z)
    return kp.relu(-x)
Example #7
def f(x, p):
    y = kp.relu(W1 @ x + b)
    y = kp.softmax(W2 @ x + p + y)
    return y
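Examples #4 through #7 read like small fixtures closing over module-level tensors (W1, W2, b, d, v, w, y, z), presumably used to compare autodiff gradients against numerical estimates. A generic central-difference estimator in plain NumPy, purely as an illustrative sketch (numerical_grad is hypothetical and not part of the library):

import numpy as np

def numerical_grad(f, x, eps=1e-6):
    # f: callable taking x and returning a scalar; x: float ndarray,
    # perturbed in place and restored after each evaluation.
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + eps
        f_plus = f(x)
        x[idx] = orig - eps
        f_minus = f(x)
        x[idx] = orig
        grad[idx] = (f_plus - f_minus) / (2 * eps)
        it.iternext()
    return grad

# e.g. for Example #5's f: numerical_grad(lambda t: np.maximum(t + t, 0).sum(), x)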