def test_compare1(self):
        rate = 0.4
        x = np.random.rand(10, 2)
        t = np.zeros((10)).astype(int)
        layer = dezero.layers.Linear(2, 3, nobias=True)
        layer.W.data = np.ones_like(layer.W.data)

        optimizer = dezero.optimizers.SGD().setup(layer)
        optimizer.add_hook(dezero.optimizers.WeightDecay(rate=rate))

        layer.cleargrads()
        y = layer(x)
        y = F.softmax_cross_entropy(y, t)
        y.backward()
        optimizer.update()
        W0 = layer.W.data.copy()

        layer.W.data = np.ones_like(layer.W.data)
        optimizer.hooks.clear()
        layer.cleargrads()
        y = layer(x)
        y = F.softmax_cross_entropy(y, t) + rate / 2 * (layer.W**2).sum()
        y.backward()
        optimizer.update()
        W1 = layer.W.data
        self.assertTrue(array_allclose(W0, W1))
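The test above works because the gradient of the extra loss term rate / 2 * (W ** 2).sum() is rate * W, which is exactly what a weight-decay hook adds to each gradient before the SGD update. A minimal sketch of such a hook (an illustration of the idea, not necessarily dezero's exact WeightDecay code):

class WeightDecaySketch:
    def __init__(self, rate):
        self.rate = rate

    def __call__(self, params):
        # Add the L2-penalty gradient rate * W to every parameter's gradient.
        for param in params:
            param.grad.data += self.rate * param.data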
    def test_forward1(self):
        x = np.array([[-1, 0, 1, 2], [2, 0, 1, -1]], np.float32)
        t = np.array([3, 0]).astype(np.int32)
        y = F.softmax_cross_entropy(x, t)
        y2 = CF.softmax_cross_entropy(x, t)
        res = np.allclose(y.data, y2.data)
        self.assertTrue(res)
Example #3
    def test_MNIST(self):
        max_epoch = 5
        batch_size = 100
        hidden_size = 1000

        train_set = dezero.datasets.MNIST(train=True)
        test_set = dezero.datasets.MNIST(train=False)
        train_loader = DataLoader(train_set, batch_size)
        test_loader = DataLoader(test_set, batch_size, shuffle=False)

        #model = MLP((hidden_size, 10))
        model = MLP((hidden_size, hidden_size, 10), activation=F.relu)
        optimizer = optimizers.SGD().setup(model)

        if dezero.cuda.gpu_enable:
            train_loader.to_gpu()
            model.to_gpu()

        for epoch in range(max_epoch):
            sum_loss, sum_acc = 0, 0
            for x, t in train_loader:
                y = model(x)
                loss = F.softmax_cross_entropy(y, t)
                acc = F.accuracy(y, t)
                model.cleargrads()
                loss.backward()
                optimizer.update()

                sum_loss += float(loss.data) * len(t)
                sum_acc += float(acc.data) * len(t)

            print('epoch: {}'.format(epoch + 1))
            print('train loss: {:.4f}, accuracy: {:.4f}'.format(
                sum_loss / len(train_set), sum_acc / len(train_set)))

            sum_loss, sum_acc = 0, 0
            with dezero.no_grad():
                for x, t in test_loader:
                    y = model(x)
                    loss = F.softmax_cross_entropy(y, t)
                    acc = F.accuracy(y, t)
                    sum_loss += float(loss.data) * len(t)
                    sum_acc += float(acc.data) * len(t)

                print('test loss: {:.4f}, accuracy: {:.4f}'.format(
                    sum_loss / len(test_set), sum_acc / len(test_set)))
Example #4
    def test_softmax_cross_entropy(self):
        x = Variable(np.array([[1.0, 1.0, 1.0], [3.0, 2.0, 1.0]]))
        t = Variable(np.array([1, 0]))

        y = softmax_cross_entropy(x, t)
        p_1 = np.log(np.exp(1.0) / (3 * np.exp(1.0)))
        p_2 = np.log(np.exp(3.0) / (np.exp(3.0) + np.exp(2.0) + np.exp(1.0)))
        expected_ans = -(p_1 + p_2) / 2
        assert_array_equal(y.data, np.array(expected_ans))
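For reference, the expected value can be reproduced with a plain NumPy version of the mean softmax cross-entropy (a standalone sketch, independent of dezero's implementation):

def softmax_cross_entropy_ref(x, t):
    # Softmax over the class axis, then the mean negative log-probability
    # of the correct class; the row max is subtracted for numerical stability.
    x = x - x.max(axis=1, keepdims=True)
    p = np.exp(x) / np.exp(x).sum(axis=1, keepdims=True)
    return -np.log(p[np.arange(len(t)), t]).mean()

With the inputs above this gives -(log(1/3) + log(e**3 / (e**3 + e**2 + e))) / 2, roughly 0.7531.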
Example #5
    def test_SoftmaxCrossEntropy(self):
        max_epoch = 0
        batch_size = 30
        hidden_size = 10
        lr = 1.0

        train_set = Spiral(train=True)
        test_set = Spiral(train=False)
        train_loader = DataLoader(train_set, batch_size)
        test_loader = DataLoader(test_set, batch_size, shuffle=False)

        model = MLP((hidden_size, hidden_size, hidden_size, 3))
        optimizer = optimizers.SGD(lr).setup(model)

        for epoch in range(max_epoch):
            sum_loss, sum_acc = 0, 0

            for x, t in train_loader:
                y = model(x)
                loss = F.softmax_cross_entropy(y, t)
                acc = F.accuracy(y, t)
                model.cleargrads()
                loss.backward()

                optimizer.update()

                sum_loss += float(loss.data) * len(t)
                sum_acc += float(acc.data) * len(t)

            print('epoch: {}'.format(epoch + 1))
            print('train loss: {:.4f}, accuracy: {:.4f}'.format(
                sum_loss / len(train_set), sum_acc / len(train_set)))

            sum_loss, sum_acc = 0, 0
            with dezero.no_grad():
                for x, t in test_loader:
                    y = model(x)
                    loss = F.softmax_cross_entropy(y, t)
                    acc = F.accuracy(y, t)
                    sum_loss += float(loss.data) * len(t)
                    sum_acc += float(acc.data) * len(t)

            print('test loss: {:.4f}, accuracy: {:.4f}'.format(
                sum_loss / len(test_set), sum_acc / len(test_set)))
Example #6
    def test_forward(self):
        x = Variable(
            np.array([[0.2, -0.4], [0.3, 0.5], [1.3, -3.2], [2.1, 0.3]]))
        t = np.array([1, 0, 1, 0])
        y = F.softmax_cross_entropy(x, t)
        y_expected = np.exp(x.data) / \
            np.exp(x.data).sum(axis=1, keepdims=True)
        y_expected = -np.log(y_expected)
        y_expected = y_expected[np.arange(4), t].sum() / 4
        assert_almost_equal(y.data, y_expected, decimal=5)
Example #7
    def test_backward(self):
        x = Variable(
            np.array([[0.2, -0.4], [0.3, 0.5], [1., -3.2], [2.1, 0.3]]))
        t = np.array([1, 0, 1, 0])
        t_onehot = np.eye(2)[t]
        L = F.softmax_cross_entropy(x, t)
        L.backward()
        m = len(x.data)

        y = np.exp(x.data) / np.exp(x.data).sum(axis=1, keepdims=True)
        gx_expected = 1 / m * (y - t_onehot)
        assert_almost_equal(x.grad.data, gx_expected)
Example #8
optimizer = optimizers.SGD(lr).setup(model)

data_size = len(x)
max_iter = math.ceil(data_size / batch_size)

for epoch in range(max_epoch):
    index = np.random.permutation(data_size)
    sum_loss = 0

    for i in range(max_iter):
        batch_index = index[i * batch_size:(i + 1) * batch_size]
        batch_x = x[batch_index]
        batch_t = t[batch_index]

        y = model(batch_x)
        loss = F.softmax_cross_entropy(y, batch_t)
        model.cleargrads()
        loss.backward()
        optimizer.update()

        sum_loss += float(loss.data) * len(batch_t)

    avg_loss = sum_loss / data_size
    print('epoch %d, loss %.2f' % (epoch + 1, avg_loss))

# Plot the decision boundary predicted by the model
h = 0.001
x_min, x_max = x[:, 0].min() - .1, x[:, 0].max() + .1
y_min, y_max = x[:, 1].min() - .1, x[:, 1].max() + .1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
X = np.c_[xx.ravel(), yy.ravel()]
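The grid points in X can then be classified and drawn with matplotlib. A possible continuation, assuming the model, x, t, xx, and yy defined above (a sketch, not part of the original snippet):

import matplotlib.pyplot as plt

with dezero.no_grad():
    score = model(X)
predict_cls = np.argmax(score.data, axis=1)
Z = predict_cls.reshape(xx.shape)

plt.contourf(xx, yy, Z)                   # colored decision regions
plt.scatter(x[:, 0], x[:, 1], c=t, s=40)  # original training points
plt.show()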
# stored at /root/.dezero/t10k-images-idx3-ubyte.gz
train_set = dezero.datasets.MNIST(train=True)
test_set = dezero.datasets.MNIST(train=False)
train_loader = DataLoader(train_set, batch_size)
test_loader = DataLoader(test_set, batch_size, shuffle=False)

# model = MLP((hidden_size, 10))
model = MLP((hidden_size, hidden_size, 10), activation=F.relu)
optimizer = SGD().setup(model)

for epoch in range(max_epoch):
    sum_loss, sum_acc = 0, 0

    for x, t in train_loader:
        y = model(x)
        loss = F.softmax_cross_entropy(y, t)
        acc = F.accuracy(y, t)
        model.cleargrads()
        loss.backward()
        optimizer.update()

        sum_loss += float(loss.data) * len(t)
        sum_acc += float(acc.data) * len(t)

    print("epoch: {}".format(epoch + 1))
    print("train loss: {:.4f}, accuracy: {:.4f}".format(
        sum_loss / len(train_set), sum_acc / len(train_set)))

    sum_loss, sum_acc = 0, 0
    with dezero.no_grad():  # no-gradient mode
        for x, t in test_loader:  # mini-batches of test data
    def test_backward3(self):
        N, CLS_NUM = 100, 10
        x = np.random.randn(N, CLS_NUM)
        t = np.random.randint(0, CLS_NUM, (N, ))
        f = lambda x: F.softmax_cross_entropy(x, t)
        self.assertTrue(gradient_check(f, x))

    def test_backward1(self):
        x = np.array([[-1, 0, 1, 2], [2, 0, 1, -1]], np.float32)
        t = np.array([3, 0]).astype(np.int32)
        f = lambda x: F.softmax_cross_entropy(x, Variable(t))
        self.assertTrue(gradient_check(f, x))
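Both tests rely on gradient_check, which compares the gradient from backward() with a numerical estimate. A rough central-difference version of that idea (an illustrative sketch, not dezero's exact implementation):

def numerical_grad_ref(f, x, eps=1e-4):
    grad = np.zeros_like(x)
    for idx in np.ndindex(x.shape):
        tmp = x[idx]
        x[idx] = tmp + eps
        y1 = float(f(x).data)
        x[idx] = tmp - eps
        y2 = float(f(x).data)
        x[idx] = tmp  # restore the original value
        grad[idx] = (y1 - y2) / (2 * eps)
    return grad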
Example #12
    def test_dont_divide_by_zero(self):
        x = np.array([[1., 0., 300000., 400000.]])
        t = np.array([1])
        L = F.softmax_cross_entropy(x, t)
        self.assertFalse(np.isnan(L.data) or np.isinf(L.data))
Example #13
    def test_dont_overflow(self):
        x = Variable(np.array([[1.0, 100.0, 10000.0]]))
        t = np.array([0])
        L = F.softmax_cross_entropy(x, t)
        self.assertFalse(np.isnan(L.data) or np.isinf(L.data))
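These two tests pass when softmax cross-entropy is computed in a numerically stable form: the row maximum is subtracted before exponentiation, so exp() never sees huge values. A minimal sketch of the standard trick (illustrative only):

def stable_log_softmax(x):
    # log-softmax via the log-sum-exp trick
    m = x.max(axis=1, keepdims=True)
    return x - m - np.log(np.exp(x - m).sum(axis=1, keepdims=True))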