# Assumes numpy as np, matplotlib.pyplot as plt, scipy.stats.norm and dezero are
# imported, and that a trained `vae` and a `use_gpu` flag are defined by the script.
def show_digits(epoch=0):
    """Display a 2D manifold of digits decoded from the VAE's 2D latent space."""
    n = 15  # 15x15 digits
    digit_size = 28
    figure = np.zeros((digit_size * n, digit_size * n))
    grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
    grid_y = norm.ppf(np.linspace(0.05, 0.95, n))

    for i, yi in enumerate(grid_y):
        for j, xi in enumerate(grid_x):
            z_sample = np.array([[xi, yi]])
            if use_gpu:
                z_sample = dezero.cuda.as_cupy(z_sample)
            with dezero.no_grad():
                x_decoded = vae.decoder(z_sample)
            if use_gpu:
                x_decoded.data = dezero.cuda.as_numpy(x_decoded.data)
            digit = x_decoded.data.reshape(digit_size, digit_size)
            figure[i * digit_size: (i + 1) * digit_size,
                   j * digit_size: (j + 1) * digit_size] = digit

    plt.figure(figsize=(10, 10))
    plt.axis('off')
    plt.imshow(figure, cmap='Greys_r')
    plt.show()
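norm.ppf is SciPy's inverse CDF for the standard normal, so the grid points are
spaced by equal probability mass under the VAE's Gaussian prior rather than by equal
distance. With n=3 for brevity:

>>> import numpy as np
>>> from scipy.stats import norm
>>> norm.ppf(np.linspace(0.05, 0.95, 3))
array([-1.64485363,  0.        ,  1.64485363])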
Example #2
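Verifies the no_grad() switch: backprop is enabled by default (a deep graph is built
and backpropagated), disabled inside the with-block, and restored on exit.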
    def test_config(self):
        # training mode (backprop enabled by default)
        self.assertTrue(Config.enable_backprop)
        x = Variable(np.ones((100, 100, 100)))
        y = square(square(square(x)))
        y.backward()
        with no_grad():
            x = Variable(np.array(2.0))
            y = square(x)
            self.assertFalse(Config.enable_backprop)

        self.assertTrue(Config.enable_backprop)
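For reference, a switch like the one this test exercises can be written as a context
manager that temporarily rewrites a class attribute. Below is a minimal self-contained
sketch reusing the Config / no_grad names from the tests; it illustrates the mechanism
and is not claimed to be the library source verbatim:

import contextlib

class Config:
    enable_backprop = True

@contextlib.contextmanager
def using_config(name, value):
    old_value = getattr(Config, name)
    setattr(Config, name, value)
    try:
        yield
    finally:
        setattr(Config, name, old_value)  # restored even if the block raises

def no_grad():
    return using_config('enable_backprop', False)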
Example #3
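An end-to-end MNIST test: a two-hidden-layer ReLU MLP trained with SGD for five
epochs, moved to the GPU when one is available, and evaluated under dezero.no_grad()
after every epoch.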
    def test_MNIST(self):
        max_epoch = 5
        batch_size = 100
        hidden_size = 1000

        train_set = dezero.datasets.MNIST(train=True)
        test_set = dezero.datasets.MNIST(train=False)
        train_loader = DataLoader(train_set, batch_size)
        test_loader = DataLoader(test_set, batch_size, shuffle=False)

        #model = MLP((hidden_size, 10))
        model = MLP((hidden_size, hidden_size, 10), activation=F.relu)
        optimizer = optimizers.SGD().setup(model)

        if dezero.cuda.gpu_enable:
            train_loader.to_gpu()
            test_loader.to_gpu()  # keep eval batches on the same device as the model
            model.to_gpu()

        for epoch in range(max_epoch):
            sum_loss, sum_acc = 0, 0
            for x, t in train_loader:
                y = model(x)
                loss = F.softmax_cross_entropy(y, t)
                acc = F.accuracy(y, t)
                model.cleargrads()
                loss.backward()
                optimizer.update()

                sum_loss += float(loss.data) * len(t)
                sum_acc += float(acc.data) * len(t)

            print('epoch: {}'.format(epoch + 1))
            print('train loss: {:.4f}, accuracy: {:.4f}'.format(
                sum_loss / len(train_set), sum_acc / len(train_set)))

            sum_loss, sum_acc = 0, 0
            with dezero.no_grad():
                for x, t in test_loader:
                    y = model(x)
                    loss = F.softmax_cross_entropy(y, t)
                    acc = F.accuracy(y, t)
                    sum_loss += float(loss.data) * len(t)
                    sum_acc += float(acc.data) * len(t)

                print('test loss: {:.4f}, accuracy: {:.4f}'.format(
                    sum_loss / len(test_set), sum_acc / len(test_set)))
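The no_grad() evaluation loop above reappears almost verbatim in the examples below.
A small helper (hypothetical, not part of DeZero, built only from calls already used
in this test) would factor it out:

def evaluate(model, loader, dataset_size):
    # Average loss and accuracy over a DataLoader, with backprop disabled.
    sum_loss, sum_acc = 0, 0
    with dezero.no_grad():
        for x, t in loader:
            y = model(x)
            sum_loss += float(F.softmax_cross_entropy(y, t).data) * len(t)
            sum_acc += float(F.accuracy(y, t).data) * len(t)
    return sum_loss / dataset_size, sum_acc / dataset_size

test_loss, test_acc = evaluate(model, test_loader, len(test_set))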
Example #4
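The same train/evaluate pattern applied to the Spiral toy dataset with a
three-hidden-layer MLP; note that max_epoch is 0, so the epoch loop never runs.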
    def test_SoftmaxCrossEntropy(self):
        max_epoch = 0  # zero epochs: the training loop below is skipped
        batch_size = 30
        hidden_size = 10
        lr = 1.0

        train_set = Spiral(train=True)
        test_set = Spiral(train=False)
        train_loader = DataLoader(train_set, batch_size)
        test_loader = DataLoader(test_set, batch_size, shuffle=False)

        model = MLP((hidden_size, hidden_size, hidden_size, 3))
        optimizer = optimizers.SGD(lr).setup(model)

        for epoch in range(max_epoch):
            sum_loss, sum_acc = 0, 0

            for x, t in train_loader:
                y = model(x)
                loss = F.softmax_cross_entropy(y, t)
                acc = F.accuracy(y, t)
                model.cleargrads()
                loss.backward()

                optimizer.update()

                sum_loss += float(loss.data) * len(t)
                sum_acc += float(acc.data) * len(t)

            print('epoch: {}'.format(epoch + 1))
            print('train loss: {:.4f}, accuracy: {:.4f}'.format(
                sum_loss / len(train_set), sum_acc / len(train_set)))

            sum_loss, sum_acc = 0, 0
            with dezero.no_grad():
                for x, t in test_loader:
                    y = model(x)
                    loss = F.softmax_cross_entropy(y, t)
                    acc = F.accuracy(y, t)
                    sum_loss += float(loss.data) * len(t)
                    sum_acc += float(acc.data) * len(t)

            print('test loss: {:.4f}, accuracy: {:.4f}'.format(
                sum_loss / len(test_set), sum_acc / len(test_set)))
Example #5
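A fragment of a Spiral training script: the tail of the mini-batch loop, followed by
decision-region plotting. model, optimizer, x, t, batch_t, data_size and max_epoch
are defined in the part of the script not shown here.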
        model.cleargrads()
        loss.backward()
        optimizer.update()

        sum_loss += float(loss.data) * len(batch_t)

    avg_loss = sum_loss / data_size
    print('epoch %d, loss %.2f' % (epoch + 1, avg_loss))

# Plot the decision regions predicted by the model
h = 0.001
x_min, x_max = x[:, 0].min() - .1, x[:, 0].max() + .1
y_min, y_max = x[:, 1].min() - .1, x[:, 1].max() + .1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
X = np.c_[xx.ravel(), yy.ravel()]

with dezero.no_grad():
    score = model(X)
predict_cls = np.argmax(score.data, axis=1)
Z = predict_cls.reshape(xx.shape)
plt.contourf(xx, yy, Z)

# Plot data points of the dataset
N, CLS_NUM = 100, 3
markers = ['o', 'x', '^']
colors = ['orange', 'blue', 'green']
for i in range(len(x)):
    c = t[i]
    plt.scatter(x[i][0], x[i][1], s=40, marker=markers[c], c=colors[c])
plt.show()
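np.meshgrid plus np.c_[xx.ravel(), yy.ravel()] flattens the grid into a single
(N, 2) batch, so one model(X) call under no_grad() scores every grid point at once;
argmax over the class axis and a reshape back to the grid shape give the regions
that contourf fills.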
Example #6
for epoch in range(max_epoch):
    sum_loss, sum_acc = 0, 0

    for x, t in train_loader:
        y = model(x)
        loss = F.softmax_cross_entropy(y, t)
        acc = F.accuracy(y, t)
        model.cleargrads()
        loss.backward()
        optimizer.update()

        sum_loss += float(loss.data) * len(t)
        sum_acc += float(acc.data) * len(t)

    print("epoch: {}".format(epoch + 1))
    print("train loss: {:.4f}, accuracy: {:.4f}".format(
        sum_loss / len(train_set), sum_acc / len(train_set)))

    sum_loss, sum_acc = 0, 0
    with dezero.no_grad():  # gradients not needed during evaluation
        for x, t in test_loader:  # mini-batches of test data
            y = model(x)
            loss = F.softmax_cross_entropy(y, t)
            acc = F.accuracy(y, t)  # recognition accuracy on the test data
            sum_loss += float(loss.data) * len(t)
            sum_acc += float(acc.data) * len(t)

    print("test loss: {:.4f}, accuracy: {:.4f}".format(
        sum_loss / len(test_set), sum_acc / len(test_set)))
Example #7
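The minimal form of the check in Example #2: inside the context manager,
Config.enable_backprop must be False.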
    def test_no_grad(self):
        with no_grad():
            assert not Config.enable_backprop
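Unlike Example #2, this test never checks that the flag is restored after the block;
a follow-up assert Config.enable_backprop outside the with-block would cover the
cleanup path as well.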
Example #8
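The same epoch loop as Example #6 with English comments: train on train_loader, then
score test_loader with gradients disabled.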
for epoch in range(max_epoch):
    sum_loss, sum_acc = 0, 0

    for x, t in train_loader:
        y = model(x)
        loss = F.softmax_cross_entropy(y, t)
        acc = F.accuracy(y, t)
        model.cleargrads()
        loss.backward()
        optimizer.update()

        sum_loss += float(loss.data) * len(t)
        sum_acc += float(acc.data) * len(t)

    print('epoch: {}'.format(epoch + 1))
    print('train loss: {:.4f}, accuracy: {:.4f}'.format(
        sum_loss / len(train_set), sum_acc / len(train_set)))

    sum_loss, sum_acc = 0, 0
    with dezero.no_grad():  # when gradients are not needed
        for x, t in test_loader:
            y = model(x)
            loss = F.softmax_cross_entropy(y, t)
            acc = F.accuracy(y, t)
            sum_loss += float(loss.data) * len(t)
            sum_acc += float(acc.data) * len(t)

    print('test loss: {:.4f}, accuracy: {:.4f}'.format(
        sum_loss / len(test_set), sum_acc / len(test_set)))