def __init__(self):
        self.gamma = 0.9        # discount factor for future rewards
        self.lr = 0.01          # learning rate
        self.epsilon = 0.1      # exploration rate for the epsilon-greedy policy
        self.action_size = 4    # number of discrete actions

        self.qnet = QNet()                         # Q-function approximator
        self.optimizer = optimizers.SGD(self.lr)   # plain SGD on the Q-net's parameters
        self.optimizer.setup(self.qnet)
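
The fields above are the standard Q-learning knobs. For context, here is a minimal sketch of an epsilon-greedy action picker that could sit next to this constructor; get_action, the state argument, and the numpy import are assumptions, not part of the snippet above.

import numpy as np

def get_action(self, state):
        # With probability epsilon, explore a uniformly random action (a sketch)...
        if np.random.rand() < self.epsilon:
            return np.random.choice(self.action_size)
        # ...otherwise act greedily on the current Q-value estimates.
        qs = self.qnet(state)
        return int(qs.data.argmax())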
Example 2
    def test_MNIST(self):
        max_epoch = 5
        batch_size = 100
        hidden_size = 1000

        train_set = dezero.datasets.MNIST(train=True)
        test_set = dezero.datasets.MNIST(train=False)
        train_loader = DataLoader(train_set, batch_size)
        test_loader = DataLoader(test_set, batch_size, shuffle=False)

        #model = MLP((hidden_size, 10))
        model = MLP((hidden_size, hidden_size, 10), activation=F.relu)
        optimizer = optimizers.SGD().setup(model)

        if dezero.cuda.gpu_enable:
            train_loader.to_gpu()
            test_loader.to_gpu()  # keep the eval data on the same device as the model
            model.to_gpu()

        for epoch in range(max_epoch):
            sum_loss, sum_acc = 0, 0
            for x, t in train_loader:
                y = model(x)
                loss = F.softmax_cross_entropy(y, t)
                acc = F.accuracy(y, t)
                model.cleargrads()
                loss.backward()
                optimizer.update()

                sum_loss += float(loss.data) * len(t)
                sum_acc += float(acc.data) * len(t)

            print('epoch: {}'.format(epoch + 1))
            print('train loss: {:.4f}, accuracy: {:.4f}'.format(
                sum_loss / len(train_set), sum_acc / len(train_set)))

            sum_loss, sum_acc = 0, 0
            with dezero.no_grad():
                for x, t in test_loader:
                    y = model(x)
                    loss = F.softmax_cross_entropy(y, t)
                    acc = F.accuracy(y, t)
                    sum_loss += float(loss.data) * len(t)
                    sum_acc += float(acc.data) * len(t)

                print('test loss: {:.4f}, accuracy: {:.4f}'.format(
                    sum_loss / len(test_set), sum_acc / len(test_set)))
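
For reference, a quick sketch of what DataLoader yields per step; the shapes are illustrative and assume dezero's default MNIST transform, which flattens each image to 784 floats.

loader = DataLoader(train_set, batch_size)
x, t = next(iter(loader))
print(x.shape)  # e.g. (100, 784) float inputs
print(t.shape)  # e.g. (100,) integer class labels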
    def test_SGD(self):
        np.random.seed(0)
        x = np.random.rand(100, 1)
        y = np.sin(2 * np.pi * x) + np.random.rand(100, 1)

        lr = 0.2           # same settings as the test_sgd example below
        max_iters = 10000
        hidden_size = 10

        model = MLP((hidden_size, 1))
        optimizer = optimizers.SGD(lr)
        optimizer.setup(model)
        for i in range(max_iters):
            y_pred = model(x)
            loss = F.mean_squared_error(y, y_pred)

            model.cleargrads()
            loss.backward()
            optimizer.update()
        assert True  # smoke test: passes if training completes without error
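
In dezero, MLP((hidden_size, 1)) stacks fully connected layers from the given output sizes. A roughly equivalent hand-written model looks like the sketch below; it assumes MLP's default sigmoid activation (the MNIST test above overrides it with F.relu).

from dezero import Model
import dezero.functions as F
import dezero.layers as L

class TwoLayerNet(Model):
    def __init__(self, hidden_size, out_size):
        super().__init__()
        self.l1 = L.Linear(hidden_size)  # hidden layer
        self.l2 = L.Linear(out_size)     # linear output for regression

    def forward(self, x):
        y = F.sigmoid(self.l1(x))
        return self.l2(y)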
Example 4
    def test_SoftmaxCrossEntropy(self):
        max_epoch = 1  # run at least one epoch so the loop body actually executes
        batch_size = 30
        hidden_size = 10
        lr = 1.0

        train_set = Spiral(train=True)
        test_set = Spiral(train=False)
        train_loader = DataLoader(train_set, batch_size)
        test_loader = DataLoader(test_set, batch_size, shuffle=False)

        model = MLP((hidden_size, hidden_size, hidden_size, 3))
        optimizer = optimizers.SGD(lr).setup(model)

        for epoch in range(max_epoch):
            sum_loss, sum_acc = 0, 0

            for x, t in train_loader:
                y = model(x)
                loss = F.softmax_cross_entropy(y, t)
                acc = F.accuracy(y, t)
                model.cleargrads()
                loss.backward()

                optimizer.update()

                sum_loss += float(loss.data) * len(t)
                sum_acc += float(acc.data) * len(t)

            print('epoch: {}'.format(epoch + 1))
            print('train loss: {:.4f}, accuracy: {:.4f}'.format(
                sum_loss / len(train_set), sum_acc / len(train_set)))

            sum_loss, sum_acc = 0, 0
            with dezero.no_grad():
                for x, t in test_loader:
                    y = model(x)
                    loss = F.softmax_cross_entropy(y, t)
                    acc = F.accuracy(y, t)
                    sum_loss += float(loss.data) * len(t)
                    sum_acc += float(acc.data) * len(t)

            print('test loss: {:.4f}, accuracy: {:.4f}'.format(
                sum_loss / len(test_set), sum_acc / len(test_set)))
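
As orientation for the numbers above, the Spiral toy dataset is tiny; a quick shape check could look like this (sizes assume dezero's defaults of 100 points per class and 3 classes).

x, t = dezero.datasets.get_spiral(train=True)
print(x.shape)  # e.g. (300, 2) 2-D points
print(t.shape)  # e.g. (300,) labels in {0, 1, 2}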
Example 5
    def test_sgd(self):
        np.random.seed(0)
        x = np.random.rand(100, 1)
        y = np.sin(2 * np.pi * x) + np.random.rand(100, 1)

        lr = 0.2
        max_iter = 10000
        hidden_size = 10

        model = models.MLP((hidden_size, 1))
        optimizer = optimizers.SGD(lr)
        optimizer.setup(model)

        for i in range(max_iter):
            y_pred = model(x)
            loss = F.mean_squared_error(y, y_pred)

            model.cleargrads()
            loss.backward()

            optimizer.update()
            if i % 1000 == 0:
                print(loss)
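
Swapping in a different optimizer only touches the construction line; the setup()/update() protocol stays the same. A sketch with dezero's MomentumSGD (the momentum value is its usual default, stated here as an assumption):

optimizer = optimizers.MomentumSGD(lr, momentum=0.9)
optimizer.setup(model)  # same two-step binding as with plain SGD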
Example 6
import math

import matplotlib.pyplot as plt
import numpy as np

import dezero
import dezero.functions as F
from dezero import optimizers
from dezero.models import MLP

max_epoch = 300
batch_size = 30
hidden_size = 10
lr = 1.0

x, t = dezero.datasets.get_spiral(train=True)
model = MLP((hidden_size, 3))
optimizer = optimizers.SGD(lr).setup(model)

data_size = len(x)
max_iter = math.ceil(data_size / batch_size)

for epoch in range(max_epoch):
    index = np.random.permutation(data_size)
    sum_loss = 0

    for i in range(max_iter):
        batch_index = index[i * batch_size:(i + 1) * batch_size]
        batch_x = x[batch_index]
        batch_t = t[batch_index]

        y = model(batch_x)
        loss = F.softmax_cross_entropy(y, batch_t)

        model.cleargrads()
        loss.backward()
        optimizer.update()

        sum_loss += float(loss.data) * len(batch_t)

    # Report the average loss once per epoch
    avg_loss = sum_loss / data_size
    print('epoch %d, loss %.2f' % (epoch + 1, avg_loss))
Example 7

import matplotlib.pyplot as plt
import numpy as np

import dezero.functions as F
from dezero import Variable, optimizers
from dezero.models import MLP

np.random.seed(0)
x = np.random.rand(100, 1)
y = np.sin(2 * np.pi * x) + np.random.rand(100, 1)
x, y = Variable(x), Variable(y)

lr = 0.2
iters = 10000

hidden_size = 10
model = MLP((hidden_size, 1))
optimizer = optimizers.SGD(lr)
optimizer.setup(model)

for i in range(iters):
    y_pred = model(x)
    loss = F.mean_squared_error(y, y_pred)

    model.cleargrads()
    loss.backward()

    optimizer.update()
    if i % 1000 == 0:
        print(loss)

# Plot the data and the fitted curve
plt.scatter(x.data, y.data, s=10)
plt.xlabel('x')
plt.ylabel('y')
t = np.arange(0, 1, .01)[:, np.newaxis]
y_pred = model(t)
plt.plot(t, y_pred.data, color='r')
plt.show()