Example #1
import dezero
from dezero import Model
import dezero.functions as F
import dezero.layers as L
from dezero import optimizers
from dezero.dataset import DatasetLoader

max_epoch = 20
batch_size = 100
hidden_size = 1000

train_set, test_set = dezero.datasets.get_mnist()
train_loader = DatasetLoader(train_set, batch_size)
test_loader = DatasetLoader(test_set, batch_size, shuffle=False)


class TwoLayerNet(Model):
    def __init__(self, in_size, hidden_size, out_size, activation=F.sigmoid):
        super().__init__()
        self.f = activation
        self.l1 = L.Linear(in_size, hidden_size)
        self.l2 = L.Linear(hidden_size, out_size)

    def __call__(self, x):
        y = self.f(self.l1(x))
        y = self.l2(y)
        return y


model = TwoLayerNet(784, hidden_size, 10)
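
The listing for Example #1 stops right after the model is constructed, even though the optimizer import and the two loaders suggest it continued with a standard train/test loop. A minimal sketch of that continuation, assuming softmax cross-entropy with SGD as in the later examples, and assuming the installed dezero provides F.accuracy and dezero.no_grad() for the evaluation pass:

optimizer = optimizers.SGD().setup(model)

for epoch in range(max_epoch):
    sum_loss = 0
    for x, t in train_loader:
        y = model(x)                          # forward pass
        loss = F.softmax_cross_entropy(y, t)  # classification loss
        model.cleargrads()                    # reset accumulated gradients
        loss.backward()                       # backpropagation
        optimizer.update()                    # SGD parameter update
        sum_loss += float(loss.data) * len(t)

    # evaluate on the held-out set without building a computational graph
    sum_acc = 0
    with dezero.no_grad():
        for x, t in test_loader:
            y = model(x)
            acc = F.accuracy(y, t)
            sum_acc += float(acc.data) * len(t)

    print('epoch: {}, train loss: {:.4f}, test acc: {:.4f}'.format(
        epoch + 1, sum_loss / len(train_set), sum_acc / len(test_set)))
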
Example #2
import time
import dezero
import dezero.functions as F
from dezero import optimizers
from dezero.dataset import DatasetLoader
from dezero.models import TwoLayerNet

max_epoch = 3
batch_size = 100

train_set, test_set = dezero.datasets.get_mnist()
train_loader = DatasetLoader(train_set, batch_size)
model = TwoLayerNet(784, 1000, 10)
optimizer = optimizers.SGD().setup(model)

# GPU mode
train_loader.to_gpu()
model.to_gpu()
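# (Both calls assume CuPy is installed and a GPU is available; on a CPU-only
#  machine they would typically be skipped or guarded by a CuPy-availability
#  check such as dezero.cuda.gpu_enable, if the installed version exposes it.)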

for epoch in range(max_epoch):
    start = time.time()
    sum_loss = 0

    for x, t in train_loader:
        y = model(x)
        loss = F.softmax_cross_entropy(y, t)
        model.cleargrads()
        loss.backward()
        optimizer.update()
        sum_loss += float(loss.data) * len(t)
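
    # The listing is cut off here; each epoch presumably ends by reporting the
    # average loss together with the time elapsed since `start`, e.g.:
    elapsed = time.time() - start
    print('epoch: {}, loss: {:.4f}, time: {:.4f}[sec]'.format(
        epoch + 1, sum_loss / len(train_set), elapsed))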
Example #3
import dezero
import dezero.functions as F
from dezero import optimizers
from dezero.dataset import DatasetLoader
from dezero.models import TwoLayerNet

max_epoch = 3
batch_size = 100

train_set, test_set = dezero.datasets.get_mnist()
train_loader = DatasetLoader(train_set, batch_size)
model = TwoLayerNet(784, 1000, 10)
optimizer = optimizers.SGD().setup(model)

# Load the saved parameters
model.load_weights('two_layer_net.npz')

for epoch in range(max_epoch):
    sum_loss = 0

    for x, t in train_loader:
        y = model(x)
        loss = F.softmax_cross_entropy(y, t)
        model.cleargrads()
        loss.backward()
        optimizer.update()
        sum_loss += float(loss.data) * len(t)

    print('epoch: {}, loss: {:.4f}'.format(epoch + 1,
                                           sum_loss / len(train_set)))
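
Example #3 presumes that 'two_layer_net.npz' already exists on disk. A minimal sketch of the companion step, assuming the model also exposes save_weights as the counterpart of the load_weights call above, and guarding the load so that a first run still works:

import os

# load previously saved parameters only if the file is already there
if os.path.exists('two_layer_net.npz'):
    model.load_weights('two_layer_net.npz')

# ... run the training loop shown above ...

# persist the trained parameters for the next run
model.save_weights('two_layer_net.npz')
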
Example #4
from dezero.datasets import get_mnist
from dezero.dataset import DatasetLoader

batch_size = 10
max_epoch = 1

train, test = get_mnist()
train_loader = DatasetLoader(train, batch_size)
test_loader = DatasetLoader(test, batch_size, shuffle=False)

for epoch in range(max_epoch):
    for x, t in train_loader:
        print(x.shape, t.shape)
        break

    for x, t in test_loader:
        print(x.shape, t.shape)
        break


def preprocess(x):
    x = x.reshape(1, 28, 28)  # reshape the flat 784-vector into a 1x28x28 image
    x *= 255.0                # rescale the pixel values
    return x


train_loader = DatasetLoader(train, batch_size, preprocess=preprocess)
x, t = next(train_loader)
print(x.shape, t.shape)
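
Since the preprocess callback runs per sample, it is not limited to reshaping; a hypothetical variant that keeps the flat 784-vector but shifts the pixel values, assuming each sample arrives as a NumPy float array scaled to [0, 1] (as the `x *= 255.0` rescaling above suggests):

import numpy as np


def preprocess_centered(x):
    x = x.astype(np.float32)
    x = (x - 0.5) / 0.5  # map [0, 1] pixel values to [-1, 1]
    return x


train_loader = DatasetLoader(train, batch_size, preprocess=preprocess_centered)
x, t = next(train_loader)
print(x.shape, t.shape)  # expected: (10, 784) (10,), now with values in [-1, 1]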