Example #1
    def test_MNIST(self):
        max_epoch = 5
        batch_size = 100
        hidden_size = 1000

        train_set = dezero.datasets.MNIST(train=True)
        test_set = dezero.datasets.MNIST(train=False)
        train_loader = DataLoader(train_set, batch_size)
        test_loader = DataLoader(test_set, batch_size, shuffle=False)

        #model = MLP((hidden_size, 10))
        model = MLP((hidden_size, hidden_size, 10), activation=F.relu)
        optimizer = optimizers.SGD().setup(model)

        if dezero.cuda.gpu_enable:
            train_loader.to_gpu()
            model.to_gpu()

        for epoch in range(max_epoch):
            sum_loss, sum_acc = 0, 0
            for x, t in train_loader:
                y = model(x)
                loss = F.softmax_cross_entropy(y, t)
                acc = F.accuracy(y, t)
                model.cleargrads()
                loss.backward()
                optimizer.update()

                sum_loss += float(loss.data) * len(t)
                sum_acc += float(acc.data) * len(t)

            print('epoch: {}'.format(epoch + 1))
            print('train loss: {:.4f}, accuracy: {:.4f}'.format(
                sum_loss / len(train_set), sum_acc / len(train_set)))

            sum_loss, sum_acc = 0, 0
            with dezero.no_grad():
                for x, t in test_loader:
                    y = model(x)
                    loss = F.softmax_cross_entropy(y, t)
                    acc = F.accuracy(y, t)
                    sum_loss += float(loss.data) * len(t)
                    sum_acc += float(acc.data) * len(t)

                print('test loss: {:.4f}, accuracy: {:.4f}'.format(
                    sum_loss / len(test_set), sum_acc / len(test_set)))
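Once trained, the same dezero.no_grad() context serves plain inference. A
minimal sketch of classifying a single test image (hypothetical usage,
assuming numpy is imported as np and the model stayed on the CPU):

import numpy as np

x, t = test_set[0]                    # one flattened 28x28 image and its label
with dezero.no_grad():
    y = model(x[np.newaxis, :])       # add a batch axis: shape (1, 784)
pred = int(y.data.argmax(axis=1)[0])  # class with the largest logit
print('predicted: {}, actual: {}'.format(pred, t))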
Example #2
    def test_SoftmaxCrossEntropy(self):
        max_epoch = 0  # with 0 epochs the loop below is skipped; raise this to actually train
        batch_size = 30
        hidden_size = 10
        lr = 1.0

        train_set = Spiral(train=True)
        test_set = Spiral(train=False)
        train_loader = DataLoader(train_set, batch_size)
        test_loader = DataLoader(test_set, batch_size, shuffle=False)

        model = MLP((hidden_size, hidden_size, hidden_size, 3))
        optimizer = optimizers.SGD(lr).setup(model)

        for epoch in range(max_epoch):
            sum_loss, sum_acc = 0, 0

            for x, t in train_loader:
                y = model(x)
                loss = F.softmax_cross_entropy(y, t)
                acc = F.accuracy(y, t)
                model.cleargrads()
                loss.backward()

                optimizer.update()

                sum_loss += float(loss.data) * len(t)
                sum_acc += float(acc.data) * len(t)

            print('epoch: {}'.format(epoch + 1))
            print('train loss: {:.4f}, accuracy: {:.4f}'.format(
                sum_loss / len(train_set), sum_acc / len(train_set)))

            sum_loss, sum_acc = 0, 0
            with dezero.no_grad():
                for x, t in test_loader:
                    y = model(x)
                    loss = F.softmax_cross_entropy(y, t)
                    acc = F.accuracy(y, t)
                    sum_loss += float(loss.data) * len(t)
                    sum_acc += float(acc.data) * len(t)

            print('test loss: {:.4f}, accuracy: {:.4f}'.format(
                sum_loss / len(test_set), sum_acc / len(test_set)))
Example #3
import dezero
import dezero.functions as F

from dezero import DataLoader
from dezero.models import MLP
from dezero.optimizers import SGD

# Training on the MNIST dataset

max_epoch = 5
batch_size = 100
hidden_size = 1000

# The downloaded files are cached at /root/.dezero/t10k-images-idx3-ubyte.gz etc.
train_set = dezero.datasets.MNIST(train=True)
test_set = dezero.datasets.MNIST(train=False)
train_loader = DataLoader(train_set, batch_size)
test_loader = DataLoader(test_set, batch_size, shuffle=False)

# model = MLP((hidden_size, 10))
model = MLP((hidden_size, hidden_size, 10), activation=F.relu)
optimizer = SGD().setup(model)

for epoch in range(max_epoch):
    sum_loss, sum_acc = 0, 0

    for x, t in train_loader:
        y = model(x)
        loss = F.softmax_cross_entropy(y, t)
        acc = F.accuracy(y, t)
        model.cleargrads()
        loss.backward()
        optimizer.update()

        sum_loss += float(loss.data) * len(t)
        sum_acc += float(acc.data) * len(t)

    print('epoch: {}, train loss: {:.4f}, accuracy: {:.4f}'.format(
        epoch + 1, sum_loss / len(train_set), sum_acc / len(train_set)))
Example #4
import numpy as np

import dezero
import dezero.functions as F
from dezero import DataLoader
from dezero.optimizers import Adam

# gen (generator), dis (discriminator), hidden_size, batch_size, max_epoch
# and use_gpu come from the truncated part of the original script.


def init_weight(dis, gen, hidden_size):
    # Run one dummy batch through both networks so their lazily created
    # weights exist, then re-initialize them with N(0, 0.02) noise.
    z = np.random.rand(1, hidden_size)
    fake_images = gen(z)
    dis(fake_images)

    for l in dis.layers + gen.layers:
        classname = l.__class__.__name__
        if classname.lower() in ('conv2d', 'linear', 'deconv2d'):
            l.W.data = 0.02 * np.random.randn(*l.W.data.shape)


init_weight(dis, gen, hidden_size)

opt_g = Adam(alpha=0.0002, beta1=0.5).setup(gen)
opt_d = Adam(alpha=0.0002, beta1=0.5).setup(dis)

transform = lambda x: (x / 255.0).astype(np.float32)
train_set = dezero.datasets.MNIST(train=True, transform=transform)
train_loader = DataLoader(train_set, batch_size)

if use_gpu:
    gen.to_gpu()
    dis.to_gpu()
    train_loader.to_gpu()
    xp = dezero.cuda.cupy
else:
    xp = np

label_real = xp.ones(batch_size).astype(int)   # np.int was removed in NumPy 1.24
label_fake = xp.zeros(batch_size).astype(int)
test_z = xp.random.randn(25, hidden_size).astype(np.float32)


def generate_image():
    # Sample images from the fixed test noise; no gradients needed.
    # (The plotting half of the original snippet is truncated.)
    with dezero.no_grad():
        fake_images = gen(test_z)
    return dezero.cuda.as_numpy(fake_images.data)
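Example #4 stops before the adversarial training loop itself. Below is a
minimal sketch of how the pieces above are typically combined in a
DeZero-style GAN loop; it is a reconstruction, not the verbatim original,
and assumes max_epoch is defined in the truncated part of the script and
that dis outputs one logit per image.

for epoch in range(max_epoch):
    for x, t in train_loader:
        if len(t) != batch_size:
            continue  # keep the batch and the fixed label arrays aligned

        # (1) Update the discriminator on real and generated images.
        z = xp.random.randn(batch_size, hidden_size).astype(np.float32)
        fake_images = gen(z)
        y_real = dis(x)
        y_fake = dis(fake_images.data)  # .data blocks backprop into gen here
        loss_d = (F.sigmoid_cross_entropy(y_real, label_real) +
                  F.sigmoid_cross_entropy(y_fake, label_fake))
        gen.cleargrads()
        dis.cleargrads()
        loss_d.backward()
        opt_d.update()

        # (2) Update the generator to fool the discriminator.
        y_fake = dis(fake_images)
        loss_g = F.sigmoid_cross_entropy(y_fake, label_real)
        gen.cleargrads()
        dis.cleargrads()
        loss_g.backward()
        opt_g.update()

    generate_image()  # sample from the fixed test_z after each epoch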
Example #5
if '__file__' in globals():
    import os, sys
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import os
import dezero
import dezero.functions as F
from dezero import optimizers
from dezero import DataLoader
from dezero.models import MLP

max_epoch = 3
batch_size = 100

train_set = dezero.datasets.MNIST(train=True)
train_loader = DataLoader(train_set, batch_size)
model = MLP((1000, 10))
optimizer = optimizers.SGD().setup(model)

# Load saved parameters if they exist; otherwise train from scratch
if os.path.exists('my_nlp.npz'):
    model.load_weights('my_nlp.npz')

else:
    for epoch in range(max_epoch):
        sum_loss, sum_acc = 0, 0

        for x, t in train_loader:
            y = model(x)
            loss = F.softmax_cross_entropy(y, t)
            acc = F.accuracy(y, t)
            model.cleargrads()
            loss.backward()
            optimizer.update()

            sum_loss += float(loss.data) * len(t)
            sum_acc += float(acc.data) * len(t)

        print('epoch: {}, loss: {:.4f}, accuracy: {:.4f}'.format(
            epoch + 1, sum_loss / len(train_set), sum_acc / len(train_set)))

    # Save the trained parameters so the next run takes the load branch.
    model.save_weights('my_nlp.npz')
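For reference, save_weights/load_weights serialize the model's flattened
parameter dict in NumPy's .npz format (np.savez_compressed under the hood,
as far as I know), so a saved file can also be opened directly with np.load
to inspect the arrays.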