Code example #1

import os
import dezero
import dezero.functions as F
from dezero import optimizers
from dezero import DataLoader
from dezero.models import MLP

max_epoch = 3
batch_size = 100

train_set = dezero.datasets.MNIST(train=True)
train_loader = DataLoader(train_set, batch_size)
model = MLP((1000, 10))
optimizer = optimizers.SGD().setup(model)

# load parameters if a saved file exists
if os.path.exists('my_mlp.npz'):
    model.load_weights('my_mlp.npz')

else:
    # no saved parameters: train from scratch, then save the weights
    for epoch in range(max_epoch):
        sum_loss = 0

        for x, t in train_loader:
            y = model(x)
            loss = F.softmax_cross_entropy(y, t)
            model.cleargrads()
            loss.backward()
            optimizer.update()
            sum_loss += float(loss.data) * len(t)

        print('epoch: {}, loss: {:.4f}'.format(epoch + 1,
                                               sum_loss / len(train_set)))

    # save the trained parameters
    model.save_weights('my_mlp.npz')
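
To confirm that the saved parameters were restored correctly, the model can be scored on the MNIST test split. The following is a minimal sketch under the assumption that dezero.no_grad(), F.accuracy, and dezero.datasets.MNIST(train=False) are available as in DeZero's standard examples; test_set and test_loader are names introduced here for illustration.

# evaluate the (loaded or freshly trained) model on the test split
test_set = dezero.datasets.MNIST(train=False)
test_loader = DataLoader(test_set, batch_size, shuffle=False)

sum_loss, sum_acc = 0, 0
with dezero.no_grad():  # gradients are not needed for evaluation
    for x, t in test_loader:
        y = model(x)
        loss = F.softmax_cross_entropy(y, t)
        acc = F.accuracy(y, t)
        sum_loss += float(loss.data) * len(t)
        sum_acc += float(acc.data) * len(t)

print('test loss: {:.4f}, accuracy: {:.4f}'.format(
    sum_loss / len(test_set), sum_acc / len(test_set)))
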
Code example #2

import os
import time
import dezero
import dezero.functions as F
from dezero import optimizers
from dezero import DataLoader
from dezero.models import MLP

max_epoch = 3
batch_size = 100

train_set = dezero.datasets.MNIST(train=True)
train_loader = DataLoader(train_set, batch_size)
model = MLP((1000, 10))
optimizer = optimizers.SGD().setup(model)

# GPU mode: move the data loader and model parameters to the GPU if available
if dezero.cuda.gpu_enable:
    train_loader.to_gpu()
    model.to_gpu()

# load parameters if a saved file exists
if os.path.exists("my_mlp.npz"):
    model.load_weights("my_mlp.npz")

for epoch in range(max_epoch):
    start = time.time()
    sum_loss = 0

    for x, t in train_loader:
        y = model(x)
        loss = F.softmax_cross_entropy(y, t)
        model.cleargrads()
        loss.backward()
        optimizer.update()
        sum_loss += float(loss.data) * len(t)

    elapsed_time = time.time() - start
    print("epoch: {}, loss: {:.4f}, time: {:.4f}[sec]".format(
        epoch + 1, sum_loss / len(train_set), elapsed_time))

# save the trained parameters
model.save_weights("my_mlp.npz")