# Train an MLP classifier on MNIST.
# (Alternative construction with a preprocessing transform:
#   train_set = my_framework.datasets.MNIST(train=True, transform=f)
#   test_set = my_framework.datasets.MNIST(train=False, transform=f))

# Hyperparameters.
max_epoch = 200
batch_size = 100
hidden_size = 1000
bit_size = 1  # NOTE(review): not used in this chunk — presumably consumed later; verify

# Datasets and mini-batch loaders; the test loader is kept in a fixed order.
train_set = my_framework.datasets.MNIST(train=True)
test_set = my_framework.datasets.MNIST(train=False)
train_loader = DataLoader(train_set, batch_size)
test_loader = DataLoader(test_set, batch_size, shuffle=False)

# Model: MLP with one hidden layer of `hidden_size` units, 10 output classes.
model = MLP((hidden_size, 10))
optimizer = optimizers.SGD().setup(model)

# Per-epoch metric buffers.
train_acc = np.zeros(max_epoch)
test_acc = np.zeros(max_epoch)
train_loss = np.zeros(max_epoch)
test_loss = np.zeros(max_epoch)

for epoch in range(max_epoch):
    sum_loss, sum_acc = 0, 0
    for x, t in train_loader:
        # Forward pass, loss, and accuracy for this mini-batch.
        y = model(x)
        loss = F.softmax_cross_entropy_simple(y, t)
        acc = F.accuracy(y, t)
        # Reset gradients, then backpropagate.
        model.cleargrads()
        loss.backward()
        # NOTE(review): loop body appears truncated in this chunk — no
        # optimizer.update() or sum_loss/sum_acc accumulation is visible here;
        # confirm against the rest of the file.
# Make the project root importable when this file is run as a script.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from my_framework import Variable, Model, optimizers
import my_framework.functions as F
import my_framework.layers as L
from my_framework.models import MLP

# Toy regression: fit noisy y = sin(2*pi*x) samples with a small MLP.
np.random.seed(0)  # fixed seed for reproducible data and results
x = np.random.rand(100, 1)
y = np.sin(2 * np.pi * x) + np.random.rand(100, 1)

# Hyperparameters.
lr = 0.2
iters = 10000
hidden_size = 10

# One hidden layer of `hidden_size` units, a single regression output,
# trained with plain SGD.
model = MLP((hidden_size, 1))
optimizer = optimizers.SGD(lr)
optimizer.setup(model)

for i in range(iters):
    # Forward pass and mean-squared-error loss.
    y_pred = model(x)
    loss = F.mean_squared_error(y, y_pred)

    # Reset gradients, backpropagate, and take one SGD step.
    model.cleargrads()
    loss.backward()
    optimizer.update()

    # Log the loss every 1000 iterations.
    if i % 1000 == 0:
        print(loss)