# Classification: train an MLP on the Spiral dataset with mini-batches via DataLoader.

# Quick check of F.accuracy (left commented out in the original):
# t = np.array([1, 2, 0])
# acc = F.accuracy(y, t)
# print(acc)

import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import numpy as np
import myPackage
import myPackage.functions as F
from myPackage import optimizers
from myPackage import DataLoader  # assumed import path, following the DeZero-style layout used elsewhere in this repo
from myPackage.models import MLP

max_epoch = 300
batch_size = 30
hidden_size = 10
lr = 1.0

# Spiral dataset: train/test splits, served in mini-batches by DataLoader
train_set = myPackage.datasets.Spiral(train=True)
test_set = myPackage.datasets.Spiral(train=False)
train_loader = DataLoader(train_set, batch_size)
test_loader = DataLoader(test_set, batch_size, shuffle=False)

# Two-layer MLP classifier (3 output classes) trained with plain SGD
model = MLP((hidden_size, 3))
optimizer = optimizers.SGD(lr).setup(model)

train_loss_list = []
test_loss_list = []
train_acc_list = []
test_acc_list = []

for epoch in range(max_epoch):
    sum_loss, sum_acc = 0, 0
    for x, t in train_loader:
        y = model(x)
        loss = F.softmax_cross_entropy(y, t)
        acc = F.accuracy(y, t)
        model.cleargrads()
        loss.backward()
        optimizer.update()
        # Accumulate batch totals so per-epoch averages can be computed below
        sum_loss += float(loss.data) * len(t)
        sum_acc += float(acc.data) * len(t)

    train_loss_list.append(sum_loss / len(train_set))
    train_acc_list.append(sum_acc / len(train_set))

    # Evaluate on the test set. no_grad is assumed to exist in myPackage,
    # mirroring dezero.no_grad; it skips graph construction during inference.
    sum_loss, sum_acc = 0, 0
    with myPackage.no_grad():
        for x, t in test_loader:
            y = model(x)
            loss = F.softmax_cross_entropy(y, t)
            acc = F.accuracy(y, t)
            sum_loss += float(loss.data) * len(t)
            sum_acc += float(acc.data) * len(t)

    test_loss_list.append(sum_loss / len(test_set))
    test_acc_list.append(sum_acc / len(test_set))

    print('epoch: {}, train loss: {:.4f}, train acc: {:.4f}, '
          'test loss: {:.4f}, test acc: {:.4f}'.format(
              epoch + 1, train_loss_list[-1], train_acc_list[-1],
              test_loss_list[-1], test_acc_list[-1]))
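# Optional: visualize the per-epoch curves recorded above. This is a minimal
# sketch, not part of the original script; it assumes matplotlib is installed
# and that the four lists have been filled by the training loop.
import matplotlib.pyplot as plt

epochs = range(1, max_epoch + 1)

plt.figure()
plt.plot(epochs, train_loss_list, label='train')
plt.plot(epochs, test_loss_list, label='test')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()

plt.figure()
plt.plot(epochs, train_acc_list, label='train')
plt.plot(epochs, test_acc_list, label='test')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()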
# Regression: fit a noisy sine curve with an MLP.

import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import numpy as np
from myPackage import Variable
from myPackage import optimizers
import myPackage.functions as F
from myPackage.models import MLP

np.random.seed(0)

# Toy regression data: noisy samples of sin(2*pi*x) on [0, 1)
x = np.random.rand(100, 1)
y = np.sin(2 * np.pi * x) + np.random.rand(100, 1)

lr = 0.2
max_iter = 10000
hidden_size = 10

# Two-layer MLP regressor (single output) trained with plain SGD
model = MLP((hidden_size, 1))
optimizer = optimizers.SGD(lr)
optimizer.setup(model)
# optimizer = optimizers.SGD(lr).setup(model)  # equivalent one-liner

for i in range(max_iter):
    y_pred = model(x)
    loss = F.mean_squared_error(y, y_pred)

    model.cleargrads()
    loss.backward()
    optimizer.update()

    # Log the loss every 1000 iterations
    if i % 1000 == 0:
        print(loss)
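# Optional: plot the fitted curve against the noisy data. A minimal sketch,
# not part of the original script; it assumes matplotlib is installed.
# model() accepts a raw ndarray (as the training loop above already does)
# and returns a Variable, so .data extracts the plain ndarray for plotting.
import matplotlib.pyplot as plt

plt.scatter(x, y, s=10, label='data')
t_plot = np.arange(0, 1, 0.01)[:, np.newaxis]
y_plot = model(t_plot)
plt.plot(t_plot, y_plot.data, color='r', label='model prediction')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()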