from jhML.tutorial.dataset import MNIST

# NOTE(review): `jhML`, `np` (numpy) and `nn` are used below but no imports for
# them are visible in this span -- presumably imported elsewhere; confirm.


def get_accuracy(pred, gt):
    """Return the fraction of predictions equal to the ground-truth labels.

    Both arguments are moved to CPU first (via ``jhML.as_cpu``) so that GPU
    tensors are accepted transparently.
    """
    pred = jhML.as_cpu(pred)
    gt = jhML.as_cpu(gt)
    # Mean of the element-wise match mask *is* the accuracy; this replaces the
    # original hand-rolled np.sum(...) / len(gt) with the idiomatic np.mean.
    return float(np.mean((pred == gt).astype(float)))


# Fully-connected classifier: 28*28 flattened pixels in, MNIST.num_class logits out.
net = nn.Sequential(
    nn.Linear(28 * 28, 28 * 28),
    nn.ReLU(),
    nn.Linear(28 * 28, 1000),
    nn.ReLU(),
    nn.Linear(1000, 1000),
    nn.ReLU(),
    nn.Linear(1000, 100),
    nn.ReLU(),
    nn.Linear(100, 10),
    nn.ReLU(),
    nn.Linear(10, MNIST.num_class),
)

if __name__ == "__main__":
    using_gpu = True
    num_class = 10
    dataset_mnist_train = MNIST(train=True, flatten=True)
    dataset_mnist_test = MNIST(train=False, flatten=True)
    # drop_last=True keeps every training batch a full 256 samples; the test
    # loader keeps the remainder batch and preserves sample order (shuffle=False).
    train_dataloader = jhML.Dataloader(
        dataset_mnist_train, batch_size=256, gpu=using_gpu, drop_last=True
    )
    test_dataloader = jhML.Dataloader(
        dataset_mnist_test, batch_size=256, shuffle=False, gpu=using_gpu, drop_last=False
    )
    # NOTE(review): the script is cut off here in this view -- the training /
    # evaluation loop presumably follows in the original file.
[1, 1, 1], #7 ] gt = [ [0], [6], [4], [3], [2], [7], ] num_class = 8 net = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 1000), nn.ReLU(), nn.Linear(1000, 800), nn.ReLU(), nn.Linear(800, 100), nn.ReLU(), nn.Linear(100, 8), nn.ReLU(), nn.Linear(8, num_class)).to_gpu() #### learning optim = optim.RMSprop(net.params(), lr=1e-4) num_epoch = int(1e+4) for epoch in range(num_epoch): data, label = jhML.Variable(x).to_gpu(), jhML.Variable(gt).to_gpu() optim.zero_grad() pred = net(data) loss = F.softmax_cross_entropy(pred, label) loss.backward() optim.step() if epoch % (num_epoch / 100) == 0: print("%d/%d" % (epoch, num_epoch))
# NOTE(review): this span starts mid-expression -- the opening `x = [` and the
# first row of the XOR input list are outside the visible chunk.
    [0, 1],
    [1, 0],
    [1, 1]]

# XOR truth-table targets, one per input row.
gt = [[0],
      [1],
      [1],
      [0]]

data = jhML.Variable(x)

# Tiny MLP regressor: 2 inputs -> 1 output.
net = nn.Sequential(
    nn.Linear(2, 4), nn.ReLU(),
    nn.Linear(4, 8), nn.ReLU(),
    nn.Linear(8, 8), nn.ReLU(),
    nn.Linear(8, 1)
)

#### learning
# NOTE(review): this rebinds the name `optim` (presumably the optimizer module)
# to the RMSprop instance, shadowing the module; consider renaming the instance.
optim = optim.RMSprop(net.params(), lr=1e-4)
num_epoch = int(1e+5)
for epoch in range(num_epoch):
    optim.zero_grad()
    pred = net(data)
    # NOTE(review): `gt` is passed as a raw nested list here, unlike `data`
    # which is wrapped in jhML.Variable -- assumes mean_squared_error accepts
    # plain lists; confirm against F's API.
    loss = F.mean_squared_error(pred, gt)
    loss.backward()
    optim.step()