# Classify a single image with the trained network (tail of the test routine).
if img.shape[1] != 32 or img.shape[2] != 32:
    print("Warning: incorrect image input dimensions; resizing to 3x32x32 with interpolation.")
    img = transform.resize(img, (3, 32, 32))
    img = torch.from_numpy(img).float()
img = img.to(device)
output = net(img)
_, predicted = torch.max(output.data, 1)
print("prediction result: ", classes[predicted])


if __name__ == '__main__':
    # Make sure the checkpoint directory exists before anything else.
    if not os.path.exists('model'):
        try:
            os.makedirs('model')
        except OSError:
            raise Exception("Cannot create directory")

    mode, image_path = parse()
    PATH = './model/cifar_net.pth'
    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    net = Net()
    net.to(device)

    if mode == 'train':
        # Train from scratch and evaluate on the held-out set.
        train_loader, test_loader = load_data()
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.5)
        train(train_loader, test_loader, net, criterion, optimizer, device)
    else:
        # Load the saved checkpoint and classify the given image.
        net.load_state_dict(torch.load(PATH))
        test(image_path, net, device, classes)
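# parse() and load_data() are helpers defined elsewhere in this script. A minimal sketch of
# what parse() could look like, assuming an argparse-based CLI; the --mode and --image flag
# names are illustrative assumptions, not the original interface.
import argparse

def parse():
    parser = argparse.ArgumentParser(description='Train the CIFAR-10 net or classify one image')
    parser.add_argument('--mode', choices=['train', 'test'], default='train',
                        help='train a new model or run single-image inference')
    parser.add_argument('--image', default='', help='path of the image to classify in test mode')
    args = parser.parse_args()
    return args.mode, args.image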
from my_net import Net
import data_loader
import torch
from torch.autograd import Variable
import torch.optim as optim
import json

config = json.load(open('./config.json', 'r'))


def get_label(traj):
    # Target is the last (longitude, latitude) point of each trajectory.
    lngs_last = torch.unsqueeze(Variable(traj['lngs_last']), dim=-1)
    lats_last = torch.unsqueeze(Variable(traj['lats_last']), dim=-1)
    return torch.cat((lngs_last, lats_last), dim=-1)  # [bs, 2]


model = Net(3, 10)
loss = torch.nn.MSELoss(size_average=True)
optimizer = optim.Adam(model.parameters(), lr=1e-3)

for epoch in range(10):    # passes over the whole training set
    for i in range(5):     # iterate over the five training shards train_00 ... train_04
        data_iter = data_loader.get_loader('train_0' + str(i), 5)
        for idx, (attr, traj) in enumerate(data_iter):
            label = get_label(traj)
            output = model(traj)
            l = loss(output, label)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            # De-normalize predictions and targets back to raw longitude/latitude for inspection.
            temp_std = Variable(torch.FloatTensor([0.04988770679679998, 0.04154695076189434]).expand_as(output))
            temp_mean = Variable(torch.FloatTensor([104.05810954320589, 30.652312982784895]))
            print(output * temp_std + temp_mean)
            print(label * temp_std + temp_mean)
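# After the loop above, the trajectory model would typically be checkpointed and evaluated
# without gradient tracking. A minimal sketch under two assumptions: the './traj_net.pth'
# checkpoint path and the 'eval' split name for data_loader.get_loader are illustrative only.
torch.save(model.state_dict(), './traj_net.pth')

model.eval()
total_mse, n_batches = 0.0, 0
with torch.no_grad():
    for idx, (attr, traj) in enumerate(data_loader.get_loader('eval', 5)):
        label = get_label(traj)
        output = model(traj)
        total_mse += loss(output, label).item()
        n_batches += 1
print('mean MSE on the eval split:', total_mse / max(n_batches, 1))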
# get some random training images
dataiter = iter(trainloader)
images, labels = next(dataiter)

# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))

import torch.nn as nn
import torch.nn.functional as F
from my_net import Net

PATH = './cifar_net_trained.pth'

# retrain: start from the previously trained weights
net = Net()
net.load_state_dict(torch.load(PATH))

# Use a Classification Cross-Entropy loss and SGD with momentum.
import torch.optim as optim

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

for epoch in range(4):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
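# imshow() is called above but not defined in this fragment. The usual helper from the PyTorch
# CIFAR-10 tutorial looks roughly like the sketch below; the /2 + 0.5 un-normalization assumes
# the common Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) transform, which is an assumption here.
import matplotlib.pyplot as plt
import numpy as np

def imshow(img):
    img = img / 2 + 0.5                          # undo the [-1, 1] normalization
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))   # CHW -> HWC for matplotlib
    plt.show()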
                                         shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')

# get some random training images
dataiter = iter(trainloader)
images, labels = next(dataiter)

import torch.nn as nn
import torch.nn.functional as F
from my_net import Net

net = Net()
net.to(device)

# Use a Classification Cross-Entropy loss and SGD with momentum.
import torch.optim as optim

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels], moved to the GPU if available
        inputs, labels = data[0].to(device), data[1].to(device)
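        # The fragment stops right after moving a batch to the device. The remainder of a
        # typical training step, plus loss logging and a final checkpoint, is sketched below;
        # the './cifar_net.pth' save path is an assumption.
        optimizer.zero_grad()                  # reset accumulated gradients
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if i % 2000 == 1999:                   # report the average loss every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

print('Finished Training')
torch.save(net.state_dict(), './cifar_net.pth')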
                                         shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')

# get some random training images
dataiter = iter(trainloader)
images, labels = next(dataiter)

# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))

PATH = './cifar_net.pth'

# load the trained weights and predict on the sampled images
net = Net()
net.load_state_dict(torch.load(PATH))

outputs = net(images)
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4)))

# measure accuracy over the whole test set
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
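        # The loop above is cut off after counting the batch size; in the standard tutorial the
        # accuracy tally finishes like this (a sketch continuing the same loop, nothing new assumed).
        correct += (predicted == labels).sum().item()

print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))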
from sklearn.datasets import load_digits

digits = load_digits()

from my_net import Net

net = Net()

from skorch.classify import Classifier
from skorch.torch_env import *
import torch.nn.functional as F    # explicit import for the F.nll_loss used below

sknet = Classifier(net, loss_func=F.nll_loss)
sknet.load_weight('sklearn.dataset.digits.pkl')

# Flat 64-pixel digit vectors reshaped into single-channel 8x8 images.
X = digits.data.reshape(-1, 1, 8, 8)
Y = digits.target

sknet.fit(X, Y, early_stop=3)
# sknet.save_weight('sklearn.dataset.digits.pkl')

s = sknet.score(X, Y)
print(s)
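# For reference, the same digits workflow with the published skorch package would look roughly
# like the sketch below. NeuralNetClassifier, EarlyStopping, fit/predict and save_params are the
# real skorch API; the criterion, optimizer, learning rate and epoch count are assumptions chosen
# to mirror the nll_loss / early-stop setup above (Net is still assumed to emit log-probabilities).
import numpy as np
import torch
from sklearn.datasets import load_digits
from sklearn.metrics import accuracy_score
from skorch import NeuralNetClassifier
from skorch.callbacks import EarlyStopping

from my_net import Net

digits = load_digits()
X = digits.data.reshape(-1, 1, 8, 8).astype(np.float32)
y = digits.target.astype(np.int64)

sknet = NeuralNetClassifier(
    Net,
    criterion=torch.nn.NLLLoss,        # matches F.nll_loss above
    optimizer=torch.optim.Adam,
    lr=1e-3,
    max_epochs=20,
    callbacks=[EarlyStopping(patience=3)],
)
sknet.fit(X, y)
sknet.save_params(f_params='sklearn.dataset.digits.pkl')
print(accuracy_score(y, sknet.predict(X)))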
def main():
    classes, testloader, trainloader = set_dataset()

    # get some random training images
    dataiter = iter(trainloader)
    images, labels = next(dataiter)

    # show the images and their labels
    imshow(torchvision.utils.make_grid(images))
    print(' '.join('%5s' % classes[labels[j]] for j in range(4)))

    net = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    for epoch in range(2):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if i % 2000 == 1999:    # print the average loss every 2000 mini-batches
                print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0

    print('Finished Training')

    PATH = './cifar_net.pth'
    torch.save(net.state_dict(), PATH)

    # show some test images and their ground-truth labels
    dataiter = iter(testloader)
    images, labels = next(dataiter)
    imshow(torchvision.utils.make_grid(images))
    print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))

    # reload the trained weights and predict on those images
    net = Net()
    net.load_state_dict(torch.load(PATH))
    outputs = net(images)
    _, predicted = torch.max(outputs, 1)
    print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4)))

    # overall accuracy on the test set
    correct = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))

    # per-class accuracy
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            outputs = net(images)
            _, predicted = torch.max(outputs, 1)
            c = (predicted == labels).squeeze()
            for i in range(4):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1
    for i in range(10):
        print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i]))

    # training-on-GPU variant: pick the device, move the network, and move each batch
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)
    net.to(device)
    inputs, labels = data[0].to(device), data[1].to(device)