Example 1
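    # Compute training set accuracy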
    training_correct = 0
    training_total = 0
    with torch.no_grad():
        for data in trainloader:
            images, labels = data
            outputs = model(images)
            predicted = torch.argmax(outputs, 1)
            training_correct += (predicted == labels).sum().item()
            training_total += labels.size(0)
    training_accuracy = 100. * training_correct / training_total

    # Compute test set accuracy
    test_correct = 0
    test_total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            outputs = model(images)
            predicted = torch.argmax(outputs, 1)
            test_correct += (predicted == labels).sum().item()
            test_total += labels.size(0)
    test_accuracy = 100. * test_correct / test_total

    # Print the accuracies
    print(
        'Epoch : %2d, training accuracy = %6.2f %%, test accuracy = %6.2f %%' %
        (epoch, training_accuracy, test_accuracy))

# Save the network weights
torch.save(model.state_dict(), 'my_network.pth')
Example 2
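        # Majority vote within each superpixel: count the pixels per cluster label
        # and assign the most frequent label to every pixel of the superpixel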
        for j in range(len(hist)):
            hist[j] = len(np.where(labels_per_sp == u_labels_per_sp[j])[0])
        im_target[l_inds[i]] = u_labels_per_sp[np.argmax(hist)]
    target = torch.from_numpy(im_target)
    if use_cuda:
        target = target.cuda()
    loss = loss_fn(output, target)
    loss.backward()
    optimizer.step()

    # Report progress: iteration, current number of labels, and loss
    print(batch_idx, '/', args.maxIter, ':', nLabels, loss.item())

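    # Stop once the number of distinct labels has dropped to the requested minimum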
    if nLabels <= args.minLabels:
        print("nLabels", nLabels, "reached minLabels", args.minLabels, ".")
        break

# save output image
if not args.visualize:
    output = model(data)[0]
    output = output.permute(1, 2, 0).contiguous().view(-1, args.nChannel)
    # Per-pixel cluster assignment: argmax over the channel dimension
    _, target = torch.max(output, 1)
    im_target = target.data.cpu().numpy()
    im_target_rgb = np.array([label_colours[c % 100] for c in im_target])
    im_target_rgb = im_target_rgb.reshape(im.shape).astype(np.uint8)
cv2.imwrite("output.png", im_target_rgb)

# Save model weights
torch.save(model.state_dict(), 'unsupervised_weights.pth')
Example 3
def main(args):
    # Hyperparameters
    end_epoch = 100
    lr = 0.001
    beta1 = 0.5
    beta2 = 0.99
    gpu = 0

    # Set up the model
    model = MyNet()

    # Select GPU or CPU
    if gpu >= 0 and torch.cuda.is_available():
        device = 'cuda:{}'.format(gpu)
    else:
        device = 'cpu'
    model.to(device)

    # Count and print the number of trainable parameters
    params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("Trainable parameters:", params)
    print(model)

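    # Loss function and Adam optimizer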
    criteria = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=lr,
                                 betas=(beta1, beta2))

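    # Training dataset and loader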
    dataset = MyDataset("data/", is_train=True)
    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=4,
                                               shuffle=True)

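    # Training loop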
    for epoch in range(end_epoch):
        epoch_loss = 0
        epoch_acc = 0
        for i, data in enumerate(train_loader):
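            # "\r" with end="" keeps the progress report on a single, updating console line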
            print("\repoch: {} iteration: {}".format(epoch, i), end="")

            inputs, labels = data
            optimizer.zero_grad()
            outputs = model(inputs.to(device))
            _, preds = torch.max(outputs.detach(), 1)
            loss = criteria(outputs, labels.to(device))

            loss.backward()
            optimizer.step()

            # Accumulate loss and correct-prediction counts, weighted by batch size
            epoch_loss += loss.item() * inputs.size(0)
            epoch_acc += torch.sum(preds.cpu() == labels).item()

        # Average over the whole dataset (correct even if the last batch is smaller)
        epoch_loss /= len(dataset)
        epoch_acc = epoch_acc / len(dataset)

        print("[epoch: {}] [Loss: {:.4f}] [Acc: {:.4f}]".format(
            epoch, epoch_loss, epoch_acc))
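        # Save a checkpoint every 10 epochs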
        if (epoch + 1) % 10 == 0:
            save_dir = os.path.join("models", args.model)
            os.makedirs(save_dir, exist_ok=True)
            torch.save(model.state_dict(),
                       os.path.join(save_dir, "{}.pth".format(epoch)))
import pickle
import time

import torch
from torch import optim

from train import train_model

# MyNet and DataGetter come from project-specific modules not shown in this excerpt

if __name__ == "__main__":

    model = MyNet()
    optimizer = optim.SGD(filter(lambda p: p.requires_grad,
                                 model.parameters()),
                          lr=0.001)
    # Data loader init
    # data_dir = 'D:/data_odometry_gray/dataset'
    data_dir = 'D:/data_odometry_color/dataset/'
    batch_size = 16

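    # DataGetter is project-specific; the (0, 6) and (7, 7) arguments presumably
    # select KITTI odometry sequence ranges for training and validation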
    trainData = DataGetter(data_dir, batch_size, 0, 6, randomize_data=True)
    valData = DataGetter(data_dir, batch_size, 7, 7, randomize_data=True)

    model, metrics = train_model(model,
                                 optimizer,
                                 trainData,
                                 valData,
                                 num_epochs=50)

    # Save model and results
    name = time.ctime(time.time()).replace(' ', '_').replace(':', '_')
    with open(name + '.pkl', 'wb') as f:
        pickle.dump(metrics, f)

    model.eval()
    torch.save(model.state_dict(), 'model_' + name + '.pkl')