Example #1
def train(model, optimizer, criterion, trainloader, valloader, testloader, epochs, device, root):
    best_acc = 0.0
    supervised_loss = []

    for epoch in range(epochs):  # loop over the dataset multiple times
        for i, data in enumerate(trainloader):
            l_x, l_y = data[0].to(device), data[1].to(device)

            optimizer.zero_grad()

            outputs = model(l_x)
            sup_loss = criterion(outputs, l_y)
            loss = sup_loss

            loss.backward()
            optimizer.step()

        # Per-epoch validation accuracy and last-batch supervised loss
        val_acc = evaluate_classifier(model, valloader, device)
        print('Epoch: {}, Val_acc: {:.3} Sup_loss: {:.3}'.format(epoch, val_acc, sup_loss.item()))

        supervised_loss.append(sup_loss.item())

        # Checkpoint whenever validation accuracy improves
        if val_acc > best_acc:
            loadsave(model, optimizer, "Lenet", root=root, mode='save')
            best_acc = val_acc

    return supervised_loss
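
evaluate_classifier and loadsave are project helpers that do not appear in these snippets (their signatures also differ across the examples below). A minimal sketch matching the single-loader calls in Examples #1 and #2, with an assumed checkpoint layout; names and the '.pt' path scheme are illustrative:

import os
import torch

def evaluate_classifier(model, loader, device):
    # Illustrative helper: top-1 accuracy of `model` over `loader`.
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            correct += (model(x).argmax(1) == y).sum().item()
            total += y.size(0)
    model.train()
    return correct / total

def loadsave(model, optimizer, name, root, mode='save'):
    # Illustrative helper: save or restore model + optimizer state under `root`.
    path = os.path.join(root, name + '.pt')
    if mode == 'save':
        torch.save({'model': model.state_dict(),
                    'optimizer': optimizer.state_dict()}, path)
    else:
        checkpoint = torch.load(path)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])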
Example #2
def main(args):
    transform_SVHN = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
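    # toRGB() (a project transform, sketched after this example) gives MNIST 3 channels;
    # Resize(32) matches SVHN's 32x32 resolution so one network can consume both.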
    transform_MNIST = transforms.Compose([toRGB(), transforms.Resize(32), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    trainset_SVHN = torchvision.datasets.SVHN(root=args.dataset_path[0], split='train', download=True, transform=transform_SVHN)
    testset_SVHN = torchvision.datasets.SVHN(root=args.dataset_path[0], split='test', download=True, transform=transform_SVHN)
    testset_MNIST = torchvision.datasets.MNIST(root=args.dataset_path[0], train=False, download=True, transform=transform_MNIST)

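    # Keep 60% of SVHN for training; split MNIST's test set into 20% val / 80% test.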
    discard_size = int(0.4 * len(trainset_SVHN))
    train_size = len(trainset_SVHN) - discard_size
    val_size = int(0.2 * len(testset_MNIST))
    test_size = len(testset_MNIST) - val_size
    trainset, discardset = torch.utils.data.random_split(trainset_SVHN, [train_size, discard_size])
    valset, testset = torch.utils.data.random_split(testset_MNIST, [val_size, test_size])

    # Increasing the batch size would shorten training time. The LeNet and VAT
    # loaders can use different batch sizes, e.g. 32 for LeNet and 128 for VAT.
    trainloader = DataLoader(trainset, batch_size=32, shuffle=True, num_workers=2)
    valloader = DataLoader(valset, batch_size=1, shuffle=True, num_workers=2)
    testloader = DataLoader(testset, batch_size=1, shuffle=False, num_workers=2)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Device: " + str(device))

    lenet0 = LeNet(device)
    lenet0 = lenet0.to(device)
    print(lenet0)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(lenet0.parameters(), lr=args.lr)  # TODO: add a learning-rate scheduler.
    # optimizer = optim.SGD(lenet0.parameters(), lr=args.lr, momentum=0.9)

    if args.eval_only:
        loadsave(lenet0, optimizer, "Lenet", root=args.weights_path[0], mode='load')

    else:
        supervised_loss = train(lenet0, optimizer, criterion, trainloader, valloader, testloader, args.epochs, device, args.weights_path[0])

        plt.plot(supervised_loss)
        plt.title("Supervised loss")
        plt.xlabel("Epochs")
        plt.ylabel("Loss")
        plt.grid(True)

        plt.show()

        loadsave(lenet0, optimizer, "Lenet", root=args.weights_path[0], mode='load')

    test_acc = evaluate_classifier(lenet0, testloader, device)
    print("Accuracy of the network on MNIST is %d%%\n" % (test_acc * 100))

    barchartplot(lenet0, testloader, device)
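
toRGB() used in the MNIST transform above is a project transform that is not shown. A plausible one-liner, assuming it runs before ToTensor() and therefore receives PIL images:

class toRGB:
    # Replicates the grayscale channel so MNIST matches SVHN's 3-channel input.
    def __call__(self, img):
        return img.convert('RGB')  # PIL: 'L' -> 'RGB'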
Example #3
def train(model, optimizer, criterion, criterion_VAT, trainloader_SVHN,
          trainloader_MNIST, valloader, testloader_SVHN, alpha, epochs, device,
          root):
    best_acc = 0.0
    supervised_loss = []
    unsupervised_loss = []

    for epoch in range(epochs):  # loop over the dataset multiple times
        dataloader_iterator = iter(trainloader_MNIST)

        for i, data in enumerate(trainloader_SVHN):
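            # The MNIST loader can be shorter than the SVHN loader, so restart
            # its iterator whenever it is exhausted mid-epoch.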
            try:
                data2 = next(dataloader_iterator)
            except StopIteration:
                dataloader_iterator = iter(trainloader_MNIST)
                data2 = next(dataloader_iterator)

            # Labeled SVHN batch and unlabeled MNIST batch (MNIST labels stay unused).
            l_x, l_y = data[0].to(device), data[1].to(device)
            ul_x = data2[0].to(device)
            optimizer.zero_grad()

            outputs = model(l_x)
            sup_loss = criterion(outputs, l_y)
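            # VAT regularization on the unlabeled MNIST batch, weighted by alpha.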
            unsup_loss = alpha * criterion_VAT(model, ul_x)
            loss = sup_loss + unsup_loss

            loss.backward()
            optimizer.step()

        # Per-epoch evaluation: vat_acc on the MNIST validation split, org_acc on the SVHN test set
        vat_acc, org_acc = evaluate_classifier(model, valloader,
                                               testloader_SVHN, device)
        print(
            'Epoch: {}, Val_acc: {:.3} Org_acc: {:.3} Sup_loss: {:.3} Unsup_loss: {:.3}'
            .format(epoch, vat_acc, org_acc, sup_loss.item(),
                    unsup_loss.item()))

        supervised_loss.append(sup_loss.item())
        unsupervised_loss.append(unsup_loss.item())

        # Best-epoch checkpointing is disabled; the model from the final epoch is saved instead.
        # if vat_acc > best_acc:
        #     loadsave(model, optimizer, "LenetVAT", root=root, mode='save')
        #     best_acc = vat_acc

    loadsave(model, optimizer, "LenetVAT", root=root, mode='save')
    return supervised_loss, unsupervised_loss
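
The VAT criterion used above (constructed in Example #5's main()) is not included in these snippets. A minimal sketch of virtual adversarial training in the spirit of Miyato et al. (2018), matching the VAT(device, eps=..., xi=..., k=..., use_entmin=...) constructor and the criterion_VAT(model, ul_x) call; the default values and internals are illustrative assumptions:

import torch
import torch.nn.functional as F

class VAT:
    def __init__(self, device, eps=2.5, xi=1e-6, k=1, use_entmin=False):
        self.device = device
        self.eps = eps    # radius of the adversarial perturbation (assumed default)
        self.xi = xi      # small step used during power iteration
        self.k = k        # number of power-iteration steps
        self.use_entmin = use_entmin  # optionally add conditional entropy minimization

    def __call__(self, model, x):
        # Predictions on the clean inputs serve as the fixed target distribution.
        with torch.no_grad():
            target = F.softmax(model(x), dim=1)

        # Power iteration: estimate the direction that most changes the prediction.
        d = torch.randn_like(x)
        for _ in range(self.k):
            d = (self.xi * F.normalize(d.flatten(1), dim=1).view_as(x)).requires_grad_()
            kl = F.kl_div(F.log_softmax(model(x + d), dim=1), target, reduction='batchmean')
            d = torch.autograd.grad(kl, d)[0].detach()

        # LDS loss: divergence between clean and adversarially perturbed predictions.
        r_adv = self.eps * F.normalize(d.flatten(1), dim=1).view_as(x)
        loss = F.kl_div(F.log_softmax(model(x + r_adv), dim=1), target, reduction='batchmean')

        if self.use_entmin:
            # Add the conditional entropy of the predictions as an extra penalty.
            log_p = F.log_softmax(model(x), dim=1)
            loss = loss - (log_p.exp() * log_p).sum(dim=1).mean()
        return loss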
Example #4 (excerpt)
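            # Excerpt: inside a training loop that mixes a reconstruction loss
            # with a classification (cross-entropy) loss weighted by args.alpha.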
            loss = recon_loss + args.alpha * ce_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            train_ce_loss += ce_loss.item() * batch_len
            train_recon_loss += recon_loss.item() * batch_len
            train_acc += (pred_y.argmax(1) == label).sum().item() / args.k_iwae
            train_n += batch_len
            mse += utils.mean_squared_error(observed_data, pred_x.mean(0),
                                            observed_mask) * batch_len
        total_time += time.time() - start_time
        val_loss, val_acc, val_auc = utils.evaluate_classifier(
            rec,
            val_loader,
            args=args,
            classifier=classifier,
            reconst=True,
            num_sample=1,
            dim=dim)
        if val_loss <= best_val_loss:
            best_val_loss = val_loss
            rec_state_dict = rec.state_dict()
            dec_state_dict = dec.state_dict()
            classifier_state_dict = classifier.state_dict()
            optimizer_state_dict = optimizer.state_dict()
        test_loss, test_acc, test_auc = utils.evaluate_classifier(
            rec,
            test_loader,
            args=args,
            classifier=classifier,
            reconst=True,
            num_sample=1,
            dim=dim)
Example #5 (excerpt)
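                # Excerpt: supervised training loop; labels may arrive one-hot
                # encoded, hence the view/max conversion below.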
                label = label.view(-1, N)
                _, label = label.max(-1)
                loss = criterion(out, label.long())
            else:
                loss = criterion(out, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            train_loss += loss.item() * batch_len
            train_acc += torch.mean(
                (out.argmax(1) == label).float()).item() * batch_len
            train_n += batch_len
        total_time += time.time() - start_time
        val_loss, val_acc, val_auc = utils.evaluate_classifier(rec,
                                                               val_loader,
                                                               args=args,
                                                               dim=dim)
        best_val_loss = min(best_val_loss, val_loss)
        test_loss, test_acc, test_auc = utils.evaluate_classifier(rec,
                                                                  test_loader,
                                                                  args=args,
                                                                  dim=dim)
        print(
            'Iter: {}, loss: {:.4f}, acc: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}, test_acc: {:.4f}, test_auc: {:.4f}'
            .format(itr, train_loss / train_n, train_acc / train_n, val_loss,
                    val_acc, test_acc, test_auc))

        if itr % 100 == 0 and args.save:
            torch.save(
                {
                    'args': args,
                    # ... (checkpoint contents truncated in the original snippet)

def main(args):
    transform_SVHN = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    transform_MNIST = transforms.Compose([
        toRGB(),
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    trainset_SVHN = torchvision.datasets.SVHN(root=args.dataset_path[0],
                                              split='train',
                                              download=True,
                                              transform=transform_SVHN)
    fullset_MNIST = torchvision.datasets.MNIST(root=args.dataset_path[0],
                                               train=True,
                                               download=True,
                                               transform=transform_MNIST)
    testset = torchvision.datasets.MNIST(root=args.dataset_path[0],
                                         train=False,
                                         download=True,
                                         transform=transform_MNIST)
    testset_SVHN = torchvision.datasets.SVHN(root=args.dataset_path[0],
                                             split='test',
                                             download=True,
                                             transform=transform_SVHN)

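    # 80/20 split of the MNIST training set: the train part is used unlabeled
    # for VAT, the held-out part for validation; SVHN remains the labeled source.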
    train_size = int(0.8 * len(fullset_MNIST))
    val_size = len(fullset_MNIST) - train_size
    trainset_MNIST, valset = torch.utils.data.random_split(
        fullset_MNIST, [train_size, val_size])

    # Increasing the batch size would shorten training time. The LeNet and VAT
    # loaders can use different batch sizes, e.g. 32 for LeNet and 128 for VAT.
    trainloader_SVHN = DataLoader(trainset_SVHN,
                                  batch_size=32,
                                  shuffle=True,
                                  num_workers=2)
    trainloader_MNIST = DataLoader(trainset_MNIST,
                                   batch_size=32,
                                   shuffle=True,
                                   num_workers=2)
    valloader = DataLoader(valset, batch_size=1, shuffle=True, num_workers=2)
    testloader = DataLoader(testset,
                            batch_size=1,
                            shuffle=False,
                            num_workers=2)
    testloader_SVHN = DataLoader(testset_SVHN,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=2)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Device: " + str(device))

    lenet0 = LeNet(device)
    lenet0 = lenet0.to(device)
    print(lenet0)

    criterion = nn.CrossEntropyLoss()
    criterion_VAT = VAT(device,
                        eps=args.eps,
                        xi=args.xi,
                        k=args.k,
                        use_entmin=args.use_entmin)
    optimizer = optim.Adam(lenet0.parameters(),
                           lr=args.lr)  # TODO: add a learning-rate scheduler.
    # optimizer = optim.SGD(lenet0.parameters(), lr=args.lr, momentum=0.9)

    if args.eval_only:
        loadsave(lenet0,
                 optimizer,
                 "LenetVAT",
                 root=args.weights_path[0],
                 mode='load')

    else:
        supervised_loss, unsupervised_loss = train(
            lenet0, optimizer, criterion, criterion_VAT, trainloader_SVHN,
            trainloader_MNIST, valloader, testloader_SVHN, args.alpha,
            args.epochs, device, args.weights_path[0])
        loss_plot(supervised_loss, unsupervised_loss)

    vat_acc, org_acc = evaluate_classifier(lenet0, testloader, testloader_SVHN,
                                           device)
    print(
        "Accuracy of the network on MNIST is %d%%\nAccuracy of the network on SVHN is %d%%\n"
        % (vat_acc * 100, org_acc * 100))

    barchartplot(lenet0, testloader, device)
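
loss_plot (called above) and barchartplot are plotting helpers that are not shown. A minimal sketch of loss_plot, assuming it mirrors the inline matplotlib code from Example #2:

import matplotlib.pyplot as plt

def loss_plot(supervised_loss, unsupervised_loss):
    # Illustrative implementation: one panel per loss curve, indexed by epoch.
    fig, axes = plt.subplots(1, 2, figsize=(10, 4))
    for ax, curve, title in zip(axes, (supervised_loss, unsupervised_loss),
                                ("Supervised loss", "Unsupervised (VAT) loss")):
        ax.plot(curve)
        ax.set_title(title)
        ax.set_xlabel("Epochs")
        ax.set_ylabel("Loss")
        ax.grid(True)
    plt.show()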