Code example #1
def test_MVCNN(test_description):
    print("\nTest MVCNN\n")
    MVCNN = 'mvcnn'
    RESNET = 'resnet'
    MODELS = [RESNET, MVCNN]
    DATA_PATH = globals.DATA_PATH
    DEPTH = 18
    MODEL = MODELS[1]
    PRETRAINED = True

    REMOTE = os.uname()[1] == "fry"  # on the remote host "fry", plots are not shown

    criterion = nn.CrossEntropyLoss()

    print('Loading data')

    transform = transforms.Compose([
        transforms.CenterCrop(500),
        transforms.Resize(224),
        transforms.ToTensor(),
    ])

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Load dataset
    dset_test = MultiViewDataSet(DATA_PATH, 'test', transform=transform)
    test_loader = DataLoader(dset_test,
                             batch_size=4,
                             shuffle=False,
                             num_workers=2)

    classes = dset_test.classes
    print(len(classes), classes)

    if MODEL == RESNET:
        if DEPTH == 18:
            model = resnet18(pretrained=PRETRAINED, num_classes=4)
        elif DEPTH == 34:
            model = resnet34(pretrained=PRETRAINED, num_classes=4)
        elif DEPTH == 50:
            model = resnet50(pretrained=PRETRAINED, num_classes=4)
        elif DEPTH == 101:
            model = resnet101(pretrained=PRETRAINED, num_classes=4)
        elif DEPTH == 152:
            model = resnet152(pretrained=PRETRAINED, num_classes=4)
        else:
            raise Exception(
                'Specify number of layers for resnet in command line. --resnet N'
            )
        print('Using ' + MODEL + str(DEPTH))
    else:
        model = mvcnn(pretrained=PRETRAINED, num_classes=len(classes))
        print('Using ' + MODEL)

    model.to(device)
    cudnn.benchmark = True

    print('Running on ' + str(device))

    # The code above is setup; the evaluation logic starts here.
    ###########
    PATH = "checkpoint/" + test_description + "/mvcnn_checkpoint.pth.tar"
    loaded_model = torch.load(PATH)
    model.load_state_dict(loaded_model['state_dict'])
    model.eval()

    fig, axs = plt.subplots(2)
    fig.suptitle('Training loss and accuracy per epoch')
    axs[0].plot(loaded_model['loss_per_epoch'], 'r')
    axs[1].plot(loaded_model['acc_per_epoch'], 'b')
    if not REMOTE:
        plt.show()

    correct = 0
    total = 0
    print("we have total of ", len(test_loader), " batches")
    for i, (inputs, targets) in enumerate(test_loader):
        print("..processing batch", i)
        with torch.no_grad():
            # Stack the list of per-view batches into a single
            # (batch, views, C, H, W) tensor.
            inputs = np.stack(inputs, axis=1)
            inputs = torch.from_numpy(inputs)

            inputs, targets = inputs.to(device), targets.to(device)

            # compute output
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            _, predicted = torch.max(outputs.data, 1)

            total += targets.size(0)
            correct += (predicted.cpu() == targets.cpu()).sum()

    acc = 100 * correct.item() / total
    print("total Accuracy:", acc)

    output_file = PATH[:PATH.rfind('/') + 1] + "output.txt"
    with open(output_file, "a+") as f:
        f.write(test_description)
        f.write("\nAccuracy: %.2f\nLoss: %.4f\n\n" % (acc, loss.item()))
    print(output_file)
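
The np.stack(inputs, axis=1) call above assumes the DataLoader yields a Python list with one (batch, C, H, W) tensor per view, which is then stacked into a single (batch, views, C, H, W) batch. A minimal sketch of that shape transformation with dummy tensors (the view count and image size below are assumptions, not values taken from the project):

# Sketch only: dummy tensors standing in for MultiViewDataSet batches.
import numpy as np
import torch

BATCH, VIEWS, C, H, W = 4, 12, 3, 224, 224                  # assumed sizes
views = [torch.rand(BATCH, C, H, W) for _ in range(VIEWS)]  # one tensor per view

stacked = np.stack(views, axis=1)                           # (BATCH, VIEWS, C, H, W)
inputs = torch.from_numpy(stacked)
print(inputs.shape)                                         # torch.Size([4, 12, 3, 224, 224])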
Code example #2
def train_MVCNN(case_description):
    print("\nTrain MVCNN\n")
    MVCNN = 'mvcnn'
    RESNET = 'resnet'
    MODELS = [RESNET, MVCNN]

    DATA_PATH = globals.DATA_PATH
    DEPTH = None
    MODEL = MODELS[1]
    EPOCHS = 100
    BATCH_SIZE = 10
    LR = 0.0001
    MOMENTUM = 0.9
    LR_DECAY_FREQ = 30
    LR_DECAY = 0.1
    PRINT_FREQ = 10
    RESUME = ""
    PRETRAINED = True

    REMOTE = globals.REMOTE

    print('Loading data')

    transform = transforms.Compose([
        transforms.CenterCrop(500),
        transforms.Resize(224),
        transforms.ToTensor(),
    ])

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Load dataset
    dset_train = MultiViewDataSet(DATA_PATH, 'train', transform=transform)
    train_loader = DataLoader(dset_train,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              num_workers=2)

    # Validation is not used here, so the val loader below is left commented out.
    # dset_val = MultiViewDataSet(DATA_PATH, 'test', transform=transform)
    # val_loader = DataLoader(dset_val, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)

    classes = dset_train.classes
    print(len(classes), classes)

    if MODEL == RESNET:
        if DEPTH == 18:
            model = resnet18(pretrained=PRETRAINED, num_classes=len(classes))
        elif DEPTH == 34:
            model = resnet34(pretrained=PRETRAINED, num_classes=len(classes))
        elif DEPTH == 50:
            model = resnet50(pretrained=PRETRAINED, num_classes=len(classes))
        elif DEPTH == 101:
            model = resnet101(pretrained=PRETRAINED, num_classes=len(classes))
        elif DEPTH == 152:
            model = resnet152(pretrained=PRETRAINED, num_classes=len(classes))
        else:
            raise Exception(
                'Specify number of layers for resnet in command line. --resnet N'
            )
        print('Using ' + MODEL + str(DEPTH))
    else:
        # num_classes must match the 40 ModelNet40 classes of the loaded pre-trained model
        model = mvcnn(pretrained=PRETRAINED, num_classes=40)
        print('Using ' + MODEL)

    cudnn.benchmark = True

    print('Running on ' + str(device))
    """
    Load pre-trained model and freeze weights for training.
    This is done by setting param.requires_grad to False
    """
    """Just added this check to load my pretrained model instead of copying it to the repo and having a duplicate"""
    if REMOTE:
        PATH = "../../MVCNN_Peter/checkpoint/mvcnn18_checkpoint.pth.tar"
    else:
        PATH = "checkpoint/model_from_pete.tar"

    loaded_model = torch.load(PATH)
    model.load_state_dict(loaded_model['state_dict'])
    for param in model.parameters():
        param.requires_grad = False
    num_ftrs = model.classifier[6].in_features
    model.classifier[6] = nn.Linear(num_ftrs, len(classes))

    model.to(device)

    print(model)

    logger = Logger('logs')

    # Loss and Optimizer
    lr = LR
    n_epochs = EPOCHS
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    best_acc = 0.0
    start_epoch = 0

    # Helper functions
    def load_checkpoint():
        nonlocal best_acc, start_epoch
        # Load checkpoint.
        print('\n==> Loading checkpoint..')
        assert os.path.isfile(RESUME), 'Error: no checkpoint file found!'

        checkpoint = torch.load(RESUME)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])

    def train():
        train_size = len(train_loader)
        loss = None
        total = 0
        correct = 0
        for i, (inputs, targets) in enumerate(train_loader):

            # Stack the list of per-view batches into a single
            # (batch, views, C, H, W) tensor.
            inputs = np.stack(inputs, axis=1)
            inputs = torch.from_numpy(inputs)

            inputs, targets = inputs.to(device), targets.to(device)

            # compute output
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += (predicted.cpu() == targets.cpu()).sum().item()
            """
            print("total: ", total)
            print("correct: ", correct)
            print()
            """

            # compute gradient and do SGD step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (i + 1) % PRINT_FREQ == 0:
                print("\tIter [%d/%d] Loss: %.4f" %
                      (i + 1, train_size, loss.item()))

        return loss.item(), int(100 * correct / total)

    # Training / Eval loop
    if RESUME:
        load_checkpoint()

    # best_acc is initialized above and may be restored by load_checkpoint(),
    # so it is not reset here.
    best_loss = 0
    loss_values = []
    acc_values = []
    for epoch in range(start_epoch, n_epochs):
        print('\n-----------------------------------')
        print('Epoch: [%d/%d]' % (epoch + 1, n_epochs))
        start = time.time()

        model.train()
        (t_loss, t_acc) = train()
        loss_values.append(t_loss)
        acc_values.append(t_acc)

        print("Total loss: " + str(t_loss))
        print("Accuracy: " + str(t_acc) + "%")

        print('Time taken: %.2f sec.' % (time.time() - start))

        if t_acc > best_acc:
            print("UPDATE")
            print("UPDATE")
            print("UPDATE")
            print("UPDATE")
            print("UPDATE")
            best_acc = t_acc
            best_loss = t_loss
            util.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'loss_per_epoch': loss_values,
                    'acc_per_epoch': acc_values,
                    'optimizer': optimizer.state_dict(),
                }, MODEL, DEPTH, case_description)

        # Decaying Learning Rate
        if (epoch + 1) % LR_DECAY_FREQ == 0:
            lr *= LR_DECAY
            optimizer = torch.optim.Adam(model.parameters(), lr=lr)
            print('Learning rate:', lr)

    fig, axs = plt.subplots(2)
    fig.suptitle('Training loss and accuracy per epoch')
    axs[0].plot(loss_values, 'r')
    axs[1].plot(acc_values, 'b')

    if not REMOTE:
        plt.show()
    else:
        plt.savefig("plots/training.png")
Code example #3
File: xyz.py  Project: vaibhavnayel/MVCNN-SA
from models.mvcnn import *
from models.mvcnn_att import *
from models.resnet_att import *
# the mvcnn, mvcnn_att and resnet*_att constructors come from the imports above


def params(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


print(params(resnet18_att(num_classes=3)))
print(params(resnet34_att(num_classes=3)))
print(params(resnet50_att(num_classes=3)))
print(params(mvcnn()))
print(params(mvcnn_att()))
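
Example #2 freezes the backbone and replaces only the final classifier layer, and the params() idiom above is a convenient way to confirm how many parameters remain trainable after such a freeze. A short sketch using torchvision's resnet18 as a stand-in backbone (not the project's mvcnn):

# Sketch only: verifying a freeze with the params() idiom on a torchvision model.
import torch.nn as nn
from torchvision.models import resnet18

def params(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

net = resnet18()
print(params(net))                          # every parameter is trainable

for p in net.parameters():                  # freeze the whole network...
    p.requires_grad = False
net.fc = nn.Linear(net.fc.in_features, 4)   # ...then swap in a new head (trainable by default)
print(params(net))                          # only the new head's parameters are counted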
Code example #4
    if args.depth == 18:
        model = resnet18_att(pretrained=args.pretrained,
                             num_classes=len(classes))
    elif args.depth == 34:
        model = resnet34_att(pretrained=args.pretrained,
                             num_classes=len(classes))
    elif args.depth == 50:
        model = resnet50_att(pretrained=args.pretrained,
                             num_classes=len(classes))
    elif args.depth == 101:
        model = resnet101_att(pretrained=args.pretrained,
                              num_classes=len(classes))
    elif args.depth == 152:
        model = resnet152_att(pretrained=args.pretrained,
                              num_classes=len(classes))
    else:
        raise Exception(
            'Specify number of layers for resnet in command line. --resnet N')
    print('Using ' + args.model + str(args.depth))
elif args.model == 'mvcnn':
    model = mvcnn(pretrained=args.pretrained, num_classes=len(classes))
    print('Using ' + args.model)

else:
    model = mvcnn_att(pretrained=args.pretrained, num_classes=len(classes))
    print('Using ' + args.model)

model.to(device)
cudnn.benchmark = True

# print('Running on ' + str(device))

logger = Logger('logs/' + START)

# Loss and Optimizer
lr = args.lr
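
This example and the next one read args.model, args.depth, args.pretrained and args.lr from an args namespace that is not shown in the snippets. A minimal argparse sketch consistent with those attributes is given below; the flag names and defaults are assumptions, not the project's actual CLI:

# Sketch only: a hypothetical CLI matching the args.* attributes used here.
import argparse

parser = argparse.ArgumentParser(description="MVCNN / ResNet training")
parser.add_argument("--model", choices=["resnet", "resnet_att", "mvcnn", "mvcnn_att"],
                    default="mvcnn")
parser.add_argument("--depth", type=int, default=18,
                    help="number of ResNet layers (18, 34, 50, 101 or 152)")
parser.add_argument("--pretrained", action="store_true")
parser.add_argument("--lr", type=float, default=0.0001)
args = parser.parse_args()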
Code example #5
    if args.depth == 18:
        model = resnet18(pretrained=args.pretrained, num_classes=40)
    elif args.depth == 34:
        model = resnet34(pretrained=args.pretrained, num_classes=40)
    elif args.depth == 50:
        model = resnet50(pretrained=args.pretrained, num_classes=40)
    elif args.depth == 101:
        model = resnet101(pretrained=args.pretrained, num_classes=40)
    elif args.depth == 152:
        model = resnet152(pretrained=args.pretrained, num_classes=40)
    else:
        raise Exception(
            'Specify number of layers for resnet in command line. --resnet N')
    print('Using ' + args.model + str(args.depth))
else:
    model = mvcnn(pretrained=args.pretrained, num_classes=40)
    print('Using ' + args.model)

model.to(device)
cudnn.benchmark = True

print('Running on ' + str(device))

# The code above is setup; the evaluation logic starts here.
###########
PATH = "checkpoint/model_from_pete.tar"
loaded_model = torch.load(PATH)
model.load_state_dict(loaded_model['state_dict'])
model.eval()

correct = 0