示例#1
0
def main():
    """Fine-tune a dense zoomout classifier on Pascal VOC.

    Loads a pretrained fully-connected classifier from --model_path, wraps it
    in a DenseClassifier, and alternates one train / one val pass per epoch.
    The zoomout feature extractor is frozen throughout.
    """
    # You can add any args you want here
    parser = argparse.ArgumentParser(description='Hyperparams')
    parser.add_argument('--model_path',
                        nargs='?',
                        type=str,
                        default='./models/fc_cls.pkl',
                        help='Path to the saved model')
    parser.add_argument('--n_epoch',
                        nargs='?',
                        type=int,
                        default=10,
                        help='# of the epochs')
    parser.add_argument('--batch_size',
                        nargs='?',
                        type=int,
                        default=2,
                        help='Batch Size')
    parser.add_argument('--l_rate',
                        nargs='?',
                        type=float,
                        default=1e-3,
                        help='Learning Rate')

    args = parser.parse_args()

    zoomout = Zoomout().float()
    zoomout = zoomout.to(device=device)

    # we will not train the feature extractor
    for param in zoomout.parameters():
        param.requires_grad = False

    # map_location lets a checkpoint saved on GPU load on a CPU-only host
    # (without it torch.load tries to restore tensors to the saving device).
    fc_classifier = torch.load(args.model_path, map_location=device)
    fc_classifier = fc_classifier.to(device=device)

    classifier = DenseClassifier(fc_model=fc_classifier).float()
    classifier = classifier.to(device=device)

    optimizer = optim.Adam(classifier.parameters(),
                           lr=args.l_rate)  # Start in range [1e-3, 1e-4].

    dataset_train = PascalVOC(split='train')
    dataset_val = PascalVOC(split='val')

    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=4)

    val_loader = torch.utils.data.DataLoader(dataset_val,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=4)

    for epoch in range(args.n_epoch):
        print(epoch)
        train(args, zoomout, classifier, train_loader, optimizer, epoch)
        val(args, zoomout, classifier, val_loader)
示例#2
0
def main():
    """Continue training a dense zoomout classifier from a saved full model.

    Loads ./models/full_model.pkl (a previously-trained DenseClassifier) and
    runs training epochs only (no validation pass in this variant).
    """
    # You can add any args you want here
    parser = argparse.ArgumentParser(description='Hyperparams')
    parser.add_argument('--model_path',
                        nargs='?',
                        type=str,
                        default='zoomoutscratch_pascal_1_6.pkl',
                        help='Path to the saved model')
    parser.add_argument('--n_epoch',
                        nargs='?',
                        type=int,
                        default=10,
                        help='# of the epochs')
    parser.add_argument('--batch_size',
                        nargs='?',
                        type=int,
                        default=2,
                        help='Batch Size')
    parser.add_argument('--l_rate',
                        nargs='?',
                        type=float,
                        default=1e-4,
                        help='Learning Rate')

    args = parser.parse_args()

    zoomout = Zoomout().float()

    # we will not train the feature extractor
    for param in zoomout.parameters():
        param.requires_grad = False

    # First-time training path: build a fresh DenseClassifier from the FC
    # checkpoint instead of loading the full model below.
    fc_classifier = torch.load("./models/fc_cls.pkl")
    ##classifier = DenseClassifier(fc_model=fc_classifier).float()
    classifier = torch.load("./models/full_model.pkl")

    # Honor the --l_rate flag instead of a hard-coded 1e-4 (default is the
    # same value, so default behavior is unchanged).
    optimizer = optim.SGD(classifier.parameters(), lr=args.l_rate)

    dataset_train = PascalVOC(split='train')
    dataset_val = PascalVOC(split='val')

    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=4)

    val_loader = torch.utils.data.DataLoader(dataset_val,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=1)

    for epoch in range(args.n_epoch):
        train(args, zoomout, classifier, train_loader, optimizer, epoch)
示例#3
0
def main():
    """Dump zoomout hypercolumn features/labels for the Pascal VOC train split.

    NOTE(review): this definition is shadowed by a later `def main` in this
    file, so it is unreachable as written — confirm which one is intended.
    """
    extractor = Zoomout().cpu().float()
    # Inference-only feature extractor: freeze all weights.
    for weight in extractor.parameters():
        weight.requires_grad = False

    train_split = PascalVOC(split='train')
    feats, targets = extract_samples(extractor, train_split)

    np.save("./features/feats_x.npy", feats)
    np.save("./features/feats_y.npy", targets)
def main():
    """Extract and save zoomout hypercolumn features for all dataset splits.

    Parses batch-job arguments, picks CPU/GPU, freezes the zoomout extractor,
    and calls save_features() for the train/val/test splits.
    """

    np.random.seed(seed=0)

    def _str2bool(s):
        # argparse's type=bool is broken for flags: bool("False") is True
        # because any non-empty string is truthy. Parse the text explicitly.
        return str(s).lower() not in ('false', '0', 'no')

    # Initialize batch job arguments
    parser = argparse.ArgumentParser(description='Hyperparameters')
    parser.add_argument('--feature_path',
                        nargs='?',
                        type=str,
                        default='/scratch/jonathantan/cv/features',
                        help='Directory to save hypercols to')
    parser.add_argument('--out_path',
                        nargs='?',
                        type=str,
                        default='/scratch/jonathantan/cv/results',
                        help='Directory to save results to')
    parser.add_argument('--n_epoch',
                        nargs='?',
                        type=int,
                        default=10,
                        help='# of epochs')
    parser.add_argument('--batch_size',
                        nargs='?',
                        type=int,
                        default=1,
                        help='Batch size')
    parser.add_argument('--l_rate',
                        nargs='?',
                        type=float,
                        default=1e-3,
                        help='Learning rate')
    parser.add_argument('--use_gpu',
                        nargs='?',
                        type=_str2bool,
                        default=True,
                        help='Whether to use GPU if available')
    args = parser.parse_args()

    # Setup for GPU
    if args.use_gpu and torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    print("Using device:", device)

    # Initialize hypercolumn feature extractor (frozen; inference only)
    zoomout = Zoomout().float().to(device=device)
    for param in zoomout.parameters():
        param.requires_grad = False

    # Load data, extract features, save features
    for split in ('train', 'val', 'test'):
        save_features(args, zoomout, split)
示例#5
0
def main():
    """Extract zoomout features for Pascal VOC train and save per-dimension stats.

    Saves the raw features/labels plus the per-feature mean and std used to
    normalize features at training time.
    """
    zoomout = Zoomout().cpu().float()
    # Frozen feature extractor; no gradients needed.
    for param in zoomout.parameters():
        param.requires_grad = False
    dataset_train = PascalVOC(split='train')
    features, labels = extract_samples(zoomout, dataset_train)
    np.save("./features/feats_x.npy", features)
    np.save("./features/feats_y.npy", labels)
    dataset_x = features
    # BUG FIX: NumPy's keyword is axis=, not dim= (dim is the torch keyword);
    # the original raised TypeError here.
    means = np.mean(dataset_x, axis=0)
    stds = np.std(dataset_x, axis=0)
    np.save("./features/mean.npy", means)
    np.save("./features/std.npy", stds)
示例#6
0
def main():
    """Save zoomout features, labels, and both global and per-sample stats.

    extract_samples() here returns global mean/std alongside the data; the
    per-dimension stats are recomputed from the feature tensor itself.
    Assumes `features` is a torch tensor of width 1472 — it uses .mean(dim=0)
    and .view(); TODO confirm against extract_samples().
    """
    extractor = Zoomout().cpu().float()
    # Frozen: features are extracted, never trained.
    for weight in extractor.parameters():
        weight.requires_grad = False

    train_split = PascalVOC(split='train')
    features, labels, mean, std = extract_samples(extractor, train_split)

    # Raw data plus the global statistics reported by the extractor.
    for path, arr in (("./features/feats_x.npy", features),
                      ("./features/feats_y.npy", labels),
                      ("./features/mean_global.npy", mean),
                      ("./features/std_global.npy", std)):
        np.save(path, arr)

    # Per-dimension statistics over the extracted samples, shaped (1, 1472).
    sample_mean = features.mean(dim=0).view(1, 1472)
    sample_std = features.std(dim=0).view(1, 1472)
    np.save("./features/mean_sample.npy", sample_mean)
    np.save("./features/std_sample.npy", sample_std)
示例#7
0
def main():
    """Train a DenseClassifier on xView2 with frozen zoomout features.

    Supports resuming from a saved model/loss history, tracks the best model
    by validation loss, evaluates it on the test set, and saves artifacts.
    """
    import copy  # local import: used to snapshot the best model weights

    np.random.seed(seed=0)

    def _str2bool(s):
        # argparse's type=bool is broken: bool("False") is True because any
        # non-empty string is truthy. Parse the flag text explicitly.
        return str(s).lower() not in ('false', '0', 'no')

    # Initialize batch job arguments
    parser = argparse.ArgumentParser(description='Hyperparameters')
    parser.add_argument('--load_saved_model',
                        nargs='?',
                        type=_str2bool,
                        default=False,
                        help='If true, loads existing most_recent_model.pt')
    parser.add_argument('--model_path',
                        nargs='?',
                        type=str,
                        default='./models',
                        help='Path to the saved models')
    parser.add_argument('--feature_path',
                        nargs='?',
                        type=str,
                        default='/scratch/jonathantan/cv/features',
                        help='Path to the saved hypercol features')
    parser.add_argument('--out_path',
                        nargs='?',
                        type=str,
                        default='/scratch/jonathantan/cv/results',
                        help='Directory to save results to')
    parser.add_argument('--n_epoch',
                        nargs='?',
                        type=int,
                        default=3,
                        help='# of epochs')
    parser.add_argument('--batch_size',
                        nargs='?',
                        type=int,
                        default=1,
                        help='Batch size')
    parser.add_argument('--l_rate',
                        nargs='?',
                        type=float,
                        default=1e-3,
                        help='Learning rate')
    parser.add_argument('--n_hidden',
                        nargs='?',
                        type=int,
                        default=1024,
                        help='# of hidden units in MLP')
    parser.add_argument('--use_gpu',
                        nargs='?',
                        type=_str2bool,
                        default=True,
                        help='Whether to use GPU if available')
    args = parser.parse_args()

    # Setup GPU settings
    if args.use_gpu and torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    print("Training using", device)

    # Setup feature extractor; no learning required
    zoomout = Zoomout().float().to(device=device)
    for param in zoomout.parameters():
        param.requires_grad = False

    # Load fully connected classifier
    fc_classifier = FCClassifier(device=device, n_hidden=args.n_hidden).float()
    fc_classifier.load_state_dict(
        torch.load(os.path.join(args.model_path, "best_fc_dict.pt")))

    # Load previous model state if pretrained
    classifier = DenseClassifier(fc_model=fc_classifier,
                                 device=device,
                                 n_hidden=args.n_hidden).float()
    saved_model_path = os.path.join(args.model_path, "most_recent_model.pt")
    if args.load_saved_model and os.path.exists(saved_model_path):
        print("Loading saved state and continuing training...")
        classifier.load_state_dict(torch.load(saved_model_path))

    # Set up optimizer
    optimizer = optim.Adam(classifier.parameters(), lr=args.l_rate)

    # Use inverse class weights for loss function - true distribution is 0.95-0.05
    WEIGHTS = torch.Tensor([0.05, 0.95]).to(device=device)
    loss_fn = nn.CrossEntropyLoss(weight=WEIGHTS)
    del WEIGHTS

    # Setup datasets
    dataset_train = xView2(split='train')
    dataset_val = xView2(split='val')
    dataset_test = xView2(split='test')

    # Wrap data in dataloader classes
    train_loader = data.DataLoader(dataset_train,
                                   batch_size=args.batch_size,
                                   num_workers=0,
                                   shuffle=True)
    val_loader = data.DataLoader(dataset_val,
                                 batch_size=args.batch_size,
                                 num_workers=0,
                                 shuffle=False)
    test_loader = data.DataLoader(dataset_test,
                                  batch_size=args.batch_size,
                                  num_workers=0,
                                  shuffle=False)
    del dataset_train, dataset_val, dataset_test

    # Load saved model performance if available
    saved_losses_path = os.path.join(args.model_path,
                                     "most_recent_model_state.npy")
    if args.load_saved_model and os.path.exists(saved_losses_path):
        # The file holds a (train_losses, val_losses) tuple saved as an object
        # array, so allow_pickle is required on NumPy >= 1.16.3.
        train_losses, val_losses = np.load(saved_losses_path,
                                           allow_pickle=True)
        train_losses, val_losses = train_losses.tolist(), val_losses.tolist()
    else:
        train_losses, val_losses = [], []

    # Main training loop. BUG FIX: the original referenced an undefined
    # `best_model`; we now snapshot the weights with the lowest val loss.
    best_val_loss = min(val_losses) if val_losses else float('inf')
    best_state = None
    start_epoch = len(train_losses)
    for epoch in range(start_epoch, start_epoch + args.n_epoch):
        train_loss = train(args, zoomout, classifier, train_loader, optimizer,
                           loss_fn, device, epoch)
        val_loss = test(args,
                        zoomout,
                        classifier,
                        val_loader,
                        loss_fn,
                        device,
                        val=True)
        train_losses.append(train_loss)
        val_losses.append(val_loss)
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            # Deep-copy: state_dict() tensors alias the live model and would
            # otherwise keep changing as training continues.
            best_state = copy.deepcopy(classifier.state_dict())

    # Restore the best-by-validation weights (if any epoch improved).
    best_model = classifier
    if best_state is not None:
        best_model.load_state_dict(best_state)

    # Run best model on test set
    print("Validation with best model:")
    test(args, zoomout, best_model, test_loader, loss_fn, device, val=False)

    # Export learning curve and save current loss performance
    plot_learning_curve(args, train_losses, val_losses)
    np.save(os.path.join(args.model_path, "most_recent_model_state.npy"),
            (train_losses, val_losses))

    # Save best model
    SAVE_PATH = os.path.join(args.model_path, "best_model_dict.pt")
    torch.save(best_model.state_dict(), SAVE_PATH)
示例#8
0
def main():
    """Train or visualize a dense zoomout classifier on Pascal VOC.

    With --pretrained_viz, loads a saved model and only runs validation/viz;
    otherwise fine-tunes the classifier, validating each epoch (the very
    first epoch also validates the untuned weights up front).
    """

    def _str2bool(s):
        # BUG FIX: argparse's type=bool treats any non-empty string as True
        # (bool("False") is True), so `--pretrained_viz False` used to enable
        # viz mode. Parse the flag text explicitly instead.
        return str(s).lower() not in ('false', '0', 'no')

    # You can add any args you want here
    parser = argparse.ArgumentParser(description='Hyperparams')
    parser.add_argument('--model_path',
                        nargs='?',
                        type=str,
                        default='./models/zoomoutscratch_pascal_1_6.pkl',
                        help='Path to the saved model')
    parser.add_argument('--pretrained_viz',
                        nargs='?',
                        type=_str2bool,
                        default=False,
                        help='Viz using saved model, No training')
    parser.add_argument('--n_epoch',
                        nargs='?',
                        type=int,
                        default=10,
                        help='# of the epochs')
    parser.add_argument('--batch_size',
                        nargs='?',
                        type=int,
                        default=2,
                        help='Batch Size')
    parser.add_argument('--l_rate',
                        nargs='?',
                        type=float,
                        default=1e-4,
                        help='Learning Rate')

    args = parser.parse_args()

    zoomout = Zoomout().float()

    # we will not train the feature extractor
    for param in zoomout.parameters():
        param.requires_grad = False

    fc_classifier = torch.load('./models/fc_cls.pkl')
    classifier = DenseClassifier(fc_model=fc_classifier).float()

    # NOTE(review): Adam in [1e-3, 1e-4] was the suggested optimizer; SGD is
    # kept here to preserve the script's existing behavior.
    optimizer = optim.SGD(classifier.parameters(), lr=args.l_rate)

    dataset_train = PascalVOC(split='train')
    dataset_val = PascalVOC(split='val')

    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=4)

    val_loader = torch.utils.data.DataLoader(dataset_val,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=4)

    if args.pretrained_viz:
        print('Vizualization of val dataset on pretrained model...')
        classifier = torch.load(args.model_path)
        val(args, zoomout, classifier, val_loader, epoch=1)
    else:
        for epoch in range(args.n_epoch):
            if epoch == 0:
                # Baseline: validate the un-fine-tuned weights first.
                val(args, zoomout, classifier, val_loader, epoch)
                train(args, zoomout, classifier, train_loader, optimizer,
                      epoch)
                # post-train val skipped for the first epoch (already ran)
            else:
                train(args, zoomout, classifier, train_loader, optimizer,
                      epoch)
                val(args, zoomout, classifier, val_loader, epoch)