Code example #1
File: Cifar10.py  Project: zijunwei/RRSVM_pytorch
def get_cifar10_datasets(args, train_portion=1.0):

    kwargs = {'num_workers': 4, 'pin_memory': True} if args.cuda else {'num_workers': 4}
    dataset_root = dir_utils.get_dir(os.path.join(os.path.expanduser('~'), 'datasets', 'RRSVM_datasets'))

    train_set = datasets.CIFAR10(root=dataset_root, train=True, download=True, transform=transform_train)

    if train_portion < 1.0:
        np.random.seed(args.seed or 0)
        n_samples = len(train_set)
        categorical_labels = list(set(train_set.train_labels))
        n_categories = len(categorical_labels)
        # evenly sample:
        selected_indices = []
        for idx in range(n_categories):
            categorical_idx = [i for i in range(n_samples) if train_set.train_labels[i] == categorical_labels[idx]]
            n_categorical_samples = len(categorical_idx)
            indices = np.random.permutation(n_categorical_samples)
            relative_indices = indices[:int(n_categorical_samples * train_portion)]

            s_selected_indices = [categorical_idx[i] for i in relative_indices]
            selected_indices.extend(s_selected_indices)

        train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.train_batch_size,
                                                   sampler=SubsetRandomSampler(selected_indices), **kwargs)
    else:

        train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.train_batch_size, shuffle=True, **kwargs)

    testset = datasets.CIFAR10(root=dataset_root, train=False, download=True, transform=transform_test)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, **kwargs)
    return train_loader, test_loader
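
Every snippet in this listing routes its output paths through dir_utils.get_dir before writing to disk. The project's implementation is not shown here; a minimal sketch, assuming it only creates the directory (and any missing parents) when it does not exist and then returns the path unchanged, could look like this:

# Hypothetical sketch of dir_utils.get_dir -- an assumption, not the project's actual code.
import os

def get_dir(path):
    # Create the directory (including parents) if it is missing, then hand back the same path.
    if not os.path.isdir(path):
        os.makedirs(path)
    return path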
Code example #2
def test(epoch):
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    total_n = 0
    pbar = progressbar.ProgressBar(max_value=n_testdata // test_batch_size)
    for batch_idx, (inputs, targets) in enumerate(testloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs = net(inputs)
        loss = criterion(outputs, targets)

        test_loss += loss.data[0]
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        pbar.update(batch_idx)
        total_n = batch_idx + 1
    # Save checkpoint.
    t_acc = 100. * correct / total
    t_loss = test_loss / (total_n)

    w_line = '\nVal:\t{:d}\t{:.04f}\t{:.04f}\n'.format(epoch, t_loss, t_acc)
    print(w_line)
    save_dir = dir_utils.get_dir('./snapshots/cifar10_letnet_{:s}'.format(
        args.model))
    log_file = os.path.join(save_dir, 'log.txt')
    if not os.path.isfile(log_file):
        with open(log_file, 'w') as f:
            f.write(w_line)
    else:
        with open(log_file, 'a') as f:
            f.write(w_line)
    sys.stdout.flush()

    if t_acc > best_acc:
        print('Saving..')
        state = {
            'net': net.state_dict(),
            'optimizer': optimizer.state_dict(),
            'acc': t_acc,
            'loss': t_loss,
            'epoch': epoch,
        }

        torch.save(state, os.path.join(save_dir, 'ckpt.t7'))
        best_acc = t_acc
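
The test loop above relies on pre-0.4 PyTorch idioms (Variable(..., volatile=True) and loss.data[0]). On current PyTorch the same evaluation pass is normally written with torch.no_grad() and loss.item(); the following is a minimal sketch of the equivalent loop, assuming the same net, criterion, and testloader objects, not a drop-in replacement for the repository's function:

# Sketch of the evaluation loop for PyTorch >= 0.4 (illustration only).
import torch

def evaluate(net, criterion, testloader, device):
    net.eval()
    test_loss, correct, total, n_batches = 0.0, 0, 0, 0
    with torch.no_grad():  # replaces Variable(volatile=True)
        for inputs, targets in testloader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            test_loss += criterion(outputs, targets).item()  # replaces loss.data[0]
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            n_batches += 1
    return test_loss / n_batches, 100.0 * correct / total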
Code example #3
def main():
    global args, best_mAP
    args = parser.parse_args()

    if args.finetune:
        print("=> using pre-trained model")
        pretrained = True
    else:
        print("=> creating model from new")
        pretrained = False

    if args.model.lower() == 'orig':
        print("Using Original Model")
        useRRSVM = False
    elif args.model.lower() == 'rrsvm':
        print("Using RRSVM Model")
        useRRSVM = True
    else:
        raise NotImplementedError

    model = inception_v3.inception_v3(pretrained, useRRSVM=useRRSVM)

    print("Number of Params in InceptionV3-{:s}\t{:d}".format(args.model, sum([p.data.nelement() for p in model.parameters()])))

    criterion = nn.CrossEntropyLoss()

    optimizer = torch.optim.SGD(filter(lambda p:p.requires_grad,  model.parameters()), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    p_constraint = False
    if args.positive_constraint:
        p_constraint = True

    use_cuda = torch.cuda.is_available() and (args.gpu_id is not None or args.multiGpu)

    if use_cuda:
        if args.multiGpu:
            if args.gpu_id is None: # using all the GPUs
                device_count = torch.cuda.device_count()
                print("Using ALL {:d} GPUs".format(device_count))
                model = nn.DataParallel(model, device_ids=[i for i in range(device_count)]).cuda()
            else:
                print("Using GPUs: {:s}".format(args.gpu_id))
                device_ids = [int(x) for x in args.gpu_id]
                model = nn.DataParallel(model, device_ids=device_ids).cuda()


        else:
            torch.cuda.set_device(int(args.gpu_id))
            model.cuda()

        criterion.cuda()
        cudnn.benchmark = True

    global save_dir
    save_dir = './snapshots/ImageNet_Inceptionv3_{:s}'.format(args.model.upper())
    if args.positive_constraint:
        save_dir = save_dir + '_p'
    if args.finetune:
        save_dir = save_dir + '_finetune'

    save_dir = dir_utils.get_dir(save_dir)

    # optionally resume from a checkpoint
    best_prec1 = 0  # best top-1 validation accuracy so far; overwritten below when resuming
    if args.resume:

        # if os.path.isfile(args.resume):
        ckpt_filename = 'model_best.ckpt.t7'
        assert os.path.isfile(os.path.join(save_dir, ckpt_filename)), 'Error: no checkpoint directory found!'

        checkpoint = torch.load(os.path.join(save_dir, ckpt_filename), map_location=lambda storage, loc: storage)
        args.start_epoch = checkpoint['epoch']
        best_prec1 = checkpoint['prec1']
        model.load_state_dict(checkpoint['state_dict'])
        # TODO: check how to load optimizer correctly
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loading checkpoint '{}', epoch: {:d}".format(ckpt_filename, args.start_epoch))

    else:
        print('==> Training with NO History..')
        if os.path.isfile(os.path.join(save_dir, 'log.txt')):
            os.remove(os.path.join(save_dir, 'log.txt'))

    user_root = os.path.expanduser('~')
    dataset_path = os.path.join(user_root, 'datasets/imagenet12')
    traindir = os.path.join(dataset_path, 'train')
    valdir = os.path.join(dataset_path, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomSizedCrop(299),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Scale(341),
            transforms.CenterCrop(299),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion, use_cuda)
        return

    for epoch in range(args.start_epoch, args.n_epochs):
        # if args.distributed:
        #     train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args.finetune)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, p_constraint, use_cuda)

        # evaluate on validation set
        prec1, prec5 = validate(val_loader, model, criterion, use_cuda)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'prec1': prec1,
            'prec5': prec5,
            'optimizer': optimizer.state_dict(),
        }, is_best, filename=os.path.join(save_dir, '{:04d}_checkpoint.pth.tar'.format(epoch)))
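
adjust_learning_rate is called at the top of the epoch loop above but is not part of this excerpt. A plausible sketch of what such a helper typically does (an assumption based on common ImageNet training recipes, not the repository's code) is a step decay of the base learning rate, with a smaller base rate when fine-tuning:

# Assumed implementation of adjust_learning_rate: simple step decay every `step` epochs.
def adjust_learning_rate(optimizer, epoch, finetune, base_lr=0.1, step=30):
    if finetune:
        base_lr *= 0.1  # start lower when fine-tuning a pre-trained model
    lr = base_lr * (0.1 ** (epoch // step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr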
Code example #4
def main():
    global args, best_mAP
    args = parser.parse_args()
    useRRSVM = True
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)

    model = Network.getRes101Model(eval=False,
                                   gpu_id=args.gpu_id,
                                   multiGpu=args.multiGpu,
                                   useRRSVM=useRRSVM)

    print("Number of Params in ResNet101\t{:d}".format(
        sum([p.data.nelement() for p in model.parameters()])))

    #TODO: add weight
    # criterion = nn.CrossEntropyLoss()
    # criterion = nn.MultiLabelSoftMarginLoss(weight=torch.FloatTensor([10,1]).cuda())
    # NOTE: the weights should match the paper's w_p = 10 and w_n = 1 (see the note after this example)
    criterion = Network.WeightedBCEWithLogitsLoss(
        weight=torch.FloatTensor([1, 10]))
    # criterion = nn.BCEWithLogitsLoss()
    if use_cuda:
        criterion.cuda()
        cudnn.benchmark = True

    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                       model.parameters()),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optimizer = torch.optim.(filter(lambda p:p.requires_grad,  model.parameters()), args.lr)

    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                        milestones=[20, 40])

    global save_dir
    save_dir = './snapshots/HICO_ResNet101_wBCE'
    save_dir = dir_utils.get_dir(save_dir)

    # optionally resume from a checkpoint
    if args.resume:
        # if os.path.isfile(args.resume):
        ckpt_filename = 'model_best.pth.tar'
        assert os.path.isfile(os.path.join(
            save_dir, ckpt_filename)), 'Error: no checkpoint directory found!'

        checkpoint = torch.load(os.path.join(save_dir, ckpt_filename),
                                map_location=lambda storage, loc: storage)
        # args.start_epoch = checkpoint['epoch']
        best_mAP = checkpoint['mAP']
        args.start_epoch = 0
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        # TODO: check how to load optimizer correctly
        # optimizer.load_state_dict(checkpoint['optimizer'])
        print(
            "=> loading checkpoint '{}', epoch: {:d}, current Precision: {:.04f}"
            .format(ckpt_filename, args.start_epoch, best_mAP))

    train_loader = torch.utils.data.DataLoader(HICODataLoader.HICODataset(
        split='train', transform=HICODataLoader.HICO_train_transform()),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    test_loader = torch.utils.data.DataLoader(HICODataLoader.HICODataset(
        split='test', transform=HICODataLoader.HICO_val_transform()),
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=args.workers,
                                              pin_memory=True)

    if args.evaluate:
        print("Evaluation Only")
        mAP, loss = validate(test_loader, model, criterion, use_cuda)

        return
    avg_train_losses = []
    avg_test_losses = []
    for epoch in range(args.start_epoch, args.epochs):

        lr_scheduler.step(epoch)
        print('Epoch\t{:d}\tLR {:.5f}'.format(
            epoch, optimizer.param_groups[0]['lr']))

        # train for one epoch
        _, avg_train_loss = train(train_loader, model, criterion, optimizer,
                                  epoch, use_cuda)

        # evaluate on validation set
        mAP, avg_test_loss = validate(test_loader, model, criterion, use_cuda)

        # remember best prec@1 and save checkpoint
        is_best = mAP > best_mAP
        best_mAP = max(mAP, best_mAP)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'mAP': mAP,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            filename=os.path.join(save_dir,
                                  '{:04d}_checkpoint.pth.tar'.format(epoch)))
        avg_train_losses.append(avg_train_loss)
        avg_test_losses.append(avg_test_loss)

        loss_record = {'train': avg_train_losses, 'test': avg_test_losses}
        with open(os.path.join(save_dir, 'loss.pkl'), 'wb') as handle:
            pkl.dump(loss_record, handle, protocol=pkl.HIGHEST_PROTOCOL)
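
Network.WeightedBCEWithLogitsLoss is a project-specific loss and is not reproduced here. As a point of reference only (an assumption about its intent, based on the w_p = 10, w_n = 1 comment above), modern PyTorch can express the same positive/negative weighting through the built-in pos_weight argument of BCEWithLogitsLoss, which scales only the positive term of the loss:

# Illustrative alternative, not the project's loss: weight positive labels 10x via pos_weight.
import torch
import torch.nn as nn

num_classes = 600  # assumed label count; HICO defines 600 human-object interaction categories
criterion = nn.BCEWithLogitsLoss(pos_weight=torch.full((num_classes,), 10.0))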
Code example #5
        model.cuda()

    criterion.cuda()
    cudnn.benchmark = True


# Model


save_dir = './snapshots/{:s}_{:s}'.format(args.dataset.lower(), identifier)
if args.positive_constraint:
    save_dir = './snapshots/{:s}_{:s}_p'.format(args.dataset.lower(), identifier)
if args.id is not None:
    save_dir = save_dir+args.id

save_dir = dir_utils.get_dir(save_dir)
if not args.verbose:
    log_file = os.path.join(save_dir, 'log.txt')
    sys.stdout = open(log_file, "w")

if args.resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isfile(os.path.join(save_dir, 'ckpt.t7')), 'Error: no checkpoint directory found!'
    checkpoint = torch.load(os.path.join(save_dir, 'ckpt.t7'), map_location=lambda storage, loc: storage)
    state_dict = checkpoint['net']
    model.load_state_dict(state_dict)
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']
else:
    print('==> Training from scratch..')
Code example #6
    default=10,
    metavar='N',
    help='how many batches to wait before logging training status')
parser.add_argument(
    '--pool-method',
    default='RRSVM',
    help='pooling method (max|RRSVM|SoftRRSVM) (default: RRSVM)')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

dataset_root = dir_utils.get_dir(
    os.path.join(os.path.expanduser('~'), 'datasets', 'RRSVM_datasets'))
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

train_size = 6000  # use a subset of training data
train_set = datasets.MNIST(dataset_root,
                           train=True,
                           download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307, ), (0.3081, ))
                           ]))
indices = torch.randperm(len(train_set))
train_indices = indices[:train_size or None]
train_loader = torch.utils.data.DataLoader(
    train_set,
    batch_size=args.batch_size,
Code example #7
File: demo.py  Project: shwarzes89/ViewEvaluationNet
parser.add_argument(
    '--resume',
    '-r',
    default=
    'snapshots/MTweak3-FullVGG-1024x512/params/best-5000-0.55-0.77.pth.tar',
    type=str,
    help='resume from checkpoint')

if __name__ == '__main__':

    args = parser.parse_args()
    identifier = 'MTweak3-FullVGG'

    running_name = '{:s}-{:d}x{:d}'.format(identifier, args.l1, args.l2)

    save_dir = dir_utils.get_dir('./snapshots/{:s}'.format(running_name))
    save_file = os.path.join(save_dir, '{:s}.txt'.format(running_name))

    param_save_dir = dir_utils.get_dir(os.path.join(save_dir, 'params'))

    ckpt_file = args.resume
    if ckpt_file is not None:
        if not os.path.isfile(ckpt_file):
            print("CKPT {:s} NOT EXIST".format(ckpt_file))
            sys.exit(-1)
        print("load from {:s}".format(ckpt_file))

        single_pass_net = CompositionNet(pretrained=False,
                                         LinearSize1=args.l1,
                                         LinearSize2=args.l2)
        siamese_net = SiameseNet(single_pass_net)
Code example #8
    return TopNp

if __name__ == '__main__':
    # Test save_ckpt_topN_state:
    import numpy as np
    import py_utils.dir_utils as dir_utils
    np.random.seed(0)

    n_iter = 100
    x_len = 20
    TopN = 10

    for t in range(n_iter):
        x = np.random.rand(x_len)
        TopNp = []
        dir_name = dir_utils.get_dir('Test')
        for x_i in x:
            epoch_state = {
                'performance': x_i
            }
            TopNp = save_ckpt_topN_state(epoch_state, x_i, dir_name, TopNp, TopN)

        # Check correctness:
        save_name_pattern = '{:04d}.pth.tar'
        saved_x = []
        for idx in range(TopN):
            ckpt = torch.load(os.path.join(dir_name, save_name_pattern.format(idx+1)), map_location=lambda storage, loc: storage)
            saved_x.append(ckpt['performance'])

        x_sorted = - np.sort(-x)
        Flag = True
Code example #9
File: path_vars.py  Project: zijunwei/RRSVM_pytorch
import os
import sys
import py_utils.dir_utils as dir_utils
user_root = os.path.expanduser('~')
dataset_root = os.path.join(user_root, 'datasets', 'RRSVM_dataset')
dataset_root = dir_utils.get_dir(dataset_root)
print("DEBUG")
Code example #10
                                       download=True,
                                       transform=transform_test)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=test_batch_size,
                                         shuffle=False,
                                         num_workers=2)
n_testdata = len(testset)
print('# testing: {:d}\t batch size: {:d}, # batch: {:d}'.format(
    n_testdata, test_batch_size, n_testdata // test_batch_size))

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

# Model

save_dir = dir_utils.get_dir('./snapshots/cifar10_letnet_{:s}'.format(
    args.model))

if args.resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isfile(os.path.join(
        save_dir, 'ckpt.t7')), 'Error: no checkpoint directory found!'
    checkpoint = torch.load(os.path.join(save_dir, 'ckpt.t7'),
                            map_location=lambda storage, loc: storage)
    net_struct = model[args.model.upper()]
    net = net_struct()
    state_dict = checkpoint['net']
    net.load_state_dict(state_dict)

    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']
Code example #11
def get_minst_datasets(args, train_portion=1.0):
    kwargs = {
        'num_workers': 1,
        'pin_memory': True
    } if args.cuda else {
        'num_workers': 1
    }

    dataset_root = dir_utils.get_dir(
        os.path.join(os.path.expanduser('~'), 'datasets', 'RRSVM_datasets'))

    # train_size = 6000  # use a subset of training data
    train_set = datasets.MNIST(dataset_root,
                               train=True,
                               download=True,
                               transform=transforms.Compose([
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.1307, ), (0.3081, ))
                               ]))

    if train_portion < 1.0:
        np.random.seed(args.seed or 0)
        n_samples = len(train_set)
        categories_labels = list(set(train_set.train_labels.numpy()))
        n_categories = len(categories_labels)
        # evenly sample:
        selected_indices = []
        for idx in range(n_categories):

            categorical_idx = [
                i for i in range(n_samples)
                if train_set.train_labels[i] == categories_labels[idx]
            ]
            n_categorical_samples = len(categorical_idx)
            indices = np.random.permutation(n_categorical_samples)
            relative_indices = indices[:int(n_categorical_samples * train_portion)]

            s_selected_indices = [categorical_idx[i] for i in relative_indices]
            selected_indices.extend(s_selected_indices)

        train_loader = torch.utils.data.DataLoader(
            train_set,
            batch_size=args.train_batch_size,
            sampler=SubsetRandomSampler(selected_indices),
            **kwargs)
    else:
        train_loader = torch.utils.data.DataLoader(
            train_set, batch_size=args.train_batch_size, **kwargs)

    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        dataset_root,
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                              batch_size=args.test_batch_size,
                                              shuffle=False,
                                              **kwargs)
    return train_loader, test_loader
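
Both loader factories in this listing (get_cifar10_datasets above and get_minst_datasets here) take an argparse-style namespace whose attributes mirror the fields accessed in the code. A short hypothetical driver, with made-up batch sizes and the real parser living elsewhere in the project, might look like:

# Hypothetical usage sketch; attribute names follow the fields read by get_minst_datasets.
import argparse
import torch

args = argparse.Namespace(
    cuda=torch.cuda.is_available(),
    seed=0,
    train_batch_size=64,
    test_batch_size=1000,
)
train_loader, test_loader = get_minst_datasets(args, train_portion=0.1)
print('train batches: {:d}\ttest batches: {:d}'.format(len(train_loader), len(test_loader)))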