Exemplo n.º 1
0
def main():
    """Restore a trained checkpoint and evaluate it on the test split.

    Reads configuration from the module-level ``args`` namespace; all stdout
    is mirrored into ``log_test.txt`` under ``args.save_dir``.
    """
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    gpu_available = torch.cuda.is_available()

    # Tee stdout into a persistent log file in the save directory.
    sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if not gpu_available:
        print("Currently using CPU (GPU is highly recommended)")
    else:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)

    print('Initializing image data manager')
    data_manager = DataManager(args, gpu_available)
    trainloader, testloader = data_manager.return_dataloaders()

    # Build the model and restore the trained weights from args.resume.
    model = Model(scale_cls=args.scale_cls, num_classes=args.num_classes)
    ckpt = torch.load(args.resume)
    model.load_state_dict(ckpt['state_dict'])
    print("Loaded checkpoint from '{}'".format(args.resume))

    if gpu_available:
        model = model.cuda()

    test(model, testloader, gpu_available)
Exemplo n.º 2
0
def main():
    """Train a few-shot model, evaluating periodically and keeping the best checkpoint.

    Reads configuration from the module-level ``args`` namespace; results for
    each shot setting are written to a dedicated ``<nExemplars>-shot``
    sub-directory of ``args.save_dir``.
    """
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    on_gpu = torch.cuda.is_available()

    # Each shot setting gets its own sub-directory; tee stdout into its log.
    args.save_dir = osp.join(args.save_dir, str(args.nExemplars) + '-shot')
    sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if not on_gpu:
        print("Currently using CPU (GPU is highly recommended)")
    else:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)

    print('Initializing image data manager')
    data_mgr = DataManager(args, on_gpu)
    trainloader, testloader = data_mgr.return_dataloaders()

    model = Model(args)
    criterion = CrossEntropyLoss()
    optimizer = init_optimizer(args.optim, model.parameters(), args.lr,
                               args.weight_decay)
    if on_gpu:
        model = model.cuda()

    wall_start = time.time()
    total_train_secs = 0
    best_acc = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(args.max_epoch):
        learning_rate = adjust_learning_rate(optimizer, epoch, args.LUT_lr)
        epoch_start = time.time()
        train(epoch, model, criterion, optimizer, trainloader, learning_rate,
              on_gpu)
        total_train_secs += round(time.time() - epoch_start)

        # Evaluate on the first epoch, every 10 epochs, and once past the
        # first LR step (epoch >= stepsize[0] is equivalent to the original
        # epoch > stepsize[0] - 1 for integer epochs).
        should_eval = (epoch == 0 or (epoch + 1) % 10 == 0
                       or epoch >= args.stepsize[0])
        if should_eval:
            acc = test(model, testloader, on_gpu)
            is_best = acc > best_acc
            if is_best:
                best_acc = acc
                best_epoch = epoch + 1
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'acc': acc,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))
            print("==> Test 5-way Best accuracy {:.2%}, achieved at epoch {}".
                  format(best_acc, best_epoch))

    elapsed = str(datetime.timedelta(seconds=round(time.time() - wall_start)))
    total_train_time = str(datetime.timedelta(seconds=total_train_secs))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, total_train_time))
    print("==========\nArgs:{}\n==========".format(args))
Exemplo n.º 3
0
def main(test_dataset_name):
    """Evaluate a trained checkpoint on the named test dataset.

    Mutates the module-level ``args.dataset`` to *test_dataset_name* before
    building the data manager, so the same args namespace can be reused
    across multiple datasets.
    """
    args.dataset = test_dataset_name
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    gpu_available = torch.cuda.is_available()

    # Log file name encodes dataset and episode configuration.
    sys.stdout = Logger(
        osp.join(
            args.save_dir,
            f'log_test_{args.dataset}_{args.nKnovel}way_{args.nExemplars}shot16.txt'
        ))
    print("==========\nArgs:{}\n==========".format(args))

    if not gpu_available:
        print("Currently using CPU (GPU is highly recommended)")
    else:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)

    print('Initializing image data manager')

    data_manager = DataManager(args, gpu_available)
    trainloader, testloader = data_manager.return_dataloaders()

    # Build the model and restore trained weights from the resume path.
    model = Model(scale_cls=args.scale_cls, num_classes=args.num_classes)
    ckpt = torch.load(args.resume)
    model.load_state_dict(ckpt['state_dict'])
    print("Loaded checkpoint from '{}'".format(args.resume))

    if gpu_available:
        model = model.cuda()

    test(model, testloader, gpu_available)
Exemplo n.º 4
0
def extract_image_encoder_features():
    """Extract image-encoder features for a dataset and cache them as HDF5.

    Parses its own command-line arguments, restores the trained model from
    ``--resume``, and writes features for ``--dataset`` to
    ``<save-dir>/features/<dataset>.hdf5``. Extraction is skipped when the
    feature file already exists. Requires a CUDA device (the model is moved
    to GPU unconditionally).
    """
    parser = argparse.ArgumentParser(
        description='Test image model with 5-way classification')
    # Datasets
    parser.add_argument('-d', '--dataset', type=str, default='miniImagenet')
    parser.add_argument('--load', default=False)
    parser.add_argument('-j',
                        '--workers',
                        default=4,
                        type=int,
                        help="number of data loading workers (default: 4)")
    parser.add_argument('--height',
                        type=int,
                        default=84,
                        help="height of an image (default: 84)")
    parser.add_argument('--width',
                        type=int,
                        default=84,
                        help="width of an image (default: 84)")
    # Optimization options
    parser.add_argument('--train-batch',
                        default=4,
                        type=int,
                        help="train batch size")
    parser.add_argument('--test-batch',
                        default=1,
                        type=int,
                        help="test batch size")
    # Architecture
    parser.add_argument('--num_classes', type=int, default=64)
    parser.add_argument('--scale_cls', type=int, default=7)
    parser.add_argument(
        '--save-dir',
        type=str,
        default=
        '/home/sunjiamei/work/fewshotlearning/fewshot-CAN-master/result/miniImageNet/CAM/5-shot-seed1-resnet12-lrpscore-proto-test-224'
    )
    parser.add_argument(
        '--resume',
        type=str,
        default=
        '/home/sunjiamei/work/fewshotlearning/fewshot-CAN-master/result/miniImageNet/CAM/5-shot-seed1-resnet12-lrpscore-proto-test-224/best_model.pth.tar',
        metavar='PATH')
    # FewShot settting
    parser.add_argument('--nKnovel',
                        type=int,
                        default=5,
                        help='number of novel categories')
    parser.add_argument('--nExemplars',
                        type=int,
                        default=5,
                        help='number of training examples per novel category.')
    parser.add_argument(
        '--train_nTestNovel',
        type=int,
        default=6 * 5,
        help='number of test examples for all the novel category when training'
    )
    parser.add_argument('--train_epoch_size',
                        type=int,
                        default=1200,
                        help='number of episodes per epoch when training')
    parser.add_argument(
        '--nTestNovel',
        type=int,
        default=16 * 5,
        help='number of test examples for all the novel category')
    parser.add_argument('--epoch_size',
                        type=int,
                        default=2000,
                        help='number of batches per epoch')
    # Miscs
    parser.add_argument('--phase', default='test', type=str)
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--gpu-devices', default='0', type=str)
    args = parser.parse_args()

    testdataset = setdataset.Setdataset(args.dataset)
    testset = testdataset.test

    # Build the model, move it to GPU, then restore the trained weights.
    model = Model(scale_cls=args.scale_cls, num_classes=args.num_classes)
    model.cuda()
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint['state_dict'])

    # Resize slightly larger than the crop size, then center-crop — mirrors
    # the evaluation-time preprocessing pipeline.
    transform_test = T.Compose([
        T.Resize((int(args.height * 1.15), int(args.width * 1.15)),
                 interpolation=3),
        T.CenterCrop(args.height),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    feature_file = os.path.join(args.save_dir, 'features',
                                args.dataset + '.hdf5')
    if os.path.exists(feature_file):
        print('the feature file exists, we will return none')
    else:
        # Bug fix: the 'features' sub-directory was never created, so writing
        # the HDF5 file failed on a fresh save directory.
        os.makedirs(os.path.dirname(feature_file), exist_ok=True)
        print(
            f'==extracting features of {args.dataset} with model {args.resume}'
        )
        save_features(model, testset, feature_file, transform_test)
        print('feature_saved to:', feature_file)
Exemplo n.º 5
0
    )

# ---- Script setup: fixed seeding and cuDNN autotuning ----
cudnn.benchmark = True
torch.cuda.manual_seed_all(1)
torch.manual_seed(1)

# Append the run options to a plain-text results log under the output folder.
# NOTE(review): 'opt_resutls.txt' looks like a typo for 'opt_results.txt';
# kept as-is because other tooling may already read this exact filename.
txt_save_path = os.path.join(outf, 'opt_resutls.txt')
F_txt = open(txt_save_path, 'a+')
print(opt)
print(opt, file=F_txt)
ngpu = int(opt.ngpu)
# Best-accuracy / epoch counters shared with the (not shown) training loop.
global best_prec1, epoch_index
best_prec1 = 0
epoch_index = 0

# Model, loss and optimizer; the model is moved to GPU immediately.
model = Model(opt.scale_class, opt.num_class).cuda()
criterion = CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(),
                             lr=opt.lr,
                             betas=(opt.beta1, 0.9))
# optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4, nesterov=True)

if opt.resume:
    if os.path.isfile(opt.resume):
        print("=> loading checkpoint '{}'".format(opt.resume))
        checkpoint = torch.load(opt.resume)
        epoch_index = checkpoint['epoch_index']
        best_prec1 = checkpoint['best_prec1']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
Exemplo n.º 6
0
def main():
    """Train with a warmup LR scheduler, checkpointing only improved models.

    Reads configuration from the module-level ``args`` namespace. The run
    directory name encodes dataset, shot count, kernel and group settings,
    plus an optional suffix.
    """
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    gpu_ok = torch.cuda.is_available()

    # Encode the experiment configuration into the run directory name.
    run_name = '{}-{}shot-{}kernel-{}group'.format(args.dataset,
                                                   args.nExemplars,
                                                   args.kernel, args.groups)
    if args.suffix is not None:
        run_name = run_name + '-{}'.format(args.suffix)

    out_dir = os.path.join(args.save_dir, run_name)
    os.makedirs(out_dir, exist_ok=True)

    sys.stdout = Logger(osp.join(out_dir, 'log_train.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if not gpu_ok:
        print("Currently using CPU (GPU is highly recommended)")
    else:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)

    print('Initializing image data manager')
    data_mgr = DataManager(args, gpu_ok)
    trainloader, testloader = data_mgr.return_dataloaders()

    model = Model(num_classes=args.num_classes,
                  groups=args.groups,
                  kernel=args.kernel)
    criterion = CrossEntropyLoss()
    optimizer = init_optimizer(args.optim, model.parameters(), args.lr,
                               args.weight_decay)

    if gpu_ok:
        model = model.cuda()

    wall_start = time.time()
    total_train_secs = 0
    best_acc = -np.inf
    best_epoch = 0
    print("==> Start training")

    # Extra warmup epochs precede the nominal training schedule.
    warmup_epoch = 5
    scheduler = warmup_scheduler(base_lr=args.lr,
                                 iter_per_epoch=len(trainloader),
                                 max_epoch=args.max_epoch + warmup_epoch,
                                 multi_step=[],
                                 warmup_epoch=warmup_epoch)

    for epoch in range(args.max_epoch + warmup_epoch):
        epoch_start = time.time()
        train(epoch, model, criterion, optimizer, trainloader, scheduler,
              gpu_ok)
        total_train_secs += round(time.time() - epoch_start)

        # Evaluate on the first epoch, every 10 epochs, and once past the
        # first LR step (epoch >= stepsize[0] equals epoch > stepsize[0] - 1
        # for integer epochs).
        if epoch == 0 or (epoch + 1) % 10 == 0 or epoch >= args.stepsize[0]:
            acc = test(model, testloader, gpu_ok)
            is_best = acc > best_acc

            if is_best:
                best_acc = acc
                best_epoch = epoch + 1

            # Only write checkpoints for new bests or past the nominal schedule.
            if is_best or epoch > args.max_epoch:
                save_checkpoint(
                    {
                        'state_dict': model.state_dict(),
                        'acc': acc,
                        'epoch': epoch,
                    }, is_best,
                    osp.join(out_dir,
                             'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

            print("==> Test 5-way Best accuracy {:.2%}, achieved at epoch {}".
                  format(best_acc, best_epoch))

    elapsed = str(datetime.timedelta(seconds=round(time.time() - wall_start)))
    total_train_time = str(datetime.timedelta(seconds=total_train_secs))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, total_train_time))
    print("==========\nArgs:{}\n==========".format(args))
Exemplo n.º 7
0
        label2inds[label].append(idx)
    return label2inds


# Load the raw miniImageNet training split: a pickled dict with 'data'
# (image arrays) and 'labels' (integer class ids).
file = '/media/ZT/FSL_data/MiniImagenet/miniImageNet_category_split_train_phase_train.pickle'
with open(file, 'rb') as fo:
    dataset = pickle.load(fo, encoding='iso-8859-1')
data = dataset['data']
labels = dataset['labels']
# Map each class label to the list of sample indices belonging to it.
labels2inds = buildLabelIndex(labels)
labelIds = sorted(labels2inds.keys())  # (0~63)
# ImageNet-style normalization applied after converting images to tensors.
transform = T.Compose([
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# Restore the best trained model for the current shot setting; eval mode
# disables dropout/batch-norm updates during feature extraction.
model = Model(args)
checkpoint = torch.load(
    osp.join(args.resume,
             str(args.nExemplars) + '-shot', 'best_model.pth.tar'))
model.load_state_dict(checkpoint['state_dict'])
model = model.cuda()
model.eval()
# Accumulator for extracted features (filled by the loop below).
feas = []
with torch.no_grad():
    for i in labelIds:
        ids = labels2inds[i]
        imgs = []
        for j in ids:
            img = data[j]
            img = Image.fromarray(img)
            img = transform(img)