Example #1
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()

    # sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")
        
    print('Initializing image data manager')
    dm = DataManager(args, use_gpu)
    trainloader, testloader = dm.return_dataloaders()

    model = Model(num_classes=args.num_classes, groups=args.groups, kernel=args.kernel)
    # load the model
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint['state_dict'])
    print("Loaded checkpoint from '{}'".format(args.resume))

    if use_gpu:
        model = model.cuda()

    test(model, testloader, use_gpu)
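All of these snippets assume module-level imports and a pre-parsed `args` namespace that the listing omits. A minimal sketch of the preamble Example #1 relies on (the defaults below are placeholders; the project-specific Model, DataManager, Logger and train/test helpers come from the repository and are not reproduced here):

import os
import sys
import time
import datetime
import argparse
import os.path as osp

import numpy as np
import torch
import torch.backends.cudnn as cudnn

# Hypothetical argument namespace; the real scripts build a much larger parser
# (Example #3 shows the full option set) and also import Model, DataManager,
# Logger and the train/test helpers from the project's own modules.
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--gpu-devices', default='0', type=str)
parser.add_argument('--save-dir', type=str, default='./log')
parser.add_argument('--resume', type=str, default='', metavar='PATH')
args = parser.parse_args()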
Example #2
def main(test_dataset_name):
    args.dataset = test_dataset_name
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()

    sys.stdout = Logger(
        osp.join(
            args.save_dir,
            f'log_test_{args.dataset}_{args.nKnovel}way_{args.nExemplars}shot_transductive.txt'
        ))
    model = Model(scale_cls=args.scale_cls, num_classes=args.num_classes)
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print('Initializing image data manager')

    dm = DataManager(args, use_gpu)
    trainloader, testloader = dm.return_dataloaders()

    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint['state_dict'])
    # print(checkpoint['state_dict'])
    print("Loaded checkpoint from '{}'".format(args.resume))

    if use_gpu:
        model = model.cuda()

    test_trans(model, testloader, 35, use_gpu)
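Because this variant takes the test dataset name as a parameter, it can be driven over several test splits; a minimal usage sketch (the dataset names here are placeholders, not taken from the source):

if __name__ == '__main__':
    # Hypothetical driver: run the transductive evaluation over several test
    # splits in one go; the dataset names are placeholders.
    for dataset_name in ('miniImagenet', 'tieredImagenet'):
        main(dataset_name)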
Example #3
def extract_image_encoder_features():
    parser = argparse.ArgumentParser(
        description='Test image model with 5-way classification')
    # Datasets
    parser.add_argument('-d', '--dataset', type=str, default='miniImagenet')
    parser.add_argument('--load', default=False)
    parser.add_argument('-j',
                        '--workers',
                        default=4,
                        type=int,
                        help="number of data loading workers (default: 4)")
    parser.add_argument('--height',
                        type=int,
                        default=84,
                        help="height of an image (default: 84)")
    parser.add_argument('--width',
                        type=int,
                        default=84,
                        help="width of an image (default: 84)")
    # Optimization options
    parser.add_argument('--train-batch',
                        default=4,
                        type=int,
                        help="train batch size")
    parser.add_argument('--test-batch',
                        default=1,
                        type=int,
                        help="test batch size")
    # Architecture
    parser.add_argument('--num_classes', type=int, default=64)
    parser.add_argument('--scale_cls', type=int, default=7)
    parser.add_argument(
        '--save-dir',
        type=str,
        default=
        '/home/sunjiamei/work/fewshotlearning/fewshot-CAN-master/result/miniImageNet/CAM/5-shot-seed1-resnet12-lrpscore-proto-test-224'
    )
    parser.add_argument(
        '--resume',
        type=str,
        default=
        '/home/sunjiamei/work/fewshotlearning/fewshot-CAN-master/result/miniImageNet/CAM/5-shot-seed1-resnet12-lrpscore-proto-test-224/best_model.pth.tar',
        metavar='PATH')
    # FewShot setting
    parser.add_argument('--nKnovel',
                        type=int,
                        default=5,
                        help='number of novel categories')
    parser.add_argument('--nExemplars',
                        type=int,
                        default=5,
                        help='number of training examples per novel category.')
    parser.add_argument(
        '--train_nTestNovel',
        type=int,
        default=6 * 5,
        help='number of test examples across all novel categories during training'
    )
    parser.add_argument('--train_epoch_size',
                        type=int,
                        default=1200,
                        help='number of episodes per epoch when training')
    parser.add_argument(
        '--nTestNovel',
        type=int,
        default=16 * 5,
        help='number of test examples across all novel categories')
    parser.add_argument('--epoch_size',
                        type=int,
                        default=2000,
                        help='number of batches per epoch')
    # Miscs
    parser.add_argument('--phase', default='test', type=str)
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--gpu-devices', default='0', type=str)
    args = parser.parse_args()
    testdataset = setdataset.Setdataset(args.dataset)
    testset = testdataset.test
    model = Model(scale_cls=args.scale_cls, num_classes=args.num_classes)
    # load the model
    model.cuda()
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint['state_dict'])
    transform_test = T.Compose([
        T.Resize((int(args.height * 1.15), int(args.width * 1.15)),
                 interpolation=3),
        T.CenterCrop(args.height),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    feature_file = os.path.join(args.save_dir, 'features',
                                args.dataset + '.hdf5')
    if os.path.exists(feature_file):
        print('Feature file already exists; skipping extraction.')
    else:
        # extract features
        print(
            f'==extracting features of {args.dataset} with model {args.resume}'
        )
        save_features(model, testset, feature_file, transform_test)  # see the sketch after this example
        print('Features saved to:', feature_file)
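The extraction itself is delegated to the repository's save_features helper. A minimal sketch of what such a helper might look like, assuming one HDF5 entry per image and that the encoder is reachable as model.base (both assumptions, not the repository's actual code):

import os
import h5py
import torch
from PIL import Image

def save_features(model, dataset, feature_file, transform):
    # Hypothetical sketch: iterate over (image_path, label) pairs, encode each
    # image and store the feature vectors in one HDF5 file keyed by path.
    model.eval()
    os.makedirs(os.path.dirname(feature_file), exist_ok=True)
    with h5py.File(feature_file, 'w') as f, torch.no_grad():
        for img_path, label in dataset:
            img = transform(Image.open(img_path).convert('RGB')).unsqueeze(0).cuda()
            feat = model.base(img)  # assumed encoder attribute of Model
            f.create_dataset(img_path, data=feat.squeeze(0).cpu().numpy())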
Example #4
epoch_index = 0

model = Model(opt.scale_class, opt.num_class).cuda()
criterion = CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(),
                             lr=opt.lr,
                             betas=(opt.beta1, 0.9))
# optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4, nesterov=True)

if opt.resume:
    if os.path.isfile(opt.resume):
        print("=> loading checkpoint '{}'".format(opt.resume))
        checkpoint = torch.load(opt.resume)
        epoch_index = checkpoint['epoch_index']
        best_prec1 = checkpoint['best_prec1']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            opt.resume, checkpoint['epoch_index']))
        print("=> loaded checkpoint '{}' (epoch {})".format(
            opt.resume, checkpoint['epoch_index']),
              file=F_txt)
    else:
        print("=> no checkpoint found at '{}'".format(opt.resume))
        print("=> no checkpoint found at '{}'".format(opt.resume), file=F_txt)

if opt.ngpu > 1:
    model = nn.DataParallel(model, range(opt.ngpu))

# print the architecture of the network
print(model)
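The resume branch expects a checkpoint dict with the keys 'epoch_index', 'best_prec1', 'state_dict' and 'optimizer'. A minimal sketch of a matching save side (hypothetical; the repository's own save_checkpoint may differ):

import os
import shutil
import torch

def save_checkpoint(state, is_best, filename):
    # Hypothetical counterpart to the resume branch above: persist the keys it
    # expects ('epoch_index', 'best_prec1', 'state_dict', 'optimizer').
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename,
                        os.path.join(os.path.dirname(filename), 'model_best.pth.tar'))

# e.g. once per epoch:
# save_checkpoint({'epoch_index': epoch_index + 1,
#                  'best_prec1': best_prec1,
#                  'state_dict': model.state_dict(),
#                  'optimizer': optimizer.state_dict()},
#                 is_best, 'checkpoint.pth.tar')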
Example #5
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()

    sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")
        
    print('Initializing image data manager')
    dm = DataManager(args, use_gpu)
    trainloader, testloader = dm.return_dataloaders()

    model = Model(scale_cls=args.scale_cls, num_classes=args.num_classes)
    criterion = CrossEntropyLoss()
    optimizer = init_optimizer(args.optim, model.parameters(), args.lr, args.weight_decay)
    if args.resume:
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        print(f'Loaded checkpoint from {args.resume}')
    if use_gpu:
        model = model.cuda()

    start_time = time.time()
    train_time = 0
    best_acc = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(args.max_epoch):
        # LUT-based schedule; a hypothetical sketch follows this example
        learning_rate = adjust_learning_rate(optimizer, epoch, args.LUT_lr)
        # learning_rate = args.lr
        start_train_time = time.time()
        train(epoch, model, criterion, optimizer, trainloader, learning_rate, use_gpu)
        train_time += round(time.time() - start_train_time)
        
        if epoch == 0 or epoch > (args.stepsize[0]-1) or (epoch + 1) % 20 == 0:
            acc = test(model, testloader, use_gpu)
            is_best = acc > best_acc
            
            if is_best:
                best_acc = acc
                best_epoch = epoch + 1
            
            save_checkpoint({
                'state_dict': model.state_dict(),
                'acc': acc,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

            print("==> Test 5-way Best accuracy {:.2%}, achieved at epoch {}".format(best_acc, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
    print("==========\nArgs:{}\n==========".format(args))