# ---- tail of the command-line definition ----
parser.add_argument('--whitenm', type=float, default=1.0,
                    help='whitening multiplier, default is 1.0 (i.e. no multiplication)')

args = parser.parse_args()
args.iscuda = common.torch_set_gpu(args.gpu)

# Re-pack the two-value CLI options (k, alpha) into keyword dicts
# for query expansion / database augmentation.
for opt_name in ('aqe', 'adba'):
    pair = getattr(args, opt_name)
    if pair is not None:
        setattr(args, opt_name, {'k': pair[0], 'alpha': pair[1]})

# Fetch and instantiate the evaluation dataset.
dl.download_dataset(args.dataset)
dataset = datasets.create(args.dataset)
print("Test dataset:", dataset)

net = load_model(args.checkpoint, args.iscuda)

if args.whiten:
    # Select the requested PCA entry by key, then replace args.whiten with
    # the whitening-parameter dict consumed downstream.
    net.pca = net.pca[args.whiten]
    args.whiten = {'whitenp': args.whitenp, 'whitenv': args.whitenv, 'whitenm': args.whitenm}
else:
    # No whitening requested: disable PCA entirely.
    net.pca = None
    args.whiten = None
# ---- tail of the command-line definition ----
parser.add_argument('--batch_size', type=int, default=320, help='size of batch images')  # FIX: typo "imags"
parser.add_argument('--epochs', type=int, default=300, help='train epochs')
# FIX: help text was a copy-paste of 'train epochs'; --saved is the output directory.
parser.add_argument('--saved', type=str, default='./experiments/ICIAR+no_pretrained/',
                    help='directory where checkpoints/experiments are saved')

args = parser.parse_args()

# '--gpu' arrives as e.g. ['0,1']; split the first entry into a list of int ids.
gpu_ids = [int(i) for i in args.gpu[0].split(',')]
args.iscuda = common.torch_set_gpu(gpu_ids)

# Re-pack the two-value CLI options (k, alpha) into keyword dicts.
if args.aqe is not None:
    args.aqe = {'k': args.aqe[0], 'alpha': args.aqe[1]}
if args.adba is not None:
    args.adba = {'k': args.adba[0], 'alpha': args.adba[1]}

# load datasets
dataset = datasets.create(args.dataset)
test_dataset = datasets.create(args.test_dataset)

# load model
print("With %s Train Model:" % (args.dataset))
net, start_epoch = load_model(args.checkpoint, args.iscuda)
net.cuda()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# NOTE(review): device_ids is hard-coded to [0] even though several GPU ids
# may have been requested via --gpu — confirm this is intentional.
net = torch.nn.DataParallel(net, device_ids=[0])
net.to(device)

# DataParallel wraps the model in `net.module`; re-expose the attributes
# the rest of the script reads directly off `net`.
net.pca = net.module.pca
net.preprocess = net.module.preprocess
net.iscuda = net.module.iscuda
criterion = APLoss()
help='number of thread workders') parser.add_argument('--gpu', type=int, default=0, nargs='+', help='GPU ids') parser.add_argument('--cache', type=str, required=True, help='path to cache files') args = parser.parse_args() assert args.buffer_size % args.batch_size == 0 args.iscuda = common.torch_set_gpu(args.gpu) train_set = datasets.create('Landmarks_clean') val_set = datasets.create('RParis6K') model_options = { 'arch': args.arch, 'out_dim': args.out_dim, 'pooling': args.pooling, 'gemp': args.gemp } start_epoch = 0 if os.path.isfile(args.resume): checkpoint = common.load_checkpoint(args.resume, args.iscuda) net = nets.create_model(pretrained='', **model_options) net = common.switch_model_to_cuda(net, args.iscuda, checkpoint) net.load_state_dict(checkpoint['state_dict'])
# ---- tail of the command-line definition ----
parser.add_argument('--aqe', type=int, nargs='+',
                    help='alpha-query expansion parameters')  # FIX: typo "paramenters"
parser.add_argument('--adba', type=int, nargs='+',
                    help='alpha-database augmentation parameters')  # FIX: typo "paramenters"
# FIX: help claimed "default is 0.5 (i.e., the sqrt)" but the actual default is 0.25.
parser.add_argument('--whitenp', type=float, default=0.25,
                    help='whitening power, default is 0.25')
parser.add_argument('--whitenv', type=int, default=None,
                    help='number of components, default is None (i.e. all components)')
parser.add_argument('--whitenm', type=float, default=1.0,
                    help='whitening multiplier, default is 1.0 (i.e. no multiplication)')

args = parser.parse_args()
args.iscuda = common.torch_set_gpu(args.gpu)

# Re-pack the two-value CLI options (k, alpha) into keyword dicts.
if args.aqe is not None:
    args.aqe = {'k': args.aqe[0], 'alpha': args.aqe[1]}
if args.adba is not None:
    args.adba = {'k': args.adba[0], 'alpha': args.adba[1]}

# Fetch and instantiate the evaluation dataset.
dl.download_dataset(args.dataset)
dataset = datasets.create(args.dataset)
print("Test dataset:", dataset)

net = load_model(args.checkpoint, args.iscuda)

if args.center_bias:
    # Propagate the center-bias setting to the model (and to the wrapped
    # module if the net is a DataParallel-style wrapper).
    assert hasattr(net, 'center_bias')
    net.center_bias = args.center_bias
    if hasattr(net, 'module') and hasattr(net.module, 'center_bias'):
        net.module.center_bias = args.center_bias

if args.whiten and not hasattr(net, 'pca'):
    # Learn PCA if necessary; args.whiten is treated here as a path to a
    # previously pickled PCA object.
    if os.path.exists(args.whiten):
        with open(args.whiten, 'rb') as f:
            # NOTE(review): pickle.load on an arbitrary path is unsafe for
            # untrusted input — acceptable only for locally produced caches.
            net.pca = pkl.load(f)