cuda = not opt.nocuda
if cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run with --nocuda")

device = torch.device("cuda" if cuda else "cpu")

# seed all RNGs for reproducibility
random.seed(opt.seed)
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
if cuda:
    torch.cuda.manual_seed(opt.seed)

print('===> Loading dataset(s)')
if opt.mode.lower() == 'train':
    # whole training set, iterated in order (shuffle=False) at the cache batch size
    whole_train_set = dataset.get_whole_training_set()
    whole_training_data_loader = DataLoader(dataset=whole_train_set,
                                            num_workers=opt.threads,
                                            batch_size=opt.cacheBatchSize,
                                            shuffle=False, pin_memory=cuda)

    # training query set, parameterised by the triplet-loss margin
    train_set = dataset.get_training_query_set(opt.margin)

    print('====> Training query set:', len(train_set))

    whole_test_set = dataset.get_whole_val_set()
    print('===> Evaluating on val set, query count:', whole_test_set.dbStruct.numQ)
elif opt.mode.lower() == 'test':
    if opt.split.lower() == 'test':
        whole_test_set = dataset.get_whole_test_set()
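For reference, `opt` in these listings comes from the script's command-line parser. Below is a minimal, hypothetical sketch covering only the flags referenced here (--nocuda, --seed, --threads, --cacheBatchSize, --margin, --mode, --split, --arch); the real script defines additional options, and the defaults shown are illustrative assumptions rather than the actual values.

import argparse

# Hypothetical sketch of the argument parser behind `opt`; flag names follow
# the attributes used in the listings, defaults are assumptions.
parser = argparse.ArgumentParser(description='train/test a place-recognition model')
parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'],
                    help='run training or evaluation')
parser.add_argument('--nocuda', action='store_true', help='run on CPU even if a GPU is present')
parser.add_argument('--seed', type=int, default=123, help='random seed for all RNGs')
parser.add_argument('--threads', type=int, default=8, help='DataLoader worker processes')
parser.add_argument('--cacheBatchSize', type=int, default=24, help='batch size when building the cache')
parser.add_argument('--margin', type=float, default=0.1, help='triplet-loss margin')
parser.add_argument('--split', type=str, default='val', choices=['test', 'val'],
                    help='which split to evaluate in test mode')
parser.add_argument('--arch', type=str, default='vgg16', help='backbone architecture name')
opt = parser.parse_args()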
cuda = not opt.nocuda
if cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run with --nocuda")

device = torch.device("cuda" if cuda else "cpu")

# seed all RNGs for reproducibility
random.seed(opt.seed)
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
if cuda:
    torch.cuda.manual_seed(opt.seed)

print('===> Loading dataset(s)')
if opt.mode.lower() == 'train':
    # a list of train sets, selected per architecture
    whole_train_set = dataset.get_whole_training_set(opt.arch.lower())
    # one DataLoader per training set, iterated in order (shuffle=False)
    whole_training_data_loader = [
        DataLoader(dataset=set, num_workers=opt.threads,
                   batch_size=opt.cacheBatchSize, shuffle=False,
                   pin_memory=cuda)
        for set in whole_train_set
    ]

    # a list of training query sets, parameterised by the triplet-loss margin
    train_set_list = dataset.get_training_query_set(
        opt.arch.lower(), opt.margin)

    print('====> Training query set:', len(train_set_list[0]))

    whole_test_set = dataset.get_whole_val_set(opt.arch.lower())
    print('===> Evaluating on val set, query count:',