def testmodel(mymodel, args, cuda_gpu):
    mytraindata = myDataset(path=args.json_file, height=args.height, width=args.width,
                            autoaugment=args.autoaugment)
    mytrainloader = torch.utils.data.DataLoader(mytraindata, batch_size=1, shuffle=False)
    gnd = loadquery(args.valdata_dir)

    mymodel.eval()
    with torch.no_grad():
        # Query descriptors.
        print('>> Extracting descriptors for query images...')
        qloader = torch.utils.data.DataLoader(
            ImagesFromList(root='', images=[i['queryimgid'] for i in gnd],
                           imsize=mytraindata.height, transform=mytraindata.transform),
            batch_size=1, shuffle=False, num_workers=0, pin_memory=True)
        # qoolvecs = torch.zeros(args.classnum, len(gnd)).cuda()
        qoolvecs = torch.zeros(OUTPUT_DIM[args.backbone], len(gnd)).cuda()
        lenq = len(qloader)
        for i, input in enumerate(qloader):
            out = mymodel(input.cuda())
            qoolvecs[:, i] = out.data.squeeze()
            if (i + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(i + 1, lenq), end='')
        print('')

        # Database descriptors.
        poolvecs = torch.zeros(OUTPUT_DIM[args.backbone], len(mytrainloader)).cuda()
        idlist = []
        print('>> Extracting descriptors for database images...')
        for index, data in enumerate(mytrainloader):
            batch_x, batch_y, batch_id = data
            idlist.append(batch_id[0])
            if cuda_gpu:
                batch_x = batch_x.cuda()
            batch_x = batch_x.float()
            # batch_x, batch_y = Variable(batch_x), Variable(batch_y)
            out = mymodel(batch_x)
            poolvecs[:, index] = out.squeeze()
            if (index + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(index + 1, len(mytrainloader)), end='')

        vecs = poolvecs.cpu().numpy()
        qvecs = qoolvecs.cpu().numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        dataset = args.json_file.split('/')[-1].replace("all.json", "")
        compute_map_and_print(dataset, ranks, gnd, idlist)

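# The ranking step in testmodel() is standard dot-product retrieval: descriptors
# are stored column-wise, so vecs.T @ qvecs gives a (num_database x num_query)
# score matrix, and a descending argsort of each column yields the ranked
# database indices per query. The self-contained sketch below replays that step
# on random, L2-normalized toy descriptors; the sizes and names are illustrative
# assumptions, not values taken from this repository.
def _rank_by_dot_product_demo():
    import numpy as np  # local import so the demo stays self-contained

    dim, n_db, n_q = 8, 5, 2
    rng = np.random.default_rng(0)

    # Column-wise descriptors, L2-normalized as retrieval descriptors usually are.
    vecs = rng.standard_normal((dim, n_db))
    qvecs = rng.standard_normal((dim, n_q))
    vecs /= np.linalg.norm(vecs, axis=0, keepdims=True)
    qvecs /= np.linalg.norm(qvecs, axis=0, keepdims=True)

    # Same operations as in testmodel(): similarity matrix, then descending sort.
    scores = np.dot(vecs.T, qvecs)        # (n_db, n_q)
    ranks = np.argsort(-scores, axis=0)   # ranks[0, j] = best database index for query j
    return ranks
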
def testmultibranch(args, cuda_gpu):
    args.train_dir = '/mnt/sdb/shibaorong/logs/paris/triplet/usmine/withclass_cluster11/parameter_61.pkl'
    mymodel = builGraph.getModel(args.backbone, args.classnum, args.gpu,
                                 'extractor', cuda_gpu=cuda_gpu, pretrained=False)

    if os.path.exists(args.train_dir):
        checkpoint = torch.load(args.train_dir)
        mymodel.load_state_dict(checkpoint['model_state_dict'])

    mytraindata = myDataset(path=args.json_file, height=args.height, width=args.width,
                            autoaugment=args.autoaugment)
    mytrainloader = torch.utils.data.DataLoader(mytraindata, batch_size=1, shuffle=False)
    gnd = loadquery(args.valdata_dir)

    mymodel.eval()
    with torch.no_grad():
        poolvecs = torch.zeros(OUTPUT_DIM[args.backbone], len(mytrainloader)).cuda()
        idlist = []
        print('>> Extracting descriptors for database images...')
        for index, data in enumerate(mytrainloader):
            batch_x, batch_y, batch_id = data
            idlist.append(batch_id[0])
            if cuda_gpu:
                batch_x = batch_x.cuda()
            batch_x = batch_x.float()
            # batch_x, batch_y = Variable(batch_x), Variable(batch_y)
            out = mymodel(batch_x)
            # out = torch.cat((out1, out2), -1)
            poolvecs[:, index] = out.squeeze()
            if (index + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(index + 1, len(mytrainloader)), end='')

        # Queries are a subset of the database: locate their columns and reorder gnd to match.
        qindexs = np.arange(len(mytrainloader))[np.in1d(idlist, [i['queryimgid'] for i in gnd])]
        newgnd = [idlist[i] for i in qindexs]
        g = [[i['queryimgid'] for i in gnd].index(j) for j in newgnd]
        gnd = [gnd[i] for i in g]

        vecs = poolvecs.cpu().numpy()
        qvecs = vecs[:, qindexs]

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        dataset = args.json_file.split('/')[-1].replace("all.json", "")
        compute_map_and_print(dataset, ranks, gnd, idlist)

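# testmultibranch() reuses the single database pass for the queries: np.in1d
# marks which database ids are also query ids, qindexs holds their positions in
# database order, and gnd is reordered so that gnd[k] describes the query sitting
# in column qindexs[k] of the descriptor matrix. The sketch below replays that
# alignment on toy ids; the id strings are made up for illustration only.
def _align_queries_demo():
    import numpy as np

    idlist = ['img_a', 'img_b', 'img_c', 'img_d']             # database order
    gnd = [{'queryimgid': 'img_d'}, {'queryimgid': 'img_b'}]  # ground-truth query order

    query_ids = [q['queryimgid'] for q in gnd]
    qindexs = np.arange(len(idlist))[np.in1d(idlist, query_ids)]  # -> array([1, 3])
    newgnd = [idlist[i] for i in qindexs]                         # -> ['img_b', 'img_d']
    order = [query_ids.index(j) for j in newgnd]                  # -> [1, 0]
    gnd_aligned = [gnd[i] for i in order]                         # gnd now follows database order
    return qindexs, gnd_aligned
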
def testTriplet(params, transform):
    mytraindata = OnlineTripletData(path=params['data_dir'], autoaugment=params['autoaugment'],
                                    outputdim=params['class_num'], imsize=params['height'],
                                    transform=transform)
    cuda_gpu = torch.cuda.is_available()
    miningmodel = builGraph.getModel(params['modelName'], params['class_num'], params['Gpu'],
                                     'triplet', cuda_gpu=cuda_gpu)
    gnd = loadquery(params['valdata_dir'])

    if os.path.exists(params['train_dir']):
        checkpoint = torch.load(params['train_dir'])
        miningmodel.load_state_dict(checkpoint['model_state_dict'])

    miningmodel.eval()
    with torch.no_grad():
        # Query descriptors.
        print('>> Extracting descriptors for query images...')
        qloader = torch.utils.data.DataLoader(
            ImagesFromList(root='', images=[i['queryimgid'] for i in gnd],
                           imsize=mytraindata.imsize, transform=mytraindata.transform),
            batch_size=1, shuffle=False, num_workers=0, pin_memory=True)
        qoolvecs = torch.zeros(params['class_num'], len(gnd)).cuda()
        for i, input in enumerate(qloader):
            out, _ = miningmodel(input.cuda())
            qoolvecs[:, i] = out.data.squeeze()
            if (i + 1) % mytraindata.print_freq == 0 or (i + 1) == mytraindata.qsize:
                print('\r>>>> {}/{} done...'.format(i + 1, mytraindata.qsize), end='')
        print('')

        # Database descriptors.
        print('>> Extracting descriptors for data images...')
        dloader = torch.utils.data.DataLoader(
            ImagesFromList(root='', images=[i['filenames'] for i in mytraindata.data],
                           imsize=mytraindata.imsize, transform=mytraindata.transform),
            batch_size=1, shuffle=False, num_workers=0, pin_memory=True)
        poolvecs = torch.zeros(params['class_num'], len(mytraindata.data)).cuda()
        idlist = [i['filenames'] for i in mytraindata.data]
        for i, input in enumerate(dloader):
            out, _ = miningmodel(input.cuda())
            poolvecs[:, i] = out.data.squeeze()
            if (i + 1) % mytraindata.print_freq == 0 or (i + 1) == len(mytraindata.data):
                print('\r>>>> {}/{} done...'.format(i + 1, len(mytraindata.data)), end='')
        print('')

        vecs = poolvecs.cpu().numpy()
        qvecs = qoolvecs.cpu().numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        dataset = params['data_dir'].split('/')[-1].replace("train.json", "")
        compute_map_and_print(dataset, ranks, gnd, idlist)

def testOnlinepair(params, transform):
    mytraindata = myDataset(path=params['data_dir'], height=params['height'], width=params['width'],
                            autoaugment=params['autoaugment'], transform=transform)
    mytrainloader = torch.utils.data.DataLoader(mytraindata, batch_size=1, shuffle=False)
    gnd = loadquery(params['valdata_dir'])
    cuda_gpu = torch.cuda.is_available()

    # mymodel = builGraph.getModel(params['modelName'], params['class_num'], params['Gpu'],
    #                              params['model_type'], cuda_gpu=cuda_gpu)
    mymodel = builGraph.getModel(params['modelName'], params['class_num'], params['Gpu'],
                                 'triplet', cuda_gpu=cuda_gpu)

    if os.path.exists(params['train_dir']):
        checkpoint = torch.load(params['train_dir'])
        mymodel.load_state_dict(checkpoint['model_state_dict'])

    mymodel.eval()
    with torch.no_grad():
        # Query descriptors.
        print('>> Extracting descriptors for query images...')
        qloader = torch.utils.data.DataLoader(
            ImagesFromList(root='', images=[i['queryimgid'] for i in gnd],
                           imsize=mytraindata.height, transform=mytraindata.transform),
            batch_size=1, shuffle=False, num_workers=0, pin_memory=True)
        qoolvecs = torch.zeros(params['class_num'], len(gnd)).cuda()
        lenq = len(qloader)
        for i, input in enumerate(qloader):
            # The triplet network takes three inputs, so the same image is fed to all branches.
            out, _, _ = mymodel(input.cuda(), input.cuda(), input.cuda())
            qoolvecs[:, i] = out[0].data.squeeze()
            if (i + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(i + 1, lenq), end='')
        print('')

        # Database descriptors.
        poolvecs = torch.zeros(params['class_num'], len(mytrainloader)).cuda()
        idlist = []
        for index, data in enumerate(mytrainloader):
            batch_x, batch_y, batch_id = data
            idlist.append(batch_id[0])
            if cuda_gpu:
                batch_x = batch_x.cuda()
                batch_y = batch_y.cuda()
            batch_x = batch_x.float()
            # batch_x, batch_y = Variable(batch_x), Variable(batch_y)
            out, _, _ = mymodel(batch_x, batch_x, batch_x)
            poolvecs[:, index] = out[0]

        vecs = poolvecs.cpu().numpy()
        qvecs = qoolvecs.cpu().numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        dataset = params['data_dir'].split('/')[-1].replace("train.json", "")
        compute_map_and_print(dataset, ranks, gnd, idlist)

def testOnlinepair(args, cuda_gpu, type='extractor', similartype='dot'):
    mymodel = builGraph.getModel(args.backbone, args.classnum, args.gpu,
                                 type, cuda_gpu=cuda_gpu, pretrained=False)

    if os.path.exists(args.train_dir):
        print(args.train_dir)
        checkpoint = torch.load(args.train_dir, map_location='cpu')
        mymodel.load_state_dict(checkpoint['model_state_dict'])

    # args.json_file and args.valdata_dir are parallel lists: one dataset per entry.
    for jindex, jfile in enumerate(args.json_file):
        dataset = jfile.split('/')[-1].replace("all.json", "")
        mytraindata = myDataset(path=jfile, height=args.height, width=args.width,
                                autoaugment=args.autoaugment)
        mytrainloader = torch.utils.data.DataLoader(mytraindata, batch_size=1, shuffle=False)
        gnd = loadquery(args.valdata_dir[jindex])

        mymodel.eval()
        with torch.no_grad():
            poolvecs = torch.zeros(OUTPUT_DIM[args.backbone], len(mytrainloader)).cuda()
            idlist = []
            print('>> Extracting descriptors for {} images...'.format(dataset))
            for index, data in enumerate(mytrainloader):
                batch_x, batch_y, batch_id = data
                idlist.append(batch_id[0])
                if cuda_gpu:
                    batch_x = batch_x.cuda()
                batch_x = batch_x.float()
                # batch_x, batch_y = Variable(batch_x), Variable(batch_y)
                out = mymodel(batch_x)
                poolvecs[:, index] = out.squeeze()
                if (index + 1) % 10 == 0:
                    print('\r>>>> {}/{} done...'.format(index + 1, len(mytrainloader)), end='')

            # Queries are a subset of the database: locate their columns and reorder gnd to match.
            qindexs = np.arange(len(mytrainloader))[np.in1d(idlist, [i['queryimgid'] for i in gnd])]
            newgnd = [idlist[i] for i in qindexs]
            g = [[i['queryimgid'] for i in gnd].index(j) for j in newgnd]
            gnd = [gnd[i] for i in g]

            vecs = poolvecs.cpu().numpy()
            # pca = PCA(whiten=True, n_components=1000, random_state=732)
            # vecst = pca.fit_transform(np.transpose(vecs))
            # vecst = l2n(totensor(vecst))
            # vecs = np.transpose(tonumpy(vecst))
            qvecs = vecs[:, qindexs]

            # search, rank, and print
            if similartype == 'dot':
                scores = np.dot(vecs.T, qvecs)
                ranks = np.argsort(-scores, axis=0)
            elif similartype == 'euclidean':
                dis = np.zeros([vecs.shape[1], qvecs.shape[1]])
                for j in range(qvecs.shape[1]):
                    d = (vecs - np.reshape(qvecs[:, j], (qvecs.shape[0], 1))) ** 2
                    disj = np.sum(d, axis=0)
                    dis[:, j] = disj
                ranks = np.argsort(dis, axis=0)

            compute_map_and_print(dataset, ranks, gnd, idlist)

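# testOnlinepair() above can rank either by dot product or by squared Euclidean
# distance. For L2-normalized descriptors the two orderings coincide, since
# ||v - q||^2 = 2 - 2 * (v . q). The sketch below checks that on random toy data;
# it is an illustration only and assumes unit-norm descriptors, which this
# repository's extractor may or may not guarantee.
def _dot_vs_euclidean_demo():
    import numpy as np

    dim, n_db, n_q = 16, 10, 3
    rng = np.random.default_rng(1)
    vecs = rng.standard_normal((dim, n_db))
    qvecs = rng.standard_normal((dim, n_q))
    vecs /= np.linalg.norm(vecs, axis=0, keepdims=True)
    qvecs /= np.linalg.norm(qvecs, axis=0, keepdims=True)

    # Dot-product ranking: higher score is better, so sort descending.
    dot_ranks = np.argsort(-np.dot(vecs.T, qvecs), axis=0)

    # Squared Euclidean ranking, computed column by column as in the function above.
    dis = np.zeros((n_db, n_q))
    for j in range(n_q):
        dis[:, j] = np.sum((vecs - qvecs[:, j:j + 1]) ** 2, axis=0)
    euc_ranks = np.argsort(dis, axis=0)

    assert np.array_equal(dot_ranks, euc_ranks)  # identical orderings for unit-norm vectors
    return dot_ranks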