def getQfeature(imgpath, modelconfig, trainconfig):
    """Extract the binary hash code and the 'ip1' feature vector for one image.

    Args:
        imgpath: path to the image file on disk.
        modelconfig: config object with .width/.height/.backbone/.num_classes/.modeltype.
        trainconfig: config object with .gpus and .train_dir (checkpoint path).

    Returns:
        (binarvalues, reluip): the binary string code(s) derived from the
        model logits via toBinaryString(), and the raw 'ip1' feature as a
        numpy array.
    """
    img = Image.open(imgpath).convert('RGB')
    img = img.resize((modelconfig.width, modelconfig.height))
    img = np.array(img)
    transform = transforms.Compose([
        # ToTensor accepts a PIL Image or numpy.ndarray, transposes it from
        # HWC to CHW, converts to float and divides every pixel by 255.
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    img = transform(img)
    img = torch.unsqueeze(img, dim=0)  # add the batch dimension
    cuda_gpu = torch.cuda.is_available()
    mymodel = builGraph.getModel(modelconfig.backbone, modelconfig.num_classes,
                                 trainconfig.gpus, modelconfig.modeltype,
                                 cuda_gpu=cuda_gpu)
    if os.path.exists(trainconfig.train_dir):
        checkpoint = torch.load(trainconfig.train_dir)
        mymodel.load_state_dict(checkpoint['model_state_dict'])
    mymodel.eval()
    with torch.no_grad():
        # BUG FIX: the original only assigned batch_x inside `if cuda_gpu:`,
        # which raised NameError on CPU-only machines.
        batch_x = img.cuda() if cuda_gpu else img
        batch_x = batch_x.float()
        out, features = mymodel(batch_x)
        reluip = features['ip1'].cpu().numpy()
        logits = out.cpu().numpy()
        binarvalues = toBinaryString(logits)
        return binarvalues, reluip
def buildfeaturelib(train_config, model_config):
    """Run the whole dataset through the model and periodically persist ids,
    binary codes, 'ip1' features and labels via tool.save().

    NOTE(review): `tofile` is not defined anywhere in this function —
    presumably a module-level global naming the output file; verify before use.
    """
    transform = transforms.Compose(
        # ToTensor accepts a PIL Image or numpy.ndarray, transposes it from
        # HWC to CHW, converts to float and divides every pixel by 255.
        [transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    mytraindata = myDataset(path=train_config.data_dir,
                            height=model_config.height,
                            width=model_config.width,
                            autoaugment=model_config.autoaugment,
                            transform=transform)
    mytrainloader = torch.utils.data.DataLoader(mytraindata, batch_size=1,
                                                shuffle=False)
    cuda_gpu = torch.cuda.is_available()
    mymodel = builGraph.getModel(model_config.backbone,
                                 model_config.num_classes, train_config.gpus,
                                 model_config.modeltype, cuda_gpu=cuda_gpu)
    if os.path.exists(train_config.train_dir):
        checkpoint = torch.load(train_config.train_dir)
        mymodel.load_state_dict(checkpoint['model_state_dict'])
    # Accumulators for everything extracted so far (saved incrementally below).
    relu_ip1_list = []
    logits_list = []
    id_list = []
    label_list = []
    # Total number of steps; batch_size is 1, hence the trivial //1.
    batch_idx = len(mytrainloader) // 1
    #,batch_x,batch_y,batch_id
    mymodel.eval()
    with torch.no_grad():
        for index, data in enumerate(mytrainloader):
            batch_x, batch_y, batch_id = data
            if cuda_gpu:
                batch_x = batch_x.cuda()
                batch_y = batch_y.cuda()
            batch_x = batch_x.float()
            #batch_x, batch_y = Variable(batch_x), Variable(batch_y)
            out, features = mymodel(batch_x)
            reluip = features['ip1'].cpu().numpy()
            logits = out.cpu().numpy()
            batch_y = batch_y.cpu().numpy()
            binarvalues = toBinaryString(logits)
            for binary in binarvalues:
                logits_list.append(binary)
            for unit in reluip:
                relu_ip1_list.append(unit)
            for id in batch_id:
                id_list.append(id)
            for label in batch_y:
                label_list.append(str(label))
            if index % 10 == 0:
                # Persist everything accumulated so far every 10 steps.
                tool.save(id_list, logits_list, relu_ip1_list, label_list,
                          tofile)
                print('Step %d, %.3f%% extracted.' %
                      (index, (index + 1) / batch_idx * 100))
def trainLandmark(params, transform):
    """Train the landmark heat-map model described by `params`.

    Args:
        params: dict with data_dir/height/width/autoaugment/BATCH_SIZE/
            modelName/class_num/Gpu/model_type/train_dir/maxepoch keys.
        transform: transform passed through to heatmapDataset.

    Resumes from params['train_dir'] when it exists, prints the step loss
    every 100 iterations, and writes a 'parameter_NN.pkl' checkpoint next to
    params['train_dir'] after every epoch.
    """
    mytraindata = heatmapDataset(path=params['data_dir'],
                                 height=params['height'],
                                 width=params['width'],
                                 autoaugment=params['autoaugment'],
                                 transform=transform)
    mytrainloader = torch.utils.data.DataLoader(
        mytraindata, batch_size=params['BATCH_SIZE'], shuffle=True)
    cuda_gpu = torch.cuda.is_available()
    mymodel = builGraph.getModel(params['modelName'], params['class_num'],
                                 params['Gpu'], params['model_type'],
                                 cuda_gpu=cuda_gpu)
    optimizer = torch.optim.Adam(mymodel.parameters())
    startepoch = 0
    if os.path.exists(params['train_dir']):
        checkpoint = torch.load(params['train_dir'])
        mymodel.load_state_dict(checkpoint['model_state_dict'])
        # BUG FIX: the optimizer state was loaded twice in the original;
        # loading it once is sufficient.
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        startepoch = checkpoint['epoch']
    lendata = len(mytrainloader)
    for epoch in range(startepoch, params['maxepoch']):
        print('epoch {}'.format(epoch + 1))
        train_loss = 0.
        for i, sample in enumerate(mytrainloader):
            iter_start_time = time.time()
            for key in sample:
                if isinstance(sample[key], list):
                    continue
                # NOTE(review): .cuda() is called unconditionally even though
                # cuda_gpu was computed above — this fails on CPU-only hosts.
                sample[key] = sample[key].cuda().float()
            out = mymodel(sample)
            loss = buildLoss.getGLEMloss(sample, out, params['class_num'])
            train_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            t = time.time() - iter_start_time
            if (i + 1) % 100 == 0:
                print('Epoch [{}/{}], Step [{}], Loss: {:.4f}, Time:{:.3f}'.
                      format(epoch + 1, params['maxepoch'], i + 1,
                             loss.item(), t))
        # Checkpoint this epoch under 'parameter_NN.pkl' beside train_dir.
        pklword = params['train_dir'].split('/')[-1]
        newpkl = 'parameter_%02d.pkl' % (epoch + 1)
        path = params['train_dir'].replace(pklword, newpkl)
        torch.save(
            {
                'epoch': epoch,
                'model_state_dict': mymodel.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': loss
            }, path)
        train_loss = train_loss / (lendata)
        print('epoch Train Loss: {:.6f}'.format(train_loss))
def main():
    """Entry point: train a classification/'retrieval' model with an ArcFace
    margin head, resuming model, margin head, optimizer and bookkeeping
    globals from args.train_dir when a checkpoint exists."""
    config.load_cfg_fom_args("Train a cls model.")
    cfg.freeze()  # make the global config immutable for the rest of the run
    global args
    global min_loss
    global step
    args = parser.parse_args()
    cuda_gpu = torch.cuda.is_available()
    mytraindata = generalclsDataset(args.data_dir, cfg)
    mytrainloader = DataLoaderX(mytraindata,
                                batch_size=args.batch_size,
                                shuffle=True,
                                num_workers=0)
    mymodel = builGraph.getModel(args.backbone, args.classnum, args.gpu,
                                 'retrieval', cuda_gpu=cuda_gpu,
                                 pretrained=True)
    #mymodel=models.resnet50(pretrained=True).cuda()
    if args.optimizer == 'gd':
        optimizer = torch.optim.SGD(mymodel.parameters(), lr=args.LR)
    else:
        optimizer = torch.optim.Adam(mymodel.parameters(), lr=args.LR)
    # ArcFace margin head, wrapped in DataParallel like the backbone.
    Arcloss = torch.nn.DataParallel(ArcMarginLoss(
        args.classnum, in_features=OUTPUT_DIM[args.backbone]),
                                    device_ids=args.gpu).cuda()
    thisloss = nn.CrossEntropyLoss()
    startepoch = 0
    if os.path.exists(args.train_dir):
        print(args.train_dir, flush=True)
        checkpoint = torch.load(args.train_dir, map_location='cpu')
        # NOTE(review): printing named_parameters() only shows a generator
        # object, not the weights — looks like leftover debugging.
        print(mymodel.named_parameters(), flush=True)
        mymodel.load_state_dict(checkpoint['model_state_dict'])
        Arcloss.load_state_dict(checkpoint['arcface_state_dict'])
        print(mymodel.named_parameters(), flush=True)
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        startepoch = checkpoint['epoch'] + 1
        min_loss = checkpoint['loss']
        if 'step' in checkpoint:
            step = checkpoint['step']
    for epoch in range(startepoch, args.maxepoch):
        _learning_rate_schedule(optimizer, epoch, args.maxepoch, args.LR)
        trainclassification(mymodel, epoch, optimizer, thisloss, mytrainloader,
                            Arcloss)
        #_learning_rate_schedule(optimizer,epoch,args.maxepoch,args.LR)
        #test(mymodel,mytrainloader)
    writer.close()
def testLandmark(params, transform):
    """Evaluate the landmark model on params['data_dir'] and dump predicted /
    ground-truth landmark positions to two JSON files.

    Evaluation is capped after ~500 batches. Results accumulate in the
    module-level `resultsList` / `dataset_true` structures (defined outside
    this function — verify they are reset between calls).
    """
    mytraindata = heatmapDataset(path=params['data_dir'],
                                 height=params['height'],
                                 width=params['width'],
                                 autoaugment=params['autoaugment'],
                                 transform=transform)
    mytrainloader = torch.utils.data.DataLoader(mytraindata,
                                                batch_size=1,
                                                shuffle=False)
    cuda_gpu = torch.cuda.is_available()
    mymodel = builGraph.getModel(params['modelName'], params['class_num'],
                                 params['Gpu'], params['model_type'],
                                 cuda_gpu=cuda_gpu)
    if os.path.exists(params['train_dir']):
        checkpoint = torch.load(params['train_dir'])
        mymodel.load_state_dict(checkpoint['model_state_dict'])
    mymodel.eval()
    test_step = len(mytrainloader)
    print('Evaluating.....')
    with torch.no_grad():
        evaluator = Evaluator()
        for i, sample in enumerate(mytrainloader):
            for key in sample:
                if isinstance(sample[key], list):
                    continue
                # NOTE(review): unconditional .cuda() — fails on CPU-only hosts.
                sample[key] = sample[key].cuda().float()
            out = mymodel(sample)
            evaluator.add(out, sample)
            predict_pos, true_pos, landmark_vis = evaluator.getpredictpos()
            writeTotruejsons(sample, true_pos, landmark_vis)
            resultsList.append(
                writeToResultJsons(sample, predict_pos, landmark_vis))
            print('Val Step [{}/{}]'.format(i + 1, test_step))
            if i > 500:
                break  # cap evaluation at ~500 samples
    #results=evaluator.evaluate()
    json_name = 'short_sleeve_top_true.json'
    with open(json_name, 'w') as f:
        json.dump(dataset_true, f)
    json_name = 'short_sleeve_top_test.json'
    with open(json_name, 'w') as f:
        json.dump(resultsList, f)
    '''print(
def main():
    """Entry point: fine-tune a 'glem' model with the pretrained backbone
    frozen, resuming from args.train_dir when present, and evaluating the
    previous epoch's checkpoint via testOnlinepair() each epoch."""
    global args
    global min_loss
    args = parser.parse_args()
    cuda_gpu = torch.cuda.is_available()
    mytraindata = myDataset(path=args.data_dir, autoaugment=args.autoaugment)
    mytrainloader = torch.utils.data.DataLoader(mytraindata,
                                                batch_size=args.batch_size,
                                                shuffle=True)
    mymodel = builGraph.getModel(args.backbone, args.classnum, args.gpu,
                                 'glem', cuda_gpu=cuda_gpu, pretrained=True)
    # Freeze the pretrained base layers; only the head is optimized below.
    for para in mymodel.module._baselayer.parameters():
        para.requires_grad = False
    if args.optimizer == 'gd':
        optimizer = torch.optim.SGD(mymodel.parameters(), lr=args.LR)
    else:
        #optimizer = torch.optim.Adam(mymodel.parameters())
        optimizer = torch.optim.Adam(
            filter(lambda p: p.requires_grad, mymodel.parameters()))
    thisloss = torch.nn.CrossEntropyLoss()
    startepoch = 0
    scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.8)
    if os.path.exists(args.train_dir):
        checkpoint = torch.load(args.train_dir)
        mymodel.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        startepoch = checkpoint['epoch'] + 1
        min_loss = checkpoint['loss']
        if 'scheduler' in checkpoint:
            scheduler = checkpoint['scheduler']
    for epoch in range(startepoch, args.maxepoch):
        # NOTE(review): scheduler.step() before training is the pre-PyTorch-1.1
        # calling order — confirm this is intended with the installed version.
        scheduler.step()
        print('epoch eval {}'.format(epoch))
        testargs = partest.parse_args()
        pklword = args.train_dir.split('/')[-1]
        newpkl = 'parameter_%02d.pkl' % (epoch)
        path = args.train_dir.replace(pklword, newpkl)
        print(path)
        testargs.train_dir = path
        if epoch != startepoch:
            # Evaluate the checkpoint written by the previous epoch.
            testOnlinepair(testargs, cuda_gpu, type='glemextractor')
        trainGLEM(mymodel, epoch, cuda_gpu, optimizer, mytrainloader,
                  thisloss, scheduler)
def testmultibranch(args, cuda_gpu):
    """Retrieval evaluation for an 'extractor' model: extract one descriptor
    per database image, pick out the query images listed in the ground truth
    (args.valdata_dir), rank by dot-product similarity and print mAP."""
    # NOTE(review): args.train_dir is overwritten with a hard-coded path —
    # whatever the caller passed is ignored.
    args.train_dir = '/mnt/sdb/shibaorong/logs/paris/triplet/usmine/withclass_cluster11/parameter_61.pkl'
    mymodel = builGraph.getModel(args.backbone, args.classnum, args.gpu,
                                 'extractor', cuda_gpu=cuda_gpu,
                                 pretrained=False)
    if os.path.exists(args.train_dir):
        checkpoint = torch.load(args.train_dir)
        mymodel.load_state_dict(checkpoint['model_state_dict'])
    mytraindata = myDataset(path=args.json_file, height=args.height,
                            width=args.width, autoaugment=args.autoaugment)
    mytrainloader = torch.utils.data.DataLoader(mytraindata, batch_size=1,
                                                shuffle=False)
    gnd = loadquery(args.valdata_dir)
    mymodel.eval()
    with torch.no_grad():
        # One descriptor column per database image.
        poolvecs = torch.zeros(OUTPUT_DIM[args.backbone],
                               len(mytrainloader)).cuda()
        idlist = []
        print('>> Extracting descriptors for database images...')
        for index, data in enumerate(mytrainloader):
            batch_x, batch_y, batch_id = data
            idlist.append(batch_id[0])
            if cuda_gpu:
                batch_x = batch_x.cuda()
            batch_x = batch_x.float()
            # batch_x, batch_y = Variable(batch_x), Variable(batch_y)
            out = mymodel(batch_x)
            #out=torch.cat((out1,out2),-1)
            poolvecs[:, index] = out
            if (index + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(index + 1,
                                                    len(mytrainloader)),
                      end='')
        # Queries are the database entries whose id appears in the ground
        # truth; reorder gnd to match the order they occur in idlist.
        qindexs = np.arange(len(mytrainloader))[np.in1d(
            idlist, [i['queryimgid'] for i in gnd])]
        newgnd = [idlist[i] for i in qindexs]
        g = [[i['queryimgid'] for i in gnd].index(j) for j in newgnd]
        gnd = [gnd[i] for i in g]
        vecs = poolvecs.cpu().numpy()
        qvecs = vecs[:, qindexs]
        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        dataset = args.json_file.split('/')[-1].replace("all.json", "")
        compute_map_and_print(dataset, ranks, gnd, idlist)
def main():
    """Entry point: build a triplet model, restore its weights from the
    checkpoint at args.train_dir if one exists, then run testtriplet()."""
    global args
    args = parser.parse_args()
    use_cuda = torch.cuda.is_available()
    model = builGraph.getModel(args.backbone, args.classnum, args.gpu,
                               'triplet', cuda_gpu=use_cuda)
    if os.path.exists(args.train_dir):
        state = torch.load(args.train_dir)
        model.load_state_dict(state['model_state_dict'])
    testtriplet(args, model, use_cuda)
def main():
    """Entry point: joint triplet + classification fine-tuning ('multitrain'),
    resuming from args.finetune_dir and evaluating every 5th epoch's saved
    checkpoint with testmodel()."""
    global args
    global min_loss
    args = parser.parse_args()
    cuda_gpu = torch.cuda.is_available()
    mymodel = builGraph.getModel(args.backbone, args.classnum, args.gpu,
                                 'multitrain', cuda_gpu=cuda_gpu)
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(mymodel.parameters(), lr=args.LR)
    else:
        optimizer = torch.optim.Adam(mymodel.parameters())
    scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.8)
    startepoch = 0
    if os.path.exists(args.finetune_dir):
        checkpoint = torch.load(args.finetune_dir, map_location='cpu')
        mymodel.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        startepoch = checkpoint['epoch'] + 1
        min_loss = checkpoint['loss']
        if 'scheduler' in checkpoint:
            scheduler = checkpoint['scheduler']
    for epoch in range(startepoch, args.maxepoch):
        if (epoch + 1) % 5 == 0:
            # Every 5th epoch: evaluate the checkpoint saved for this epoch.
            print('epoch eval {}'.format(epoch))
            testargs = partest.parse_args()
            pklword = args.train_dir.split('/')[-1]
            newpkl = 'parameter_%02d.pkl' % (epoch)
            path = args.train_dir.replace(pklword, newpkl)
            print(path)
            testargs.train_dir = path
            testargs.json_file = args.jsonfile
            testargs.valdata_dir = args.valdata_dir
            testargs.gpu = args.gpu
            testargs.classnum = args.classnum
            testmodel(testargs, cuda_gpu, type='base')
        # Re-mine hard triplets each epoch, then train on them.
        mytraindata = miningTripletData(args,
                                        transform=eyetransfrom(args.height))
        traintriandcls(mymodel, epoch, cuda_gpu, optimizer, mytraindata,
                       scheduler)
        scheduler.step()
def main():
    """Entry point: siamese 'retrieval' training on CartoonDataset, resuming
    model/optimizer/min_loss/step from args.train_dir when present and
    resampling the pair list with create_epoch() every epoch."""
    global args
    global min_loss
    global step
    args = parser.parse_args()
    cuda_gpu = torch.cuda.is_available()
    mymodel = builGraph.getModel(args.backbone, args.classnum, args.gpu,
                                 'retrieval', cuda_gpu=cuda_gpu)
    if args.optimizer == 'gd':
        optimizer = torch.optim.SGD(mymodel.parameters(), lr=args.LR)
    else:
        optimizer = torch.optim.Adam(mymodel.parameters())
    scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.8)
    startepoch = 0
    if os.path.exists(args.train_dir):
        print(args.train_dir, flush=True)
        checkpoint = torch.load(args.train_dir, map_location='cpu')
        # NOTE(review): printing named_parameters() only shows a generator
        # object — looks like leftover debugging.
        print(mymodel.named_parameters(), flush=True)
        mymodel.load_state_dict(checkpoint['model_state_dict'])
        print(mymodel.named_parameters(), flush=True)
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        startepoch = checkpoint['epoch'] + 1
        min_loss = checkpoint['loss']
        if 'step' in checkpoint:
            step = checkpoint['step']
    mytraindata = CartoonDataset(args.data_dir)
    for epoch in range(startepoch, args.maxepoch):
        _learning_rate_schedule(optimizer, epoch, args.maxepoch, args.LR)
        # Resample training pairs for this epoch, then rebuild the loader.
        mytraindata.create_epoch()
        mytrainloader = torch.utils.data.DataLoader(mytraindata,
                                                    batch_size=args.batch_size,
                                                    shuffle=True)
        trainSiamese(mymodel, epoch, cuda_gpu, optimizer, mytrainloader,
                     scheduler)
def main():
    """Entry point: plain 'base' classification training on GoclassDataset
    with a StepLR schedule, resuming from args.train_dir when present."""
    global args
    global min_loss
    args = parser.parse_args()
    cuda_gpu = torch.cuda.is_available()
    mytraindata = GoclassDataset(path=args.data_dir,
                                 autoaugment=args.autoaugment)
    mytrainloader = DataLoaderX(mytraindata,
                                batch_size=args.batch_size,
                                shuffle=True,
                                num_workers=15)
    mymodel = builGraph.getModel(args.backbone, args.classnum, args.gpu,
                                 'base', cuda_gpu=cuda_gpu, pretrained=True)
    if args.optimizer == 'gd':
        optimizer = torch.optim.SGD(mymodel.parameters(), lr=args.LR)
    else:
        optimizer = torch.optim.Adam(mymodel.parameters())
    # Criterion wrapped so it can consume DataParallel's per-GPU outputs.
    thisloss = DataParallelCriterion(torch.nn.CrossEntropyLoss())
    startepoch = 0
    scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.8)
    if os.path.exists(args.train_dir):
        checkpoint = torch.load(args.train_dir, map_location='cpu')
        mymodel.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        startepoch = checkpoint['epoch'] + 1
        min_loss = checkpoint['loss']
        if 'scheduler' in checkpoint:
            scheduler = checkpoint['scheduler']
    for epoch in range(startepoch, args.maxepoch):
        trainclassification(mymodel, epoch, optimizer, thisloss,
                            mytrainloader, scheduler)
        scheduler.step()
def testLandmark(params, transform):
    """Visual debug pass over the validation set: for each sample, re-download
    the original image from its URL and draw predicted, ground-truth and
    transformed landmark positions onto it with printdot()."""
    mytraindata = heatmapDataset(path=params['valdata_dir'],
                                 height=params['height'],
                                 width=params['width'],
                                 autoaugment=params['autoaugment'],
                                 transform=transform)
    mytrainloader = torch.utils.data.DataLoader(mytraindata, batch_size=1,
                                                shuffle=False)
    cuda_gpu = torch.cuda.is_available()
    mymodel = builGraph.getModel(params['modelName'], params['class_num'],
                                 params['Gpu'], params['model_type'],
                                 cuda_gpu=cuda_gpu)
    item = mytraindata.items[0]
    url = item['url']
    if os.path.exists(params['train_dir']):
        checkpoint = torch.load(params['train_dir'])
        mymodel.load_state_dict(checkpoint['model_state_dict'])
    mymodel.eval()
    test_step = len(mytrainloader)
    print('Evaluating.....')
    with torch.no_grad():
        evaluator = Evaluator()
        for i, sample in enumerate(mytrainloader):
            for key in sample:
                if isinstance(sample[key], list):
                    continue
                # NOTE(review): unconditional .cuda() — fails on CPU-only hosts.
                sample[key] = sample[key].cuda().float()
            url = sample['url']
            landmarkpos = sample['orglandmarkpos']
            transfrompos = sample['landmark_pos']
            # Undo the Normalize((0.5,...),(0.5,...)) used at load time so the
            # tensor is back in the 0..255 range for visualisation.
            img = sample['image'][0].cpu().numpy()
            img = (img / 2 + 0.5) * 255
            # NOTE(review): np.int is deprecated and removed in NumPy >= 1.24 —
            # replace with int or np.int64 when touching this code.
            img = img.astype(np.int)
            img = img.transpose((1, 2, 0))
            out = mymodel(sample)
            score_map = out['lm_pos_map']
            count = score_map.size(0)
            [x1s, y1s, x2s, y2s] = sample['bounding_box']
            for index in range(count):
                urli = url[index]
                # NOTE(review): retries forever on download failure — no
                # attempt limit or backoff.
                while True:
                    try:
                        resp = requests.get(urli, stream=True).raw
                        origimg = np.asarray(bytearray(resp.read()),
                                             dtype="uint8")
                        origimg = cv2.imdecode(origimg, cv2.IMREAD_COLOR)
                        origimg = cv2.cvtColor(origimg, cv2.COLOR_BGR2RGB)
                    except Exception as e:
                        print(e)
                        continue
                    break
                s = score_map[index, :, :, :]
                x1 = int(x1s[index].cpu().numpy())
                y1 = int(y1s[index].cpu().numpy())
                x2 = int(x2s[index].cpu().numpy())
                y2 = int(y2s[index].cpu().numpy())
                orh = y2 - y1
                orw = x2 - x1
                # NOTE(review): `eval` here appears to be a project helper that
                # decodes a heat-map into positions, shadowing the builtin —
                # verify against the module's imports.
                predict_pos = eval(s)
                # Map from the 224x224 crop back to original-image coordinates.
                # NOTE(review): the scale order is [orh/224, orw/224] here but
                # [orw/224, orh/224] for trans_pos below — one may be swapped.
                ppos = predict_pos * [orh / 224, orw / 224]
                ppos += [x1, y1]
                ppos = [[int(i) for i in j] for j in ppos]
                printdot(origimg, ppos, 'origintest.jpg')
                true_pos = landmarkpos[index, :, :].int()
                printdot(origimg, true_pos, 'truetest.jpg')
                trans_pos = transfrompos[index, :, :].cpu().numpy()
                transpos = trans_pos * [orw / 224, orh / 224]
                transpos += [x1, y1]
                transpos = [[int(i) for i in j] for j in transpos]
                printdot(origimg, transpos, 'transtruetest.jpg')
            print('Val Step [{}/{}]'.format(i + 1, test_step))
        results = evaluator.evaluate()
def testGEM(args, cuda_gpu):
    """Retrieval evaluation for a 'retrieval' model: extract one descriptor
    per query image (ground truth in args.valdata_dir) and per database image
    (args.json_file), rank by dot-product similarity and print mAP."""
    mymodel = builGraph.getModel(args.backbone, args.classnum, args.gpu,
                                 'retrieval', cuda_gpu=cuda_gpu,
                                 pretrained=False)
    if os.path.exists(args.train_dir):
        checkpoint = torch.load(args.train_dir)
        mymodel.load_state_dict(checkpoint['model_state_dict'])
    mytraindata = myDataset(path=args.json_file, height=args.height,
                            width=args.width, autoaugment=args.autoaugment)
    mytrainloader = torch.utils.data.DataLoader(mytraindata, batch_size=1,
                                                shuffle=False)
    gnd = loadquery(args.valdata_dir)
    mymodel.eval()
    with torch.no_grad():
        print('>> Extracting descriptors for query images...')
        qloader = torch.utils.data.DataLoader(ImagesFromList(
            root='',
            images=[i['queryimgid'] for i in gnd],
            imsize=mytraindata.height,
            transform=mytraindata.transform),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=0,
                                              pin_memory=True)
        #qoolvecs = torch.zeros(args.classnum, len(gnd)).cuda()
        # One descriptor column per query; dimensionality from the backbone.
        qoolvecs = torch.zeros(OUTPUT_DIM[args.backbone], len(gnd)).cuda()
        lenq = len(qloader)
        for i, input in enumerate(qloader):
            out = mymodel(input.cuda(), need_feature=False)
            qoolvecs[:, i] = out.data.squeeze()
            if (i + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(i + 1, lenq), end='')
        print('')
        poolvecs = torch.zeros(OUTPUT_DIM[args.backbone],
                               len(mytrainloader)).cuda()
        idlist = []
        print('>> Extracting descriptors for database images...')
        for index, data in enumerate(mytrainloader):
            batch_x, batch_y, batch_id = data
            idlist.append(batch_id[0])
            if cuda_gpu:
                batch_x = batch_x.cuda()
            batch_x = batch_x.float()
            # batch_x, batch_y = Variable(batch_x), Variable(batch_y)
            out = mymodel(batch_x, need_feature=False)
            poolvecs[:, index] = out
            if (index + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(index + 1,
                                                    len(mytrainloader)),
                      end='')
        vecs = poolvecs.cpu().numpy()
        qvecs = qoolvecs.cpu().numpy()
        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        dataset = args.json_file.split('/')[-1].replace("all.json", "")
        compute_map_and_print(dataset, ranks, gnd, idlist)
def testOnlinepair(params, transform):
    """Retrieval evaluation using a 'triplet' model: extract descriptors for
    query and database images, rank by dot product and print mAP.

    The triplet network is fed the same image on all three branches; only the
    first branch's output is used as the descriptor.
    """
    mytraindata = myDataset(path=params['data_dir'], height=params['height'],
                            width=params['width'],
                            autoaugment=params['autoaugment'],
                            transform=transform)
    mytrainloader = torch.utils.data.DataLoader(mytraindata, batch_size=1,
                                                shuffle=False)
    gnd = loadquery(params['valdata_dir'])
    cuda_gpu = torch.cuda.is_available()
    '''mymodel = builGraph.getModel(params['modelName'], params['class_num'], params['Gpu'], params['model_type'],cuda_gpu=cuda_gpu)'''
    mymodel = builGraph.getModel(params['modelName'], params['class_num'],
                                 params['Gpu'], 'triplet', cuda_gpu=cuda_gpu)
    if os.path.exists(params['train_dir']):
        checkpoint = torch.load(params['train_dir'])
        mymodel.load_state_dict(checkpoint['model_state_dict'])
    mymodel.eval()
    with torch.no_grad():
        print('>> Extracting descriptors for query images...')
        qloader = torch.utils.data.DataLoader(ImagesFromList(
            root='',
            images=[i['queryimgid'] for i in gnd],
            imsize=mytraindata.height,
            transform=mytraindata.transform),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=0,
                                              pin_memory=True)
        qoolvecs = torch.zeros(params['class_num'], len(gnd)).cuda()
        lenq = len(qloader)
        for i, input in enumerate(qloader):
            # Same tensor on all three branches; keep branch 0's output.
            out, _, _ = mymodel(input.cuda(), input.cuda(), input.cuda())
            qoolvecs[:, i] = out[0].data.squeeze()
            if (i + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(i + 1, lenq), end='')
        print('')
        poolvecs = torch.zeros(params['class_num'], len(mytrainloader)).cuda()
        idlist = []
        for index, data in enumerate(mytrainloader):
            batch_x, batch_y, batch_id = data
            idlist.append(batch_id[0])
            if cuda_gpu:
                batch_x = batch_x.cuda()
                batch_y = batch_y.cuda()
            batch_x = batch_x.float()
            # batch_x, batch_y = Variable(batch_x), Variable(batch_y)
            out, _, _ = mymodel(batch_x, batch_x, batch_x)
            poolvecs[:, index] = out[0]
        vecs = poolvecs.cpu().numpy()
        qvecs = qoolvecs.cpu().numpy()
        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        dataset = params['data_dir'].split('/')[-1].replace("train.json", "")
        compute_map_and_print(dataset, ranks, gnd, idlist)
    '''relu_ip1_list = []
def testSiamese(params, transform):
    """Train a siamese model on SiameseData pairs, checkpointing after every
    epoch to params['train_dir'].

    NOTE(review): despite its name this function *trains* (backprop + save),
    it does not evaluate — consider renaming at the call sites.

    Args:
        params: dict with data_dir/height/width/autoaugment/BATCH_SIZE/
            modelName/featuredim/Gpu/model_type/train_method/LR/train_dir/
            maxepoch keys.
        transform: transform passed through to SiameseData.
    """
    mytraindata = SiameseData(path=params['data_dir'],
                              height=params['height'],
                              width=params['width'],
                              autoaugment=params['autoaugment'],
                              transform=transform)
    mytrainloader = torch.utils.data.DataLoader(
        mytraindata, batch_size=params['BATCH_SIZE'], shuffle=True)
    cuda_gpu = torch.cuda.is_available()
    mymodel = builGraph.getModel(params['modelName'], params['featuredim'],
                                 params['Gpu'], params['model_type'],
                                 cuda_gpu=cuda_gpu)
    if params['train_method'] == 'gd':
        optimizer = torch.optim.SGD(mymodel.parameters(), lr=params['LR'])
    else:
        optimizer = torch.optim.Adam(mymodel.parameters())
    startepoch = 0
    if os.path.exists(params['train_dir']):
        checkpoint = torch.load(params['train_dir'])
        mymodel.load_state_dict(checkpoint['model_state_dict'])
        # BUG FIX: the optimizer state was loaded twice in the original;
        # loading it once is sufficient.
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        startepoch = checkpoint['epoch']
    for epoch in range(startepoch, params['maxepoch']):
        print('epoch {}'.format(epoch + 1))
        for image1, image2, label in mytrainloader:
            train_loss = 0.
            if cuda_gpu:
                image1 = image1.cuda()
                image2 = image2.cuda()
                label = label.cuda()
            image1 = image1.float()
            image2 = image2.float()
            image1, image2, label = Variable(image1), Variable(
                image2), Variable(label)
            out1, out2 = mymodel(image1, image2)
            loss = buildLoss.getSiameseloss(out1, out2, label)
            train_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Per-batch loss normalised by the batch size.
            print('Train Loss: {:.6f}'.format(train_loss / (len(image1))))
        # Overwrite the single checkpoint file after every epoch.
        torch.save(
            {
                'epoch': epoch,
                'model_state_dict': mymodel.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': loss
            }, params['train_dir'])
def trainOnlinepair(params, transform):
    """Train a DSH-style hashing model against one-hot labels, saving a
    'parameter_NN.pkl' checkpoint per epoch via save_checkpoint()."""
    minloss = float("inf")  # best (lowest) epoch loss seen so far
    mytraindata = myDataset(path=params['data_dir'], height=params['height'],
                            width=params['width'],
                            autoaugment=params['autoaugment'],
                            transform=transform)
    mytrainloader = torch.utils.data.DataLoader(
        mytraindata, batch_size=params['BATCH_SIZE'], shuffle=True)
    cuda_gpu = torch.cuda.is_available()
    mymodel = builGraph.getModel(params['modelName'], params['class_num'],
                                 params['Gpu'], params['model_type'],
                                 cuda_gpu=cuda_gpu)
    if params['train_method'] == 'gd':
        optimizer = torch.optim.SGD(mymodel.parameters(), lr=params['LR'])
    else:
        optimizer = torch.optim.Adam(mymodel.parameters(), lr=0.001)
    startepoch = 0
    if os.path.exists(params['train_dir']):
        # Resume model/optimizer state and the best loss recorded so far.
        checkpoint = torch.load(params['train_dir'])
        mymodel.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        startepoch = checkpoint['epoch']
        minloss = checkpoint['loss']
    lendata = len(mytrainloader)
    for epoch in range(startepoch, params['maxepoch']):
        print('epoch {}'.format(epoch + 1))
        train_loss = 0.
        for index, data in enumerate(mytrainloader):
            batch_x, batch_y, _ = data
            batch_y = to_Onehot(batch_y, params['class_num'])
            if cuda_gpu:
                batch_x = batch_x.cuda()
                batch_y = batch_y.cuda()
            batch_x = batch_x.float()
            batch_x, batch_y = Variable(batch_x), Variable(batch_y)
            out, _ = mymodel(batch_x)
            loss = buildLoss.getDshloss(out, batch_y, params['class_num'])
            train_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (index + 1) % 10 == 0:
                print('Train Loss: {:.6f}'.format(loss))
        pklword = params['train_dir'].split('/')[-1]
        newpkl = 'parameter_%02d.pkl' % (epoch + 1)
        path = params['train_dir'].replace(pklword, newpkl)
        train_loss = train_loss / lendata
        # NOTE(review): minloss is never updated after a new best epoch, so
        # every epoch below the initial value is flagged best — confirm intent.
        is_best = train_loss < minloss
        save_checkpoint(
            {
                'epoch': epoch,
                'model_state_dict': mymodel.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': train_loss
            }, is_best, path)
        print('epoch Train Loss: {:.6f}'.format(train_loss))
def testTriplet(params, transform):
    """Retrieval evaluation for a 'triplet' model over OnlineTripletData:
    extract query and database descriptors, rank by dot product, print mAP."""
    mytraindata = OnlineTripletData(path=params['data_dir'],
                                    autoaugment=params['autoaugment'],
                                    outputdim=params['class_num'],
                                    imsize=params['height'],
                                    transform=transform)
    cuda_gpu = torch.cuda.is_available()
    miningmodel = builGraph.getModel(params['modelName'], params['class_num'],
                                     params['Gpu'], 'triplet',
                                     cuda_gpu=cuda_gpu)
    gnd = loadquery(params['valdata_dir'])
    if os.path.exists(params['train_dir']):
        checkpoint = torch.load(params['train_dir'])
        miningmodel.load_state_dict(checkpoint['model_state_dict'])
    miningmodel.eval()
    with torch.no_grad():
        print('>> Extracting descriptors for query images...')
        qloader = torch.utils.data.DataLoader(ImagesFromList(
            root='',
            images=[i['queryimgid'] for i in gnd],
            imsize=mytraindata.imsize,
            transform=mytraindata.transform),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=0,
                                              pin_memory=True)
        qoolvecs = torch.zeros(params['class_num'], len(gnd)).cuda()
        for i, input in enumerate(qloader):
            out, _ = miningmodel(input.cuda())
            qoolvecs[:, i] = out.data.squeeze()
            # NOTE(review): progress cap uses qsize for both loops — for the
            # database loop below len(mytraindata.data) may differ; verify.
            if (i + 1) % mytraindata.print_freq == 0 or (
                    i + 1) == mytraindata.qsize:
                print('\r>>>> {}/{} done...'.format(i + 1, mytraindata.qsize),
                      end='')
        print('')
        print('>> Extracting descriptors for data images...')
        dloader = torch.utils.data.DataLoader(ImagesFromList(
            root='',
            images=[i['filenames'] for i in mytraindata.data],
            imsize=mytraindata.imsize,
            transform=mytraindata.transform),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=0,
                                              pin_memory=True)
        poolvecs = torch.zeros(params['class_num'],
                               len(mytraindata.data)).cuda()
        idlist = [i['filenames'] for i in mytraindata.data]
        for i, input in enumerate(dloader):
            out, _ = miningmodel(input.cuda())
            poolvecs[:, i] = out.data.squeeze()
            if (i + 1) % mytraindata.print_freq == 0 or (
                    i + 1) == mytraindata.qsize:
                print('\r>>>> {}/{} done...'.format(i + 1, mytraindata.qsize),
                      end='')
        print('')
        vecs = poolvecs.cpu().numpy()
        qvecs = qoolvecs.cpu().numpy()
        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        dataset = params['data_dir'].split('/')[-1].replace("train.json", "")
        compute_map_and_print(dataset, ranks, gnd, idlist)
def trainClassification(params, transform):
    """Train a plain classifier on myDataset, checkpointing to
    params['train_dir'] after every epoch.

    Args:
        params: dict with data_dir/height/width/autoaugment/BATCH_SIZE/
            modelName/class_num/Gpu/model_type/train_method/LR/train_dir/
            maxepoch keys.
        transform: transform passed through to myDataset.
    """
    mytraindata = myDataset(path=params['data_dir'], height=params['height'],
                            width=params['width'],
                            autoaugment=params['autoaugment'],
                            transform=transform)
    mytrainloader = torch.utils.data.DataLoader(
        mytraindata, batch_size=params['BATCH_SIZE'], shuffle=True)
    cuda_gpu = torch.cuda.is_available()
    mymodel = builGraph.getModel(params['modelName'], params['class_num'],
                                 params['Gpu'], params['model_type'],
                                 cuda_gpu=cuda_gpu)
    if params['train_method'] == 'gd':
        optimizer = torch.optim.SGD(mymodel.parameters(), lr=params['LR'])
    else:
        optimizer = torch.optim.Adam(mymodel.parameters())
    startepoch = 0
    if os.path.exists(params['train_dir']):
        checkpoint = torch.load(params['train_dir'])
        mymodel.load_state_dict(checkpoint['model_state_dict'])
        # BUG FIX: the optimizer state was loaded twice in the original;
        # loading it once is sufficient.
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        startepoch = checkpoint['epoch']
    for epoch in range(startepoch, params['maxepoch']):
        print('epoch {}'.format(epoch + 1))
        for batch_x, batch_y, _ in mytrainloader:
            train_loss = 0.
            train_acc = 0.
            if cuda_gpu:
                batch_x = batch_x.cuda()
                batch_y = batch_y.cuda()
            batch_x = batch_x.float()
            batch_x, batch_y = Variable(batch_x), Variable(batch_y)
            out, _ = mymodel(batch_x)
            loss = buildLoss.getloss(out, batch_y)
            train_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            torch.cuda.empty_cache()
            prediction = torch.argmax(out, 1)
            train_acc += (prediction == batch_y).sum().float()
            # Per-batch loss/accuracy, normalised by the batch size.
            print('Train Loss: {:.6f}, Acc: {:.6f}'.format(
                train_loss / (len(batch_x)), train_acc / (len(batch_x))))
        # Overwrite the single checkpoint file after every epoch.
        torch.save(
            {
                'epoch': epoch,
                'model_state_dict': mymodel.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': loss
            }, params['train_dir'])
def trainTriplet(params, transform):
    """Train with online triplet mining: re-mine tuples every epoch via
    create_epoch_tuples(), accumulate gradients over 10 query tuples before
    each optimizer step, and keep the best-loss checkpoint."""
    mytraindata = OnlineTripletData(path=params['data_dir'],
                                    autoaugment=params['autoaugment'],
                                    outputdim=params['class_num'],
                                    imsize=params['height'],
                                    transform=transform)
    #mytraindata = myDataset(path=params['data_dir'], autoaugment=params['autoaugment'],transform=transform)
    mytrainloader = torch.utils.data.DataLoader(
        mytraindata, batch_size=params['BATCH_SIZE'], shuffle=True)
    cuda_gpu = torch.cuda.is_available()
    minloss = float("inf")  # best epoch loss seen so far
    #mymodel = builGraph.getModel(params['modelName'], params['class_num'], params['Gpu'],
    #                             params['model_type'], cuda_gpu=cuda_gpu)
    miningmodel = builGraph.getModel(params['modelName'], params['class_num'],
                                     params['Gpu'], 'onlinepair',
                                     cuda_gpu=cuda_gpu)
    if params['train_method'] == 'gd':
        optimizer = torch.optim.SGD(miningmodel.parameters(),
                                    lr=params['LR'])
    else:
        optimizer = torch.optim.Adam(miningmodel.parameters())
    startepoch = 0
    if os.path.exists(params['train_dir']):
        checkpoint = torch.load(params['train_dir'])
        miningmodel.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        startepoch = checkpoint['epoch']
        minloss = checkpoint['loss']
    for epoch in range(startepoch, params['maxepoch']):
        print('epoch {}'.format(epoch + 1))
        record = 0  # number of batches actually processed this epoch
        # Re-mine hard positive/negative tuples with the current model.
        mytrainloader.dataset.create_epoch_tuples(miningmodel)
        print(minloss)
        miningmodel.train()
        # Keep BatchNorm layers in eval mode during fine-tuning.
        miningmodel.apply(set_batchnorm_eval)
        tloss = 0.
        for i, (input, plabel, nlabel) in enumerate(mytrainloader):
            if input is None or plabel is None or nlabel is None:
                continue  # skip tuples the miner failed to build
            train_loss = 0.
            iter_start_time = time.time()
            nq = len(input[0])  # queries per batch
            ni = len(input)  # images per tuple (query + pos + negs)
            optimizer.zero_grad()
            for q in range(nq):
                output = torch.zeros(params['class_num'], ni).cuda()
                f = torch.zeros(OUTPUT_DIM[params['modelName']], ni).cuda()
                for imi in range(ni):
                    # compute output vector for image imi
                    in_ = Variable(input[imi][q].unsqueeze(0).cuda().float())
                    out, features = miningmodel(in_)
                    f[:, imi] = features['ip1'].squeeze()
                    output[:, imi] = out.squeeze()
                p = plabel[q]
                n = nlabel[q]
                loss = buildLoss.getTripletloss(output, f, p, n)
                train_loss += loss.item()
                loss.backward()
                # Gradient accumulation: step only every 10 query tuples.
                if (q + 1) % 10 == 0:
                    optimizer.step()
                    optimizer.zero_grad()
                    torch.cuda.empty_cache()
            t = time.time() - iter_start_time
            if (i + 1) % 10 == 0:
                print('Epoch [{}/{}], Step [{}], Loss: {:.4f}, Time:{:.3f}'.
                      format(epoch + 1, params['maxepoch'], i + 1, train_loss,
                             t))
            tloss += train_loss
            record += 1
        tloss /= record
        pklword = params['train_dir'].split('/')[-1]
        newpkl = 'parameter_%02d.pkl' % (epoch + 1)
        path = params['train_dir'].replace(pklword, newpkl)
        is_best = tloss < minloss
        if is_best:
            minloss = tloss
        save_checkpoint(
            {
                'epoch': epoch,
                'model_state_dict': miningmodel.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': tloss
            }, is_best, path)
        print('epoch Train Loss: {:.6f}'.format(tloss))
    '''record = index
def testmodel(args, cuda_gpu, type='extractor', similartype='dot'):
    """Rank database images against query images and write a submission CSV.

    Extracts one descriptor per query and per database image, ranks the
    database for every query by 'dot' similarity (descending) or squared
    'euclidean' distance (ascending), and writes the top-10 database ids
    per query to 'thissubmission.csv' as ``query,{id1,...,id10}``.

    NOTE(review): the per-image column assignments (``qoolvecs[:, i]``,
    ``poolvecs[:, index]``) index by batch number, which only lines up with
    image number when ``args.batch_size == 1`` — confirm with callers.
    """
    mymodel = builGraph.getModel(args.backbone, args.classnum, args.gpu, type,
                                 cuda_gpu=cuda_gpu, pretrained=False)
    if os.path.exists(args.train_dir):
        print(args.train_dir)
        checkpoint = torch.load(args.train_dir, map_location='cpu')
        mymodel.load_state_dict(checkpoint['model_state_dict'])
    print(similartype)
    mydatabasedata = FolderDataset(args.data_dir, mode='test')
    mydatabaseloader = torch.utils.data.DataLoader(mydatabasedata,
                                                   batch_size=args.batch_size,
                                                   num_workers=20,
                                                   shuffle=False)
    myquerydata = FolderDataset(args.valdata_dir, mode='test')
    myqueryloader = torch.utils.data.DataLoader(myquerydata,
                                                batch_size=args.batch_size,
                                                num_workers=20,
                                                shuffle=False)
    queryimgs = []
    mymodel.eval()
    with torch.no_grad():
        print('>> Extracting descriptors for query images...')
        lenq = len(myqueryloader)
        qoolvecs = torch.zeros(OUTPUT_DIM[args.backbone], lenq).cuda()
        for i, (input, imgpath) in enumerate(myqueryloader):
            out = mymodel(input.cuda())
            if isinstance(out, list):
                out = torch.cat(out, dim=0)
            qoolvecs[:, i] = out.data.squeeze()
            imgpath = str(imgpath[0]).split('/')[-1]  # keep basename only
            queryimgs.append(imgpath)
            if (i + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(i + 1, lenq), end='')
        print('')
        poolvecs = torch.zeros(OUTPUT_DIM[args.backbone],
                               len(mydatabaseloader)).cuda()
        idlist = []
        print('>> Extracting descriptors for database images...')
        for index, data in enumerate(mydatabaseloader):
            batch_x, batch_id = data
            idd = batch_id[0].split('/')[-1]
            idlist.append(idd)
            if cuda_gpu:
                batch_x = batch_x.cuda()
            batch_x = batch_x.float()
            out = mymodel(batch_x)
            if isinstance(out, list):
                out = torch.cat(out, dim=0)
            poolvecs[:, index] = out
            if (index + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(index + 1,
                                                    len(mydatabaseloader)),
                      end='')
    vecs = poolvecs.cpu().numpy()
    qvecs = qoolvecs.cpu().numpy()
    if similartype == 'dot':
        # Higher dot product = more similar: sort descending via -scores.
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
    elif similartype == 'euclidean':
        # Squared euclidean distance: smaller = more similar, sort ascending.
        dis = np.zeros([vecs.shape[1], qvecs.shape[1]])
        for j in range(qvecs.shape[1]):
            d = (vecs - np.reshape(qvecs[:, j], (qvecs.shape[0], 1))) ** 2
            dis[:, j] = np.sum(d, axis=0)
        ranks = np.argsort(dis, axis=0)
    else:
        # Bug fix: previously an unknown similartype fell through and
        # raised NameError on `ranks`; fail fast with a clear message.
        raise ValueError('unknown similartype: %s' % similartype)
    # Row template "query,{id1,...,id10}" — doubled braces emit literal {}.
    output = 'thissubmission.csv'
    formats = '{0[0]},{{%s}}' % (','.join(
        ['{0[%s]}' % str(i + 1) for i in range(10)]))
    # Bug fix: context manager guarantees the CSV is closed even on error
    # (the original open()/close() pair leaked the handle on exceptions).
    with open(output, 'w') as fout:
        for j in range(ranks.shape[1]):
            record = ranks[:, j]
            qrcnt = queryimgs[j].strip().split('/')[-1]
            rf_res = [idlist[i] for i in record]
            olist = [qrcnt] + [it.strip().split('/')[-1] for it in rf_res[:10]]
            out = formats.format(olist) + '\n'
            print(out)
            fout.write(out)
def testmodel(args, cuda_gpu, type='extractor', similartype='dot'):
    """Evaluate retrieval quality (mAP and MRR) on the 'eye' benchmark.

    Extracts descriptors for the queries listed in the parsed ground truth
    and for every database image, ranks the database per query by 'dot' or
    'euclidean' similarity, then computes mean average precision and mean
    reciprocal rank with junk-image position correction.

    Returns:
        (map, mrr): mean average precision and mean reciprocal rank,
        both averaged over queries that have at least one positive.
    """
    mymodel = builGraph.getModel(args.backbone, args.classnum, args.gpu, type,
                                 cuda_gpu=cuda_gpu, pretrained=False)
    if os.path.exists(args.train_dir):
        print(args.train_dir)
        checkpoint = torch.load(args.train_dir, map_location='cpu')
        mymodel.load_state_dict(checkpoint['model_state_dict'])
    print(similartype)
    mytraindata = myDataset(path=args.json_file, height=args.height,
                            width=args.width, autoaugment=args.autoaugment)
    mytrainloader = torch.utils.data.DataLoader(mytraindata,
                                                batch_size=args.batch_size,
                                                num_workers=10, shuffle=False)
    gnd = parseeye(args.json_file, args.valdata_dir)
    #gnd=random.sample(gnd,50)
    mymodel.eval()
    with torch.no_grad():
        print('>> Extracting descriptors for query images...')
        qloader = torch.utils.data.DataLoader(ImagesFromList(
            root='', images=[i['queryimgid'] for i in gnd],
            imsize=mytraindata.height, transform=mytraindata.transform),
            batch_size=args.batch_size, shuffle=False, num_workers=0,
            pin_memory=True)
        # Descriptor dimensionality depends on which head is evaluated:
        # 'base' = classifier logits, 'extractor' = backbone feature.
        if type == 'base':
            qoolvecs = torch.zeros(args.classnum, len(gnd)).cuda()
        elif type == 'extractor':
            qoolvecs = torch.zeros(OUTPUT_DIM[args.backbone], len(gnd)).cuda()
        lenq = len(qloader)
        train_acc = 0.
        for i, input in enumerate(qloader):
            out = mymodel(input.cuda())
            if isinstance(out, list):
                out = torch.cat(out, dim=0)
            # NOTE(review): column index i is the batch number — only equal
            # to the query number when args.batch_size == 1; confirm callers.
            qoolvecs[:, i] = out.data.squeeze()
            prediction = torch.argmax(out, 1)  # NOTE(review): unused here
            if (i + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(i + 1, lenq), end='')
        print('')
        if type == 'extractor':
            poolvecs = torch.zeros(OUTPUT_DIM[args.backbone],
                                   len(mytrainloader)).cuda()
        elif type == 'base':
            poolvecs = torch.zeros(args.classnum, len(mytrainloader)).cuda()
        idlist = []
        train_acc = 0
        print('>> Extracting descriptors for database images...')
        for index, data in enumerate(mytrainloader):
            batch_x, batch_y, batch_id = data
            idlist.append(batch_id[0])
            if cuda_gpu:
                batch_x = batch_x.cuda()
                batch_y = batch_y.cuda()
            batch_x = batch_x.float()
            # batch_x, batch_y = Variable(batch_x), Variable(batch_y)
            out = mymodel(batch_x)
            if isinstance(out, list):
                out = torch.cat(out, dim=0)
            prediction = torch.argmax(out, 1)
            train_acc += (prediction == batch_y).sum().float()
            # NOTE(review): divides the *cumulative* correct count by one
            # batch's size, and `acc` is never used — looks like leftovers.
            acc = train_acc / len(batch_x)
            poolvecs[:, index] = out
            if (index + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(index + 1,
                                                    len(mytrainloader)),
                      end='')
        vecs = poolvecs.cpu().numpy()
        qvecs = qoolvecs.cpu().numpy()
        # search, rank, and print
        #scores = np.dot(vecs.T, qvecs)
        #ranks = np.argsort(-scores, axis=0)
        if similartype == 'dot':
            # Higher dot product = more similar; sort descending.
            scores = np.dot(vecs.T, qvecs)
            ranks = np.argsort(-scores, axis=0)
        elif similartype == 'euclidean':
            # Squared euclidean distance per query column; sort ascending.
            dis = np.zeros([vecs.shape[1], qvecs.shape[1]])
            for j in range(qvecs.shape[1]):
                d = (vecs - np.reshape(qvecs[:, j], (qvecs.shape[0], 1)))**2
                disj = np.sum(d, axis=0)
                dis[:, j] = disj
            ranks = np.argsort(dis, axis=0)
        #compute_map_and_print(dataset, ranks, gnd, idlist)
        '''scale = [5,10,20,30,40,50,60]
        reranks = ranks
        for s in scale:
            rerankvec = np.zeros(qvecs.shape)
            for i in range(qvecs.shape[1]):
                features = np.asarray([vecs[:, j] for j in reranks[:s, i]])
                rerankvec[:, i] = np.average(features, axis=0)
            scores = np.dot(vecs.T, rerankvec) + scores
            reranks = np.argsort(-scores, axis=0)
        ranks=reranks'''
        print('RQE.....................')
        map = 0.  # NOTE(review): shadows builtin `map` (and `type` above)
        mrr = 0.
        nq = len(gnd)  # number of queries
        aps = np.zeros(nq)
        nempty = 0  # queries with no positives, excluded from the averages
        for i in np.arange(nq):
            qgnd = np.array(gnd[i]['ok'])
            # no positive images, skip from the average
            if qgnd.shape[0] == 0:
                aps[i] = float('nan')
                nempty += 1
                continue
            try:
                qgndj = np.array(gnd[i]['junk'])
            except:  # NOTE(review): bare except — narrow to KeyError
                qgndj = np.empty(0)
            # Database ids in ranked order for query i.
            r = [idlist[j] for j in ranks[:, i]]
            # sorted positions of positive and junk images (0 based)
            pos = np.arange(ranks.shape[0])[np.in1d(r, qgnd)]
            junk = np.arange(ranks.shape[0])[np.in1d(r, qgndj)]
            k = 0
            ij = 0
            if len(junk):
                # decrease positions of positives based on the number of
                # junk images appearing before them
                ip = 0
                while (ip < len(pos)):
                    while (ij < len(junk) and pos[ip] > junk[ij]):
                        k += 1
                        ij += 1
                    pos[ip] = pos[ip] - k
                    ip += 1
            # compute ap
            ap = compute_ap(pos, len(qgnd))
            # Reciprocal rank of the first positive.
            # NOTE(review): raises IndexError if none of qgnd appears in the
            # database id list (pos empty) — confirm gnd/database alignment.
            mr = 1 / (pos[0] + 1)
            map = map + ap
            mrr = mrr + mr
            aps[i] = ap
            # compute precision @ k
            pos += 1  # get it to 1-based
        map = map / (nq - nempty)
        mrr = mrr / (nq - nempty)
        print(type)
        print('>> {}: mAP {:.2f}'.format('eye',
                                         np.around(map * 100, decimals=2)))
        print('>> {}: MRR {:.2f}'.format('eye',
                                         np.around(mrr * 100, decimals=2)))
        return map, mrr
def testOnlinepair(args, cuda_gpu, type='extractor', similartype='dot'):
    """Evaluate retrieval on each dataset listed in ``args.json_file``.

    For every json dataset, extracts a descriptor per database image,
    selects the query columns named in the matching ground-truth file,
    ranks the database per query by 'dot' or 'euclidean' similarity, and
    reports metrics via ``compute_map_and_print``.

    NOTE(review): ``poolvecs[:, idx]`` assumes one image per batch; the
    database loader is indeed built with batch_size=1.
    """
    mymodel = builGraph.getModel(args.backbone, args.classnum, args.gpu, type,
                                 cuda_gpu=cuda_gpu, pretrained=False)
    if os.path.exists(args.train_dir):
        print(args.train_dir)
        checkpoint = torch.load(args.train_dir, map_location='cpu')
        mymodel.load_state_dict(checkpoint['model_state_dict'])
    # Fix: distinct loop names — the original reused `index` for both the
    # dataset loop and the batch loop, shadowing the dataset index.
    for dsidx, jfile in enumerate(args.json_file):
        dataset = jfile.split('/')[-1].replace("all.json", "")
        mytraindata = myDataset(path=jfile, height=args.height,
                                width=args.width,
                                autoaugment=args.autoaugment)
        mytrainloader = torch.utils.data.DataLoader(mytraindata,
                                                    batch_size=1,
                                                    shuffle=False)
        gnd = loadquery(args.valdata_dir[dsidx])
        mymodel.eval()
        with torch.no_grad():
            poolvecs = torch.zeros(OUTPUT_DIM[args.backbone],
                                   len(mytrainloader)).cuda()
            idlist = []
            print('>> Extracting descriptors for {} images...'.format(dataset))
            for bidx, data in enumerate(mytrainloader):
                batch_x, batch_y, batch_id = data
                idlist.append(batch_id[0])
                if cuda_gpu:
                    batch_x = batch_x.cuda()
                batch_x = batch_x.float()
                out = mymodel(batch_x)
                poolvecs[:, bidx] = out
                if (bidx + 1) % 10 == 0:
                    print('\r>>>> {}/{} done...'.format(
                        bidx + 1, len(mytrainloader)), end='')
            # Locate the database columns that correspond to query images,
            # then reorder gnd to match the database ordering.
            qindexs = np.arange(len(mytrainloader))[np.in1d(
                idlist, [i['queryimgid'] for i in gnd])]
            newgnd = [idlist[i] for i in qindexs]
            g = [[i['queryimgid'] for i in gnd].index(j) for j in newgnd]
            gnd = [gnd[i] for i in g]
            vecs = poolvecs.cpu().numpy()
            qvecs = vecs[:, qindexs]
            # search, rank, and print
            if similartype == 'dot':
                # Higher dot product = more similar; sort descending.
                scores = np.dot(vecs.T, qvecs)
                ranks = np.argsort(-scores, axis=0)
            elif similartype == 'euclidean':
                # Squared euclidean distance; sort ascending.
                dis = np.zeros([vecs.shape[1], qvecs.shape[1]])
                for j in range(qvecs.shape[1]):
                    d = (vecs - np.reshape(qvecs[:, j],
                                           (qvecs.shape[0], 1))) ** 2
                    dis[:, j] = np.sum(d, axis=0)
                ranks = np.argsort(dis, axis=0)
            else:
                # Bug fix: previously fell through to a NameError on `ranks`.
                raise ValueError('unknown similartype: %s' % similartype)
            compute_map_and_print(dataset, ranks, gnd, idlist)
def testmodel(args, cuda_gpu, type='multitrain', similartype='dot'):
    """Evaluate the three heads of a multi-task model on the 'eye' benchmark.

    Extracts classifier ('out'), feature ('feature') and intersection
    ('intersect') descriptors for queries and database images, ranks by
    dot-product similarity per head, and computes mAP/MRR for each.

    Returns:
        dict mapping head label (taken from the module-level ``mmap``
        table) to {'MAP': float, 'MRR': float}.
    """
    res = {}
    mymodel = builGraph.getModel(args.backbone, args.classnum, args.gpu, type,
                                 cuda_gpu=cuda_gpu, pretrained=False)
    if os.path.exists(args.train_dir):
        print(args.train_dir)
        checkpoint = torch.load(args.train_dir, map_location='cpu')
        mymodel.load_state_dict(checkpoint['model_state_dict'])
    print(similartype)
    mytraindata = myDataset(path=args.json_file, height=args.height,
                            width=args.width, autoaugment=args.autoaugment)
    mytrainloader = torch.utils.data.DataLoader(mytraindata,
                                                batch_size=args.batch_size,
                                                num_workers=50, shuffle=False)
    gnd = parseeye(args.json_file, args.valdata_dir)
    mymodel.eval()
    with torch.no_grad():
        print('>> Extracting descriptors for query images...')
        qloader = torch.utils.data.DataLoader(ImagesFromList(
            root='', images=[i['queryimgid'] for i in gnd],
            imsize=mytraindata.height, transform=mytraindata.transform),
            batch_size=args.batch_size, shuffle=False, num_workers=50,
            pin_memory=True)
        # One descriptor bank per head: c=classifier logits, e=feature,
        # i=intersection.
        cqoolvecs = torch.zeros(args.classnum, len(gnd)).cuda()
        eqoolvecs = torch.zeros(OUTPUT_DIM[args.backbone], len(gnd)).cuda()
        iqoolvecs = torch.zeros(OUTPUT_DIM[args.backbone], len(gnd)).cuda()
        lenq = len(qloader)
        for i, input in enumerate(qloader):
            out = mymodel(input.cuda())
            # NOTE(review): `out` is indexed as a dict below, so this list
            # branch looks unreachable for the multitrain head — confirm.
            if isinstance(out, list):
                out = torch.cat(out, dim=0)
            # NOTE(review): column index i is the batch number — only equal
            # to the query number when args.batch_size == 1; confirm callers.
            cqoolvecs[:, i] = out['out']
            eqoolvecs[:, i] = out['feature']
            iqoolvecs[:, i] = out['intersect']
            if (i + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(i + 1, lenq), end='')
        print('')
        epoolvecs = torch.zeros(OUTPUT_DIM[args.backbone],
                                len(mytrainloader)).cuda()
        ipoolvecs = torch.zeros(OUTPUT_DIM[args.backbone],
                                len(mytrainloader)).cuda()
        cpoolvecs = torch.zeros(args.classnum, len(mytrainloader)).cuda()
        idlist = []
        print('>> Extracting descriptors for database images...')
        for index, data in enumerate(mytrainloader):
            batch_x, batch_y, batch_id = data
            idlist.append(batch_id[0])
            if cuda_gpu:
                batch_x = batch_x.cuda()
            batch_x = batch_x.float()
            # batch_x, batch_y = Variable(batch_x), Variable(batch_y)
            out = mymodel(batch_x)
            if isinstance(out, list):
                out = torch.cat(out, dim=0)
            cpoolvecs[:, index] = out['out']
            epoolvecs[:, index] = out['feature']
            ipoolvecs[:, index] = out['intersect']
            if (index + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(index + 1,
                                                    len(mytrainloader)),
                      end='')
        cvecs = cpoolvecs.cpu().numpy()
        evecs = epoolvecs.cpu().numpy()
        ivecs = ipoolvecs.cpu().numpy()
        cqvecs = cqoolvecs.cpu().numpy()
        eqvecs = eqoolvecs.cpu().numpy()
        iqvecs = iqoolvecs.cpu().numpy()
        # search, rank, and print
        #scores = np.dot(vecs.T, qvecs)
        #ranks = np.argsort(-scores, axis=0)
        # Dot-product similarity per head; descending sort via -scores.
        cscores = np.dot(cvecs.T, cqvecs)
        cranks = np.argsort(-cscores, axis=0)
        escores = np.dot(evecs.T, eqvecs)
        eranks = np.argsort(-escores, axis=0)
        iscores = np.dot(ivecs.T, iqvecs)
        iranks = np.argsort(-iscores, axis=0)
        '''cscores = torch.mm(cpoolvecs.t(), cqoolvecs)
        cranks = torch.argsort(-cscores, axis=0).cpu().numpy()
        escores = torch.mm(epoolvecs.t(), eqoolvecs)
        eranks = torch.argsort(-escores, axis=0).cpu().numpy()
        iscores = torch.mm(ipoolvecs.t(), iqoolvecs)
        iranks = torch.argsort(-iscores, axis=0).cpu().numpy()'''
        rrank = [cranks, eranks, iranks]
        # Score each head's ranking with the same mAP/MRR procedure.
        for index, ranks in enumerate(rrank):
            if index == 0:
                print('base................')
            elif index == 1:
                print('extractor.....................')
            else:
                print('intersect......................')
            map = 0.  # NOTE(review): shadows builtin `map` (and `type` above)
            mrr = 0.
            nq = len(gnd)  # number of queries
            aps = np.zeros(nq)
            nempty = 0  # queries with no positives, excluded from averages
            for i in np.arange(nq):
                qgnd = np.array(gnd[i]['ok'])
                # no positive images, skip from the average
                if qgnd.shape[0] == 0:
                    aps[i] = float('nan')
                    nempty += 1
                    continue
                try:
                    qgndj = np.array(gnd[i]['junk'])
                except:  # NOTE(review): bare except — narrow to KeyError
                    qgndj = np.empty(0)
                # Database ids in ranked order for query i.
                r = [idlist[j] for j in ranks[:, i]]
                # sorted positions of positive and junk images (0 based)
                pos = np.arange(ranks.shape[0])[np.in1d(r, qgnd)]
                junk = np.arange(ranks.shape[0])[np.in1d(r, qgndj)]
                k = 0
                ij = 0
                if len(junk):
                    # decrease positions of positives based on the number of
                    # junk images appearing before them
                    ip = 0
                    while (ip < len(pos)):
                        while (ij < len(junk) and pos[ip] > junk[ij]):
                            k += 1
                            ij += 1
                        pos[ip] = pos[ip] - k
                        ip += 1
                # compute ap
                ap = compute_ap(pos, len(qgnd))
                # Reciprocal rank of the first positive.
                # NOTE(review): IndexError when pos is empty (no qgnd id in
                # the database list) — confirm gnd/database alignment.
                mr = 1 / (pos[0] + 1)
                map = map + ap
                mrr = mrr + mr
                aps[i] = ap
                # compute precision @ k
                pos += 1  # get it to 1-based
            map = map / (nq - nempty)
            mrr = mrr / (nq - nempty)
            print(type)
            print('>> {}: mAP {:.2f}'.format(
                'eye', np.around(map * 100, decimals=2)))
            print('>> {}: MRR {:.2f}'.format(
                'eye', np.around(mrr * 100, decimals=2)))
            # `mmap` is defined elsewhere in this module; presumably a list of
            # head labels like ['base', 'extractor', 'intersect'] — verify.
            res[mmap[index]] = {'MAP': map, 'MRR': mrr}
    return res