def do_inference(Cfg, model, val_loader, num_query):
    """Extract features over ``val_loader`` and report ReID metrics.

    Saves the distance matrix, ids and camera ids to the paths named in
    ``Cfg`` (DIST_MAT / VIDS / CAMIDS), then logs mAP and CMC Rank-1/5/10.
    Interface unchanged: (Cfg, model, val_loader, num_query).
    """
    device = "cuda"
    logger = logging.getLogger('{}.test'.format(Cfg.PROJECT_NAME))
    logger.info("Enter inferencing")
    evaluator = R1_mAP(num_query, max_rank=50, feat_norm=Cfg.FEAT_NORM,
                       method=Cfg.TEST_METHOD)
    evaluator.reset()
    if device:
        if torch.cuda.device_count() > 1:
            print('Using {} GPUs for inference'.format(
                torch.cuda.device_count()))
            model = nn.DataParallel(model)
        model.to(device)
    model.eval()
    # FIX: loop index was named `iter`, shadowing the builtin; renamed to
    # n_iter for consistency with the sibling inference functions.
    for n_iter, (img, vid, camid) in enumerate(val_loader):
        with torch.no_grad():
            img = img.to(device)
            feat = model(img)
            evaluator.update((feat, vid, camid))
    cmc, mAP, distmat, vids, camids = evaluator.compute()
    np.save(Cfg.DIST_MAT, distmat)
    np.save(Cfg.VIDS, vids)
    np.save(Cfg.CAMIDS, camids)
    logger.info("Validation Results")
    logger.info("mAP: {:.1%}".format(mAP))
    for r in [1, 5, 10]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
def do_inference(cfg, model, val_loader, num_query):
    """Inference with 4-way test-time augmentation.

    Each batch is evaluated as identity, vertical flip, horizontal flip and a
    90-degree rotation; the four feature vectors are averaged before being fed
    to the evaluator. Saves the distance matrix when cfg.TEST.EVAL is off.
    """
    device = "cuda"
    logger = logging.getLogger("reid_baseline.test")
    logger.info("Enter inferencing")
    if cfg.TEST.EVAL:
        evaluator = R1_mAP_eval(num_query, max_rank=50,
                                feat_norm=cfg.TEST.FEAT_NORM)
    else:
        evaluator = R1_mAP(num_query, max_rank=50,
                           feat_norm=cfg.TEST.FEAT_NORM,
                           reranking=cfg.TEST.RE_RANKING,
                           reranking_track=cfg.TEST.RE_RANKING_TRACK)
    evaluator.reset()
    if device:
        if torch.cuda.device_count() > 1:
            print('Using {} GPUs for inference'.format(
                torch.cuda.device_count()))
            model = nn.DataParallel(model)
        model.to(device)
    model.eval()
    img_path_list = []
    # FIX: the progress-bar total was hard-coded to (55168 + 29758) / batch
    # size — a single dataset's image count, and a float — use the loader
    # length instead so the bar is correct for any dataset.
    a = tqdm(total=len(val_loader))
    for n_iter, (img, pid, camid, trackid, imgpath) in enumerate(val_loader):
        a.update(1)
        with torch.no_grad():
            img = img.to(device)
            feat1 = model(img)
            img_vflip = tF.vflip(img)
            img_hflip = tF.hflip(img)
            # transpose of (H, W) followed by vflip == 90-degree rotation
            img_90 = tF.vflip(torch.transpose(img, 2, 3))
            feat2 = model(img_vflip)
            feat3 = model(img_hflip)
            feat4 = model(img_90)
            feat = (feat1 + feat2 + feat3 + feat4) / 4
            if cfg.TEST.EVAL:
                # NOTE(review): siblings pass (feat, pid, camid) to
                # R1_mAP_eval — confirm this (feat, pid, imgpath) variant
                # matches this evaluator's update() signature.
                evaluator.update((feat, pid, imgpath))
            else:
                evaluator.update((feat, pid, camid, trackid, imgpath))
            img_path_list.extend(imgpath)
    a.close()  # FIX: release the progress bar once the loop is done
    if cfg.TEST.EVAL:
        evaluator.compute(cfg.MODEL.NAME)
    else:
        distmat, img_name_q, img_name_g, qfeats, gfeats = evaluator.compute(
            cfg.OUTPUT_DIR)
        np.save(os.path.join(cfg.OUTPUT_DIR, cfg.TEST.DIST_MAT), distmat)
    print('over')
def do_inference(cfg, model, val_loader, num_query):
    """Plain inference with optional horizontal-flip TTA.

    When cfg.TEST.FLIP_FEATS == 'on', the 2048-dim features of the image and
    its horizontal mirror are summed. Evaluation path depends on
    cfg.TEST.EVAL; otherwise the distance matrix is saved to disk.
    """
    device = "cuda"
    logger = logging.getLogger("reid_baseline.test")
    logger.info("Enter inferencing")
    if cfg.TEST.EVAL:
        evaluator = R1_mAP_eval(num_query, max_rank=50,
                                feat_norm=cfg.TEST.FEAT_NORM)
    else:
        evaluator = R1_mAP(num_query, max_rank=50,
                           feat_norm=cfg.TEST.FEAT_NORM,
                           reranking=cfg.TEST.RE_RANKING,
                           reranking_track=cfg.TEST.RE_RANKING_TRACK)
    evaluator.reset()
    if device:
        if torch.cuda.device_count() > 1:
            print('Using {} GPUs for inference'.format(
                torch.cuda.device_count()))
            model = nn.DataParallel(model)
        model.to(device)
    model.eval()
    img_path_list = []
    # FIX: the old code printed 'flip' on every single batch inside the loop;
    # announce the mode once instead.
    if cfg.TEST.FLIP_FEATS == 'on':
        print('flip')
    for n_iter, (img, pid, camid, trackid, imgpath) in enumerate(val_loader):
        with torch.no_grad():
            img = img.to(device)
            if cfg.TEST.FLIP_FEATS == 'on':
                feat = torch.FloatTensor(img.size(0), 2048).zero_().cuda()
                for i in range(2):
                    if i == 1:
                        # reversing the width axis == horizontal flip
                        inv_idx = torch.arange(img.size(3) - 1, -1,
                                               -1).long().cuda()
                        img = img.index_select(3, inv_idx)
                    f = model(img)
                    feat = feat + f
            else:
                feat = model(img)
            if cfg.TEST.EVAL:
                evaluator.update((feat, pid, camid))
            else:
                evaluator.update((feat, pid, camid, trackid, imgpath))
            img_path_list.extend(imgpath)
    if cfg.TEST.EVAL:
        evaluator.compute(name=cfg.MODEL.NAME,
                          K=cfg.DATALOADER.NUM_INSTANCE,
                          height=cfg.INPUT.SIZE_TRAIN[0])
    else:
        distmat, img_name_q, img_name_g, qfeats, gfeats = evaluator.compute(
            cfg.OUTPUT_DIR)
        np.save(os.path.join(cfg.OUTPUT_DIR, cfg.TEST.DIST_MAT), distmat)
    print('over')
def do_inference_d4(cfg, model, val_loader, num_query):
    """Inference with D4 (dihedral-group) test-time augmentation.

    Features of the 8 flip/rotation variants produced by ttach's
    d4_transform are averaged per image before evaluation.
    """
    device = "cuda"
    logger = logging.getLogger("reid_baseline.test")
    logger.info("Enter inferencing")
    if cfg.TEST.EVAL:
        evaluator = R1_mAP_eval(num_query, max_rank=50,
                                feat_norm=cfg.TEST.FEAT_NORM)
    else:
        evaluator = R1_mAP(num_query, max_rank=50,
                           feat_norm=cfg.TEST.FEAT_NORM,
                           reranking=cfg.TEST.RE_RANKING,
                           reranking_track=cfg.TEST.RE_RANKING_TRACK)
    evaluator.reset()
    from ttach.aliases import d4_transform
    trans = d4_transform()
    if device:
        if torch.cuda.device_count() > 1:
            print('Using {} GPUs for inference'.format(
                torch.cuda.device_count()))
            model = nn.DataParallel(model)
        model.to(device)
    model.eval()
    img_path_list = []
    # FIX: progress-bar total was hard-coded to (55168 + 29758) / batch size;
    # use the loader length so it works for any dataset/batch size.
    a = tqdm(total=len(val_loader))
    for n_iter, (img, pid, camid, trackid, imgpath) in enumerate(val_loader):
        a.update(1)
        with torch.no_grad():
            img = img.to(device)
            imgs = [t.augment_image(img) for t in trans]
            feat = model(imgs[0])
            for im in imgs[1:]:
                feat += model(im)
            # FIX: average over the actual number of D4 variants rather than
            # the magic constant 8 (identical today, robust to changes).
            feat = feat / len(imgs)
            if cfg.TEST.EVAL:
                evaluator.update((feat, pid, imgpath))
            else:
                evaluator.update((feat, pid, camid, trackid, imgpath))
            img_path_list.extend(imgpath)
    a.close()
    if cfg.TEST.EVAL:
        # NOTE(review): the sibling TTA function passes cfg.MODEL.NAME here —
        # confirm cfg.MODEL.ID is really intended.
        evaluator.compute(cfg.MODEL.ID)
    else:
        distmat, img_name_q, img_name_g, qfeats, gfeats = evaluator.compute(
            cfg.OUTPUT_DIR)
        np.save(os.path.join(cfg.OUTPUT_DIR, cfg.TEST.DIST_MAT), distmat)
    print('over')
def eval_fun(cfg, model, val_loader, num_query_green, num_query_normal, device):
    """Evaluate the model on two validation splits and return merged results.

    ``val_loader`` is an iterable of two loaders ("green" split first, then
    "normal"); each split is re-ranked with its own parameter set from cfg.

    NOTE(review): this source was recovered from a collapsed one-line layout;
    the final merge/return are placed after the loop to match the sibling
    two-split do_inference implementation — confirm against version control.
    """
    for index, loader in enumerate(val_loader):
        # Split 0 = green queries, split 1 = normal queries; each gets its
        # own re-ranking parameters and evaluator instance.
        if index == 0:
            reranking_parameter = cfg.TEST.RE_RANKING_PARAMETER_GREEN
            evaluator = R1_mAP(num_query_green, max_rank=200,
                               feat_norm=cfg.TEST.FEAT_NORM,
                               reranking=cfg.TEST.RE_RANKING)
        else:
            reranking_parameter = cfg.TEST.RE_RANKING_PARAMETER_NORMAL
            evaluator = R1_mAP(num_query_normal, max_rank=200,
                               feat_norm=cfg.TEST.FEAT_NORM,
                               reranking=cfg.TEST.RE_RANKING)
        evaluator.reset()
        for n_iter, (img, pid, camid, imgpath) in enumerate(loader):
            with torch.no_grad():
                img = img.to(device)
                if cfg.TEST.FLIP_FEATS == 'on':
                    # Horizontal-flip TTA: sum features of image + mirror.
                    feat = torch.FloatTensor(
                        img.size(0), cfg.MODEL.FEAT_SIZE).zero_().cuda()
                    for i in range(2):
                        if i == 1:
                            # reversing the width axis == horizontal flip
                            inv_idx = torch.arange(
                                img.size(3) - 1, -1, -1).long().cuda()
                            img = img.index_select(3, inv_idx)
                        f = model(img)
                        feat = feat + f
                else:
                    feat = model(img)
                evaluator.update((feat, imgpath))
        data, distmat, img_name_q, img_name_g = evaluator.compute(
            reranking_parameter)
        # Remember split 0's results so both splits can be merged below.
        if index == 0:
            data_1 = data
    # After the loop `data` holds split 1's results; merge with split 0's.
    res_dict = {**data_1, **data}
    return res_dict
def do_inference(cfg, model, val_loader, num_query):
    """Extract query/gallery features, persist all evaluation artifacts under
    cfg.LOG_DIR and log mAP / CMC Rank-1/5/10.

    Behaviourally identical rewrite of the original: same evaluator protocol,
    same flip-TTA accumulation, same files written.
    """
    device = "cuda"
    logger = logging.getLogger('{}.test'.format(cfg.PROJECT_NAME))
    logger.info("Enter inferencing")

    evaluator = R1_mAP(num_query, max_rank=50, feat_norm=cfg.FEAT_NORM,
                       method=cfg.TEST_METHOD, reranking=cfg.RERANKING)
    evaluator.reset()

    if device:
        gpu_count = torch.cuda.device_count()
        if gpu_count > 1:
            print('Using {} GPUs for inference'.format(gpu_count))
            model = nn.DataParallel(model)
        model.to(device)
    model.eval()

    img_path_list = []
    for batch_idx, (img, pid, camid, imgpath) in enumerate(val_loader):
        with torch.no_grad():
            img = img.to(device)
            if cfg.FLIP_FEATS == 'on':
                # Horizontal-flip TTA: accumulate the feature of the image
                # and of its mirror into one 2048-dim vector.
                feat = torch.FloatTensor(img.size(0), 2048).zero_().cuda()
                feat = feat + model(img)
                inv_idx = torch.arange(img.size(3) - 1, -1, -1).long().cuda()
                img = img.index_select(3, inv_idx)
                feat = feat + model(img)
            else:
                feat = model(img)
            evaluator.update((feat, pid, camid))
            img_path_list.extend(imgpath)

    cmc, mAP, distmat, pids, camids, qfeats, gfeats = evaluator.compute()

    # Persist every artifact needed for offline analysis next to the logs.
    np.save(os.path.join(cfg.LOG_DIR, cfg.DIST_MAT), distmat)
    np.save(os.path.join(cfg.LOG_DIR, cfg.PIDS), pids)
    np.save(os.path.join(cfg.LOG_DIR, cfg.CAMIDS), camids)
    np.save(os.path.join(cfg.LOG_DIR, cfg.IMG_PATH), img_path_list[num_query:])
    torch.save(qfeats, os.path.join(cfg.LOG_DIR, cfg.Q_FEATS))
    torch.save(gfeats, os.path.join(cfg.LOG_DIR, cfg.G_FEATS))

    logger.info("Validation Results")
    logger.info("mAP: {:.1%}".format(mAP))
    for rank in (1, 5, 10):
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(rank, cmc[rank - 1]))
def do_inference(cfg, model, val_loader_green, val_loader_normal,
                 num_query_green, num_query_normal):
    """Run inference over the "green" and "normal" splits, save per-split
    distance matrices and image-name lists, and dump the merged ranking
    results of both splits to a timestamped JSON file in cfg.OUTPUT_DIR.
    """
    device = "cuda"
    logger = logging.getLogger("reid_baseline.test")
    logger.info("Enter inferencing")
    if device:
        if torch.cuda.device_count() > 1:
            print('Using {} GPUs for inference'.format(
                torch.cuda.device_count()))
            # Earlier distributed-training experiments, kept for reference:
            # torch.distributed.init_process_group(backend='nccl', init_method='tcp://localhost:23436', rank=0, world_size=1)
            # model = DistributedDataParallel(model)
            # model = model.cuda()
            # model = nn.parallel.DistributedDataParallel(model)
            model = nn.DataParallel(model)
            model = model.cuda()  # model.to(device) below is then a no-op
        model.to(device)
    model.eval()
    val_loader = [val_loader_green, val_loader_normal]
    for index, loader in enumerate(val_loader):
        # Split 0 = green queries, split 1 = normal queries; each split has
        # its own evaluator, re-ranking parameters and output-file suffix.
        if index == 0:
            subfix = '1'
            reranking_parameter = cfg.TEST.RE_RANKING_PARAMETER_GREEN
            evaluator = R1_mAP(num_query_green, max_rank=200,
                               feat_norm=cfg.TEST.FEAT_NORM,
                               reranking=cfg.TEST.RE_RANKING)
        else:
            subfix = '2'
            reranking_parameter = cfg.TEST.RE_RANKING_PARAMETER_NORMAL
            evaluator = R1_mAP(num_query_normal, max_rank=200,
                               feat_norm=cfg.TEST.FEAT_NORM,
                               reranking=cfg.TEST.RE_RANKING)
        evaluator.reset()
        DISTMAT_PATH = os.path.join(cfg.OUTPUT_DIR,
                                    "distmat_{}.npy".format(subfix))
        QUERY_PATH = os.path.join(cfg.OUTPUT_DIR,
                                  "query_path_{}.npy".format(subfix))
        GALLERY_PATH = os.path.join(cfg.OUTPUT_DIR,
                                    "gallery_path_{}.npy".format(subfix))
        # feat_imagepath_list = []
        # FEATS_IMAGEPATH_LIST_PATH = os.path.join(
        #     "/home/zjf/naic_code/data/feats_imagepath_list", "curricularface_m05s40_{}.npy".format(subfix))
        for n_iter, (img, pid, camid, imgpath) in enumerate(loader):
            with torch.no_grad():
                img = img.to(device)
                if cfg.TEST.FLIP_FEATS == 'on':
                    # Horizontal-flip TTA: sum features of image + mirror.
                    feat = torch.FloatTensor(
                        img.size(0), cfg.MODEL.FEAT_SIZE).zero_().cuda()
                    for i in range(2):
                        if i == 1:
                            # reversing the width axis == horizontal flip
                            inv_idx = torch.arange(
                                img.size(3) - 1, -1, -1).long().cuda()
                            img = img.index_select(3, inv_idx)
                        f = model(img)
                        feat = feat + f
                else:
                    feat = model(img)
                evaluator.update((feat, imgpath))
                # feat_imagepath_list.append((feat, imgpath))
        # np.save(FEATS_IMAGEPATH_LIST_PATH, feat_imagepath_list)
        data, distmat, img_name_q, img_name_g = evaluator.compute(
            reranking_parameter)
        np.save(DISTMAT_PATH, distmat)
        np.save(QUERY_PATH, img_name_q)
        np.save(GALLERY_PATH, img_name_g)
        # Remember split 0's result so both splits can be merged after loop.
        if index == 0:
            data_1 = data
    # `data` now holds split 1's result; merge and dump both splits.
    data_all = {**data_1, **data}
    nowTime = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    with open(os.path.join(cfg.OUTPUT_DIR, 'result_{}.json'.format(nowTime)),
              'w', encoding='utf-8') as fp:
        json.dump(data_all, fp)
def do_inference_train(cfg, model, val_loader, num_query):
    """Run inference over the training loader, building per-identity centroid
    features (averaged over consecutive samples with the same pid) and saving
    them to "train.pth".

    Assumes the loader yields samples grouped by pid (centroids accumulate
    over consecutive equal pids) — TODO confirm against the sampler.
    """
    device = "cuda"
    logger = logging.getLogger("reid_baseline.test")
    logger.info("Enter inferencing")
    if cfg.TEST.EVAL:
        evaluator = R1_mAP_eval(num_query, max_rank=50,
                                feat_norm=cfg.TEST.FEAT_NORM)
    else:
        evaluator = R1_mAP(num_query, max_rank=50,
                           feat_norm=cfg.TEST.FEAT_NORM,
                           reranking=cfg.TEST.RE_RANKING,
                           reranking_track=cfg.TEST.RE_RANKING_TRACK)
    evaluator.reset()
    if device:
        # Multi-GPU wrapping deliberately disabled here (kept from original):
        # if torch.cuda.device_count() > 1:
        #     print('Using {} GPUs for inference'.format(torch.cuda.device_count()))
        #     model = nn.DataParallel(model)
        model.to(device)
    model.eval()
    img_path_list = []
    if cfg.TEST.FLIP_FEATS == 'on':
        print("use flip test................")
    else:
        print("Not use flip test................")
    train = []  # per-identity centroid features
    train_feat = torch.FloatTensor(1, 2048).zero_().cuda()
    temp_pid = 0
    cnt_pid = 0
    for n_iter, (img, pid, camid, imgpath) in enumerate(tqdm(val_loader)):
        with torch.no_grad():
            img = img.to(device)
            if cfg.TEST.FLIP_FEATS == 'on':
                feat = torch.FloatTensor(img.size(0), 2048).zero_().cuda()
                for i in range(2):
                    if i == 1:
                        inv_idx = torch.arange(img.size(3) - 1, -1,
                                               -1).long().cuda()
                        img = img.index_select(3, inv_idx)
                    f = model(img)
                    feat = feat + f
            else:
                feat = model(img)
            if cfg.TEST.EVAL:
                evaluator.update((feat, pid, camid))
            else:
                # NOTE(review): `trackid` is not yielded by this loader and is
                # undefined here — this branch raises NameError if taken.
                evaluator.update((feat, pid, camid, trackid, imgpath))
            for num, i in enumerate(pid):
                if i != temp_pid:
                    # FIX: guard cnt_pid == 0 — the old code divided the zero
                    # tensor by 0 on the very first pid change, prepending a
                    # NaN/Inf centroid to `train`.
                    if cnt_pid > 0:
                        train.append(train_feat / cnt_pid)
                        print("get!", cnt_pid, "pid", pid[num - 1])
                    cnt_pid = 1
                    # FIX: clone — `train_feat = feat[num]` aliased a row of
                    # `feat`, so the in-place += below corrupted the features
                    # already handed to the evaluator.
                    train_feat = feat[num].clone()
                else:
                    train_feat += feat[num]
                    cnt_pid += 1
                temp_pid = i
                # Flush the last centroid on the final sample of the run.
                if n_iter == len(val_loader) - 1 and num == len(pid) - 1:
                    train.append(train_feat / cnt_pid)
            img_path_list.extend(imgpath)
    # Save the per-identity training-set centroid features.
    torch.save(train, "train.pth")
    if cfg.TEST.EVAL:
        cmc, mAP, _, _, _, _, _ = evaluator.compute()
        logger.info("Validation Results ")
        logger.info("mAP: {:.1%}".format(mAP))
        for r in [1, 5, 10]:
            logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
    else:
        distmat, img_name_q, img_name_g, qfeats, gfeats = evaluator.compute(
            cfg.OUTPUT_DIR)
        np.save(os.path.join(cfg.OUTPUT_DIR, cfg.TEST.DIST_MAT), distmat)
    print('over')
def do_inference(cfg, model, val_loader, num_query):
    """Two-stream inference: the loader yields a source image and a vertical
    companion image (img1, img2); their 3072-dim features are summed,
    optionally with horizontal-flip TTA on the source stream.
    """
    device = "cuda"
    logger = logging.getLogger("reid_baseline.test")
    logger.info("Enter inferencing")
    if cfg.TEST.EVAL:
        evaluator = R1_mAP_eval(num_query, max_rank=50,
                                feat_norm=cfg.TEST.FEAT_NORM)
    else:
        evaluator = R1_mAP(num_query, max_rank=50,
                           feat_norm=cfg.TEST.FEAT_NORM,
                           reranking=cfg.TEST.RE_RANKING,
                           reranking_track=cfg.TEST.RE_RANKING_TRACK)
    evaluator.reset()
    if device:
        if torch.cuda.device_count() > 1:
            print('Using {} GPUs for inference'.format(
                torch.cuda.device_count()))
            model = nn.DataParallel(model)
        model.to(device)
    model.eval()
    img_path_list = []
    if cfg.TEST.FLIP_FEATS == 'on':
        print("use flip test................")
    else:
        print("Not use flip test................")
    for n_iter, (img1, img2, pid, camid, imgpath) in enumerate(tqdm(val_loader)):
        with torch.no_grad():
            scr_img, v_img = img1, img2
            scr_img = scr_img.to(device)
            v_img = v_img.to(device)
            if cfg.TEST.FLIP_FEATS == 'on':
                feat = torch.FloatTensor(scr_img.size(0), 3072).zero_().cuda()
                for i in range(2):
                    if i == 1:
                        inv_idx = torch.arange(scr_img.size(3) - 1, -1,
                                               -1).long().cuda()
                        scr_img = scr_img.index_select(3, inv_idx)
                    f = model(scr_img)
                    feat = feat + f
                # vertical image, added once after the two flip passes
                # (placement reconstructed from collapsed layout — TODO
                # confirm it was not inside the flip loop)
                f = model(v_img)
                feat = feat + f
            else:
                # FIX: was `feat = model(img)` — `img` is never defined in
                # this function (the loader yields img1/img2), so this branch
                # raised NameError whenever FLIP_FEATS != 'on'.
                feat = model(scr_img)
            if cfg.TEST.EVAL:
                evaluator.update((feat, pid, camid))
            else:
                # NOTE(review): `trackid` is not yielded by this loader —
                # this branch raises NameError if taken.
                evaluator.update((feat, pid, camid, trackid, imgpath))
            img_path_list.extend(imgpath)
    # (A large block of commented-out experimental TTA code was removed here.)
    if cfg.TEST.EVAL:
        cmc, mAP, _, _, _, _, _ = evaluator.compute()
        logger.info("Validation Results ")
        logger.info("mAP: {:.1%}".format(mAP))
        for r in [1, 5, 10]:
            logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
    else:
        distmat, img_name_q, img_name_g, qfeats, gfeats = evaluator.compute(
            cfg.OUTPUT_DIR)
        np.save(os.path.join(cfg.OUTPUT_DIR, cfg.TEST.DIST_MAT), distmat)
    print('over')
def do_train(Cfg, model, center_criterion, train_loader, val_loader,
             optimizer, optimizer_center, scheduler, loss_fn, num_query):
    """Train `model` for Cfg.MAX_EPOCHS epochs with optional center loss,
    periodic checkpointing (every CHECKPOINT_PERIOD epochs into Cfg.LOG_DIR)
    and periodic ReID evaluation (every EVAL_PERIOD epochs).
    """
    log_period = Cfg.LOG_PERIOD
    checkpoint_period = Cfg.CHECKPOINT_PERIOD
    eval_period = Cfg.EVAL_PERIOD
    output_dir = Cfg.LOG_DIR
    device = "cuda"
    epochs = Cfg.MAX_EPOCHS
    logger = logging.getLogger('{}.train'.format(Cfg.PROJECT_NAME))
    logger.info('start training')
    if device:
        if torch.cuda.device_count() > 1:
            print('Using {} GPUs for training'.format(
                torch.cuda.device_count()))
            model = nn.DataParallel(model)
        model.to(device)
    loss_meter = AverageMeter()
    acc_meter = AverageMeter()
    evaluator = R1_mAP(num_query, max_rank=50, feat_norm=Cfg.FEAT_NORM)
    # train
    for epoch in range(1, epochs + 1):
        start_time = time.time()
        loss_meter.reset()
        acc_meter.reset()
        evaluator.reset()
        model.train()
        # FIX: loop index renamed from `iter` (shadowed the builtin).
        for n_iter, (img, vid) in enumerate(train_loader):
            optimizer.zero_grad()
            optimizer_center.zero_grad()
            img = img.to(device)
            target = vid.to(device)
            score, feat = model(img, target)
            loss = loss_fn(score, feat, target)
            loss.backward()
            optimizer.step()
            if 'center' in Cfg.LOSS_TYPE:
                # Undo the loss weight on the center-criterion gradients
                # before stepping its optimizer (standard center-loss recipe).
                for param in center_criterion.parameters():
                    param.grad.data *= (1. / Cfg.CENTER_LOSS_WEIGHT)
                optimizer_center.step()
            acc = (score.max(1)[1] == target).float().mean()
            loss_meter.update(loss.item(), img.shape[0])
            acc_meter.update(acc, 1)
            if (n_iter + 1) % log_period == 0:
                logger.info(
                    "Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}"
                    .format(epoch, (n_iter + 1), len(train_loader),
                            loss_meter.avg, acc_meter.avg,
                            scheduler.get_lr()[0]))
        end_time = time.time()
        time_per_batch = (end_time - start_time) / (n_iter + 1)
        logger.info(
            "Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]"
            .format(epoch, time_per_batch,
                    train_loader.batch_size / time_per_batch))
        scheduler.step()
        if epoch % checkpoint_period == 0:
            # FIX: build the checkpoint path with os.path.join — the old raw
            # concatenation `output_dir + MODEL_NAME` glued directory and file
            # name together when LOG_DIR had no trailing separator.
            torch.save(model.state_dict(),
                       os.path.join(output_dir,
                                    Cfg.MODEL_NAME + '_{}.pth'.format(epoch)))
        if epoch % eval_period == 0:
            model.eval()
            for n_iter, (img, vid, camid) in enumerate(val_loader):
                with torch.no_grad():
                    img = img.to(device)
                    feat = model(img)
                    evaluator.update((feat, vid, camid))
            cmc, mAP, _, _, _, _ = evaluator.compute()
            logger.info("Validation Results - Epoch: {}".format(epoch))
            logger.info("mAP: {:.1%}".format(mAP))
            for r in [1, 5, 10]:
                logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(
                    r, cmc[r - 1]))
def do_inference(cfg, model, val_loader_green, val_loader_normal,
                 num_query_green, num_query_normal):
    """Run inference over the "green" and "normal" splits with fixed
    re-ranking parameters per split, save per-split distance matrices and
    name lists, and dump the merged ranking results to a timestamped JSON.

    Behaviourally identical rewrite of the original implementation.
    """
    device = "cuda"
    logger = logging.getLogger("reid_baseline.test")
    logger.info("Enter inferencing")

    if device:
        n_gpu = torch.cuda.device_count()
        if n_gpu > 1:
            print('Using {} GPUs for inference'.format(n_gpu))
            model = nn.DataParallel(model)
        model.to(device)
    model.eval()

    results = {}
    for index, loader in enumerate([val_loader_green, val_loader_normal]):
        # Split 0 = green queries, split 1 = normal; each split has its own
        # evaluator, fixed re-ranking parameters and output-file suffix.
        if index == 0:
            subfix = '1'
            reranking_parameter = [14, 4, 0.4]
            evaluator = R1_mAP(num_query_green, max_rank=200,
                               feat_norm=cfg.TEST.FEAT_NORM,
                               reranking=cfg.TEST.RE_RANKING)
        else:
            subfix = '2'
            reranking_parameter = [10, 3, 0.6]
            evaluator = R1_mAP(num_query_normal, max_rank=200,
                               feat_norm=cfg.TEST.FEAT_NORM,
                               reranking=cfg.TEST.RE_RANKING)
        evaluator.reset()

        distmat_path = os.path.join(cfg.OUTPUT_DIR,
                                    "distmat_{}.npy".format(subfix))
        query_path = os.path.join(cfg.OUTPUT_DIR,
                                  "query_path_{}.npy".format(subfix))
        gallery_path = os.path.join(cfg.OUTPUT_DIR,
                                    "gallery_path_{}.npy".format(subfix))

        for img, pid, camid, imgpath in loader:
            with torch.no_grad():
                img = img.to(device)
                if cfg.TEST.FLIP_FEATS == 'on':
                    # Horizontal-flip TTA: accumulate image + mirror features.
                    feat = torch.FloatTensor(img.size(0), 2048).zero_().cuda()
                    feat = feat + model(img)
                    inv_idx = torch.arange(img.size(3) - 1, -1,
                                           -1).long().cuda()
                    img = img.index_select(3, inv_idx)
                    feat = feat + model(img)
                else:
                    feat = model(img)
                evaluator.update((feat, imgpath))

        data, distmat, img_name_q, img_name_g = evaluator.compute(
            reranking_parameter)
        np.save(distmat_path, distmat)
        np.save(query_path, img_name_q)
        np.save(gallery_path, img_name_g)
        # Merge in split order (green first, then normal) — same result as
        # the original's {**data_1, **data} after the loop.
        results.update(data)

    nowTime = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    out_json = os.path.join(cfg.OUTPUT_DIR, 'result_{}.json'.format(nowTime))
    with open(out_json, 'w', encoding='utf-8') as fp:
        json.dump(results, fp)
def do_inference_multi(cfg, model,
                       #val_loader_green,
                       val_loader_normal,
                       #num_query_green,
                       num_query_normal):
    """Multi-crop inference: ``val_loader_normal`` is a 6-tuple of loaders
    over the full image and five crops (center, left/right-top,
    left/right-bottom); the six features are summed per sample before
    re-ranked evaluation, and results are dumped to a timestamped JSON.

    NOTE(review): recovered from a collapsed one-line layout — the exact
    indentation of the crop-feature additions and of the FLIP_FEATS branch is
    an informed reconstruction; confirm against version control.
    """
    device = "cuda"
    logger = logging.getLogger("reid_baseline.test")
    logger.info("Enter inferencing")
    if device:
        if torch.cuda.device_count() > 1:
            print('Using {} GPUs for inference'.format(
                torch.cuda.device_count()))
            model = nn.DataParallel(model)
        model.to(device)
    model.eval()
    fullloader, Centerloader, Ltloader, Rtloader, Lbloader, Rbloader = val_loader_normal
    #12 3 0.6
    reranking_parameter = [30, 4, 0.8]
    # Each list below has one element, so this loop runs exactly once.
    for index, (loader0, loader1, loader2, loader3, loader4, loader5) in enumerate(
            zip([fullloader], [Centerloader], [Ltloader],
                [Rtloader], [Lbloader], [Rbloader])):
        if index == 0:
            subfix = '1'
            #reranking_parameter = [14, 4, 0.4]
            evaluator = R1_mAP(num_query_normal, max_rank=200,
                               feat_norm=cfg.TEST.FEAT_NORM,
                               reranking=cfg.TEST.RE_RANKING)
        #reranking_parameter = [10, 3, 0.6]
        #evaluator = R1_mAP(num_query_normal, max_rank=200, feat_norm=cfg.TEST.FEAT_NORM,
        #                   reranking=cfg.TEST.RE_RANKING)
        evaluator.reset()
        DISTMAT_PATH = os.path.join(cfg.OUTPUT_DIR,
                                    "distmat_{}.npy".format(subfix))
        QUERY_PATH = os.path.join(cfg.OUTPUT_DIR,
                                  "query_path_{}.npy".format(subfix))
        GALLERY_PATH = os.path.join(cfg.OUTPUT_DIR,
                                    "gallery_path_{}.npy".format(subfix))
        for n_iter, (img, img1, img2, img3, img4, img5) in enumerate(
                zip(loader0, loader1, loader2, loader3, loader4, loader5)):
            with torch.no_grad():
                #img = img[0].to(device)
                img1 = img1[0].to(device)
                img2 = img2[0].to(device)
                img3 = img3[0].to(device)
                img4 = img4[0].to(device)
                img5 = img5[0].to(device)
                imgpath = img[3]  # 4th element of the full-image batch tuple
                img = img[0].to(device)
                # NOTE(review): `!=` looks inverted relative to every sibling
                # function (they run flip-TTA when FLIP_FEATS == 'on').
                # Moreover, when FLIP_FEATS == 'on' this branch is skipped and
                # `feat` is never initialised before the additions below,
                # which would raise NameError. Confirm intent before changing.
                if cfg.TEST.FLIP_FEATS != 'on':
                    feat = torch.FloatTensor(img.size(0), 2048).zero_().cuda()
                    for i in range(2):
                        if i == 1:
                            # reversing the width axis == horizontal flip
                            inv_idx = torch.arange(img.size(3) - 1, -1,
                                                   -1).long().cuda()
                            img = img.index_select(3, inv_idx)
                        f = model(img)
                        feat = feat + f
                # Add the five crop features to the full-image feature.
                f = model(img1)
                feat = feat + f
                f = model(img2)
                feat = feat + f
                f = model(img3)
                feat = feat + f
                f = model(img4)
                feat = feat + f
                f = model(img5)
                feat = feat + f
                evaluator.update((feat, imgpath))
        data, distmat, img_name_q, img_name_g = evaluator.compute(
            reranking_parameter)
        np.save(DISTMAT_PATH, distmat)
        np.save(QUERY_PATH, img_name_q)
        np.save(GALLERY_PATH, img_name_g)
    data_all = {**data}
    #data_all = {**data_1, **data}
    nowTime = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    with open(os.path.join(cfg.OUTPUT_DIR, 'result_{}.json'.format(nowTime)),
              'w', encoding='utf-8') as fp:
        json.dump(data_all, fp)