def inference_to_get_feats(cfg, model, val_loader, num_query, dataset):
    """Run inference over ``val_loader`` and collect per-image features.

    Args:
        cfg: config node; reads ``MODEL.DEVICE`` and ``TEST.FLIP_TEST``.
        model: feature-extraction network; moved to the configured device.
        val_loader: yields batches of (images, pid, camid, img_path).
        num_query: number of query images (forwarded to the evaluator).
        dataset: dataset object (forwarded to the evaluator).

    Returns:
        tuple: (img_paths, feats) — the evaluator's accumulated image paths
        and the concatenated feature matrix, L2-normalized along dim 1 when
        the evaluator's ``feat_norm`` flag is set.
    """
    device = cfg.MODEL.DEVICE
    model.to(device)
    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Enter inferencing")
    metric = evaluator(num_query, dataset, cfg, max_rank=50)
    model.eval()
    with torch.no_grad():
        for batch in val_loader:
            data, pid, camid, img_path = batch
            # Was hard-coded data.cuda(); honor cfg.MODEL.DEVICE so CPU
            # inference also works.
            data = data.to(device)
            feats = model(data)
            if cfg.TEST.FLIP_TEST:
                # Test-time augmentation: average features of the image and
                # its horizontal flip (dim 3 is W in NCHW).
                data_flip = data.flip(dims=[3])
                feats_flip = model(data_flip)
                feats = (feats + feats_flip) / 2
            metric.update([feats, pid, camid, img_path])
    # Removed leftover debug print and dead commented-out code.
    feats = torch.cat(metric.feats, dim=0)
    if metric.feat_norm:
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    return metric.img_paths, feats
def inference(cfg, model, val_loader, num_query, dataset):
    """Evaluate ``model`` on ``val_loader`` and log mAP / CMC results.

    NOTE(review): a second ``inference`` with the same signature is defined
    later in this file and shadows this one at import time — confirm which
    definition (max_rank=100 here vs 50 there) is the intended one.

    Args:
        cfg: config node; reads ``MODEL.DEVICE`` and ``TEST.FLIP_TEST``.
        model: feature-extraction network; moved to the configured device.
        val_loader: yields batches of (images, pid, camid, img_path).
        num_query: number of query images (forwarded to the evaluator).
        dataset: dataset object (forwarded to the evaluator).

    Returns:
        indices_np: gallery indices ranked by distance for each query row.
    """
    device = cfg.MODEL.DEVICE
    model.to(device)
    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Enter inferencing")
    metric = evaluator(num_query, dataset, cfg, max_rank=100)
    model.eval()
    start = time.time()
    with torch.no_grad():
        for batch in val_loader:
            data, pid, camid, img_path = batch
            # Was hard-coded data.cuda(); honor cfg.MODEL.DEVICE so CPU
            # inference also works.
            data = data.to(device)
            feats = model(data)
            if cfg.TEST.FLIP_TEST:
                # Test-time augmentation: average features of the image and
                # its horizontal flip (dim 3 is W in NCHW).
                data_flip = data.flip(dims=[3])
                feats_flip = model(data_flip)
                feats = (feats + feats_flip) / 2
            metric.update([feats, pid, camid, img_path])
    end = time.time()
    logger.info("inference takes {:.3f}s".format((end - start)))
    torch.cuda.empty_cache()
    cmc, mAP, indices_np = metric.compute()
    logger.info('Validation Results')
    logger.info("mAP: {:.1%}".format(mAP))
    for r in [1, 5, 10]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
    return indices_np
def inference(cfg, model, val_loader, num_query, dataset):
    """Evaluate ``model`` on ``val_loader`` and log mAP / CMC results.

    NOTE(review): this redefines ``inference`` and shadows the earlier
    definition (which used max_rank=100) — confirm the duplication is
    intentional.

    Args:
        cfg: config node; reads ``MODEL.DEVICE`` and ``TEST.FLIP_TEST``.
        model: feature-extraction network; moved to the configured device.
        val_loader: yields batches of (images, pid, camid, img_path).
        num_query: number of query images (forwarded to the evaluator).
        dataset: dataset object (forwarded to the evaluator).

    Returns:
        indices_np: column-sorted indices of the distance matrix — i.e.
        gallery entries ranked by distance for each query row.
    """
    device = cfg.MODEL.DEVICE
    model.to(device)  # move model to the configured device
    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Enter inferencing")
    # Re-ID evaluation accumulator (see lib/utils/reid_eval.py).
    metric = evaluator(num_query, dataset, cfg, max_rank=50)
    model.eval()  # freeze parameters / disable train-mode layers
    start = time.time()
    with torch.no_grad():
        for batch in val_loader:  # iterate test data batch by batch
            data, pid, camid, img_path = batch
            # Was hard-coded data.cuda(); honor cfg.MODEL.DEVICE so CPU
            # inference also works.
            data = data.to(device)
            feats = model(data)  # forward pass to extract features
            if cfg.TEST.FLIP_TEST:
                # Test-time trick: extract features of the horizontally
                # flipped image and average them with the originals.
                data_flip = data.flip(dims=[3])  # NCHW — dim 3 is W
                feats_flip = model(data_flip)
                feats = (feats + feats_flip) / 2
            # Feed this batch's results to the evaluation accumulator.
            metric.update([feats, pid, camid, img_path])
    end = time.time()
    logger.info("inference takes {:.3f}s".format((end - start)))
    torch.cuda.empty_cache()
    # Computes the re-ID metrics; indices_np sorts each query's row of the
    # distance matrix, indirectly giving the closest gallery entries.
    cmc, mAP, indices_np = metric.compute()
    logger.info('Validation Results')
    logger.info("mAP: {:.1%}".format(mAP))
    for r in [1, 5, 10]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
    return indices_np
def validate(model, dataset, val_loader, num_query, epoch, cfg, logger):
    """Run one validation pass, log mAP and CMC at ranks 1/5/10, return mAP."""
    metric = evaluator(num_query, dataset, cfg, max_rank=50)
    model.eval()
    with torch.no_grad():
        # Accumulate features and labels batch by batch into the evaluator.
        for data, pid, camid, img_path in val_loader:
            feats = model(data.cuda())
            metric.update([feats, pid, camid, img_path])
    cmc, mAP, _ = metric.compute()
    logger.info("Validation Results - Epoch: {}".format(epoch))
    logger.info("mAP: {:.1%}".format(mAP))
    for rank in (1, 5, 10):
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(rank, cmc[rank - 1]))
    return mAP