# Imports assumed by this fragment (fastreid module paths may differ by version);
# GPU_ID is expected to be defined elsewhere in the original script, e.g. "cuda:0".
import cv2
import numpy as np
import torch

from fastreid.config import get_cfg
from fastreid.data import build_reid_test_loader
from fastreid.evaluation.rank import eval_market1501


def map(wrapper):
	model = wrapper
	cfg = get_cfg()
	# test_loader, num_query = build_reid_test_loader(cfg, dataset_name="TEST", T.Compose([]))
	test_loader, num_query = build_reid_test_loader(cfg, dataset_name="TEST")

	feats = []
	pids = []
	camids = []

	for batch in test_loader:
		for image_path in batch["img_paths"]:
			t = torch.Tensor(np.array([model.infer(cv2.imread(image_path))]))
			t = t.to(torch.device(GPU_ID))  # .to() is not in-place; keep the returned tensor
			feats.append(t)
		pids.extend(batch["targets"].numpy())
		camids.extend(batch["camids"].numpy())
		
	feats = torch.cat(feats, dim=0)
	q_feat = feats[:num_query]
	g_feat = feats[num_query:]
	q_pids = np.asarray(pids[:num_query])
	g_pids = np.asarray(pids[num_query:])
	q_camids = np.asarray(camids[:num_query])
	g_camids = np.asarray(camids[num_query:])

	
	# 1 - q @ g^T equals cosine distance only for L2-normalized features
	# (assumption: model.infer returns unit-norm embeddings; otherwise normalize first)
	distmat = 1 - torch.mm(q_feat, g_feat.t())
	distmat = distmat.cpu().numpy()  # move to CPU before converting, in case features live on the GPU
	all_cmc, all_AP, all_INP = eval_market1501(distmat, q_pids, g_pids, q_camids, g_camids, 5)
	mAP = np.mean(all_AP)
	print("mAP {}, rank-1 {}".format(mAP, all_cmc[0]))
Example #2
    def build_test_loader(cls, cfg, dataset_name):
        """
        Returns:
            iterable
        It now calls :func:`fastreid.data.build_reid_test_loader`.
        Overwrite it if you'd like a different data loader.
        """
        return build_reid_test_loader(cfg, dataset_name)
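
    # Usage sketch (trainer class name is an assumption, not from the source):
    #     data_loader, num_query = Trainer.build_test_loader(cfg, "Market1501")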
Example #3
    def build_test_loader(cls, cfg, dataset_name):
        """
        Returns:
            iterable
        It now calls :func:`fastreid.data.build_reid_test_loader`.
        Overwrite it if you'd like a different data loader.
        """

        mapper = []
        if cfg.INPUT.SIZE_TEST[0] > 0:
            if len(cfg.INPUT.SIZE_TEST) == 1:
                resize = cfg.INPUT.SIZE_TEST[0]
            else:
                resize = cfg.INPUT.SIZE_TEST
            mapper.append(T.Resize(resize, interpolation=3))

        mapper.extend([
            T.CenterCrop(size=cfg.INPUT.CROP_SIZE),
            ToTensor(),
        ])
        return build_reid_test_loader(cfg,
                                      dataset_name,
                                      mapper=T.Compose(mapper))
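
    # For illustration (cfg values are assumptions, not from the source): with
    # cfg.INPUT.SIZE_TEST = [256, 128] and cfg.INPUT.CROP_SIZE = [256, 128], the mapper is
    # T.Compose([T.Resize([256, 128], interpolation=3), T.CenterCrop(size=[256, 128]), ToTensor()]),
    # where interpolation=3 corresponds to PIL bicubic resampling.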
Example #4
        for cfg in cfgs:
            self.predictors.append(DefaultPredictor(cfg))

    def run_on_loader(self, data_loader):
        for batch in data_loader:
            predictions = []
            for predictor in self.predictors:
                predictions.append(predictor(batch["images"]))
            yield torch.cat(predictions, dim=-1), batch


if __name__ == "__main__":
    args = get_parser().parse_args()
    logger = setup_logger()
    cfgs = []
    for config_file in args.config_file:
        cfg = setup_cfg(config_file, args.opts)
        cfgs.append(cfg)
    results = OrderedDict()
    for dataset_name in cfgs[0].DATASETS.TESTS:
        test_loader, num_query = build_reid_test_loader(cfgs[0], dataset_name)
        evaluator = ReidEvaluator(cfgs[0], num_query)
        feat_extract = FeatureExtraction(cfgs)
        for (feat, batch) in tqdm.tqdm(feat_extract.run_on_loader(test_loader),
                                       total=len(test_loader)):
            evaluator.process(batch, feat)
        result = evaluator.evaluate()
        results[dataset_name] = result
    print_csv_format(results)
Example #5
    def build_evaluator(cls, cfg, dataset_name, output_dir=None):
        data_loader, num_query = build_reid_test_loader(
            cfg, dataset_name=dataset_name)
        return data_loader, ReidEvaluator(cfg, num_query, output_dir)
        default=10,
        help="maximum number of rank list to be visualized",
    )
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser


if __name__ == '__main__':
    args = get_parser().parse_args()
    cfg = setup_cfg(args)
    test_loader, num_query = build_reid_test_loader(
        cfg, dataset_name=args.dataset_name)
    demo = FeatureExtractionDemo(cfg, parallel=args.parallel)

    logger.info("Start extracting image features")
    feats = []
    pids = []
    camids = []
    for (feat, pid, camid) in tqdm.tqdm(demo.run_on_loader(test_loader),
                                        total=len(test_loader)):
        feats.append(feat)
        pids.extend(pid)
        camids.extend(camid)

    feats = torch.cat(feats, dim=0)
    q_feat = feats[:num_query]
    g_feat = feats[num_query:]
Example #7
    def eval_ARI_purity(cls, cfg, model, transforms=None, **kwargs):
        num_devices = comm.get_world_size()
        logger = logging.getLogger('fastreid')

        _root = os.getenv("FASTREID_DATASETS", "/root/datasets")
        evaluator = ARI_Purity_Evaluator(cfg)
        results = OrderedDict()

        if transforms is None:
            transforms = build_transforms(cfg, is_train=False)

        total_datasets = set(cfg.DATASETS.TESTS)
        for dataset_name in total_datasets:
            logger.info(f"Starting evaluating ARI on dataset {dataset_name}")

            data = DATASET_REGISTRY.get(dataset_name)(root=_root, **kwargs)
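            # ARI and purity are clustering metrics, so evaluation here runs on the dataset's train split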
            test_items = data.train
            test_set = CommDataset(test_items, transforms, relabel=True)
            data_loader, num_query = build_reid_test_loader(cfg,
                                                            dataset_name=dataset_name,
                                                            test_set=test_set)

            total = len(data_loader)  # inference data loader must have a fixed length
            # print('data_loader len =', total)
            evaluator.reset()

            img_nums = len(test_items)

            num_warmup = min(5, total - 1)
            start_time = time.perf_counter()
            total_compute_time = 0

            with inference_context(model), torch.no_grad():
                for idx, inputs in enumerate(data_loader):
                    # print(inputs)
                    if idx == num_warmup:
                        start_time = time.perf_counter()
                        total_compute_time = 0

                    start_compute_time = time.perf_counter()
                    outputs = model(inputs)

                    # Flip test: also extract features from horizontally flipped images
                    # (dim 3 is the width axis in NCHW) and average the two outputs
                    if cfg.TEST.FLIP.ENABLED:
                        inputs["images"] = inputs["images"].flip(dims=[3])
                        flip_outputs = model(inputs)
                        outputs = (outputs + flip_outputs) / 2

                    if torch.cuda.is_available():
                        torch.cuda.synchronize()
                    total_compute_time += time.perf_counter() - start_compute_time

                    evaluator.process(inputs, outputs)

                    # exclude warm-up iterations from the running timing statistics
                    iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
                    seconds_per_batch = total_compute_time / iters_after_start
                    if idx >= num_warmup * 2 or seconds_per_batch > 30:
                        total_seconds_per_img = (time.perf_counter() - start_time) / iters_after_start
                        eta = datetime.timedelta(seconds=int(total_seconds_per_img * (total - idx - 1)))
                        log_every_n_seconds(
                            logging.INFO,
                            "Inference done {}/{}. {:.4f} s / batch. ETA={}".format(
                                idx + 1, total, seconds_per_batch, str(eta)
                            ),
                            n=30,
                        )

            # Measure the time only for this worker (before the synchronization barrier)
            total_time = time.perf_counter() - start_time
            total_time_str = str(datetime.timedelta(seconds=total_time))
            # NOTE this format is parsed by grep
            logger.info(
                "Total inference time: {} ({:.6f} s / batch per device, on {} devices)".format(
                    total_time_str, total_time / (total - num_warmup), num_devices
                )
            )
            total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))
            logger.info(
                "Total inference pure compute time: {} ({:.6f} s / batch per device, on {} devices)".format(
                    total_compute_time_str, total_compute_time / (total - num_warmup), num_devices
                )
            )
            results_i = evaluator.evaluate()
            ARI_score, purity = results_i
            results[f'{dataset_name}/ARI'] = ARI_score
            results[f'{dataset_name}/purity'] = purity

            if comm.is_main_process():
                assert isinstance(
                    results, dict
                ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                    results
                )
                logger.info(f"ARI score for {dataset_name} is {ARI_score:.4f}")
                logger.info(f"Purity for {dataset_name} is {purity:.4f}")

        return results
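
    # Usage sketch (owning trainer class name is an assumption, not from the source):
    #     results = MyTrainer.eval_ARI_purity(cfg, model)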