Example #1
def inference(cfg, model, val_loader, num_query):
    device = cfg.MODEL.DEVICE

    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Enter inferencing")
    if cfg.TEST.RE_RANKING == 'no':
        print("Create evaluator")
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP(num_query, max_rank=100, feat_norm=cfg.TEST.FEAT_NORM),
                'R1_mAP_reranking':
                R1_mAP_reranking(num_query,
                                 max_rank=100,
                                 feat_norm=cfg.TEST.FEAT_NORM),
                'Track_R1_mAP':
                Track_R1_mAP(num_query,
                             max_rank=100,
                             feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
    elif cfg.TEST.RE_RANKING == 'yes':
        print("Create evaluator for reranking")
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP(num_query, max_rank=100, feat_norm=cfg.TEST.FEAT_NORM),
                'R1_mAP_reranking':
                R1_mAP_reranking(num_query,
                                 max_rank=100,
                                 feat_norm=cfg.TEST.FEAT_NORM),
                'Track_R1_mAP':
                Track_R1_mAP(num_query,
                             max_rank=100,
                             feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
    else:
        print(
            "Unsupported re_ranking config. Only support for no or yes, but got {}."
            .format(cfg.TEST.RE_RANKING))

    evaluator.run(val_loader)
    cmc, mAP = evaluator.state.metrics['r1_mAP']
    re_cmc, re_mAP = evaluator.state.metrics['R1_mAP_reranking']
    track_cmc, track_mAP = evaluator.state.metrics['Track_R1_mAP']
    logger.info('Validation Results')
    logger.info("mAP: {:.1%}".format(mAP))
    for r in [1, 5, 10, 100]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))

    logger.info("re_mAP: {:.1%}".format(re_mAP))
    for r in [1, 5, 10, 100]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, re_cmc[r - 1]))

    logger.info("track_mAP: {:.1%}".format(track_mAP))
    for r in [1, 5, 10, 100]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, track_cmc[r - 1]))
Example #2
def inference(
        cfg,
        model,
        val_loader,
        num_query
):
    device = cfg.MODEL.DEVICE

    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Enter inferencing")
    if cfg.TEST.RE_RANKING == 'no':
        print("Create evaluator")
        if 'test_all' in cfg.TEST.TEST_MODE:
            if len(val_loader.dataset.dataset[0]) == 4:  # mask, without new eval
                evaluator = create_supervised_all_evaluator_with_mask(
                    model,
                    metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)},
                    seq_len=cfg.INPUT.SEQ_LEN,
                    device=device)
            elif len(val_loader.dataset.dataset[0]) == 6:  # mask, with new eval
                evaluator = create_supervised_all_evaluator_with_mask_new_eval(
                    model,
                    metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM, new_eval=True)},
                    seq_len=cfg.INPUT.SEQ_LEN,
                    device=device)
            else:
                evaluator = create_supervised_all_evaluator(
                    model,
                    metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)},
                    seq_len=cfg.INPUT.SEQ_LEN,
                    device=device)
        else:
            if len(val_loader.dataset.dataset[0]) == 6:  # mask, with new eval
                evaluator = create_supervised_evaluator_with_mask_new_eval(
                    model,
                    metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM, new_eval=True)},
                    device=device)
            elif len(val_loader.dataset.dataset[0]) == 4:  # mask, without new eval
                evaluator = create_supervised_evaluator_with_mask(
                    model,
                    metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)},
                    device=device)
            else:
                evaluator = create_supervised_evaluator(
                    model,
                    metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)},
                    device=device)
    elif cfg.TEST.RE_RANKING == 'yes':  # re-ranking not yet implemented for the mask variants
        print("Create evaluator for reranking")
        if 'test_all' in cfg.TEST.TEST_MODE:
            evaluator = create_supervised_all_evaluator(
                model,
                metrics={'r1_mAP': R1_mAP_reranking(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)},
                seq_len=cfg.INPUT.SEQ_LEN,
                device=device)
        else:
            evaluator = create_supervised_evaluator(
                model,
                metrics={'r1_mAP': R1_mAP_reranking(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)},
                device=device)
    else:
        print("Unsupported re_ranking config. Only support for no or yes, but got {}.".format(cfg.TEST.RE_RANKING))

    pbar = ProgressBar(persist=True, ncols=120)
    pbar.attach(evaluator)

    evaluator.run(val_loader)
    cmc, mAP = evaluator.state.metrics['r1_mAP']
    logger.info('Validation Results')
    logger.info("mAP: {:.1%}".format(mAP))
    for r in [1, 5, 10]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
Example #3
    def get_valid_eval_map_ebll(self, cfg, source_model, current_model):
        self.validation_evaluator_map = OrderedDict()
        num_datasets = len(self.valid_dict)
        list_odict_items = list(self.valid_dict.items())

        # Every dataset except the last is evaluated with both the source and
        # the current model; the last one uses the current model only.
        for i in range(num_datasets - 1):
            name, (_, n_q) = list_odict_items[i]
            if self.re_ranking:
                metrics = {
                    "r1_mAP":
                    R1_mAP_reranking(n_q,
                                     max_rank=50,
                                     if_feat_norm=cfg.TEST.IF_FEAT_NORM)
                }
            else:
                metrics = {
                    "r1_mAP":
                    R1_mAP(n_q,
                           max_rank=50,
                           if_feat_norm=cfg.TEST.IF_FEAT_NORM)
                }

            self.validation_evaluator_map[name] = create_source_evaluator(
                source_model,
                current_model,
                metrics=metrics,
                device=self.device,
                classify_feature=self.classify_feature)

        name, (_, n_q) = list_odict_items[num_datasets - 1]
        if self.re_ranking:
            metrics = {
                "r1_mAP":
                R1_mAP_reranking(n_q,
                                 max_rank=50,
                                 if_feat_norm=cfg.TEST.IF_FEAT_NORM)
            }
        else:
            metrics = {
                "r1_mAP":
                R1_mAP(n_q, max_rank=50, if_feat_norm=cfg.TEST.IF_FEAT_NORM)
            }

        self.validation_evaluator_map[name] = create_supervised_evaluator(
            current_model,
            metrics=metrics,
            device=self.device,
            classify_feature=self.classify_feature)
Example #4
def main():
    logger = setup_logger("duke2market", cfg.OUTPUT_DIR, 0, '0214test')
    # logger.info(cfg)
    # args = Arguments().parse()
    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    # ----load dataset------ #
    train_loader_s, _, _, num_classes = make_data_loader(cfg)
    train_loader_t, val_loader, num_query, _ = make_data_loader_target(cfg)

    cfg.DATASETS.NUM_CLASSES_S = num_classes
    my_model = Base_model(cfg, logger) # --------------
    # Evaluator
    if cfg.TEST.RE_RANKING == 'no':
        evaluator = create_supervised_evaluator(
            my_model.Content_Encoder,
            metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm='yes')},
            device='cuda')
    else:
        evaluator = create_supervised_evaluator(
            my_model.Content_Encoder,
            metrics={'r1_mAP': R1_mAP_reranking(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)},
            device='cuda')

    # ---------------------test------------------------#
    model_checkpoint = load_checkpoint(osp.join(working_dir, 'logs/0214_duke2market/duke2market-new.pth.tar'))
    my_model.Content_Encoder.module.load_state_dict(model_checkpoint['Content_Encoder'])
    logger.info("=> Training on {} and Testing on {}".format(cfg.DATASETS.NAMES, cfg.DATASETS.TNAMES))
    print("=> start testing. Please wait...")
    evaluator.run(val_loader)
    cmc, mAP = evaluator.state.metrics['r1_mAP']

    logger.info("mAP: {:.1%}".format(mAP))
    for i in [1, 5, 10, 20, 30, 50]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(i, cmc[i - 1]))

    logger.info("finished!")
Example #5
def inference(cfg, model, val_loader, num_query):
    device = cfg.MODEL.DEVICE

    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Enter inferencing")
    if cfg.TEST.RE_RANKING == 'no':
        print("Create evaluator")
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
    elif cfg.TEST.RE_RANKING == 'yes':
        print("Create evaluator for reranking")
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP_reranking(num_query,
                                 max_rank=50,
                                 feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
    else:
        print(
            "Unsupported re_ranking config. Only support for no or yes, but got {}."
            .format(cfg.TEST.RE_RANKING))

    # import pdb
    # pdb.set_trace()
    evaluator.run(val_loader)
Example #6
def inference(
    cfg,
    model,
    val_loader,
    num_query,
):
    device = cfg.MODEL.DEVICE
    log_period = cfg.SOLVER.LOG_PERIOD
    output_dir = cfg.OUTPUT_DIR

    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Enter inferencing")
    if cfg.TEST.RE_RANKING == 'no':
        print("Create evaluator")
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP(num_query,
                       max_rank=50,
                       feat_norm=cfg.TEST.FEAT_NORM,
                       remove_camera=True,
                       extract_feat=True)
            },
            device=device)
    elif cfg.TEST.RE_RANKING == 'yes':
        print("Create evaluator for reranking")
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP_reranking(num_query,
                                 max_rank=50,
                                 feat_norm=cfg.TEST.FEAT_NORM,
                                 remove_camera=True,
                                 extract_feat=True)
            },
            device=device)
    else:
        print(
            "Unsupported re_ranking config. Only support for no or yes, but got {}."
            .format(cfg.TEST.RE_RANKING))

    @evaluator.on(Events.ITERATION_COMPLETED)
    def log_iteration(engine):
        iter = (engine.state.iteration - 1) % len(val_loader) + 1
        if iter % log_period == 0:
            logger.info("Extract Features. Iteration[{}/{}]".format(
                iter, len(val_loader)))

    evaluator.run(val_loader)
    distmat, cmc, mAP = evaluator.state.metrics['r1_mAP']

    logger.info('Validation Results')
    logger.info("mAP: {:.1%}".format(mAP))
    for r in [1, 5, 10]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))

    return mAP, cmc[0], cmc[4]
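The R1_mAP, R1_mAP_reranking and related classes used throughout these examples plug into ignite's create_supervised_evaluator as custom metrics. Below is a simplified sketch of that metric contract (reset/update/compute), assuming the evaluator's step function emits (feature, pid, camid) tuples as in these examples; the actual CMC/mAP ranking computed by the real R1_mAP classes is intentionally omitted, so the class name and return value here are illustrative only.

import torch
from ignite.metrics import Metric


class FeatureAccumulator(Metric):
    """Collects (feature, pid, camid) triples emitted by the evaluator step."""

    def __init__(self, num_query, output_transform=lambda x: x):
        # The first num_query processed items are queries, the rest are gallery.
        self.num_query = num_query
        super().__init__(output_transform=output_transform)

    def reset(self):
        # Called once per evaluator run, before any update().
        self.feats, self.pids, self.camids = [], [], []

    def update(self, output):
        # Called once per batch with whatever the evaluator step returned.
        feat, pid, camid = output
        self.feats.append(feat.cpu())
        self.pids.extend(pid)
        self.camids.extend(camid)

    def compute(self):
        # Called at the end of the run; a real R1_mAP metric would build a
        # query/gallery distance matrix here and return (cmc, mAP).
        feats = torch.cat(self.feats, dim=0)
        query, gallery = feats[:self.num_query], feats[self.num_query:]
        return query, gallery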
Example #7
def main(mode, ckpt, logger):
    logger.info(cfg)
    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True
    # ----load dataset------ #
    train_loader_s, _, _, num_classes = make_data_loader(cfg)
    train_loader_t, val_loader, num_query, _ = make_data_loader_target(cfg)
    cfg.DATASETS.NUM_CLASSES_S = num_classes
    pj_model = BaseModel(cfg)  # --------------
    # Evaluator
    if cfg.TEST.RE_RANKING == 'no':
        evaluator = create_supervised_evaluator(pj_model.Content_Encoder,
                                            metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device='cuda')
    else:
        evaluator = create_supervised_evaluator(pj_model.Content_Encoder,
                                            metrics={'r1_mAP': R1_mAP_reranking(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device='cuda')
    start_epoch = best_top1 = 0
    # Summary_writer
    writer = SummaryWriter()
    # Start training
    if mode == 'two':
        if cfg.DATASETS.NAMES == 'dukemtmc':
            rand_src_1 = np.asarray([0, 2, 4, 6])
            rand_src_2 = np.asarray([1, 3, 5, 7])
        elif cfg.DATASETS.NAMES == 'market1501':
            rand_src_1 = np.asarray([0, 1, 4])
            rand_src_2 = np.asarray([3, 2, 5])
        elif cfg.DATASETS.NAMES == 'msmt17':
            rand_src_1 = np.asarray([1, 3, 4, 5, 9, 11, 13])
            rand_src_2 = np.asarray([0, 2, 6, 7, 8, 10, 12, 14])
        for epoch in range(start_epoch, 210):
            pj_model.two_classifier(epoch, train_loader_s, train_loader_t, writer, logger, rand_src_1, rand_src_2)
            if ((epoch+1) % 2 == 0):
                evaluator.run(val_loader)
                cmc, mAP = evaluator.state.metrics['r1_mAP']
                logger.info("Validation Results - Epoch: {}".format(epoch))
                logger.info("mAP: {:.1%}".format(mAP))
                for r in [1, 5, 10, 20]:
                    logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
                is_best = cmc[0] > best_top1
                best_top1 = max(cmc[0], best_top1)
                save_checkpoint({
                    'Content_encoder': pj_model.Content_Encoder.module.state_dict(),
                    'Content_optimizer': pj_model.Content_optimizer.state_dict(),
                    'Content_optimizer_fix': pj_model.Content_optimizer_fix.state_dict(),
                    'epoch': epoch + 1,
                    'best_top1': best_top1,
                }, is_best, fpath=cfg.OUTPUT_DIR + 'checkpoint.pth.tar', info=ckpt + '.pth.tar')

                logger.info('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
                            format(epoch, cmc[0], best_top1, ' *' if is_best else ''))
        writer.close()
Example #8
def get_valid_eval_map(cfg, device, model, valid, re_ranking=False, classify_feature=True):
    validation_evaluator_map = OrderedDict()
    for name, (_, n_q) in valid.items():
        if re_ranking:
            metrics = {"r1_mAP": R1_mAP_reranking(n_q, max_rank=50, if_feat_norm=cfg.TEST.IF_FEAT_NORM)}
        else:
            metrics = {"r1_mAP": R1_mAP(n_q, max_rank=50, if_feat_norm=cfg.TEST.IF_FEAT_NORM)}

        validation_evaluator_map[name] = create_supervised_evaluator(model,
                                                                     metrics=metrics,
                                                                     device=device,
                                                                     classify_feature=classify_feature)
    return validation_evaluator_map
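A short usage sketch for get_valid_eval_map() follows. It assumes the first element of each valid[name] tuple is that dataset's validation loader, which matches the (loader, num_query) unpacking pattern used above; the loop itself is illustrative and not part of the example.

# Hedged usage sketch: run each per-dataset evaluator and report its metrics.
evaluator_map = get_valid_eval_map(cfg, device, model, valid)

for name, (loader, _) in valid.items():
    evaluator = evaluator_map[name]
    evaluator.run(loader)
    cmc, mAP = evaluator.state.metrics['r1_mAP']
    print("{}: mAP {:.1%}, Rank-1 {:.1%}".format(name, mAP, cmc[0]))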
Example #9
def inference(cfg, model, val_loader, num_query):
    device = cfg.MODEL.DEVICE

    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Enter inferencing")
    if cfg.TEST.RE_RANKING == 'no':
        print("Create evaluator")
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
        evaluator.run(val_loader)
        cmc, mAP = evaluator.state.metrics['r1_mAP']
        logger.info('Validation Results')
        logger.info("mAP: {:.1%}".format(mAP))
        for r in [1, 5, 10]:
            logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))

    elif cfg.TEST.RE_RANKING == 'yes':
        print("Create evaluator for reranking")
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP_reranking(num_query,
                                 max_rank=50,
                                 feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
        evaluator.run(val_loader)
        cmc, mAP = evaluator.state.metrics['r1_mAP']
        logger.info('Validation Results')
        logger.info("mAP: {:.1%}".format(mAP))
        for r in [1, 5, 10]:
            logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))

    else:
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'submit':
                Submit(num_query, max_rank=200, feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
        evaluator.run(val_loader)
        img_mat = evaluator.state.metrics['submit']
        with open('submit.json', 'w') as f:
            json.dump(img_mat, f)
def validator(cfg, model, camera_model, val_loader, num_query):
    device = cfg.MODEL.DEVICE
    log_period = cfg.SOLVER.LOG_PERIOD

    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Enter inferencing")
    if cfg.TEST.RE_RANKING == 'no':
        print("Create evaluator")
        evaluator = create_supervised_evaluator(
            model,
            camera_model,
            metrics={
                'r1_mAP':
                R1_mAP(num_query, True, False, feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
    elif cfg.TEST.RE_RANKING == 'yes':
        print("Create evaluator for reranking")
        evaluator = create_supervised_evaluator(
            model,
            camera_model,
            metrics={
                'r1_mAP':
                R1_mAP_reranking(num_query,
                                 True,
                                 False,
                                 max_rank=50,
                                 feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
    else:
        print(
            "Unsupported re_ranking config. Only support for no or yes, but got {}."
            .format(cfg.TEST.RE_RANKING))

    @evaluator.on(Events.ITERATION_COMPLETED)
    def log_training_loss(engine):
        iter = (engine.state.iteration - 1) % len(val_loader) + 1
        if iter % log_period == 0:
            logger.info("Epoch[{}] Iter[{}/{}]".format(engine.state.epoch,
                                                       iter, len(val_loader)))

    evaluator.run(val_loader)
    cmc, mAP = evaluator.state.metrics['r1_mAP']
    logger.info('Validation Results')
    logger.info("mAP: {:.1%}".format(mAP))
    for r in [1, 5, 10, 20, 50]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
def inference(cfg, model, val_loader, num_query):
    device = cfg.MODEL.DEVICE

    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Enter inferencing")
    if cfg.TEST.RE_RANKING == 'no':
        print("Create evaluator")
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
    elif cfg.TEST.RE_RANKING == 'yes':
        print("Create evaluator for reranking")
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP_reranking(num_query,
                                 max_rank=50,
                                 feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
    else:
        print(
            "Unsupported re_ranking config. Only support for no or yes, but got {}."
            .format(cfg.TEST.RE_RANKING))

    @evaluator.on(Events.ITERATION_COMPLETED)
    def update(evaluator):
        print(evaluator.state.output)

    @evaluator.on(Events.EPOCH_COMPLETED)
    def calc_heatmap(engine):
        pass

    evaluator.run(val_loader)
    cmc, mAP = evaluator.state.metrics['r1_mAP']
    logger.info('Validation Results')
    logger.info("mAP: {:.1%}".format(mAP))
    for r in [1, 5, 10]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
Example #12
    def get_valid_eval_map(self, cfg, model):
        self.validation_evaluator_map = OrderedDict()
        for name, (_, n_q) in self.valid_dict.items():
            if self.re_ranking:
                metrics = {
                    "r1_mAP":
                    R1_mAP_reranking(n_q,
                                     max_rank=50,
                                     if_feat_norm=cfg.TEST.IF_FEAT_NORM)
                }
            else:
                metrics = {
                    "r1_mAP":
                    R1_mAP(n_q,
                           max_rank=50,
                           if_feat_norm=cfg.TEST.IF_FEAT_NORM)
                }

            self.validation_evaluator_map[name] = create_supervised_evaluator(
                model,
                metrics=metrics,
                device=self.device,
                classify_feature=self.classify_feature)
Example #13
def inference(cfg, model, val_loader, num_query):
    device = cfg.MODEL.DEVICE

    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Enter inferencing")
    if cfg.TEST.RE_RANKING == 'no':
        print("Create evaluator")
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP(num_query,
                       max_rank=50,
                       feat_norm=cfg.TEST.FEAT_NORM,
                       fun=eval_func_with_plot)
            },
            device=device)
    elif cfg.TEST.RE_RANKING == 'yes':
        print("Create evaluator for reranking")
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP_reranking(num_query,
                                 max_rank=50,
                                 feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
    else:
        print(
            "Unsupported re_ranking config. Only support for no or yes, but got {}."
            .format(cfg.TEST.RE_RANKING))

    dataset = init_dataset(cfg.DATASETS.NAMES, root=cfg.DATASETS.ROOT_DIR)
    transform = T.Compose([T.ToTensor()])
    val_dataset = ImageDataset(dataset.query + dataset.gallery, transform)
    plot(val_dataset, 'good_case', [[16843, 3918, 6980], [7005, 4783, 15962]])
Example #14
def inference(cfg, model, val_loader, num_query, datasets):
    device = cfg.MODEL.DEVICE
    aligned_test = cfg.MODEL.ALIGNED
    adjust_rerank = cfg.TEST.ADJUST_RERANK
    pcb_test = cfg.MODEL.PCB
    ggdist_path = cfg.TEST.SAVE_DIST_GG
    qqdist_path = cfg.TEST.SAVE_DIST_QQ
    qgdist_path = cfg.TEST.SAVE_DIST_QG
    savedist_path = [ggdist_path, qqdist_path, qgdist_path]
    merge = cfg.TEST.MERGE
    new_pcb_test = cfg.MODEL.NEW_PCB

    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Enter inferencing")
    if cfg.TEST.RE_RANKING == 'no':
        print("Create evaluator")
        evaluator = create_supervised_evaluator(
            model,
            aligned_test,
            pcb_test,
            new_pcb_test,
            metrics={
                'r1_mAP':
                R1_mAP(num_query,
                       aligned_test,
                       datasets,
                       max_rank=50,
                       feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
    elif cfg.TEST.RE_RANKING == 'yes':
        print("Create evaluator for reranking")
        evaluator = create_supervised_evaluator(
            model,
            aligned_test,
            pcb_test,
            new_pcb_test,
            metrics={
                'r1_mAP':
                R1_mAP_reranking(num_query,
                                 datasets,
                                 aligned_test,
                                 pcb_test,
                                 new_pcb_test,
                                 adjust_rerank,
                                 savedist_path,
                                 merge,
                                 max_rank=50,
                                 feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
    else:
        print(
            "Unsupported re_ranking config. Only support for no or yes, but got {}."
            .format(cfg.TEST.RE_RANKING))

    evaluator.run(val_loader)
    cmc, mAP = evaluator.state.metrics['r1_mAP']
    logger.info('Validation Results')
    logger.info("mAP: {:.1%}".format(mAP))
    for r in [1, 5, 10]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
Example #15
def inference(cfg, model, val_loader, num_query):
    device = cfg.MODEL.DEVICE

    logger = logging.getLogger("TEST clothing change re-id")
    logger.info("Enter inferencing")
    if cfg.TEST.RE_RANKING == 'no':
        print("Create evaluator")
        evaluator1 = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP_longterm':
                R1_mAP_longterm(num_query,
                                max_rank=50,
                                feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
        evaluator2 = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
        evaluator = (evaluator1, evaluator2)

    elif cfg.TEST.RE_RANKING == 'yes':
        print("Create evaluator for reranking")
        evaluator1 = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP_longterm':
                R1_mAP_reranking(num_query,
                                 max_rank=50,
                                 feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
        evaluator2 = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
        evaluator = (evaluator1, evaluator2)
    else:
        print(
            "Unsupported re_ranking config. Only support for no or yes, but got {}."
            .format(cfg.TEST.RE_RANKING))

    evaluator[0].run(val_loader)
    CC_cmc, CC_mAP = evaluator[0].state.metrics['r1_mAP_longterm']
    logger.info('>>>>> TEST: Cloth changing evaluation results:')
    logger.info("mAP: {:.1%}".format(CC_mAP))
    for r in [1, 5, 10, 20, 50]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, CC_cmc[r - 1]))

    evaluator[1].run(val_loader)
    SS_cmc, SS_mAP = evaluator[1].state.metrics['r1_mAP']
    logger.info('>>>>> TEST: Standard evaluation results:')
    logger.info("mAP: {:.1%}".format(SS_mAP))
    for r in [1, 5, 10, 20, 50]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, SS_cmc[r - 1]))

    return CC_cmc, CC_mAP, SS_cmc, SS_mAP
def inference(cfg, model, val_loader, num_query):
    device = cfg.MODEL.DEVICE

    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Enter inferencing")
    if cfg.TEST.RE_RANKING == 'no':
        print("Create evaluator")
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
    elif cfg.TEST.RE_RANKING == 'yes':
        print("Create evaluator for reranking")
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP':
                R1_mAP_reranking(num_query,
                                 max_rank=50,
                                 feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
    else:
        print(
            "Unsupported re_ranking config. Only support for no or yes, but got {}."
            .format(cfg.TEST.RE_RANKING))

    evaluator.run(val_loader)
    cmc, mAP, max_200_indices, num_q, num_g = evaluator.state.metrics['r1_mAP']

    # save 200 img_id
    query_list_path = '/home/flyingbird/Documents/reid_competition/test/query_a_list.txt'
    gallery_list_path = '/home/flyingbird/Documents/reid_competition/test/gallery_a_list.txt'

    query_list = list()
    with open(query_list_path, 'r') as f:
        # txt list file from the test set
        lines = f.readlines()
        for i, line in enumerate(lines):
            data = line.split(" ")
            image_name = data[0].split("/")[1]
            #img_file = os.path.join(r'初赛A榜测试集\query_a', image_name)  # test-set query folder
            query_list.append(image_name)

    # gallery_list = [os.path.join(gallery_list_path, x) for x in  # test-set gallery folder
    #                 os.listdir(gallery_list_path)]
    gallery_list = list()
    with open(gallery_list_path, 'r') as f:
        # txt list file from the test set
        lines = f.readlines()
        for i, line in enumerate(lines):
            data = line.split(" ")
            image_name = data[0].split("/")[1]
            #img_file = os.path.join(r'初赛A榜测试集\query_a', image_name)  # test-set query folder
            gallery_list.append(image_name)
    #query_num = len(query_list)

    res_dict = dict()
    for q_idx in range(num_q):
        #print(query_list[q_idx])
        #print(query_list[q_idx].rindex("\\"))
        filename = query_list[q_idx]  #[query_list[q_idx].rindex("\\") + 1:]
        #max_200_files = [gallery_list[i][gallery_list[i].rindex("\\") + 1:] for i in max_200_indices[q_idx]]
        max_200_files = [gallery_list[i] for i in max_200_indices[q_idx]]
        res_dict[filename] = max_200_files
        #print(query_list[q_idx], max_200_files)

    save_path = '/home/flyingbird/Documents/reid_competition/test/rerank_submission_A.json'

    with open(save_path, 'w', encoding='utf-8') as f:  # submission file
        json.dump(res_dict, f)

    logger.info('Validation Results')
    logger.info("mAP: {:.1%}".format(mAP))