def test(cfg):
    model = build_model(cfg)
    device = cfg.MODEL.DEVICE
    checkpoint = torch.load(cfg.TEST.WEIGHT, map_location=device)  # map to the configured device so CPU-only runs work
    best_score_threshold = checkpoint['best_score_threshold']
    best_final_score = checkpoint['best_final_score']
    print('-' * 30)
    print(f'[Best Score Threshold]: {best_score_threshold}')
    print(f'[OOF Score]: {best_final_score:.4f}')
    print('-' * 30)
    test_loader = make_test_data_loader(cfg)

    tester = Tester(model=model, device=device, cfg=cfg, test_loader=test_loader)
    tester.test()
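
# test() assumes cfg.TEST.WEIGHT points at a checkpoint dict carrying
# 'best_score_threshold' and 'best_final_score'. A minimal sketch of writing a
# compatible checkpoint during training; the extra 'model_state_dict' key is an
# assumption, mirroring the predict() example below:
import torch

def save_checkpoint_sketch(model, best_score_threshold, best_final_score, path):
    torch.save({
        'model_state_dict': model.state_dict(),
        'best_score_threshold': best_score_threshold,
        'best_final_score': best_final_score,
    }, path)
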
def predict(cfg):
    seed_everything(cfg.SEED)
    model = build_model(cfg)
    ema = ModelEMA(model)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # load the EMA (shadow) weights saved under 'model_state_dict'
    ema.ema.load_state_dict(
        torch.load(cfg.TEST.WEIGHT, map_location=device)['model_state_dict'])

    test_loader = make_test_data_loader(cfg)

    predicter = Predicter(model=ema.ema, device=device, cfg=cfg, test_loader=test_loader)
    predicter.predict()
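
# predict() evaluates the EMA shadow copy (ema.ema) rather than the live model.
# A minimal sketch of how such shadow weights are typically maintained during
# training; the decay value and update() signature are assumptions, not this
# project's ModelEMA API:
import copy
import torch

class SimpleEMA:
    def __init__(self, model, decay=0.999):
        self.ema = copy.deepcopy(model).eval()  # frozen shadow copy used at inference
        self.decay = decay
        for p in self.ema.parameters():
            p.requires_grad_(False)

    @torch.no_grad()
    def update(self, model):
        # shadow <- decay * shadow + (1 - decay) * current
        for e, m in zip(self.ema.state_dict().values(), model.state_dict().values()):
            if e.dtype.is_floating_point:
                e.mul_(self.decay).add_(m.detach(), alpha=1 - self.decay)
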
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    # the num_classes argument is a placeholder here; the trained weights are
    # loaded directly from cfg.TEST.WEIGHT below
    model = build_model(cfg, 1)
    model.load_param(cfg.TEST.WEIGHT)

    train_loader, val_loader, num_query, num_classes = make_test_data_loader(cfg)
    inference(cfg, model, val_loader, num_query)
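
# The "opts" REMAINDER arguments are consumed by cfg.merge_from_list() as
# alternating KEY VALUE pairs, yacs-style. A hedged usage example (the script
# path is an assumption; the config keys appear elsewhere in this code):
#
#   python tools/test.py --config_file configs/baseline.yml \
#       MODEL.DEVICE_ID "0" TEST.WEIGHT ./output/model_best.pth
#
# which is equivalent to calling:
#   cfg.merge_from_list(["MODEL.DEVICE_ID", "0", "TEST.WEIGHT", "./output/model_best.pth"])
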
def test(cfg):
    logger = setup_logger("reid_baseline", cfg.OUTPUT_DIR)
    logger.info("Running with config:\n{}".format(cfg))

    # prepare dataset
    test_data_loader, num_query = make_test_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes=[700, 500])  # hard-coded class counts for the two branches
    logger.info('Path to the checkpoint of the model: %s' % cfg.TEST.WEIGHT)
    model.load_param(cfg.TEST.WEIGHT, 'self')
    camera_model = build_camera_model(cfg, num_classes=5)
    logger.info('Path to the checkpoint of the camera model: %s' % cfg.TEST.CAMERA_WEIGHT)
    camera_model.load_param(cfg.TEST.CAMERA_WEIGHT, 'self')
    tester(cfg,
           model,
           camera_model,
           test_data_loader,
           num_query)
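
# load_param() is project-specific; a common ReID-baseline pattern is to copy
# only the parameters whose names and shapes match, so a checkpoint trained
# with a different classifier head still loads. A hedged sketch under that
# assumption (the 'self' mode selecting the state-dict root is also assumed):
import torch

def load_param_sketch(model, weight_path):
    state = torch.load(weight_path, map_location='cpu')
    own = model.state_dict()
    with torch.no_grad():
        for name, tensor in state.items():
            if name in own and own[name].shape == tensor.shape:
                own[name].copy_(tensor)  # copy matching keys, skip mismatched heads
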
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file", default="./configs/softmax_triplet.yml", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.DATASETS.NAMES = 'mvb'
    cfg.DATASETS.ROOT_DIR = './data'
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    # init dataloader
    gallery_loader, query_loader, num_query, num_classes = make_test_data_loader(cfg)
    # build model and load checkpoint param
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    model.eval().cuda()
    feats = []
    g_pids = []
    g_camids = []
    g_names = []
    q_names = []
    # ===== extract feats =====
    with torch.no_grad():
        print('extract query feats...')
        for batch in query_loader:
            data, _, _, paths = batch
            feat = model(data.cuda())
            feats.append(feat.cpu())
            q_names.extend(paths)
        print('extract gallery feats...')
        for batch in gallery_loader:
            data, pids, camids, paths = batch
            g_pids.extend(pids)
            g_camids.extend(camids)
            feat = model(data.cuda())
            feats.append(feat.cpu())
            g_names.extend(paths)

    # ===== init vars =====
    feats = torch.cat(feats, dim=0)  # concatenate the per-batch feature chunks
    feats = torch.nn.functional.normalize(feats, dim=1, p=2)  # L2-normalize features
    qf = feats[:num_query]          # query feats
    gf = feats[num_query:]          # gallery feats
    g_pids = np.array(g_pids)       # gallery pids
    g_camids = np.array(g_camids)   # gallery camids
    # ===== calc euclidean distance between gallery feat and query feat =====
    m, n = qf.shape[0], gf.shape[0]
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)  # modern signature; the positional addmm_(1, -2, ...) form is deprecated
    distmat = distmat.cpu().numpy()
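    # The lines above implement the expansion
    #   ||q - g||^2 = ||q||^2 + ||g||^2 - 2 * (q . g),
    # so distmat holds squared Euclidean distances. An equivalent formulation,
    # noted here only as a cross-check (should match up to floating-point error):
    #   distmat = torch.cdist(qf, gf, p=2).pow(2).cpu().numpy()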

    # ===== find the minimum distance per gallery id for every query =====
    num_q, num_g = distmat.shape
    indices = np.argsort(distmat, axis=1)   # sort each row by ascending distance
    q_nameinds = np.zeros((num_q), dtype=int)  # query ids, filled in the loop because the query files are not ordered
    distmat_id_wised = np.ones((num_q, num_classes), dtype=np.float32) * 100  # id-wise minima, initialised to a large sentinel
    def get_id(x):
        return int(x.split('_')[0])
    for q_idx in range(num_q):
        order = indices[q_idx]
        q_nameinds[q_idx] = int(q_names[q_idx].split('.')[0])  # get the query id from the query image filename
        names = np.array(g_names)[order].tolist()
        pids = map(get_id, names)
        dists = distmat[q_idx][order]
        # keep the minimum distance per gallery id for the current query
        for pid, dist in zip(pids, dists):
            if distmat_id_wised[q_idx, pid] > dist:
                distmat_id_wised[q_idx, pid] = dist
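
    # The id-wise minimum part of the loop above can also be vectorized with
    # np.minimum.at, which scatters a running minimum per gallery id; an
    # equivalent sketch (g_ids would be precomputed once from g_names):
    #   g_ids = np.array([get_id(n) for n in g_names])
    #   for q_idx in range(num_q):
    #       np.minimum.at(distmat_id_wised[q_idx], g_ids, distmat[q_idx])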
    
    # ===== sort queries by ascending query id =====
    orders = np.argsort(q_nameinds, axis=0)
    q_nameinds = q_nameinds[orders]
    distmat_id_wised = distmat_id_wised[orders]

    # ===== write result to csv =====
    with open('../024_bag_result.csv', 'w') as f:
        for q_idx in range(num_q):
            order = np.argsort(distmat_id_wised[q_idx])
            max_dist = distmat_id_wised[q_idx].max()
            buf = '%05d,' % q_nameinds[q_idx]
            for ind in order:
                score = (max_dist-distmat_id_wised[q_idx][ind])/max_dist
                buf += '%04d,%.6f,' % (ind, score)
            buf = buf[:-1]
            f.write(buf+'\n')
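
# Each output row pairs the zero-padded query id with (gallery_id, score)
# pairs ranked best-first; the score rescales distance so the closest id gets
# the highest value and the farthest gets 0. An illustrative row (values are
# made up):
#   00001,0123,0.983412,0456,0.871201,...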