Example #1
0
def detect(opt, logger, cfg):
    demo = FeatureExtractionDemo(cfg, parallel=opt.parallel)
    query_loader = []
    print(len(os.listdir(opt.input_picture_path)))
    for query_image in os.listdir(opt.input_picture_path):
        query_image = cv2.imread(
            os.path.join(opt.input_picture_path, query_image))
        query_image = cv2.cvtColor(query_image,
                                   cv2.COLOR_BGR2RGB)  # PIL: (233, 602)
        query_image = cv2.resize(query_image, (128, 256))
        query_image = np.transpose(query_image, (2, 0, 1))
        query_feats = demo.run_on_image(
            torch.from_numpy(query_image).unsqueeze(0))
        print(query_feats.shape)
        query_loader.append(query_feats)
    extract_features = torch.cat(query_loader, dim=0).data.cpu().numpy()
    # extract_features = torch.nn.functional.normalize(query_feats, dim=1, p=2).data.cpu().numpy()
    print('features:', extract_features.shape)
    with Timer('All Steps'):
        global args
        # args = parser.parse_args()
        label_path = None
        pred_label_path = None
        print('=> Use cuda ?: {}'.format(opt.is_cuda))
        # with Timer('Extract Feature'):
        #     extract_features = extract_fature(args)
        if eval(opt.is_evaluate):
            opt.label_path = 'data/tmp/test.meta'
        if not eval(opt.is_cuda):
            opt.knn_method = 'faiss-cpu'
        with Timer('Face Cluster'):
            cluster_main(opt, extract_features)
        print(
            "=> Face cluster done! The cluster results have been saved in {}".
            format(opt.output_picture_path))
Example #2
0
class Reid_feature():
    """Callable wrapper around ``FeatureExtractionDemo`` for batch
    re-ID feature extraction."""

    def __init__(self):
        # Build the extractor once, from the command-line configuration.
        cli_args = get_parser().parse_args()
        config = setup_cfg(cli_args)
        self.demo = FeatureExtractionDemo(config, parallel=cli_args.parallel)

    def __call__(self, img_list):
        """Run the extractor on a batch of images and return its features."""
        start = time.time()  # kept for ad-hoc timing (see note below)
        features = self.demo.run_on_batch(img_list)
        # print('reid time:', time.time() - start, len(img_list))
        return features
Example #3
0
File: demo.py  Project: zhaoqun05/fast-reid
                        default='demo_output',
                        help='path to save features')
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser


if __name__ == '__main__':
    # Build the feature extractor from CLI arguments.
    args = get_parser().parse_args()
    cfg = setup_cfg(args)
    demo = FeatureExtractionDemo(cfg,
                                 device=args.device,
                                 parallel=args.parallel)

    PathManager.mkdirs(args.output)
    if args.input:
        # A single directory argument is expanded into its files.
        if PathManager.isdir(args.input[0]):
            args.input = glob.glob(os.path.expanduser(args.input[0]))
            assert args.input, "The input path(s) was not found"
        for path in tqdm.tqdm(args.input):
            img = cv2.imread(path)
            if img is None:
                # Unreadable / non-image file: skip rather than crash.
                continue
            feat = demo.run_on_image(img)
            feat = feat.numpy()
            # Save as <output>/<stem>.npy. os.path handles both '/' and
            # '\\' separators and any image extension -- the original
            # path.replace('.jpg', '.npy').split('/')[-1] broke on Windows
            # paths and produced '<name>.png.npy' for non-.jpg inputs.
            stem = os.path.splitext(os.path.basename(path))[0]
            np.save(os.path.join(args.output, stem + '.npy'), feat)
    )
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser


if __name__ == '__main__':
    # Build the config and evaluation loader from CLI arguments.
    args = get_parser().parse_args()
    cfg = setup_cfg(args)
    # num_query: number of query samples at the front of the feature matrix.
    test_loader, num_query = build_reid_test_loader(
        cfg, dataset_name=args.dataset_name)
    demo = FeatureExtractionDemo(cfg, parallel=args.parallel)

    # NOTE(review): `logger` is not defined in this fragment -- presumably
    # created via setup_logger() elsewhere in the file; confirm.
    logger.info("Start extracting image features")
    feats = []
    pids = []
    camids = []
    # Stream (feature, person-id, camera-id) triples from the test loader.
    for (feat, pid, camid) in tqdm.tqdm(demo.run_on_loader(test_loader),
                                        total=len(test_loader)):
        feats.append(feat)
        pids.extend(pid)
        camids.extend(camid)

    # Split into query (first num_query rows) and gallery partitions.
    feats = torch.cat(feats, dim=0)
    q_feat = feats[:num_query]
    g_feat = feats[num_query:]
    q_pids = np.asarray(pids[:num_query])
Example #5
0
 def __init__(self):
     """Build the re-ID feature extractor from command-line arguments."""
     cli_args = get_parser().parse_args()
     config = setup_cfg(cli_args)
     self.demo = FeatureExtractionDemo(config, parallel=cli_args.parallel)
Example #6
0
def reid_feature():
    """Construct and return a ``FeatureExtractionDemo`` configured from
    the command-line arguments."""
    cli_args = get_parser().parse_args()
    config = setup_cfg(cli_args)
    return FeatureExtractionDemo(config, parallel=cli_args.parallel)
Example #7
0
    def __init__(self):
        """Build the re-ID feature extractor from command-line arguments."""
        cli_args = get_parser().parse_args()
        config = setup_cfg(cli_args)
        self.demo = FeatureExtractionDemo(config, parallel=cli_args.parallel)

    def __call__(self, img_list):
        """Run the extractor on a batch of images and return its features."""
        start = time.time()  # kept for ad-hoc timing (see note below)
        features = self.demo.run_on_batch(img_list)
        # print('reid time:', time.time() - start, len(img_list))
        return features


if __name__ == '__main__':
    # Build the feature extractor from CLI arguments.
    args = get_parser().parse_args()
    cfg = setup_cfg(args)
    demo = FeatureExtractionDemo(cfg, parallel=args.parallel)

    PathManager.mkdirs(args.output)
    # onnx_output = np.load('../tools/deploy/onnx_output/0065_c6s1_009501_02.npy')
    # onnx_output = np.vstack((onnx_output, onnx_output))
    # print(onnx_output.shape)

    if args.input:
        # NOTE(review): infinite polling loop -- this fragment is truncated,
        # so the exit condition (if any) is not visible; confirm upstream.
        while True:
            img_list = []
        # if PathManager.isdir(args.input[0]):
        #     args.input = glob.glob(os.path.expanduser(args.input[0]))
        #     assert args.input, "The input path(s) was not found"
        # for path in tqdm.tqdm(os.listdir(args.input)):
            t1 = time.time()
            # NOTE(review): cv2.imread expects a single path string; other
            # scripts in this file treat args.input as a list -- verify.
            img = cv2.imread(args.input)
Example #8
0
    )
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser


if __name__ == '__main__':
    # Parse CLI args, set up logging and the model config.
    args = get_parser().parse_args()
    logger = setup_logger()
    cfg = setup_cfg(args)
    # num_query: number of query samples at the front of the feature matrix.
    test_loader, num_query = build_reid_test_loader(cfg, args.dataset_name)
    demo = FeatureExtractionDemo(cfg, device=args.device, parallel=args.parallel)

    logger.info("Start extracting image features")
    feats = []
    pids = []
    camids = []
    # Stream (feature, person-id, camera-id) triples from the test loader.
    # NOTE(review): `total` uses len(test_loader.loader) here, while a similar
    # script elsewhere uses len(test_loader) -- confirm which attribute this
    # fastreid version exposes.
    for (feat, pid, camid) in tqdm.tqdm(demo.run_on_loader(test_loader), total=len(test_loader.loader)):
        feats.append(feat)
        pids.extend(pid)
        camids.extend(camid)

    # Split into query (first num_query rows) and gallery partitions.
    feats = torch.cat(feats, dim=0)
    q_feat = feats[:num_query]
    g_feat = feats[num_query:]
    q_pids = np.asarray(pids[:num_query])
    g_pids = np.asarray(pids[num_query:])
Example #9
0
                        default='/ssd/zphe/data/MOT20_R50-ibn_results',
                        help='path to save features')
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
    )
    parser.add_argument("--cores", type=int, default=8)
    return parser


# if __name__ == '__main__':
# NOTE(review): this code runs at import time -- the entry-point guard above
# is commented out, and the commented `# return` below suggests the loop body
# was once a function (see `# def process_one_video` comment). Confirm before
# re-enabling either.
args = get_parser().parse_args()
cfg = setup_cfg(args)
# Every .mp4 in the input directory is processed sequentially.
videos = glob.glob(osp.join(args.input_video_dir, '*.mp4'))
demo = FeatureExtractionDemo(cfg, parallel=args.parallel)
# def process_one_video(params):
for video in tqdm(videos):
    # device = torch.device("cuda:%d" % gpu_id) if gpu_id>=0 else torch.device("cpu")
    # demo.predictor.model.to(device)
    videoname = osp.basename(video)
    print('processing ' + videoname)
    # Detections for this video must already exist as a reduced JSON file.
    jsonfile = osp.join(args.input_det_dir, args.det_type,
                        videoname + '.final.reduced.json')
    if not osp.exists(jsonfile):
        print(jsonfile + ' not exists!!!')
        continue
        # return
    det_results = load_json(jsonfile)
    vidcap = cv2.VideoCapture(video)
    save_results = {}