Esempio n. 1
0
def _parse_bool(value):
    """Interpret a CLI flag that may arrive as a string ('True'/'False').

    Replaces the original ``eval()`` calls, which executed arbitrary text
    taken from the command line. Accepts real booleans unchanged.
    """
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ('true', '1', 'yes')


def detect(opt, logger, cfg):
    """Extract ReID features for every image under ``opt.input_picture_path``
    and run face clustering on the stacked feature matrix.

    Args:
        opt: parsed options; must provide ``input_picture_path``, ``parallel``,
            ``is_cuda``, ``is_evaluate``, ``knn_method`` and
            ``output_picture_path``. ``label_path`` may be set here.
        logger: unused; kept for interface compatibility with callers.
        cfg: config object consumed by ``FeatureExtractionDemo``.
    """
    demo = FeatureExtractionDemo(cfg, parallel=opt.parallel)
    image_names = os.listdir(opt.input_picture_path)
    print(len(image_names))

    query_feats_list = []
    for image_name in image_names:
        image = cv2.imread(os.path.join(opt.input_picture_path, image_name))
        if image is None:
            # Skip non-image/unreadable entries instead of crashing in cvtColor.
            print('=> Skipping unreadable file: {}'.format(image_name))
            continue
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = cv2.resize(image, (128, 256))
        # HWC -> CHW, then add a batch dimension for the model.
        image = np.transpose(image, (2, 0, 1))
        feats = demo.run_on_image(torch.from_numpy(image).unsqueeze(0))
        print(feats.shape)
        query_feats_list.append(feats)

    extract_features = torch.cat(query_feats_list, dim=0).data.cpu().numpy()
    print('features:', extract_features.shape)

    with Timer('All Steps'):
        print('=> Use cuda ?: {}'.format(opt.is_cuda))
        # NOTE(review): the original used eval() on these CLI strings, which
        # executes arbitrary input; _parse_bool keeps the same semantics for
        # 'True'/'False' values without that risk.
        if _parse_bool(opt.is_evaluate):
            opt.label_path = 'data/tmp/test.meta'
        if not _parse_bool(opt.is_cuda):
            opt.knn_method = 'faiss-cpu'
        with Timer('Face Cluster'):
            cluster_main(opt, extract_features)
        print(
            "=> Face cluster done! The cluster results have been saved in {}".
            format(opt.output_picture_path))
Esempio n. 2
0
class Reid_feature():
    """Callable wrapper around fastreid's ``FeatureExtractionDemo``.

    The demo is built once from command-line arguments at construction time;
    calling the instance runs feature extraction on a batch of images.
    """

    def __init__(self):
        args = get_parser().parse_args()
        cfg = setup_cfg(args)
        self.demo = FeatureExtractionDemo(cfg, parallel=args.parallel)

    def __call__(self, img_list):
        """Return ReID features for ``img_list`` via the wrapped demo."""
        # Dead timing code (unused `import time` / `t1`) removed.
        return self.demo.run_on_image(img_list)
Esempio n. 3
0
                        help='path to save features')
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser


if __name__ == '__main__':
    # Extract a feature vector per input image and save each as
    # '<output>/<basename>.npy'.
    args = get_parser().parse_args()
    cfg = setup_cfg(args)
    demo = FeatureExtractionDemo(cfg,
                                 device=args.device,
                                 parallel=args.parallel)

    PathManager.mkdirs(args.output)
    if args.input:
        # A single directory argument is expanded to the files it contains.
        if PathManager.isdir(args.input[0]):
            args.input = glob.glob(os.path.expanduser(args.input[0]))
            assert args.input, "The input path(s) was not found"
        for path in tqdm.tqdm(args.input):
            img = cv2.imread(path)
            feat = demo.run_on_image(img)
            feat = feat.numpy()
            # Derive '<basename>.npy' portably. The original
            # path.replace('.jpg', '.npy').split('/')[-1] broke on Windows
            # separators, on non-.jpg inputs, and on '.jpg' occurring
            # elsewhere in the path.
            out_name = os.path.splitext(os.path.basename(path))[0] + '.npy'
            np.save(os.path.join(args.output, out_name), feat)
Esempio n. 4
0
    # Walk the video frame by frame; for frame ids listed in `frames`, crop
    # each detected bounding box and attach its ReID feature to the detection.
    # NOTE(review): this is the interior of a larger function — it relies on
    # `vidcap`, `frames`, `det_results`, `save_results`, `demo` and
    # `max_frame` being defined earlier in the enclosing scope.
    frame_id = 1
    ret = True
    if vidcap.isOpened():
        while ret:
            ret, frame = vidcap.read()
            if frame_id in frames:
                # det_results keys are frame ids as strings; each entry is a
                # list of (box, pid) detections — presumably [x, y, w, h]
                # boxes given the crop below; confirm against the detector.
                bboxes = det_results[str(frame_id)]
                save_results[str(frame_id)] = []
                for bbox in bboxes:
                    box = bbox[0]
                    pid = bbox[1]
                    box_i = [int(x) for x in box]
                    x, y, w, h = box_i
                    # Crop the detection region (rows y..y+h, cols x..x+w).
                    img = frame[y:y + h, x:x + w:, ]
                    # img = img.to(device)
                    # run_on_image returns a batch; [0] selects this crop's
                    # feature, serialized to a plain list for JSON output.
                    feat = demo.run_on_image(img)[0].numpy().tolist()
                    save_results[str(frame_id)].append([box, pid, feat])
            frame_id += 1
            # Stop once past the last frame of interest.
            if frame_id > max_frame:
                break
    #save_json(osp.join(args.output, args.det_type, videoname + '.final.reduced.json'), save_results)

# cores = args.cores
# if cores <= 0:
#     cores = cpu_count()
# num_of_process = int(min(cores, len(videos)))
# mp_pool = Pool(cores)
# multi_inputs = []
# for i in range(num_of_process):
#     param = (videos[i], i)
#     multi_inputs.append(param)