Example #1
def main(args):
    backbone_type = 'resnet50_irse_mx'
    model_type = model_dict[args.model_name]['weights']
    model_pth = pj(model_dict[args.model_name]['path'], model_type)
    model_pth = os.path.abspath(model_pth)

    use_device = 'cuda' if torch.cuda.is_available() else 'cpu'

    infer = Inference(backbone_type=backbone_type,
                      ckpt_fpath=model_pth,
                      device=use_device)

    datapath = args.data

    db_feature = {}

    with torch.no_grad():
        img_cv = cv2.imread('/Users/marschen/Ucloud/Data/debug.png')
        # img_face = face_format(img_cv, 112)
        # # cv2.imwrite('test.jpg',img_face)
        # img_face = face_normalize(img_face)
        # img_face = np.transpose(img_face,[2,0,1])
        # print(img_face.shape)
        # img_face = torch.FloatTensor(img_face)
        # img_face = img_face.unsqueeze(dim=0)
        # probe_feat = infer.execute3(img_face).reshape(1,-1)#[B,F]

        probe_feat = infer.execute(img_cv).reshape(1, -1)
        np.save(pj('/Users/marschen/Ucloud/Data', 'debug.npy'), probe_feat)
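The examples above and below omit their imports and rely on shared project helpers; a minimal sketch of the assumed common setup follows (the commented project-specific module paths are placeholders, not taken from the source):

import os
import cv2
import numpy as np
import torch
from os.path import join as pj  # pj is assumed to alias os.path.join
from torch.utils.data import DataLoader

# Project-specific pieces used throughout these examples; their module paths are assumptions:
# from config import model_dict
# from inference_api import Inference
# from utils import save_obj, face_format, face_normalize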
Example #2
def main():
    backbone_type = 'resnet50_irse_mx'
    model_type = 'model_5932.pth'
    model_pth = pj(
        './model_output/insight_face_res50irsemx_cosface_emore_dist/',
        model_type)
    batch_size = 64

    use_device = 'cuda' if torch.cuda.is_available() else 'cpu'

    infer = Inference(backbone_type=backbone_type,
                      ckpt_fpath=model_pth,
                      device=use_device)

    # datapath = '/data/yefei/data/ucloud_elavator_face/'
    datapath = '/data/yefei/data/0107_0114_result/'

    # dataset_test = BasicLoaderV2(imgs_dir=datapath,extstr='.jpg')
    dataset_test = BasicLoader(imgs_dir=datapath, extstr='.jpg')
    dataloader_test = DataLoader(dataset=dataset_test,
                                 num_workers=4,
                                 batch_size=batch_size,
                                 shuffle=False)

    db_feature = {}

    with torch.no_grad():
        for cnt, batch in enumerate(dataloader_test):
            # person_name = '34_技术服务部-韩晨红'
            bimgs = batch['image']  #B,C,H,W
            bfdname = batch['name']

            if cnt % 100 == 0:
                print('Executing %1.3f...' % (cnt / len(dataloader_test)))

            cur_b_size = bimgs.shape[0]

            probe_feat = infer.execute3(bimgs).reshape(cur_b_size, -1)  #[B,F]

            for idx, imgname in enumerate(bfdname):
                db_feature[imgname] = probe_feat[idx].reshape(-1, 1)

            #Below is for BasicLoaderV2
            # print(probe_feat.shape)
            # for idx,person_name in enumerate(bfdname):
            #
            #     if person_name in db_feature.keys():
            #         db_feature[person_name]['probe'].append(probe_feat[idx,:].reshape(-1,1))
            #     else:
            #         db_feature[person_name] = {'probe':[probe_feat[idx,:].reshape(-1,1)]}

    save_obj(db_feature, pj(datapath, backbone_type + '_batch_' + model_type))
    print('Done')
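save_obj is called above but not shown; a minimal sketch assuming it is a thin pickle wrapper (the body, and whether the helper appends a file extension, are assumptions based only on the call site):

import pickle

def save_obj(obj, fpath):
    # Assumed implementation: serialize the feature dict with pickle.
    with open(fpath, 'wb') as f:
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)

def load_obj(fpath):
    # Matching loader for reading the saved features back.
    with open(fpath, 'rb') as f:
        return pickle.load(f)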
Example #3
def main():
    backbone_type = 'resnet50_irse_mx'
    ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_emore_dist/best_model4tupu/1591242318_tupu_0.981.pth'
    infer = Inference(backbone_type=backbone_type, ckpt_fpath=ckpt_fpath)
    model = infer.model
    test_obj = CompareFace()
    r_img_fpath = ''
    t_img_fpath = ''
    test_obj(model, r_img_fpath, t_img_fpath)
Example #4
    def _load_models(self):
        ####################### load_model #######################
        backbone_type = 'resnet50_irse_mx'
        ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_emore_dist'  # /model_1176.pth
        res50_model = Inference(backbone_type=backbone_type, ckpt_fpath=ckpt_fpath).model

        backbone_type = 'resnet50_irse_mx'
        ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_glintasia_dist'  # /model_1176.pth
        res50ga_model = Inference(backbone_type=backbone_type, ckpt_fpath=ckpt_fpath).model

        # backbone_type = 'resnet101_irse_mx'
        # ckpt_fpath = '/data/output/insight_face_res101irsemx_cosface_emore_dist'
        # res101_model = Inference(backbone_type=backbone_type, ckpt_fpath=ckpt_fpath).model

        mx101_model = MXInference()
        return dict(
            res50=res50_model,
            res50ga=res50ga_model,
            # res101=res101_model,
            mx101=mx101_model)
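The dict returned by _load_models mixes two torch backbones with an MXNet-based one; a minimal sketch of running the torch models batch-wise for feature extraction, assuming each maps a [B, 3, 112, 112] float tensor to a [B, F] embedding (MXInference has a different, unshown interface and is skipped here):

    def _extract_torch_feats(self, models, img_tensor):
        # img_tensor: [B, 3, 112, 112] preprocessed faces; returns {name: [B, F] numpy array}.
        feats = {}
        for name, model in models.items():
            if name == 'mx101':
                continue  # MXInference is not a torch module; its call signature is not shown above
            model.eval()
            with torch.no_grad():
                feats[name] = model(img_tensor).cpu().numpy()
        return feats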
Example #5
def main():


    backbone_type = 'resnet101_irse_mx'
    ckpt_fpath = '/data/output/insight_face_res101irsemx_cosface_emore_dist'
    infer = Inference(backbone_type=backbone_type, ckpt_fpath=ckpt_fpath)
    model = infer.model

    register_dir = '/data/FaceRecog/tupu_data/ucloud_staff_faces_update0602'
    save_dir = '/data/FaceRecog/results/image_cache_0602_mtcnn_crop'
    test_obj = InferenceVisual(model=model,
                               register_dir=register_dir,
                               save_dir=save_dir)
    test_img_dir = '/data/FaceRecog/tupu_data/image_cache_0602_mtcnn_crop'
    test_obj(test_img_dir)
Example #6
def main():
    backbone_type = 'resnet50_irse_mx'
    ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_emore_dist/best_model4tupu/1591242318_tupu_0.981.pth'
    infer = Inference(backbone_type=backbone_type, ckpt_fpath=ckpt_fpath)
    model = infer.model
    test_obj = CompareFace()
    test_img_dir_list = [
        # '/data/FaceRecog/tupu_data/ucloud_collections_update0601_label/592_产品架构部-丁永涛',
        # '/data/FaceRecog/tupu_data/ucloud_collections_update0601_label/766_综合采购部-张崇',
        # '/data/FaceRecog/tupu_data/ucloud_collections_update0601_label/808_企业文化部-邱雯云',
        '/data/FaceRecog/tupu_data/ucloud_collections_update0601_label/422_技术服务二部-王云峰',
        '/data/FaceRecog/tupu_data/ucloud_collections_update0601_label/606_技术服务一部-王立鹏',
    ]
    for test_img_dir in test_img_dir_list:
        test_obj(model, test_img_dir)
Example #7
def main():

    backbone_type = 'resnet50_irse_mx'
    # ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_emore_dist'
    # ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_glintasia_dist'
    ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_glintasia_full/model_1755.pth'
    # ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_glintasia_full/model_1804.pth'

    data_dir = '/data/FaceRecog/tupu_data/valid_labeled_faces'
    # data_dir = '/data/FaceRecog/tupu_data/valid_ucloud_eval_faces_update20200528'
    infer = Inference(backbone_type=backbone_type, ckpt_fpath=ckpt_fpath)
    model = infer.model
    evaluator = EvalTupu(data_dir=data_dir)
    with torch.no_grad():
        evaluator(model)
Example #8
def main():

    backbone_type = 'resnet50_irse_mx'
    ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_emore_dist'
    # ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_glintasia_dist'
    # ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_glintasia_full/model_1755.pth'
    # ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_glintasia_full/model_1804.pth'

    infer = Inference(backbone_type=backbone_type, ckpt_fpath=ckpt_fpath)
    model = infer.model
    test_obj = EvalTriplet(
        data_dir='/data/FaceRecog/tupu_data/zhongsheng_labeled_update0603',
        visual_save_dir=
        '/data/FaceRecog/results/zhongsheng_labeled_test_results')
    # test_obj(model)
    test_obj.calc_balanced_acc(model)
Example #9
def main():

    backbone_type = 'resnet50_irse_mx'
    # ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_emore_dist'
    # ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_glintasia_dist'
    # ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_glintasia_full/model_1755.pth'
    # ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_glintasia_full/model_1804.pth'
    ckpt_fpath = '/Users/marschen/Ucloud/Project/FaceRecog/model_output/insight_face_res50irsemx_cosface_emore_dist/model_1240.pth'

    infer = Inference(backbone_type=backbone_type,
                      ckpt_fpath=ckpt_fpath,
                      device='cpu')
    model = infer.model
    evaluator = EvalHegui()
    with torch.no_grad():
        evaluator(model)
Example #10
def main():

    backbone_type = 'resnet50_irse_mx'
    ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_emore_dist'
    # ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_glintasia_dist'
    # ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_glintasia_full/model_1755.pth'
    # ckpt_fpath = '/data/output/insight_face_res50irsemx_cosface_glintasia_full/model_1804.pth'

    infer = Inference(backbone_type=backbone_type, ckpt_fpath=ckpt_fpath)
    model = infer.model

    val_dataset = Dataset(data_dir='/data/data/glintasia',
                          shuffle=False,
                          eval=True)
    evaluator = EvalGlintaisa(val_dataset=val_dataset)
    with torch.no_grad():
        evaluator(model)
Example #11
def search_best_model():
    backbone_type = 'resnet50_irse_v2'
    # ckpt_dir = '/data/output/insight_face_res50irsev2_emore_dist'
    ckpt_dir = '/data/output/insight_face_res50irsev2_emore_dist/best_model4tupu'
    data_dir = '/data/FaceRecog/tupu_data/valid_labeled_faces'

    test_obj = EvalTupu(data_dir=data_dir)

    ckpt_fpath_list = [
        os.path.join(ckpt_dir, ckpt_fn) for ckpt_fn in os.listdir(ckpt_dir)
        if os.path.isfile(os.path.join(ckpt_dir, ckpt_fn))
        and ckpt_fn.endswith('.pth')
    ]
    for ckpt_fpath in tqdm(ckpt_fpath_list):
        infer = Inference(backbone_type=backbone_type, ckpt_fpath=ckpt_fpath)
        model = infer.model
        test_obj(model)
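search_best_model above discards whatever EvalTupu returns; assuming the call yields a scalar accuracy (an assumption, not confirmed by the source), the checkpoint loop could be extended to report the best model:

    best_acc, best_ckpt = -1.0, None
    for ckpt_fpath in tqdm(ckpt_fpath_list):
        infer = Inference(backbone_type=backbone_type, ckpt_fpath=ckpt_fpath)
        acc = test_obj(infer.model)  # assumes EvalTupu.__call__ returns an accuracy score
        if acc is not None and acc > best_acc:
            best_acc, best_ckpt = acc, ckpt_fpath
    print('best checkpoint: %s (acc=%.4f)' % (best_ckpt, best_acc))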
Example #12
def main(args):


    use_device = 'cuda' if torch.cuda.is_available() else 'cpu'
    use_cpu = (use_device == 'cpu')

    if not args.use_onnx:
        print('using pytorch')
        backbone_type = 'resnet101_irse_mx'
        model_type = model_dict[args.model_name]['weights']
        model_pth = pj(model_dict[args.model_name]['path'], model_type)
        model_pth = os.path.abspath(model_pth)
        infer = Inference(backbone_type=backbone_type,
                      ckpt_fpath=model_pth,
                      device=use_device)
    else:
        print('using onnx')
        model_pth = args.onnx_pth
        model_pth = os.path.abspath(model_pth)
        infer = ONNXModel(onnx_path=model_pth)

    weights_faceDet = 'weights/face_det/mobilenet0.25_Final.pth'
    weights_faceDet = os.path.abspath(weights_faceDet)
    faceDet = RetinaFaceDet('mobile0.25', weights_faceDet , use_cpu=use_cpu, backbone_location='')

    datapath = args.data
    savepath = args.save

    if not os.path.isdir(savepath):
        os.mkdir(savepath)

    FLG_nface = False
    if args.nface:
        FLG_nface = True
        if not os.path.isdir(args.nface):
            os.mkdir(args.nface)


    imglst = [c for c in os.listdir(datapath) if c.lower().endswith('.jpg')]

    db_feature = []
    db_label = []
    scale = 1.3
    with torch.no_grad():
        for cnt,imgname in enumerate(imglst):
            #decode image name
            person_id = imgname

            if cnt%10 == 0:
                print('Executing %1.3f...'% ((cnt+1)/len(imglst)) , end='\r')

            img_cv2 = cv2.imread(pj(datapath,imgname))
            img_cv2 = cv2.resize(img_cv2,dsize=None,fx=args.moisac,fy=args.moisac)

            img_cv2_det = cv2.resize(img_cv2,fx=args.det_scale,fy=args.det_scale,dsize=None)

            dets = faceDet.execute(img_cv2_det, threshold=args.det_threshold, topk=5000,
                                   keep_topk=500,
                                   nms_threshold=0.2)

            if dets is None:
                continue
            if len(dets) <= 0:
                continue

            max_face = None
            max_face_sz = 0
            for idx, b in enumerate(dets):
                if b[4] < args.det_threshold:
                    continue
                b = list(map(int, b))
                """
                expand bbox and rescale into unscale size.
                """

                pwidth = int((b[2] - b[0]) * scale / args.det_scale)
                pheight = int((b[3] - b[1]) * scale / args.det_scale)
                pcx = int((b[2] + b[0]) / 2 / args.det_scale)
                pcy = int((b[3] + b[1]) / 2 / args.det_scale)

                if pwidth > max_face_sz:
                    img_face = cv2.getRectSubPix(img_cv2, (pwidth, pheight), (pcx, pcy))
                    img_face = face_format(img_face, 112)
                    max_face = img_face.copy()
                    max_face_sz = pwidth

            # cv2.imwrite('test.jpg',max_face)
            # break
            if max_face is not None:
                max_face = cv2.resize(max_face, dsize=None, fx=args.moisacx112, fy=args.moisacx112)
                if FLG_nface:
                    cv2.imwrite( pj(args.nface,imgname) , max_face )

                if not args.use_onnx:
                    probe_feat = infer.execute(max_face).reshape(1,-1)#[B,F]
                else:
                    img_format = _format(max_face)
                    img_data = _normalize(img_format)
                    img_data = np.transpose(img_data, axes=[2, 0, 1])
                    img_data = np.expand_dims(img_data, axis=0)
                    probe_feat = infer.forward(img_data)

                db_feature.append(probe_feat)
                db_label.append(person_id)


    # end with torch.no_grad():

    """
    save data into separate files each uuid to a .npy
    """
    print('#'*5,'merging and saving')
    db_feature = np.concatenate(db_feature,axis=0)
    db_label = np.array(db_label)
    save_obj({'X':db_feature,'y':db_label} , pj(savepath,'gallery_%s.pkl'%args.model_name))
    print('Done')
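The gallery pickle written above stores the stacked features under 'X' ([N, F]) and the image names under 'y' ([N]); a minimal sketch of matching a probe feature against it with cosine similarity (the L2-normalization step is an assumption, since the feature scaling convention is not shown):

def match_probe(probe_feat, gallery):
    # gallery: the dict saved above, {'X': [N, F] features, 'y': [N] labels}.
    X = gallery['X'].astype(np.float32)
    X /= (np.linalg.norm(X, axis=1, keepdims=True) + 1e-10)
    q = probe_feat.reshape(1, -1).astype(np.float32)
    q /= (np.linalg.norm(q, axis=1, keepdims=True) + 1e-10)
    scores = (X @ q.T).ravel()  # cosine similarity against every gallery entry
    best = int(np.argmax(scores))
    return gallery['y'][best], float(scores[best])

A matching loader such as the load_obj sketch shown earlier could be used to read the gallery back before calling match_probe.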