Code Example #1
def perform_val(embedding_size,
                batch_size,
                model,
                carray,
                issame,
                nrof_folds=10,
                is_ccrop=False,
                is_flip=True):
    """perform val"""
    embeddings = np.zeros([len(carray), embedding_size])

    for idx in tqdm.tqdm(range(0, len(carray), batch_size)):
        batch = carray[idx:idx + batch_size]
        # NCHW -> NHWC, then map pixel values from [-1, 1] back to [0, 1]
        batch = np.transpose(batch, [0, 2, 3, 1]) * 0.5 + 0.5
        if is_ccrop:
            batch = ccrop_batch(batch)
        if is_flip:
            fliped = hflip_batch(batch)
            emb_batch = model(batch) + model(fliped)
            embeddings[idx:idx + batch_size] = l2_norm(emb_batch)
        else:
            emb_batch = model(batch)
            embeddings[idx:idx + batch_size] = l2_norm(emb_batch)

    tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame,
                                                   nrof_folds)

    return accuracy.mean(), best_thresholds.mean()
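
The helpers l2_norm, ccrop_batch, and hflip_batch are assumed throughout this page but never shown. A minimal sketch of the two that are easiest to pin down, assuming NHWC batches; the projects' own utils modules define the canonical versions:

import tensorflow as tf


def l2_norm(x, axis=1):
    # Divide each embedding by its L2 norm so that comparing
    # embeddings reduces to cosine similarity.
    norm = tf.norm(x, axis=axis, keepdims=True)
    return x / norm


def hflip_batch(imgs):
    # Horizontally flip every image in an NHWC batch.
    return imgs[:, :, ::-1, :]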
Code Example #2
def get_embeds(base64_image):
    global model
    image = base64.b64decode(base64_image)              # base64 string -> raw bytes
    numpy_image = np.frombuffer(image, dtype=np.uint8)  # bytes -> 1-D uint8 buffer
    face = cv2.imdecode(numpy_image, cv2.IMREAD_COLOR)  # decode to a BGR image
    face = cv2.resize(face, (112, 112))                 # model input resolution
    face = face.astype(np.float32) / 255.               # scale pixels to [0, 1]
    img = np.expand_dims(face, 0)                       # add batch dimension
    embeds = l2_norm(model(img))
    return embeds
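
A hypothetical call, assuming model is already loaded as a module-level global and 'face.jpg' stands in for a real image path:

import base64

with open('face.jpg', 'rb') as f:
    b64_image = base64.b64encode(f.read())
embeds = get_embeds(b64_image)  # shape (1, embedding_size), L2-normalized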
Code Example #3
File: test.py  Project: ryanchankh/arcface-tf2
def main(_argv):
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu

    logger = tf.get_logger()
    logger.disabled = True
    logger.setLevel(logging.FATAL)

    cfg = load_yaml(FLAGS.cfg_path)

    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         training=False)

    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    if ckpt_path is not None:
        print('[*] load ckpt from {}'.format(ckpt_path))
        model.load_weights(ckpt_path)
    else:
        print('[*] Cannot find ckpt.')
        exit()

    if FLAGS.img_path:
        print("[*] Encode {} to ./output_embeds.npy".format(FLAGS.img_path))
        img = cv2.imread(FLAGS.img_path)
        img = cv2.resize(img, (cfg['input_size'], cfg['input_size']))
        img = img.astype(np.float32) / 255.
        if len(img.shape) == 3:
            img = np.expand_dims(img, 0)
        embeds = l2_norm(model(img))
        np.save('./output_embeds.npy', embeds)
    else:
        print("[*] Loading LFW, AgeDB30 and CFP-FP...")
        lfw, agedb_30, cfp_fp, lfw_issame, agedb_30_issame, cfp_fp_issame = \
            get_val_data(cfg['test_dataset'])

        print("[*] Perform Evaluation on LFW...")
        acc_lfw, best_th = perform_val(
            cfg['embd_shape'], cfg['batch_size'], model, lfw, lfw_issame,
            is_ccrop=cfg['is_ccrop'])
        print('    acc {:.4f}, th: {:.2f}'.format(acc_lfw, best_th))

        print("[*] Perform Evaluation on AgeDB30...")
        acc_agedb30, best_th = perform_val(
            cfg['embd_shape'], cfg['batch_size'], model, agedb_30,
            agedb_30_issame, is_ccrop=cfg['is_ccrop'])
        print('    acc {:.4f}, th: {:.2f}'.format(acc_agedb30, best_th))

        print("[*] Perform Evaluation on CFP-FP...")
        acc_cfp_fp, best_th = perform_val(
            cfg['embd_shape'], cfg['batch_size'], model, cfp_fp, cfp_fp_issame,
            is_ccrop=cfg['is_ccrop'])
        print('    acc {:.4f}, th: {:.2f}'.format(acc_cfp_fp, best_th))
Code Example #4
def prepare_facebank(cfg, model):
    names = ['Unknown']
    embeddings = []
    detector = MTCNN()
    for name in os.listdir(cfg['face_bank']):
        # `name` is a bare entry name; join it with the face bank root so
        # the file check does not depend on the working directory.
        if os.path.isfile(os.path.join(cfg['face_bank'], name)):
            continue
        else:
            emb = []
            for file in tqdm(os.listdir(os.path.join(cfg['face_bank'], name))):
                if not os.path.isfile(
                        os.path.join(cfg['face_bank'], name, file)):
                    continue
                else:
                    image = cv2.imread(
                        os.path.join(cfg['face_bank'], name, file))
                    image = cv2.resize(image,
                                       (cfg['input_size'], cfg['input_size']))
                    face = detector.detect_faces(image)
                    if len(face) > 0:
                        face = face[0]
                        reference = get_reference_facial_points(
                            default_square=True)

                        # MTCNN's keypoints dict iterates in insertion order
                        # (left_eye, right_eye, nose, mouth_left, mouth_right),
                        # matching the reference-point layout.
                        landmark = []
                        for _, points in face['keypoints'].items():
                            landmark.append(list(points))

                        warped_face = warp_and_crop_face(
                            image,
                            landmark,
                            reference_pts=reference,
                            crop_size=(cfg['input_size'], cfg['input_size']))
                        image = np.array(warped_face)

                        image = image.astype(np.float32) / 255.
                        if len(image.shape) == 3:
                            image = np.expand_dims(image, 0)
                        emb.append(l2_norm(model(image)).numpy())
            if len(emb) == 0:
                continue
            emb = np.array(emb)
            mean = np.mean(emb, axis=0)
            embeddings.append(mean)
        names.append(name)
    embeddings = np.array(embeddings)
    names = np.array(names)

    np.save(os.path.join('data', 'facebank.npy'), embeddings)
    np.save(os.path.join('data', 'names.npy'), names)

    return embeddings, names
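
The arrays are written to fixed paths, so the load_facebank used later in Code Example #8 is presumably just the inverse. A minimal sketch under that assumption:

import os
import numpy as np


def load_facebank(cfg):
    # Reload the embeddings and names saved by prepare_facebank.
    embeddings = np.load(os.path.join('data', 'facebank.npy'))
    names = np.load(os.path.join('data', 'names.npy'))
    return embeddings, names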
Code Example #5
def get_vect_face_img(align_face_img):  # takes an already-aligned face image directly
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # defaults to GPU 0

    logger = tf.get_logger()
    logger.disabled = True
    logger.setLevel(logging.FATAL)
    set_memory_growth()

    img = align_face_img
    #     img = cv2.imread(img_path)  # read the image with OpenCV
    # img = cv2.resize(img, (cfg['input_size'], cfg['input_size']))  # resize the image
    img = img.astype(np.float32) / 255.  # scale pixel values from [0, 255] to [0, 1]
    if len(img.shape) == 3:  # add a batch dimension if needed
        img = np.expand_dims(img, 0)
    embeds = l2_norm(model(img))  # L2-normalize the embedding (model is a module-level global)
    return embeds
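
A hypothetical call, assuming model is constructed globally as in the other examples and that the input is an aligned 112x112 face ('aligned_face.jpg' is a placeholder path):

import cv2

face = cv2.imread('aligned_face.jpg')
face = cv2.resize(face, (112, 112))
vec = get_vect_face_img(face)  # (1, embedding_size) L2-normalized vector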
Code Example #6
File: chech.py  Project: ssmgg/arcface-tf2
def main(_argv):
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu

    logger = tf.get_logger()
    logger.disabled = True
    logger.setLevel(logging.FATAL)
    set_memory_growth()

    cfg = load_yaml(FLAGS.cfg_path)

    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         training=False)

    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model.load_weights(ckpt_path)
    else:
        print("[*] Cannot find ckpt from {}.".format(ckpt_path))
        exit()

    if FLAGS.img_path:

        print("Check Start!!!")

        file_dir = "C:/Users/smgg/Desktop/dataset/superjunior/all3.jpeg"
        npy_dir = "/SCLab/newTWICE_id/*.npy"

        img_list = glob.glob(file_dir)
        npy_list = glob.glob(npy_dir)

        for img_name in img_list:
            img = cv2.cvtColor(cv2.imread(img_name), cv2.COLOR_BGR2RGB)
            detector = MTCNN()  # constructing MTCNN once, outside the loop, would avoid repeated model loads
            data_list = detector.detect_faces(img)

            for data in data_list:
                xmin, ymin, width, height = data['box']
                xmax = xmin + width
                ymax = ymin + height

                face_image = img[ymin:ymax, xmin:xmax, :]
                face_image = cv2.cvtColor(face_image, cv2.COLOR_RGB2BGR)

                # cv2.imshow('', face_image)
                # cv2.waitKey(0)

                img_resize = cv2.resize(face_image, (cfg['input_size'], cfg['input_size']))
                img_resize = img_resize.astype(np.float32) / 255.
                if len(img_resize.shape) == 3:
                    img_resize = np.expand_dims(img_resize, 0)
                embeds = l2_norm(model(img_resize))

                i = 0  # one label row per matched identity, stacked above the box
                cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 1)
                for npy_name in npy_list:
                    name_embeds = np.load(npy_name)

                    if distance(embeds, name_embeds, 1) < 0.37:
                        i = i + 1
                        # Recover the identity name from the .npy path
                        # (brittle: tied to this exact directory layout).
                        name = npy_name.split('/')[5].split('\\')[1].split('.npy')[0]

                        cv2.putText(img, name, (xmin, ymin - 15 * i),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    (0, 255, 0), 1, cv2.LINE_AA)
                    # else:
                    #     cv2.putText(img, "Unknown", (xmin, ymin - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
                    #     cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 1)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            cv2.imshow('', img)
            cv2.waitKey(0)
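
The distance helper used above is not shown. A sketch consistent with the call distance(embeds, name_embeds, 1), following a facenet-style signature where the last argument selects the metric (0 = squared Euclidean, 1 = cosine distance); the project's own implementation may differ:

import numpy as np


def distance(embeddings1, embeddings2, distance_metric=0):
    if distance_metric == 0:
        # Squared Euclidean distance between corresponding rows.
        diff = np.subtract(embeddings1, embeddings2)
        return np.sum(np.square(diff), axis=1)
    # Cosine distance: 1 - cosine similarity of corresponding rows.
    dot = np.sum(embeddings1 * embeddings2, axis=1)
    norms = np.linalg.norm(embeddings1, axis=1) * np.linalg.norm(embeddings2, axis=1)
    return 1.0 - dot / norms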
Code Example #7
def main(_argv):
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu

    logger = tf.get_logger()
    logger.disabled = True
    logger.setLevel(logging.FATAL)
    set_memory_growth()

    cfg = load_yaml(FLAGS.cfg_path)

    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         training=False)

    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model.load_weights(ckpt_path)
    else:
        print("[*] Cannot find ckpt from {}.".format(ckpt_path))
        exit()

    if FLAGS.img_path:

        print("[*] Encode {} to ./output_embeds.npy".format(FLAGS.img_path))

        # file_dir = "C:/Users/chaehyun/PycharmProjects/ArcFace37_TF2x/data/AFDB_masked_face_dataset/*"
        file_dir = "C:/Users/smgg/Desktop/dataset/superjunior/*.jpg"
        # detector = MTCNN()
        i = 0  # running index used to name the cropped-face image dumps
        img_list = glob.glob(file_dir)
        for i_list in img_list:
            img = cv2.cvtColor(cv2.imread(i_list), cv2.COLOR_BGR2RGB)
            detector = MTCNN()
            data_list = detector.detect_faces(img)

            for data in data_list:
                xmin, ymin, width, height = data['box']
                xmax = xmin + width
                ymax = ymin + height

                face_image = img[ymin:ymax, xmin:xmax, :]
                face_image = cv2.cvtColor(face_image, cv2.COLOR_RGB2BGR)
                cv2.imshow('', face_image)

                print(i_list)

                img_resize = cv2.resize(face_image,
                                        (cfg['input_size'], cfg['input_size']))
                cv2.imwrite("./{}.jpg".format(i), img_resize)
                cv2.imshow('', img_resize)
                cv2.waitKey(0)
                i = i + 1

                cv2.putText(img, "0", (xmin, ymin - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1,
                            cv2.LINE_AA)
                cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 1)

                img_resize = img_resize.astype(np.float32) / 255.
                if len(img_resize.shape) == 3:
                    img_resize = np.expand_dims(img_resize, 0)
                embeds = l2_norm(model(img_resize))

                #print('./newTWICE_id/{}.npy'.format(i_list.split('/')[6].split('\\')[1].split('.jpg')[0]))
                np.save(
                    './newTWICE_id/{}.npy'.format(
                        i_list.split('/')[5].split('\\')[1].split('.jpg')[0]),
                    embeds)
    else:
        print("[*] Loading LFW, AgeDB30 and CFP-FP...")
        lfw, agedb_30, cfp_fp, lfw_issame, agedb_30_issame, cfp_fp_issame = \
            get_val_data(cfg['test_dataset'])

        print("[*] Perform Evaluation on LFW...")
        acc_lfw, best_th = perform_val(cfg['embd_shape'],
                                       cfg['batch_size'],
                                       model,
                                       lfw,
                                       lfw_issame,
                                       is_ccrop=cfg['is_ccrop'])
        print("    acc {:.4f}, th: {:.2f}".format(acc_lfw, best_th))

        print("[*] Perform Evaluation on AgeDB30...")
        acc_agedb30, best_th = perform_val(cfg['embd_shape'],
                                           cfg['batch_size'],
                                           model,
                                           agedb_30,
                                           agedb_30_issame,
                                           is_ccrop=cfg['is_ccrop'])
        print("    acc {:.4f}, th: {:.2f}".format(acc_agedb30, best_th))

        print("[*] Perform Evaluation on CFP-FP...")
        acc_cfp_fp, best_th = perform_val(cfg['embd_shape'],
                                          cfg['batch_size'],
                                          model,
                                          cfp_fp,
                                          cfp_fp_issame,
                                          is_ccrop=cfg['is_ccrop'])
        print("    acc {:.4f}, th: {:.2f}".format(acc_cfp_fp, best_th))
Code Example #8
File: infer.py  Project: 3P2S/arcface
def main(_argv):
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu

    logger = tf.get_logger()
    logger.disabled = True
    logger.setLevel(logging.FATAL)
    set_memory_growth()

    cfg = load_yaml(FLAGS.cfg_path)

    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         training=False)

    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model.load_weights(ckpt_path)
    else:
        print("[*] Cannot find ckpt from {}.".format(ckpt_path))
        exit()

    if FLAGS.update:
        print('Face bank updating...')
        targets, names = prepare_facebank(cfg, model)
        print('Face bank updated')
    else:
        targets, names = load_facebank(cfg)
        print('Face bank loaded')

    if FLAGS.video is None:
        cap = cv2.VideoCapture(0)
    else:
        cap = cv2.VideoCapture(str(FLAGS.video))

    if FLAGS.save:
        video_writer = cv2.VideoWriter('./recording.avi',
                                       cv2.VideoWriter_fourcc(*'XVID'), 10,
                                       (640, 480))
        # low fixed frame rate (10 FPS) because the author's laptop was quite slow

    while cap.isOpened():

        is_success, frame = cap.read()
        if is_success:
            img = frame
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            bboxes, landmarks, faces = align_multi(
                cfg, img, min_confidence=FLAGS.min_confidence, limits=3)
            bboxes = bboxes.astype(int)
            embs = []
            for face in faces:
                if len(face.shape) == 3:
                    face = np.expand_dims(face, 0)
                face = face.astype(np.float32) / 255.
                embs.append(l2_norm(model(face)).numpy())

            list_min_idx = []
            list_score = []
            for emb in embs:
                dist = [euclidean(emb, target) for target in targets]
                min_idx = np.argmin(dist)
                list_min_idx.append(min_idx)
                list_score.append(dist[int(min_idx)])
            list_min_idx = np.array(list_min_idx)
            list_score = np.array(list_score)
            list_min_idx[list_score > FLAGS.threshold] = -1
            for idx, box in enumerate(bboxes):
                frame = utils.draw_box_name(box, landmarks[idx],
                                            names[list_min_idx[idx] + 1],
                                            frame)
            frame = cv2.resize(frame, (640, 480))
            cv2.imshow('face Capture', frame)
        else:
            break  # stop when the stream ends or a frame fails to read

        key = cv2.waitKey(1) & 0xFF
        if FLAGS.save:
            video_writer.write(frame)

        if key == ord('q'):
            break

    cap.release()
    if FLAGS.save:
        video_writer.release()
    cv2.destroyAllWindows()
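
infer.py reads several absl flags (gpu, cfg_path, update, video, save, min_confidence, threshold) that are defined outside the function shown. A hypothetical declaration block consistent with how they are used above; the default values are guesses:

from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_string('cfg_path', './configs/arc_res50.yaml', 'path to the yaml config file')
flags.DEFINE_string('gpu', '0', 'which GPU to use')
flags.DEFINE_boolean('update', False, 'rebuild the face bank before running')
flags.DEFINE_string('video', None, 'input video path; the webcam is used if omitted')
flags.DEFINE_boolean('save', False, 'write annotated frames to ./recording.avi')
flags.DEFINE_float('min_confidence', 0.9, 'minimum MTCNN detection confidence')
flags.DEFINE_float('threshold', 1.0, 'maximum embedding distance for a match')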