Example #1
def __init__(self, model_path, epoch_num='0000', image_size=(112, 112),
             no_face_raise=True):
    # MXNet checkpoints are addressed as "<prefix>,<epoch>".
    self.model_path = ','.join([model_path, epoch_num])
    self.no_face_raise = no_face_raise
    # face_model.FaceModel is written around the argparse namespace of its
    # command-line demo, so build an equivalent Namespace by hand.
    args = argparse.Namespace()
    args.model = self.model_path
    args.det = 0           # 0: run the full MTCNN detection pipeline
    args.flip = 0          # no test-time left-right flip augmentation
    args.threshold = 1.24  # verification distance threshold
    args.ga_model = ''     # no gender/age model
    args.image_size = ','.join([str(i) for i in image_size])
    self.model = face_model.FaceModel(args)
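
A minimal usage sketch for the wrapper above. The enclosing class is not shown in the excerpt, so the class name `ArcFaceEmbedder` and the file paths below are hypothetical; `get_input()` and `get_feature()` are the FaceModel methods used in the other examples.

import cv2

embedder = ArcFaceEmbedder('/path/to/model-r100-ii/model', epoch_num='0000')  # hypothetical name, placeholder path
img = cv2.imread('face.jpg')                          # placeholder image path
aligned = embedder.model.get_input(img)               # detect + align; (3, 112, 112) or None
if aligned is not None:
    descriptor = embedder.model.get_feature(aligned)  # identity embedding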

Example #2

import cv2
import numpy as np

arcface_model = None  # FaceModel handle, lazily initialised on the first call


def get_identity_descriptor(images, default_bbox):
    """
    Compute an identity descriptor (using the ArcFace face recognition system) for each image in `images`.

    Args:
        images:
            iterable of `numpy.ndarray`, dtype == uint8, shape == (256, 256, 3)
            Images to compute identity descriptors for.
        default_bbox:
            `tuple` of `int`, length == 4
            See `get_default_bbox()`.

    Returns:
        descriptors:
            `numpy.ndarray`, dtype == float32, shape == (`len(images)`, `FACE_DESCRIPTOR_DIM`)
        num_bad_images:
            int
            Number of images for which face detection failed.
    """
    global arcface_model

    # Load the model if it hasn't been loaded yet
    if arcface_model is None:
        from insightface import face_model

        arcface_model = face_model.FaceModel(
            image_size='112,112',
            model="/Vol0/user/e.burkov/Projects/insightface/models/model-r100-ii/model,0000",
            ga_model="",
            det=0,
            flip=1,
            threshold=1.24,
            gpu=0)

    num_bad_images = 0
    images_cropped = []

    for image in images:
        image_cropped = arcface_model.get_input(image)
        if image_cropped is None:  # no faces found
            num_bad_images += 1
            # Fall back to a fixed crop given by the (top, left, bottom, right)
            # margins in `default_bbox`, resized to ArcFace's 112x112 input.
            t, l, b, r = default_bbox
            image_cropped = cv2.resize(image[t:256 - b, l:256 - r], (112, 112),
                                       interpolation=cv2.INTER_CUBIC)
            image_cropped = image_cropped.transpose(2, 0, 1)  # HWC -> CHW

        images_cropped.append(image_cropped)

    return arcface_model.get_feature(np.stack(images_cropped)), num_bad_images
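
A hypothetical call to the function above. `get_default_bbox()` is not shown in the excerpt, so a fixed (top, left, bottom, right) margin tuple stands in for it, and blank 256x256 frames are used as dummy input.

images = [np.zeros((256, 256, 3), dtype=np.uint8) for _ in range(4)]  # dummy uint8 frames
default_bbox = (32, 48, 32, 48)  # assumed margins matching the image[t:256 - b, l:256 - r] crop

descriptors, num_bad_images = get_identity_descriptor(images, default_bbox)
print(descriptors.shape, num_bad_images)  # (4, FACE_DESCRIPTOR_DIM), count of failed detections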
Example #3
# NOTE: this excerpt omits the imports (argparse, cv2, face_model) and the
# `parser = argparse.ArgumentParser(...)` construction, as well as the
# --image-size, --model and --ga-model arguments that face_model.FaceModel
# also reads from `args`.
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
parser.add_argument(
    '--det',
    default=0,
    type=int,
    help='mtcnn option, 1 means using R+O, 0 means detect from beginning')
parser.add_argument('--flip',
                    default=0,
                    type=int,
                    help='whether do lr flip aug')
parser.add_argument('--threshold',
                    default=1.24,
                    type=float,
                    help='ver dist threshold')
args = parser.parse_args()
model = face_model.FaceModel(args)


def test():
    img = cv2.imread(
        '/home/lyk/machine_learning/competition/iqiyi/take_face/test_data_jpg_face/IQIYI_VID_DATA_Part1/IQIYI_VID_TRAIN/IQIYI_VID_TRAIN_0000001.mp4/IQIYI_VID_TRAIN_0000001.mp4_003.jpg/IQIYI_VID_TRAIN_0000001.mp4_003.jpg_0.jpg'
    )
    img = model.get_input(img)
    f1 = model.get_feature(img)
    #print(f1[0:10])

    img = cv2.imread(
        '/home/lyk/machine_learning/competition/iqiyi/take_face/test_data_jpg_face/IQIYI_VID_DATA_Part1/IQIYI_VID_TRAIN/IQIYI_VID_TRAIN_0000001.mp4/IQIYI_VID_TRAIN_0000001.mp4_002.jpg/IQIYI_VID_TRAIN_0000001.mp4_002.jpg_0.jpg'
    )
    img = model.get_input(img)
    f2 = model.get_feature(img)
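
    # Not part of the original excerpt: a minimal, commonly used comparison of the
    # two embeddings (assumes `import numpy as np` at the top of the file).
    dist = np.sum(np.square(f1 - f2))  # squared Euclidean distance between the features
    sim = np.dot(f1, f2.T)             # cosine similarity (ArcFace features are typically L2-normalised)
    print(dist, sim, dist < args.threshold)  # --threshold (1.24) acts as a distance threshold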