Example #1
from numpy import asarray
from keras_vggface.vggface import VGGFace
from keras_vggface.utils import preprocess_input


def get_embeddings(filenames):
    # extract_faces is a project-local helper that returns one aligned
    # 224x224 face crop per filename
    faces = [extract_faces(f) for f in filenames]
    samples = asarray(faces, 'float32')
    samples = preprocess_input(samples, version=2)  # version=2 for resnet50
    model = VGGFace(model='resnet50', include_top=False,
                    input_shape=(224, 224, 3), pooling='avg')
    yhat = model.predict(samples)
    return yhat
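
A minimal usage sketch for the helper above (hypothetical file names; the 0.5 match threshold is an assumption, the class in Example #3 uses 0.4):

from scipy.spatial.distance import cosine

# hypothetical inputs: two face photos to verify against each other
emb = get_embeddings(['face_a.jpg', 'face_b.jpg'])
score = cosine(emb[0], emb[1])
print('same person' if score <= 0.5 else 'different people', score)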
Example #2
import numpy as np
from tensorflow.keras.preprocessing import image
from keras_vggface import utils
from keras_vggface.vggface import VGGFace


def tensorflow_example():
    """This example uses TensorFlow instead of CoreML, and was found to give numbers consistent with CoreML"""
    model = VGGFace(model="senet50", pooling="avg", include_top=False)
    img = image.load_img('../image/ajb.jpg', target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = utils.preprocess_input(x, version=2)  # version=2 for senet50
    embeddings = model.predict(x)[0]
    print("TensorFlow embeddings: ", embeddings)
Example #3
import os

import cv2
from numpy import asarray
from scipy.spatial.distance import cosine
from keras_vggface.vggface import VGGFace
from keras_vggface.utils import preprocess_input


class face_analyzer:
    def __init__(self):
        self.face_not_found = 0
        self.model = VGGFace(model='resnet50',
                             include_top=False,
                             input_shape=(224, 224, 3),
                             pooling='avg')

    def get_face(self, img):
        face_cascade = cv2.CascadeClassifier(
            "cascades\\data\\haarcascade_frontalface_default.xml")
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        coords = face_cascade.detectMultiScale(gray, 1.1, 4)
        if len(coords) == 0:
            # no face detected; flag it instead of crashing on coords[0]
            self.face_not_found = 1
            return None
        x, y, w, h = coords[0]  # use the first detected face
        img = img[y:y + h, x:x + w]
        img = cv2.resize(img, (224, 224))
        return img

    def find_best_match(self):
        self.scores = {}
        for person in os.listdir("userdata"):
            self.get_cos_dif(os.path.join("userdata", person,
                                          "userface.jpg"))
        mini = 1
        mini_link = ""
        for person in self.scores:
            prs = os.path.basename(os.path.dirname(person))
            print(f"{prs}  -> {self.scores[person]}")
            if self.scores[person] < mini:
                mini_link = person
                mini = self.scores[person]

        # a cosine distance below 0.4 is treated as the same person
        if mini < 0.4:
            # print(f"User {mini_link} detected")
            return os.path.join(os.path.dirname(mini_link), "text.txt")
        return None

    def get_cos_dif(self, cnt_person):
        self.face_not_found = 0
        self.ppl = []
        self.ppl.append(cv2.imread(cnt_person))
        self.ppl.append(cv2.imread("auth_pers.jpg"))

        faces = []
        for img in self.ppl:
            face = self.get_face(img)
            if face is None:  # skip the comparison if either face is missing
                return
            faces.append(face)

        faces = asarray(faces, 'float32')

        faces = preprocess_input(faces, version=2)

        preds = self.model.predict(faces)
        score = cosine(preds[0], preds[1])
        # print(f"{cnt_person} --> {score}")
        self.scores[cnt_person] = score
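
A short usage sketch (assuming the layout the class expects: userdata/<name>/userface.jpg for each enrolled user and auth_pers.jpg as the probe image in the working directory):

analyzer = face_analyzer()
match = analyzer.find_best_match()
if match is not None:
    print('matched user file:', match)  # the text.txt next to the best match
else:
    print('no user within the 0.4 distance threshold')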
Example #4
 def testTHPrediction(self):
     keras.backend.set_image_dim_ordering('th')
     model = VGGFace()
     img = image.load_img('image/ak.jpg', target_size=(224, 224))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = utils.preprocess_input(x)
     preds = model.predict(x)
     print('Predicted:', utils.decode_predictions(preds))
     self.assertIn('Aamir_Khan', utils.decode_predictions(preds)[0][0][0])
     self.assertAlmostEqual(
         utils.decode_predictions(preds)[0][0][1], 0.94938219)
Example #5
 def testRESNET50(self):
     keras.backend.set_image_dim_ordering('tf')
     model = VGGFace(model='resnet50')
     img = image.load_img('image/ajb.jpg', target_size=(224, 224))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = utils.preprocess_input(x, version=2)
     preds = model.predict(x)
     #print ('\n',"RESNET50")
     #print('\n',preds)
     #print('\n','Predicted:', utils.decode_predictions(preds))
     self.assertIn('A._J._Buckley', utils.decode_predictions(preds)[0][0][0])
     self.assertAlmostEqual(utils.decode_predictions(preds)[0][0][1], 0.91819614)
Example #6
 def testRESNET50(self):
     keras.backend.set_image_dim_ordering('tf')
     model = VGGFace(model='resnet50')
     img = image.load_img('image/ajb.jpg', target_size=(224, 224))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = utils.preprocess_input(x, version=2)
     preds = model.predict(x)
     #print ('\n',"RESNET50")
     #print('\n',preds)
     #print('\n','Predicted:', utils.decode_predictions(preds))
     self.assertIn('A._J._Buckley', utils.decode_predictions(preds)[0][0][0])
     self.assertAlmostEqual(utils.decode_predictions(preds)[0][0][1], 0.91819614, places=3)
Example #7
 def testSENET50(self):
     keras.backend.image_data_format()
     model = VGGFace(model='senet50')
     img = image.load_img('image/ajb.jpg', target_size=(224, 224))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = utils.preprocess_input(x, version=2)
     preds = model.predict(x)
     # print ('\n', "SENET50")
     # print('\n',preds)
     # print('\n','Predicted:', utils.decode_predictions(preds))
     self.assertIn('A._J._Buckley', utils.decode_predictions(preds)[0][0][0])
     self.assertAlmostEqual(utils.decode_predictions(preds)[0][0][1], 0.9993529, places=3)
Example #8
class BlackboxModel(metaclass=Singleton):
    """
    Singleton class representing the blackbox model
    (the Singleton metaclass is assumed to be defined elsewhere)
    """
    def __init__(self, architecture):
        self.model = VGGFace(model=architecture)

    def predict(self, batch):
        # version=2 preprocessing matches the resnet50/senet50 weights
        preprocessed_batch = utils.preprocess_input(batch, version=2)
        preds = self.model.predict(preprocessed_batch)
        return preds

    def __call__(self, batch):
        # note: direct calls bypass the preprocessing applied in predict()
        return self.model(batch)
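
The class assumes a Singleton metaclass defined elsewhere in that project; a minimal sketch of the usual pattern (an assumption, not the original code):

class Singleton(type):
    # cache one instance per class: after the first call,
    # BlackboxModel(...) returns the existing instance
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]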
Example #9

 def testVGG16(self):
     keras.backend.image_data_format()
     model = VGGFace(model='vgg16')
     img = image.load_img('image/ajb.jpg', target_size=(224, 224))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = utils.preprocess_input(x, version=1)
     preds = model.predict(x)
     # print ('\n', "VGG16")
     # print('\n',preds)
     # print('\n','Predicted:', utils.decode_predictions(preds))
     self.assertIn('A.J._Buckley', utils.decode_predictions(preds)[0][0][0])
     self.assertAlmostEqual(utils.decode_predictions(preds)[0][0][1],
                            0.9790116,
                            places=3)
Example #10
def get_predictions_from_png_image_example():
    """Example usage to get predictions (human identity) from an image"""
    from tensorflow.keras.preprocessing import image
    import numpy as np
    import keras_vggface.utils as libutils

    image_preprocessor = create_preprocessing_model()
    model = VGGFace(model='senet50')
    img = image.load_img('image/ajb-resized.jpg',
                         target_size=(224, 224),
                         interpolation="bilinear")
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    preprocessed = image_preprocessor.predict(x)
    predictions = model.predict(preprocessed)
    print('Predicted:', libutils.decode_predictions(predictions))
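
create_preprocessing_model is not shown in these snippets (it is used in this example and the next one); a plausible sketch, assuming it wraps the same version=2 preprocessing as keras_vggface.utils.preprocess_input in a small Keras model:

import tensorflow as tf

def create_preprocessing_model():
    # hypothetical reconstruction: RGB -> BGR flip followed by subtraction
    # of the VGGFace2 channel means, as in utils.preprocess_input(x, version=2)
    inputs = tf.keras.Input(shape=(224, 224, 3))
    bgr = inputs[..., ::-1]
    outputs = bgr - tf.constant([91.4953, 103.8827, 131.0912])
    return tf.keras.Model(inputs, outputs)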
Example #11
def tensorflow_custom_preprocessing_example():
    """Example usage to get face embeddings from a cropped image of a human face"""
    import numpy as np
    from tensorflow.keras.preprocessing import image

    image_preprocessor = create_preprocessing_model()
    embeddings_model = VGGFace(model="senet50",
                               pooling="avg",
                               include_top=False,
                               input_shape=(224, 224, 3))

    img = image.load_img('../image/ajb.jpg', target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    preprocessed = image_preprocessor.predict(x)
    embeddings = embeddings_model.predict(preprocessed)
    print("TensorFlow embeddings: ", embeddings)
Example #12
 def testVGG16(self):
     keras.backend.set_image_dim_ordering('tf')
     # decode_predictions below needs the classification head, so load the
     # full vgg16 model rather than the headless feature extractor
     model = VGGFace(model='vgg16')
     # model = VGGFace(include_top=False, input_shape=(224, 224, 3),
     #                 pooling='avg')  # pooling: None, avg or max
     img = image.load_img('image/ajb.jpg', target_size=(224, 224))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = utils.preprocess_input(x, version=1)  # version=1 matches vgg16
     preds = model.predict(x)
     print('\n', "VGG16")
     print('\n', preds)
     print('\n', 'Predicted:', utils.decode_predictions(preds))
     self.assertIn('A.J._Buckley', utils.decode_predictions(preds)[0][0][0])
     self.assertAlmostEqual(utils.decode_predictions(preds)[0][0][1],
                            0.9790116,
                            places=3)
Example #13

import os
from pathlib import Path

import cv2
import numpy as np
from tqdm import tqdm
from keras_vggface.vggface import VGGFace

# the helpers used below (check_left_right_from_center,
# extract_left_right_faces, findFaceOnSide, check_face_similarity,
# add_margin, enclosing_square, cut) are project-local and defined elsewhere


def double_faces_extraction(double_face_video_path, detections,
                            temp_dir_double_extraction):
    list_det_final = []
    frame_counter = 0
    face_model = VGGFace(model='resnet50',
                         include_top=False,
                         input_shape=(224, 224, 3),
                         pooling='avg')
    cv2video = cv2.VideoCapture(double_face_video_path)
    video_double = os.path.basename(double_face_video_path)
    video_name = video_double[:-4]
    video_frames_extraction_folder = os.path.join(temp_dir_double_extraction,
                                                  video_name)

    # #print("Processing video")
    analysis_step = True
    right_face = {
        'best_img': None,
        'features': [],
        'avg_feature': None,
        'rois': []
    }
    left_face = {
        'best_img': None,
        'features': [],
        'avg_feature': None,
        'rois': []
    }

    current_annotation = 'right'
    video_frames_extraction_folder_annotation_left = video_frames_extraction_folder + "_left"

    Path(video_frames_extraction_folder_annotation_left).mkdir(parents=True,
                                                               exist_ok=True)

    video_frames_extraction_folder_annotation_right = video_frames_extraction_folder + "_right"
    Path(video_frames_extraction_folder_annotation_right).mkdir(parents=True,
                                                                exist_ok=True)

    counter_for_analysis = 0
    counter_maximum_frame_analysis = 20

    # #print("Processing: ", video_name)
    pbar = tqdm(total=len(detections))
    total = len(detections)
    previous_f = {'roi': None}
    detections_final = {}
    roi_right = None
    roi_left = None
    while frame_counter < total:
        # Capture frame-by-frame
        ret, frame = cv2video.read()
        if ret:

            # analysis pass: sample every 30th frame to collect embeddings
            # for the left-hand and right-hand faces
            if analysis_step and frame_counter % 30 == 0:
                faces = detections[frame_counter]

                if len(faces) == 2:
                    extraction_condition = check_left_right_from_center(
                        faces, frame.shape[1])
                    if extraction_condition:
                        left_face_from_frame, right_face_from_frame = extract_left_right_faces(
                            faces)

                        if current_annotation == 'right':
                            f = right_face_from_frame
                        else:
                            f = left_face_from_frame

                        resized_left_face = cv2.resize(
                            left_face_from_frame['img'], (224, 224))
                        y_left = face_model.predict(
                            np.expand_dims(resized_left_face, axis=0))
                        left_face['features'].append(y_left)

                        resized_right_face = cv2.resize(
                            right_face_from_frame['img'], (224, 224))
                        y_right = face_model.predict(
                            np.expand_dims(resized_right_face, axis=0))
                        right_face['features'].append(y_right)

            elif not analysis_step:
                faces = detections[frame_counter]

                if len(faces) > 1:

                    if current_annotation == 'right':
                        f, pred = findFaceOnSide(face_model, faces,
                                                 (left_face, right_face), True,
                                                 frame.shape[1],
                                                 previous_f['roi'])
                    else:
                        f, pred = findFaceOnSide(face_model, faces,
                                                 (left_face, right_face),
                                                 False, frame.shape[1],
                                                 previous_f['roi'])

                elif len(faces) == 1:
                    # print("only one face")
                    checked_similarity, pred = check_face_similarity(
                        face_model, faces[0], (left_face, right_face))
                    if (checked_similarity == 0 and current_annotation
                            == 'left') or (checked_similarity == 1
                                           and current_annotation == 'right'):
                        f = faces[0]

                    # unverified case: the single detected face is assumed
                    # to belong to the other person
                    else:
                        f = None

                # detection failed: fall back to the previous ROI below
                else:
                    # print("detection problem: ", frame_counter)
                    f = None
                if (f is not None) and (f['img'].size != 0):

                    detections_final[frame_counter] = [f]
                    previous_f = f

                    f['roi'] = add_margin(f['roi'], 0.9 * f['roi'][2])
                    f['roi'] = enclosing_square(f['roi'])
                    img = cut(frame, f['roi'])
                    cv2.imwrite(
                        video_frames_extraction_folder + "_" +
                        current_annotation +
                        "/frame-{:06}.png".format(frame_counter), img)
                else:
                    if previous_f['roi'] is None:

                        roi_prov = (100, 100, 100, 100)
                        img_recover = cut(frame, roi_prov)
                        detections_final[frame_counter] = [{'roi': roi_prov}]

                    else:
                        img_recover = cut(frame, previous_f['roi'])
                        detections_final[frame_counter] = [previous_f]

                    cv2.imwrite(
                        video_frames_extraction_folder + "_" +
                        current_annotation +
                        "/frame-{:06}.png".format(frame_counter), img_recover)
        # if cv2video.read() fails for a frame, nothing is appended here;
        # the gap is interpolated after this stage

        frame_counter += 1

        # analysis pass finished: rewind the video and start extraction
        if (frame_counter == total and analysis_step) or counter_for_analysis > counter_maximum_frame_analysis:
            frame_counter = 0
            cv2video.set(cv2.CAP_PROP_POS_FRAMES, 0)
            analysis_step = False
            left_face['avg_feature'] = np.mean(left_face['features'], axis=0)
            right_face['avg_feature'] = np.mean(right_face['features'], axis=0)
            counter_for_analysis = 0
            pbar = tqdm(total=total)

        # right side done: rewind and repeat the extraction for the left face
        if frame_counter == total and not analysis_step and current_annotation == 'right':
            cv2video.set(cv2.CAP_PROP_POS_FRAMES, 0)
            frame_counter = 0
            current_annotation = 'left'
            pbar = tqdm(total=total)
            list_det_final.append(detections_final)
            detections_final = {}
            previous_f = {'roi': None}

        pbar.update(1)
    pbar.close()

    # When everything done, release the video capture object
    cv2video.release()
    # Closes all the frames
    cv2.destroyAllWindows()
    list_det_final.append(detections_final)

    return list_det_final
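
The helpers above are project-local; as one illustration, a hypothetical sketch of check_left_right_from_center consistent with how it is called (faces carry 'roi' = (x, y, w, h); True when the two faces sit on opposite sides of the frame's vertical midline):

def check_left_right_from_center(faces, frame_width):
    # hypothetical reconstruction, not the original helper: compare each
    # face ROI's horizontal centre against the frame midline
    centers = sorted(f['roi'][0] + f['roi'][2] / 2 for f in faces)
    return centers[0] < frame_width / 2 <= centers[1]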
Example #14
    # excerpt from the script's main block; the argparse parser and the
    # imports it relies on (glob, os, cv2, numpy as np, image, utils,
    # VGGFace) are defined earlier in the original file
    args = parser.parse_args()
    dir_path = args.images_dir
    output_filename = 'pubfig_feats_vgg_512_test.npy'

    # LOAD MODEL
    vgg_features = VGGFace(include_top=False,
                           input_shape=(224, 224, 3),
                           pooling='avg')  # pooling: None, avg or max

    # LIST FILES
    files = sorted(glob.glob(os.path.join(dir_path, '*.jpg')))

    feats_list = []
    # CALCULATE FEATURES
    for i, image_path in enumerate(files):
        print("[INFO]: Extracting {0} ...".format(image_path))
        image_name = os.path.basename(image_path)[:-4]
        im = image.load_img(image_path)
        im = cv2.resize(np.uint8(im), (224, 224))
        im_np = np.expand_dims(im, axis=0)
        im_np = im_np.astype('float32')
        im_preproc = utils.preprocess_input(im_np, version=1)

        feats = vgg_features.predict(im_preproc)
        feats_list.append(feats[0])

        if len(feats_list) % 1000 == 0:
            # checkpoint: overwrite the output file every 1000 images
            np.save(output_filename, feats_list)

    np.save(output_filename, feats_list)
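
A follow-up sketch (not part of the original script) showing how the saved features might be queried with cosine similarity:

import numpy as np

feats = np.load('pubfig_feats_vgg_512_test.npy')  # shape: (n_images, 512)
query = feats[0]
# cosine similarity of every stored feature vector against the query
sims = feats @ query / (np.linalg.norm(feats, axis=1) * np.linalg.norm(query))
ranking = np.argsort(-sims)  # indices of the most similar images first
print(ranking[:5])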