Example #1
def dataset_to_embeddings_lib(dataset_path):
    features_extractor = FaceFeaturesExtractor()
    transform = transforms.Compose(
        [preprocessing.ExifOrientationNormalize(),
         transforms.Resize(1024)])
    dataset = datasets.ImageFolder(dataset_path)
    embeddings = []
    labels = []
    size = 0
    for img_path, label in dataset.samples:
        print(img_path)
        _, embedding = features_extractor(
            transform(Image.open(img_path).convert('RGB')))
        if embedding is None:
            print("Could not find face on {}".format(img_path))
            continue
        if embedding.shape[0] > 1:
            print(
                "Multiple faces detected for {}, taking one with highest probability"
                .format(img_path))
            embedding = embedding[0, :]
        size = size + 1
        embeddings.append(embedding.flatten())
        labels.append(label)
    # If there are fewer than two embeddings, do not save
    if size < 2:
        return
    # Store embeddings and labels
    dataset.class_to_idx = normalise_dict_keys(dataset.class_to_idx)
    idx_to_class = {v: k for k, v in dataset.class_to_idx.items()}
    labels = list(map(lambda idx: idx_to_class[idx], labels))
    np.savetxt(os.path.join(dataset_path, "embeddings.txt"), embeddings)
    np.savetxt(os.path.join(dataset_path, "labels.txt"),
               np.array(labels, dtype=str).reshape(-1, 1),
               fmt="%s")
Example #2
def __init__(self):
    firebase = pyrebase.initialize_app(firebaseConfig)
    storage = firebase.storage()
    url = storage.child('model/face_recogniser.pkl').get_url(store_token)
    self.face_recogniser = joblib.load(urlopen(url))
    # Alternative: load the model from local disk instead of Firebase storage.
    # file_loc = os.path.dirname(os.path.abspath(__file__))
    # self.face_recogniser = joblib.load(os.path.join(file_loc, 'model', 'face_recogniser.pkl'))
    self.preprocess = preprocessing.ExifOrientationNormalize()
Example #3
def main():
    """
        Face Matching
    """
    
    activity = ["CELEB MATCH", "VIDEO SEARCH"]
    choice = st.sidebar.selectbox("Choose Activity", activity)
    
    #CELEB MATCH
    if choice == "CELEB MATCH":
        face_recogniser = load_model('model/face_recogniser.pkl')
        preprocess = preprocessing.ExifOrientationNormalize()
        uploaded_file = st.file_uploader("Choose an image...", type=["jpg","png", "jpeg"])
        if uploaded_file is not None:
            image = Image.open(uploaded_file)
            image = preprocess(image)
            image = image.convert("RGB")
            bbs, _ = aligner.detect(image)
            if bbs is not None:
                faces = torch.stack([extract_face(image, bb) for bb in bbs])
                embeddings = facenet(facenet_preprocess(faces)).detach().numpy()
                predictions = face_recogniser.classifier.predict_proba(embeddings)
                for bb, probs in zip(bbs, predictions):
                    try:
                        cropped_faces = []
                        cropped_face = image.crop(bb)
                        cropped_faces.append(cropped_face)
                        prediction = top_prediction(face_recogniser.idx_to_class, probs)
                        files = glob.glob("images/" + prediction.label + "/*.*")
                        actor_image = Image.open(files[0])
                        actor_image_bbs, _ = aligner.detect(actor_image)
                        actor_image = actor_image.crop(actor_image_bbs[0]) if len(actor_image_bbs) > 0 else actor_image
                        cropped_faces.append(actor_image)
                        st.image(cropped_faces, width=100)
                        st.write(prediction.label)
                    except Exception:
                        # Skip this face if looking up the reference image fails.
                        pass
            else:
                st.write("Can't detect face")
            st.image(image, caption='Uploaded Image.', use_column_width=True)
    elif choice == "VIDEO SEARCH":
        st.write("Video Search")
        url = st.text_input("YOUTUBE URL")
        if url:
            video = get_video(url)
            if video:
                st.video(url)
                vpr = get_video_processor(video)
                vpr.read_frames()
                st.write("Number of frames " + str(vpr.frame_count))
                st.write("Duration " + str(int(vpr.duration)) + " s")
                
                frame_idx = st.number_input("Frame index", value=0, min_value=0, max_value=vpr.frame_count-1)
                # Note: `if frame_idx:` would wrongly skip index 0, which is a valid frame.
                frame_image = Image.fromarray(vpr.frames[int(frame_idx)])
                st.image(frame_image, caption='Image at selected frame')
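The load_model helper is not shown in this snippet; a minimal sketch of what it might look like (an assumption on our part, caching the unpickled model so Streamlit reruns don't reload it from disk) is:

    import joblib
    import streamlit as st

    @st.cache(allow_output_mutation=True)
    def load_model(path):
        # Hypothetical implementation: cache the deserialised recogniser
        # across Streamlit reruns instead of reloading it on every interaction.
        return joblib.load(path)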
Example #4
def main():
    args = parse_args()
    preprocess = preprocessing.ExifOrientationNormalize()
    img = Image.open(args.image_path)
    filename = img.filename
    img = preprocess(img)
    img = img.convert('RGB')

    faces, img = recognise_faces(img)
    if not faces:
        print('No faces found in this image.')

    if args.save_dir:
        basename = os.path.basename(filename)
        name, ext = os.path.splitext(basename)
        # Save into the requested directory instead of the working directory.
        img.save(os.path.join(args.save_dir, '{}_tagged{}'.format(name, ext)))

    img.show()
Example #5
def main():
    args = parse_args()
    trans = transforms.Compose(
        [preprocessing.ExifOrientationNormalize(),
         transforms.Resize(1024)])

    images = datasets.ImageFolder(root=args.input_folder)
    images.idx_to_class = {v: k for k, v in images.class_to_idx.items()}
    create_dirs(args.output_folder, images.classes)

    mtcnn = MTCNN(prewhiten=False)

    for idx, (path, y) in enumerate(images.imgs):
        print("Aligning {} {}/{} ".format(path, idx + 1, len(images)), end='')
        aligned_path = os.path.join(args.output_folder,
                                    images.idx_to_class[y],
                                    os.path.basename(path))
        if not os.path.exists(aligned_path):
            img = mtcnn(img=trans(Image.open(path).convert('RGB')),
                        save_path=aligned_path)
            print("No face found" if img is None else '')
        else:
            print('Already aligned')
Example #6
def main():
    cap = cv2.VideoCapture(0)
    face_recogniser = joblib.load(MODEL_PATH)
    preprocess = preprocessing.ExifOrientationNormalize()

    while True:
        # Capture frame-by-frame; stop if the camera returns no frame
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.flip(frame, 1)

        img = Image.fromarray(frame)
        faces = face_recogniser(preprocess(img))
        if faces is not None:
            draw_bb_on_img(faces, img)

        # Display the resulting frame
        cv2.imshow('video', np.array(img))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()
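One subtlety this example glosses over: cv2.VideoCapture yields frames in BGR channel order, while PIL and most face models assume RGB. An illustrative variant of the conversion steps (not taken from the original):

    # OpenCV delivers BGR; convert before wrapping the frame in a PIL image.
    img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    # cv2.imshow expects BGR, so convert back before displaying.
    cv2.imshow('video', cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))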
Example #7
def dataset_to_embeddings(dataset, features_extractor):
    transform = transforms.Compose(
        [preprocessing.ExifOrientationNormalize(),
         transforms.Resize(1024)])

    embeddings = []
    labels = []
    for img_path, label in dataset.samples:
        print(img_path)
        _, embedding = features_extractor(
            transform(Image.open(img_path).convert('RGB')))
        if embedding is None:
            print("Could not find face on {}".format(img_path))
            continue
        if embedding.shape[0] > 1:
            print(
                "Multiple faces detected for {}, taking one with highest probability"
                .format(img_path))
            embedding = embedding[0, :]
        embeddings.append(embedding.flatten())
        labels.append(label)

    return np.stack(embeddings), labels
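The embeddings/labels pair returned here is typically used to fit a classifier. A minimal sketch under that assumption (the choice of estimator is illustrative, not taken from the snippet):

    from sklearn.linear_model import LogisticRegression

    embeddings, labels = dataset_to_embeddings(dataset, features_extractor)
    clf = LogisticRegression(max_iter=1000)  # illustrative estimator choice
    clf.fit(embeddings, labels)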
Example #8
def add_embeddings_for_img(dataset_path, img_path, img_id):
    # Create embeddings and labels if they are not present yet, using dataset_to_embeddings_lib
    embeddings_path = os.path.join(dataset_path, 'embeddings.txt')
    labels_path = os.path.join(dataset_path, 'labels.txt')
    if (not os.path.isfile(embeddings_path)
            or not os.path.isfile(labels_path)):
        dataset_to_embeddings_lib(dataset_path)
        return

    # Load Embeddings and Labels
    embeddings = np.loadtxt(embeddings_path).tolist()
    labels = np.loadtxt(labels_path, dtype='str').tolist()

    # Generate new embedding
    features_extractor = FaceFeaturesExtractor()
    print(img_path)
    transform = transforms.Compose(
        [preprocessing.ExifOrientationNormalize(),
         transforms.Resize(1024)])
    _, embedding = features_extractor(
        transform(Image.open(img_path).convert('RGB')))
    if embedding is None:
        print("Could not find face on {}".format(img_path))
        return
    if embedding.shape[0] > 1:
        print(
            "Multiple faces detected for {}, taking one with highest probability"
            .format(img_path))
        embedding = embedding[0, :]
    embeddings.append(embedding.flatten())
    # Just append, as the labels are already a list of strings and the ID is a string
    labels.append(img_id)
    # Store embeddings and labels
    np.savetxt(os.path.join(dataset_path, "embeddings.txt"), embeddings)
    np.savetxt(os.path.join(dataset_path, "labels.txt"),
               np.array(labels, dtype=str).reshape(-1, 1),
               fmt="%s")
Example #9
def main():
    args = parse_args()
    preprocess = preprocessing.ExifOrientationNormalize()
    img = Image.open(args.image_path)
    filename = img.filename
    if args.fast:
        width, height = img.size
        factor = 512 / width
        size = [round(width * factor), round(height * factor)]
        img = img.resize(size, Image.BILINEAR)
    img = preprocess(img)
    img = img.convert('RGB')

    faces, img = recognise_faces(img, args)
    if not faces:
        print('No faces found in this image.')

    if args.save_dir:
        basename = os.path.basename(filename)
        name, ext = os.path.splitext(basename)
        # Save into the requested directory instead of the working directory.
        img.save(os.path.join(args.save_dir, '{}_tagged{}'.format(name, ext)))

    img.show()
Example #10
import io
import joblib
from PIL import Image
from flask import Flask
from flask_restplus import Api, Resource, fields, abort, inputs
from werkzeug.datastructures import FileStorage
from face_recognition import preprocessing

face_recogniser = joblib.load('model/face_recogniser.pkl')
preprocess = preprocessing.ExifOrientationNormalize()

IMAGE_KEY = 'image'
INCLUDE_PREDICTIONS_KEY = 'include_predictions'
app = Flask(__name__)
api = Api(app, version='0.1.0', title='Face Recognition API', doc='/docs')

parser = api.parser()
parser.add_argument(IMAGE_KEY,
                    type=FileStorage,
                    location='files',
                    required=True,
                    help='Image on which face recognition will be run.')
parser.add_argument(INCLUDE_PREDICTIONS_KEY,
                    type=inputs.boolean,
                    default=False,
                    help='Whether to include all predictions in response.')

bounding_box = api.model(
    'BoundingBox', {
        'left': fields.Float,
        'top': fields.Float,