Example #1
    def __init__(self,
                 model_path,
                 extractor: BaseExtractor = DLibExtractor(),
                 threshold: float = 0.9,
                 align=True):
        super().__init__()
        self.pipeline = Pipeline([
            DLibDetector(scale=1),
            # OneMillisecondAligner(extractor.resize.size) if align else NoAligner(),
            extractor
        ])
        self.model_path = model_path
        # for vgg face, 0.7 seems to be the best
        self.threshold = threshold
        if os.path.exists(model_path):
            self.reload()
            return
        classes = os.listdir(FilePathManager.resolve("faces"))
        self.classes = {}
        for clz in classes:
            path = FilePathManager.resolve(f"faces/{clz}")
            temp = os.listdir(path)
            images = []
            for t in temp:
                fi = f"{path}/{t}"
                images.append(cv2.imread(fi))
            self.add_person(clz, images)
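The constructor above builds its gallery from FilePathManager.resolve("faces"): one sub-directory per identity, each containing that person's face images. A hypothetical layout (directory and file names are illustrative only, not from the source):

faces/
    alice/
        1.jpg
        2.jpg
    bob/
        1.jpg
        2.jpg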
Example #2
import cv2
import joblib  # older projects may use: from sklearn.externals import joblib
import numpy as np

from aligners.one_millisecond_aligner import OneMillisecondAligner
from bases.pipeline import Pipeline
from classifiers.evm import EVM
from detectors.dlib_detector import DLibDetector
from extractors.vgg_extractor import VggExtractor


class EvmPredictor:
    pipeline = Pipeline([
        DLibDetector(scale=1),
        OneMillisecondAligner(224),
        VggExtractor()
    ])

    def __init__(self, evm_model_path: str):
        self.model_path = evm_model_path
        self.evm: EVM = joblib.load(self.model_path)

    def reload(self):
        self.evm = joblib.load(self.model_path)

    def save(self):
        joblib.dump(self.evm, self.model_path)

    @staticmethod
    def extract_from_images(images):
        result = []
        for image in images:
            temp = EvmPredictor.pipeline(image)[0].reshape(-1)
            if temp.shape[0] == 0:  # no face/feature was extracted for this image
                continue
            result.append(temp)
        return np.asarray(result)

    def add_person(self, person_name, images):
        X = EvmPredictor.extract_from_images(images)
        # label only the images for which a feature vector was actually extracted
        y = np.full((X.shape[0], 1), person_name)
        self.evm.fit(X, y)
        self.save()

    def remove_person(self, person_name):
        self.evm.remove(person_name)
        self.save()

    def predict_from_image(self, image):
        faces = EvmPredictor.pipeline(image)[0]
        return self.evm.predict_with_prop(faces)

    def predict_from_path(self, path):
        return self.predict_from_image(cv2.imread(path))
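A minimal usage sketch for EvmPredictor; the model path, person name, and image paths below are hypothetical, and it assumes an EVM model file already exists at the given path (e.g. produced by a training script such as Example #6):

predictor = EvmPredictor("models/evm.model")          # hypothetical model path

# register a new identity from a few face images
images = [cv2.imread(p) for p in ("person/1.jpg", "person/2.jpg")]  # hypothetical files
predictor.add_person("new_person", images)

# predict the identity (with probability) of the face found in an image
print(predictor.predict_from_path("unknown.jpg"))     # hypothetical file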
Example #3
import glob

import cv2

from aligners.base_aligner import BaseAligner
from bases.pipeline import Pipeline
from detectors.dlib_detector import DLibDetector
from file_path_manager import FilePathManager


class PassAligner(BaseAligner):
    def forward(self, inputs):
        return inputs


if __name__ == '__main__':
    FilePathManager.clear_dir("output")

    path = FilePathManager.resolve("output")
    faces = sorted(glob.glob(FilePathManager.resolve("images/*")))

    pipeline = Pipeline([DLibDetector(), PassAligner()])

    for i, face in enumerate(faces):
        face = cv2.imread(face)

        cropped_output, image = pipeline(face, True)
        for j, cropped_image in enumerate(cropped_output):
            cv2.imwrite(path + "/test{}-{}.jpeg".format(i, j), cropped_image)
Example #4
        return Variable(faces), image

    def forward(self, inputs):
        faces, image = inputs
        _, features = self.light_cnn(faces)
        return features, image

    def postprocess(self, inputs):
        features, image = inputs
        return torch2cv(features), image


if __name__ == '__main__':
    FilePathManager.clear_dir("output")

    path = FilePathManager.resolve("output")
    faces = sorted(glob.glob(FilePathManager.resolve("images/*")))

    # pipeline = Pipeline([DLibDetector(), OneMillisecondAligner(), VggExtractor()])
    pipeline = Pipeline(
        [DLibDetector(),
         NoAligner(scale=3),
         LightCNNExtractor()])
    # pipeline = Pipeline([DLibDetector(), Crop(), Resize(224), VggExtractor()])

    for i, face in enumerate(faces):
        face = cv2.imread(face)

        features, _ = pipeline(face, True)
        print("{} image: #{} Features.".format(i, features.shape))
Example #5
            faces = faces.unsqueeze(0)

        if self.use_cuda:
            faces = faces.cuda()
        return Variable(faces), image

    def forward(self, inputs):
        faces, image = inputs
        return self.vgg_face(faces), image

    def postprocess(self, inputs):
        features, image = inputs
        return torch2cv(features), image


if __name__ == '__main__':
    FilePathManager.clear_dir("output")

    path = FilePathManager.resolve("output")
    faces = sorted(glob.glob(FilePathManager.resolve("images/*")))

    # pipeline = Pipeline([DLibDetector(), OneMillisecondAligner(), VggExtractor()])
    pipeline = Pipeline([DLibDetector(), NoAligner(scale=3), VggExtractor()])
    # pipeline = Pipeline([DLibDetector(), Crop(), Resize(224), VggExtractor()])

    for i, face in enumerate(faces):
        face = cv2.imread(face)

        features, _ = pipeline(face, True)
        print("{} image: #{} Features.".format(i, features.shape))
Example #6
import os

import cv2
import numpy as np
from sklearn.metrics import make_scorer, accuracy_score
from sklearn.model_selection import train_test_split, GridSearchCV

from aligners.one_millisecond_aligner import OneMillisecondAligner
from bases.pipeline import Pipeline
from classifiers.evm import EVM
from detectors.dlib_detector import DLibDetector
from extractors.dlib_extractor import DLibExtractor
# from extractors.vgg_extractor import VggExtractor
from file_path_manager import FilePathManager

root_path = FilePathManager.resolve("faces")
classes = os.listdir(root_path)
pipeline = Pipeline([
    DLibDetector(scale=1),
    OneMillisecondAligner(224),
    # VggExtractor()
    DLibExtractor()
])
X, y = [], []
for clz in classes:
    files = os.listdir(f"{root_path}/{clz}")
    for file in files:
        path = f"{root_path}/{clz}/{file}"
        image = cv2.imread(path)
        temp = pipeline(image)[0].reshape(-1)
        if temp.shape[0] == 0:
            print(f"wrong : {path}")
            continue
        print(f"correct : {path}")
Example #7
        return self.aligner.align(
            self.size,
            self.image,
            rect,
            landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)

    def forward(self, inputs):
        rects, self.image = inputs

        rects = Utils.points2rects(rects)
        with Pool(cpu_count()) as pool:
            result = pool.map(self.align, rects)

        return result, self.image


if __name__ == '__main__':
    FilePathManager.clear_dir("output")

    path = FilePathManager.resolve("output")
    faces = sorted(glob.glob(FilePathManager.resolve("images/*")))

    pipeline = Pipeline([DLibDetector(), OneMillisecondAligner()])

    for i, face in enumerate(faces):
        face = cv2.imread(face)

        cropped_output, image = pipeline(face)
        for j, cropped_image in enumerate(cropped_output):
            cv2.imwrite(path + "/test{}-{}.jpeg".format(i, j), cropped_image)
Example #8
        super().__init__()

    def preprocess(self, inputs):
        return inputs

    def forward(self, inputs):
        faces, image = inputs
        rects = dlib.rectangles(Utils.points2rects(faces))
        csses = []
        for rect in rects:
            csses.append(_rect_to_css(rect))
        result = face_recognition.face_encodings(image, csses)
        return np.array(result), rects, image


if __name__ == '__main__':
    FilePathManager.clear_dir("output")

    path = FilePathManager.resolve("output")
    faces = sorted(glob.glob(FilePathManager.resolve("images/*")))

    # pipeline = Pipeline([DLibDetector(), OneMillisecondAligner(), VggExtractor()])
    pipeline = Pipeline([DLibDetector(), NoAligner(scale=0), DLibExtractor()])
    # pipeline = Pipeline([DLibDetector(), Crop(), Resize(224), VggExtractor()])

    for i, face in enumerate(faces):
        face = cv2.imread(face)

        features, _ = pipeline(face, True)
        print("{} image: #{} Features.".format(i, features.shape))
Example #9
import glob

import cv2

from aligners.base_aligner import BaseAligner
from bases.pipeline import Pipeline
from detectors.dlib_detector import DLibDetector
from file_path_manager import FilePathManager
from transforms.crop import Crop
from transforms.scale import Scale


class NoAligner(BaseAligner):
    def __init__(self, scale: float = 0.0):
        """
        :param scale:  between 0 (0%) and 1 (100%)
        """
        self.scale = scale
        self.pipeline = Pipeline([Scale(scale), Crop()])

    def forward(self, inputs):
        return self.pipeline(inputs)


if __name__ == '__main__':
    FilePathManager.clear_dir("output")

    path = FilePathManager.resolve("output")
    faces = sorted(glob.glob(FilePathManager.resolve("images/*")))

    pipeline = Pipeline([DLibDetector(), NoAligner(scale=1.5)])

    for i, face in enumerate(faces):
        face = cv2.imread(face)

        cropped_output, image = pipeline(face, True)
        for j, cropped_image in enumerate(cropped_output):
            cv2.imwrite(path + "/test{}-{}.jpeg".format(i, j), cropped_image)