def run():
    parser = argparse.ArgumentParser(description='Choose the model')
    parser.add_argument('-model', default="HOG")
    args = parser.parse_args()

    cuda = torch.device('cuda')
    poser = MorphRotateCombinePoser256Param6(
        morph_module_spec=FaceMorpherSpec(),
        morph_module_file_name="data/face_morpher.pt",
        rotate_module_spec=TwoAlgoFaceRotatorSpec(),
        rotate_module_file_name="data/two_algo_face_rotator.pt",
        combine_module_spec=CombinerSpec(),
        combine_module_file_name="data/combiner.pt",
        device=cuda)

    if args.model == "HOG":
        face_detector = dlib.get_frontal_face_detector()
    else:
        cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()
        face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

    landmark_locator = dlib.shape_predictor(
        "data/shape_predictor_68_face_landmarks.dat")

    video_capture = cv2.VideoCapture(0)

    master = Tk()
    PuppeteerApp(master, poser, face_detector, landmark_locator, video_capture,
                 cuda, args.model)
    master.mainloop()
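
A minimal entry-point sketch (an assumption, not shown in the excerpt; the script name is hypothetical) showing how run() is typically invoked. Passing `-model CNN` selects dlib's CNN detector instead of the default HOG one:

if __name__ == "__main__":
    run()  # e.g. `python puppeteer.py -model CNN`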
Example no. 2
    def get_age(self, img):

        cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()
        detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

        input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        detected = [r.rect for r in detector(input_img, 1)]

        return zip([
            _trim_css_to_bounds(_rect_to_css(face), input_img.shape)
            for face in detected
        ], self.get_age_from_faces(img, detected))
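
get_age() returns a zip of CSS-style (top, right, bottom, left) boxes and predicted ages. A hedged usage sketch (the `estimator` instance and the image file are hypothetical):

import cv2

img = cv2.imread("group_photo.jpg")  # hypothetical input image (BGR)
for (top, right, bottom, left), age in estimator.get_age(img):
    print(f"face at (top={top}, left={left}) -> age {age:.1f}")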
Example no. 3

    def __init__(self, gpu):
        cuda.set_device(gpu)
        # keep the loaded models on the instance so other methods can reach them
        self.face_detector = dlib.get_frontal_face_detector()

        predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
        self.pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

        cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()
        self.cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

        face_recognition_model = face_recognition_models.face_recognition_model_location()
        self.face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
Example no. 4
        "pip install git+https://github.com/ageitgey/face_recognition_models")
    quit()

ImageFile.LOAD_TRUNCATED_IMAGES = True

face_detector = dlib.get_frontal_face_detector()

predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)

cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()
cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

face_recognition_model = face_recognition_models.face_recognition_model_location()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


def _rect_to_css(rect):
    """
    Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order

    :param rect: a dlib 'rect' object
    :return: a plain tuple representation of the rect in (top, right, bottom, left) order
    """
    return rect.top(), rect.right(), rect.bottom(), rect.left()
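
A small usage sketch (assumed, not part of the original module): turning raw CNN detections into the (top, right, bottom, left) tuples the rest of the API works with. The image file is hypothetical:

import cv2

rgb = cv2.cvtColor(cv2.imread("photo.jpg"), cv2.COLOR_BGR2RGB)
boxes = [_rect_to_css(d.rect) for d in cnn_face_detector(rgb, 1)]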
Example no. 5
    def set_model_path(self):
        """ Model path handled by face_recognition_models """
        model_path = face_recognition_models.cnn_face_detector_model_location()
        logger.debug("Loading model: '%s'", model_path)
        return model_path
Example no. 6
class FaceFrameAnnotator(FrameAnnotator):
    name = 'face'
    cfd = dlib.cnn_face_detection_model_v1(
        frm.cnn_face_detector_model_location())
    hfd = dlib.get_frontal_face_detector()

    def __init__(self):
        #prototxt = "/home/taylor/face-test/resnet50_256.prototxt"
        #caffemodel = "/home/taylor/face-test/resnet50_256.caffemodel"
        prototxt = "/home/taylor/face-test/caffe_model/resnet50_scratch_caffe/resnet50_scratch.prototxt"
        caffemodel = "/home/taylor/face-test/caffe_model/resnet50_scratch_caffe/resnet50_scratch.caffemodel"

        caffe.set_device(0)
        caffe.set_mode_gpu()
        self.net = caffe.Net(prototxt, caffe.TEST, weights=caffemodel)  # caffe.TEST == 1
        self.net.blobs['data'].reshape(1, 3, 224, 224)

        self.transformer = caffe.io.Transformer(
            {'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1))
        self.transformer.set_mean('data',
                                  np.array([91.4953, 103.8827, 131.0912]))
        self.transformer.set_raw_scale('data', 255)

    def process_next(self, img, foutput):

        img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        faces, hog, conf, hscore = self.detect_faces(img2)
        embed = self.face_features(img, faces)
        #embed = fr.face_encodings(img, faces, num_jitters=10)
        #lndmk = fr.face_landmarks(img, faces)

        #print([round(x, 4) for x in em.tolist()])
        #print(type([round(x, 4) for x in em.tolist()]))

        output = []
        for face, co, em in zip(faces, conf, embed):
            overlap, score = cnn_to_hog_conf(face, hog, hscore)
            output.append({
                'box': {
                    'top': face[0],
                    'bottom': face[2],
                    'left': face[3],
                    'right': face[1]
                },
                'cnn_score': co,
                'hog_overlap': overlap,
                'hog_score': score,
                'embed': [round(x, 4) for x in em.tolist()]
            })

        return output

    def face_features(self, img, faces):
        X = []
        for face in faces:
            # box center (x, y); `face` is (top, right, bottom, left)
            c = [int((face[3] + face[1]) / 2), int((face[0] + face[2]) / 2)]
            # half-height and half-width, padded by 30%
            h = int((face[2] - face[0]) / 2 * 1.3)
            w = int((face[1] - face[3]) / 2 * 1.3)
            # crop window [top, bottom, left, right], clamped to the image below
            d = [c[1] - h, c[1] + h, c[0] - w, c[0] + w]
            d[0] = max(0, d[0])
            d[2] = max(0, d[2])
            d[1] = min(img.shape[0], d[1])
            d[3] = min(img.shape[1], d[3])
            crop_img = img[d[0]:d[1], d[2]:d[3], :]
            img_scaled = skimage.transform.resize(crop_img, (224, 224),
                                                  mode='constant')
            self.net.blobs['data'].data[...] = self.transformer.preprocess(
                'data', img_scaled)
            out = self.net.forward()
            #X.append(self.net.blobs['feat_extract'].data[0].flatten())
            X.append(self.net.blobs['pool5/7x7_s1'].data[0].flatten())

        return X

    def detect_faces(self, img):

        dets = self.cfd(img, 1)
        rect_cnn = [_trim_bounds(f.rect, img.shape) for f in dets]
        conf = [f.confidence for f in dets]

        hog_triple = self.hfd.run(img, 1)
        dets = hog_triple[0]
        hscore = hog_triple[1]
        rect_hog = [_trim_bounds(f, img.shape) for f in dets]

        return rect_cnn, rect_hog, conf, hscore
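
A hedged usage sketch for the annotator (the frame file is hypothetical, and a CUDA-enabled Caffe build plus the hard-coded VGGFace2 model files are assumed to be present):

import cv2

annotator = FaceFrameAnnotator()
frame = cv2.imread("frame_0001.png")  # hypothetical video frame (BGR)
for det in annotator.process_next(frame, None):
    print(det['box'], det['cnn_score'], det['hog_overlap'], det['hog_score'])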
Example no. 7
try:
    import face_recognition_models
except ImportError:
    print("Please install `face_recognition_models` with this command before using `face_recognition`:")
    print()
    print("pip install git+https://github.com/ageitgey/face_recognition_models")
    quit()

face_detector = dlib.get_frontal_face_detector()

predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)

cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()
cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

face_recognition_model = face_recognition_models.face_recognition_model_location()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


def _rect_to_css(rect):
    """
    Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order

    :param rect: a dlib 'rect' object
    :return: a plain tuple representation of the rect in (top, right, bottom, left) order
    """
    return rect.top(), rect.right(), rect.bottom(), rect.left()
Example no. 8
    def set_model_path(self):
        """ Model path handled by face_recognition_models """
        return face_recognition_models.cnn_face_detector_model_location()

Example no. 9

import scipy.misc
import dlib
import numpy as np
import face_recognition_models as frm

face_detector = dlib.get_frontal_face_detector()

predictor_68_point_model = frm.pose_predictor_model_location()
pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

predictor_5_point_model = frm.pose_predictor_five_point_model_location()
pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)

cnn_face_detection_model = frm.cnn_face_detector_model_location()
cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

face_recognition_model = frm.face_recognition_model_location()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


def rect_to_css(rect):
    return rect.top(), rect.right(), rect.bottom(), rect.left()


def css_to_rect(css):
    return dlib.rectangle(css[3], css[0], css[1], css[2])


def trim_css_to_bounds(css, image_shape):
    return max(css[0], 0), min(css[1], image_shape[1]), min(css[2], image_shape[0]), max(css[3], 0)
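
A quick worked example (values invented) of the clamping: a box that spills past the top and right edges of a 480x640 image is trimmed back into bounds.

css = (-10, 650, 480, 20)  # (top, right, bottom, left), hypothetical detection
print(trim_css_to_bounds(css, (480, 640, 3)))  # -> (0, 640, 480, 20)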
Example no. 10
import dlib
import numpy as np
import face_recognition_models as models
# import models

# ====================================================================================================================
# set up the model variables provided by the models package
face_detector = dlib.get_frontal_face_detector()

point_68_predictor = models.pose_predictor_model_location()
point_68_pose = dlib.shape_predictor(point_68_predictor)

point_5_predictor = models.pose_predictor_five_point_model_location()
point_5_pose = dlib.shape_predictor(point_5_predictor)

face_detection_model = models.cnn_face_detector_model_location()
face_detector_tool = dlib.cnn_face_detection_model_v1(face_detection_model)

face_recognition_model = models.face_recognition_model_location()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


# ====================================================================================================================
# convert a dlib 'rect' object to (top, right, bottom, left) order
# param rect : a dlib 'rect' object
# return : tuple (top, right, bottom, left)
def _rect_to_css(rect):
    return rect.top(), rect.right(), rect.bottom(), rect.left()


# convert a (top, right, bottom, left) tuple to a dlib 'rect' object
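
The excerpt cuts off at the comment above; judging by that comment and the matching helper in Example no. 9, the missing function is presumably the inverse conversion. A hedged reconstruction, not original text:

def _css_to_rect(css):
    # css is (top, right, bottom, left); dlib.rectangle takes (left, top, right, bottom)
    return dlib.rectangle(css[3], css[0], css[1], css[2])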
Example no. 11
def get_age(output_dir, img_dir, faces, resume=None, margin=0.4):  # margin: face-crop padding; read from argparse in the original demo
    cfg.freeze()

    if output_dir is not None:
        if img_dir is None:
            raise ValueError(
                "=> --img_dir argument is required if --output_dir is used")

        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH, pretrained=None)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # load checkpoint
    resume_path = resume

    if resume_path is None:
        resume_path = Path(__file__).resolve().parent.joinpath(
            "misc", "epoch044_0.02343_3.9984.pth")

        if not resume_path.is_file():
            print(
                f"=> model path is not set; start downloading trained model to {resume_path}"
            )
            url = "https://github.com/yu4u/age-estimation-pytorch/releases/download/v1.0/epoch044_0.02343_3.9984.pth"
            urllib.request.urlretrieve(url, str(resume_path))
            print("=> download finished")

    if Path(resume_path).is_file():
        print("=> loading checkpoint '{}'".format(resume_path))
        checkpoint = torch.load(resume_path, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}'".format(resume_path))
    else:
        raise ValueError("=> no checkpoint found at '{}'".format(resume_path))

    if device == "cuda":
        cudnn.benchmark = True

    model.eval()

    cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()
    detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

    img_size = cfg.MODEL.IMG_SIZE
    image_generator = yield_images_from_dir(img_dir)

    with torch.no_grad():
        for img, name in image_generator:
            input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img_h, img_w, _ = np.shape(input_img)

            # detect faces using dlib detector
            detected = [r.rect for r in detector(input_img, 1)]

            faces = np.empty((len(detected), img_size, img_size, 3))

            if len(detected) > 0:
                for i, d in enumerate(detected):
                    x1, y1 = d.left(), d.top()
                    x2, y2 = d.right() + 1, d.bottom() + 1
                    w, h = d.width(), d.height()
                    xw1 = max(int(x1 - margin * w), 0)
                    yw1 = max(int(y1 - margin * h), 0)
                    xw2 = min(int(x2 + margin * w), img_w - 1)
                    yw2 = min(int(y2 + margin * h), img_h - 1)
                    cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 255), 2)
                    cv2.rectangle(img, (xw1, yw1), (xw2, yw2), (255, 0, 0), 2)
                    faces[i] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1],
                                          (img_size, img_size))

                # predict ages
                inputs = torch.from_numpy(
                    np.transpose(faces.astype(np.float32),
                                 (0, 3, 1, 2))).to(device)
                outputs = F.softmax(model(inputs), dim=-1).cpu().numpy()
                ages = np.arange(0, 101)
                predicted_ages = (outputs * ages).sum(axis=-1)

                # return (cropped face, predicted age) pairs; note this exits on the
                # first image in the directory that contains at least one face
                return zip(faces, predicted_ages)
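
A hedged caller sketch (the directory name is hypothetical; `faces` is passed as None because the function overwrites it internally, and note that it returns after the first image containing faces):

for face_img, age in get_age(output_dir=None, img_dir="images/", faces=None):
    print(f"predicted age: {age:.1f}")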