Code example #1
	def __init__(self,landmark_model="large"):
		"""
			Load previously encoded faces and their features
		"""
		with open('../../data/face_recognize_features.json') as f:
			faces = json.load(f)

		face_list = []
		face_name_list = []
		for face in faces: 
			for encoding in face['encodings']:
				face_list.append(np.array(encoding))
				face_name_list.append(face['name'])

		self.face_list = np.array(face_list)
		self.face_name_list = face_name_list

		if landmark_model == "small":
			predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
			self.pose_predictor = dlib.shape_predictor(predictor_5_point_model)
		else:
			predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
			self.pose_predictor = dlib.shape_predictor(predictor_68_point_model)
		
		face_recognition_model = face_recognition_models.face_recognition_model_location()
		self.face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
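The rest of the class is not shown here. As a minimal sketch of how the attributes initialised above could be used to recognise one detected face (the method name, the tolerance=0.6 threshold, and the assumption that image is an RGB array and face_rect a dlib.rectangle are mine, not part of the snippet):

	def recognize(self, image, face_rect, tolerance=0.6):
		"""
			Hypothetical helper: return the best-matching known name for one
			detected face, or None when no known encoding is within tolerance.
		"""
		landmarks = self.pose_predictor(image, face_rect)
		encoding = np.array(self.face_encoder.compute_face_descriptor(image, landmarks))
		distances = np.linalg.norm(self.face_list - encoding, axis=1)
		best = int(np.argmin(distances))
		return self.face_name_list[best] if distances[best] <= tolerance else None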
Code example #2
 def __init__(self, EYE_AR_THRESH, EYE_AR_CONSEC_FRAMES):
     self.detector = dlib.get_frontal_face_detector()
     self.predictor = dlib.shape_predictor(
         face_recognition_models.pose_predictor_model_location())
     self.EYE_AR_THRESH = EYE_AR_THRESH
     self.EYE_AR_CONSEC_FRAMES = EYE_AR_CONSEC_FRAMES
     self.counter = 0
Code example #3
def vec_data(path):
    # Locate the bundled dlib model files and build the face detector/encoder.
    mdl = fm.pose_predictor_model_location()
    frm = fm.face_recognition_model_location()
    cnn = dlib.face_recognition_model_v1(frm)
    fd = dlib.get_frontal_face_detector()

    # Load the input image and shrink it to 200x200 before detection.
    im = cv.imread(path)
    hi = (200, 200)
    im = cv.resize(im, hi, interpolation=cv.INTER_AREA)

    # Detect faces; an empty list is returned when no face is found.
    df = fd(im, 1)
    a = []
    if len(df) != 0:
        fpose = dlib.shape_predictor(mdl)
        pl = fpose(im, df[0])
        vec = cnn.compute_face_descriptor(im, pl)
        for z in range(len(vec)):
            a.append(vec[z])
    return a
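A usage sketch for vec_data, assuming the same imports as in example #16 (face_recognition_models as fm, dlib, cv2 as cv); the 0.6 cut-off is the conventional face_recognition default, not something this snippet defines:

import numpy as np

def same_person(path_a, path_b, threshold=0.6):
    # vec_data returns an empty list when no face is detected.
    a, b = vec_data(path_a), vec_data(path_b)
    if not a or not b:
        return False
    # Smaller Euclidean distance between 128-d descriptors means more similar faces.
    return np.linalg.norm(np.array(a) - np.array(b)) <= threshold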
Code example #4
def dlib_landmark(img_path, img_dir):
    detector = dlib.get_frontal_face_detector()
    predictor_68_point_model = face_recognition_models.pose_predictor_model_location()

    predictor = dlib.shape_predictor(predictor_68_point_model)
    # load the input image, resize it, and convert it to grayscale
    image = cv2.imread(img_path)
    # image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale image
    rects = detector(gray, 1)
    # loop over the face detections
    shapes = []
    for (i, rect) in enumerate(rects):
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        for (x, y) in shape:
            cv2.circle(image, (x, y), 5, (0, 0, 255), -1)
        # show the output image with the face detections + facial landmarks
        plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        plt.show()
        shape = swap_np_columns(shape)
        shapes.append(shape)
        if i == 0:
            pts_file = img_dir + img_path.split('/')[-1].split('.')[0] + '.pts'
            print(pts_file)
            mio.export_landmark_file(menpo.shape.PointCloud(shape),
                                     pts_file,
                                     overwrite=True)
            mio_img = mio.import_image(img_path)
            mio_img.view_landmarks()
            plt.show()
            
    return shapes
Code example #5
    def __init__(self, desiredFaceWidth=256):
        # self.net = openface.TorchNeuralNet(args.networkModel, imgDim=args.imgDim,cuda=args.cuda)
        # self.align = openface.AlignDlib(args.dlibFacePredictor)
        self.desiredFaceWidth = desiredFaceWidth
        self.neuralNetLock = threading.Lock()
        self.predictor = dlib.shape_predictor(
            face_recognition_models.pose_predictor_model_location())
        self.align = FaceAligner(self.predictor,
                                 desiredFaceWidth=self.desiredFaceWidth).align

        logger.info("Opening classifier.pkl to load existing known faces db")
Code example #6
    def __init__(self, filepath):
        self.path = filepath
        self.detector = dlib.get_frontal_face_detector()
        self.win = dlib.image_window()
        self.predictor_model = face_recognition_models.pose_predictor_model_location(
        )
        self.pose_predictor = dlib.shape_predictor(self.predictor_model)

        self.face_recognition_model = face_recognition_models.face_recognition_model_location(
        )
        self.face_encoder = dlib.face_recognition_model_v1(
            self.face_recognition_model)
Code example #7
    def __init__(self, size=256, align=True, histogram=True):
        predictor = dlib.shape_predictor(
            face_recognition_models.pose_predictor_model_location())
        self._detector = dlib.get_frontal_face_detector()
        self._size = size
        self._align = align
        self._histogram = histogram

        if self._align:
            self._aligner = FaceAligner(predictor=predictor)
        else:
            self._aligner = None
Code example #8
    def __init__(self, gpu):
        cuda.set_device(gpu)
        face_detector = dlib.get_frontal_face_detector()

        predictor_68_point_model = face_recognition_models.pose_predictor_model_location(
        )
        pose_predictor_68_point = dlib.shape_predictor(
            predictor_68_point_model)

        cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location(
        )
        self.cnn_face_detector = dlib.cnn_face_detection_model_v1(
            cnn_face_detection_model)

        face_recognition_model = face_recognition_models.face_recognition_model_location(
        )
        face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
Code example #9
 def __init__(self,
              blur_size=2,
              seamless_clone=False,
              mask_type="facehullandrect",
              erosion_kernel_size=None,
              **kwargs):
     super(Faceswap, self).__init__()
     self.build_pipeline_by_JSONFile("./faceswap_config.json")
     predictor_68_point_model = face_recognition_models.pose_predictor_model_location(
     )
     self.pose_predictor = dlib.shape_predictor(predictor_68_point_model)
     self.erosion_kernel = None
     self.blur_size = blur_size
     self.seamless_clone = seamless_clone
     self.mask_type = mask_type.lower()
     self.video_capture = None
     self.all_faces = []
Code example #10
 def __init__(self, threshold, ratio):
     self.similarity_threshold = threshold
     self.ratio = ratio
     self.predictor = dlib.shape_predictor(face_recognition_models.pose_predictor_model_location())
Code example #11
File: human_face_model.py  Project: templeblock/koh
 def __init__(self):
     predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
     self.pose_predictor = dlib.shape_predictor(predictor_68_point_model)
Code example #12
from imutils import face_utils
from imutils.video import VideoStream
from scipy.spatial import distance as dist
import dlib
import face_recognition_models as FRM


def EAR(eye):
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    C = dist.euclidean(eye[0], eye[3])
    ear = (A + B) / (2.0 * C)

    return ear


EYE_AR_THRESH = 0.25
EYE_AR_CONSEC_FRAMES = 4

COUNTER = 0
TOTAL = 0

shape_predictor = FRM.pose_predictor_model_location()
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(shape_predictor)

(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

vs = VideoStream(src=0).start()
fileStream = False
save_flag = False
count = 0
save_path = './training_data/down/'

while True:
    if fileStream and not vs.more():
        break
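The capture loop is cut off at this point. A sketch of how the body of such an EAR-based blink counter typically continues (this continuation is an assumption, reusing the globals defined above; the usual grayscale conversion is skipped because cv2 is not imported in the snippet):

    frame = vs.read()
    rects = detector(frame, 0)
    for rect in rects:
        shape = face_utils.shape_to_np(predictor(frame, rect))
        # Average the EAR of both eyes for a more stable signal.
        ear = (EAR(shape[lStart:lEnd]) + EAR(shape[rStart:rEnd])) / 2.0
        if ear < EYE_AR_THRESH:
            COUNTER += 1
        else:
            # Register a blink once the eyes stayed closed for enough consecutive frames.
            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                TOTAL += 1
            COUNTER = 0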
Code example #13
# -*- coding: utf-8 -*-
import PIL.Image
import dlib
import numpy as np
import face_recognition_models as models
# import models

# ====================================================================================================================
# Set up the model variables provided by the models package
face_detector = dlib.get_frontal_face_detector()

point_68_predictor = models.pose_predictor_model_location()
point_68_pose = dlib.shape_predictor(point_68_predictor)

point_5_predictor = models.pose_predictor_five_point_model_location()
point_5_pose = dlib.shape_predictor(point_5_predictor)

face_detection_model = models.cnn_face_detector_model_location()
face_detector_tool = dlib.cnn_face_detection_model_v1(face_detection_model)

face_recognition_model = models.face_recognition_model_location()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


# ====================================================================================================================
# Convert a dlib 'rect' object into (top, right, bottom, left) order
# param rect : a dlib 'rect' object
# return : tuple (top, right, bottom, left)
def _rect_to_css(rect):
    return rect.top(), rect.right(), rect.bottom(), rect.left()
Code example #14
def run(video_path, actor_kb, stride=3):

    # setup the face detector, landmark predictor and face encoder, reused for
    # the reference images below and for every sampled video frame
    detector = dlib.get_frontal_face_detector()
    shape_predictor_path = face_recognition_models.pose_predictor_model_location()
    sp = dlib.shape_predictor(shape_predictor_path)
    facerec_model_path = face_recognition_models.face_recognition_model_location()
    recognizer = dlib.face_recognition_model_v1(facerec_model_path)

    # Add face reference encodings to our version of KB
    # TODO: untested

    for actor_name, actor_info in actor_kb.items():

        image = cv2.imread(actor_info["image_path"])
        face_rgb = image[:, :, ::-1]

        locations = detector(face_rgb)
        # Assume there's only one face in the reference image
        detection_object = sp(face_rgb, locations[0])
        encoding = recognizer.compute_face_descriptor(face_rgb, detection_object,
                                                      REC_JITTER)
        actor_kb[actor_name]["encoding"] = encoding

    # split record list into lists of the columns (keeping order) for input into recognizer functions
    # maintaining order between the lists for easy referencing later on
    # eg: known_encodings[1] is from the same record as known_characters[1], etc
    known_encodings = []
    known_characters = []
    counts = []
    for actor_name, record in actor_kb.items():
        known_encodings.append(record['encoding'])
        known_characters.append(record['character'])
        counts.append(0)

    # Open the input movie file
    input_movie = cv2.VideoCapture(video_path)
    length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))
    FPS = input_movie.get(cv2.CAP_PROP_FPS)
    width = int(input_movie.get(cv2.CAP_PROP_FRAME_WIDTH))  #1280
    height = int(input_movie.get(cv2.CAP_PROP_FRAME_HEIGHT))  #720

    # Initialize some variables
    face_locations = []
    detected_faces_encodings = []
    recognized_faces_names = []
    frame_number = 0
    for actor_name, actor_info in actor_kb.items():
        actor_kb[actor_name]["count"] = 0

    frames_analysed = 0
    print("\n\trecognizer: Finished Setup, entering loop!\n\tINPUT VIDEO: {}".
          format(video_path))
    while True:
        # READ FRAME IN
        # Grab a single frame of video
        ret, frame = input_movie.read()

        # Quit when the input video file ends
        if not ret:
            break

        frame_number += 1
        if not frame_number % stride == 0:
            continue

        frames_analysed += 1

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_frame = frame[:, :, ::-1]
        frame_copy = frame.copy()
        # BEGIN FACE DETECTION & ENCODING OVER THE FRAME. (find ALL faces in the frame. NB: Encoding is like analysing a face. Recognition is encoding + comparing to some kb of encodings)
        locations = detector(rgb_frame)
        full_object_detections = []
        for location in locations:
            full_object_detections.append(
                sp(rgb_frame, location)
            )  # An array of type full_object_detection[]. This object contains fields like the landmark points, but we pass the full thing to the recognizer
        detected_faces_encodings = []
        # For each detected face, compute the face descriptor
        # Jitter can improve quality of encoding, but increases the time taken in direct proportion to the number of jitters (jitter of 100 -> 100x as long)
        if REC_JITTER == 0:
            for landmarks in full_object_detections:
                detected_faces_encodings.append(
                    recognizer.compute_face_descriptor(rgb_frame, landmarks))
        else:
            for landmarks in full_object_detections:
                detected_faces_encodings.append(
                    recognizer.compute_face_descriptor(rgb_frame, landmarks,
                                                       REC_JITTER))
        # print("Done. Got encodings for {} faces, from {} object detections".format(len(detected_faces_encodings),len(full_object_detections)))
        # print("\nStarting to compare the encodings in knowledge base to those found in the image")

        # COMPARE FACES FOUND IN IMAGE TO KNOWN FACES, THEN SAVE NAMES OF THOSE RECOGNIZED
        recognized_faces_names = []
        encodings_match = []
        for count, face_encoding in enumerate(detected_faces_encodings):

            # calculate Euclidian distance (similarity) between this face encoding and each known face encodings.
            encodings_match = [
                np.linalg.norm(
                    np.array(known_encodings[i]) - np.array(face_encoding)) <=
                COMPARISON_TOLERANCE for i in range(0, len(known_encodings))
            ]
            # finally, match any recognized encoding to the associated name.
            name = None
            for i in range(0, len(encodings_match)):
                if encodings_match[i]:
                    name = known_characters[i]
                    counts[i] += 1
                    break
            recognized_faces_names.append(name)
        print("\n***Recognized: {}".format(recognized_faces_names))
        #print("\nDone Comparing")
        #print("\tCurrent count: ")
        #for i, character_name in enumerate(known_characters):
        #    print("\t{} : {}".format(character_name , counts[i]))
        # FINISHED RECOGNIZING

        if __name__ == "__main__":
            write_sample_frames(locations, frame_copy, full_object_detections,
                                frame_number)

        #print("{}, {}".format(frame_number/FPS, recognized_faces_names))
    #print("\n\tRecognizer: Finished processing {}".format(video_path))
    input_movie.release()
    cv2.destroyAllWindows()

    # Store counts for each actor in an updated knowledge base
    characters_to_actors = {}
    for actor_name, info in actor_kb.items():
        characters_to_actors[info["character"]] = actor_name

    for i, name in enumerate(known_characters):
        actor_kb[characters_to_actors[name]]["count"] = counts[i]
        del actor_kb[characters_to_actors[name]]["encoding"]
    print(
        "\tRecognizer: getPercentages:run:\n\t\ttotal_frames == {}\n\t\tStride == {}\n\t\tReturning tuple: (actor_kb, frames_analysed) == ({},{})"
        .format(frame_number, stride, actor_kb, frames_analysed))
    input("Continue?")
    return (actor_kb, frames_analysed, frame_number)
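A hypothetical call to run, assuming an actor_kb dictionary shaped the way the loop above expects (actor name mapped to a dict with "image_path" and "character"); the file paths and names below are placeholders:

actor_kb = {
    "Harrison Ford": {"image_path": "refs/ford.jpg", "character": "Indiana Jones"},
    "Karen Allen": {"image_path": "refs/allen.jpg", "character": "Marion Ravenwood"},
}
updated_kb, frames_analysed, total_frames = run("raiders.mp4", actor_kb, stride=5)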
Code example #15
class FaceRecognizer:
  
  ImageFile.LOAD_TRUNCATED_IMAGES = True

  
  predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
  pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

  face_recognition_model = face_recognition_models.face_recognition_model_location()
  face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
  
  detector = MTCNN()
  
  def __init__ (self):
    self.clf = None
    return
  
  def img_load(self,img_dir):
    img = imageio.imread(img_dir)
    img = np.asarray(img)
    return img
  
  def _raw_face_landmarks(self, face_image, face_locations=None, model="large"):
    pose_predictor = self.pose_predictor_68_point
    return [pose_predictor(face_image, face_location) for face_location in face_locations]


  def face_encodings(self, face_image, known_face_locations=None, num_jitters=2):
    """
    Given an image, return the 128-dimension face encoding for each face in the image.
    :param face_image: The image that contains one or more faces
    :param known_face_locations: Optional - the bounding boxes of each face if you already know them.
    :param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower)
    :return: A list of 128-dimensional face encodings (one for each face in the image)
    """
    raw_landmarks = self._raw_face_landmarks(face_image, known_face_locations)
    return [np.array(self.face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks]
  
  
  def predict_and_encode (self, img, confidence = 0.9,ratio = 30,visua = False):
    predict = self.detector.detect_faces(img)
    #print(predict)
    out_boxes = [ item['box'] for item in predict if item["confidence"]> confidence and item["box"][3]>img.shape[0]/ratio]
    out_boxes = dlib.rectangles([dlib.rectangle(a,b,a+c,b+d) for a,b,c,d in out_boxes ])
    #print(out_boxes)
    key = [item['keypoints'] for item in predict if item["confidence"]> confidence and item["box"][3]>img.shape[0]/ratio]
    img_encoded = self.face_encodings(img,out_boxes)
    #print(key)
    if visua:
      self.visualize(img,out_boxes,key)
    return out_boxes, key, img_encoded
  
  
  def visualize(self, img,out_boxes,key,name = []):
    img2 = img.copy()
    plt.figure(figsize=(15, 15))
    for i, c in list(enumerate(out_boxes)):
      box = out_boxes[i]
      cv2.rectangle(img2,(box.left(),box.top()),(box.right(),box.bottom()),(0,255,0),int(img.shape[1]/150))
      if len(name) != 0:
        plt.text(box.left(),box.top(),name[i],fontsize = img.shape[0]/50, color = 'red')
      keyx = [item[0] for item in key[i].values()]
      keyy = [item[1] for item in key[i].values()]
      #print(keyx,keyy)
      #plt.plot(keyx,keyy,'bo')
    plt.axis('off')
    plt.imshow(img2)
    #plt.savefig("/content/drive/My Drive/predict.png", bbox_inches='tight', pad_inches=0)
    plt.show()
  def init_KNN(self, X_train, y_train):
    clf = neighbors.KNeighborsClassifier(n_neighbors = 15, p = 2, weights = 'distance', n_jobs = -1)
    clf.fit(X_train, y_train)
    self.clf = clf
  def search_face(self, img):
    out_boxes, key, img_encoded = self.predict_and_encode (img)
    print(len(img_encoded))
    if len(img_encoded)==0:
      print("can not search")
    else:
      results = self.clf.predict(img_encoded)
      score = self.clf.predict_proba(img_encoded)
      print(results)
      #print(score)
      final_results = []
      for i in range (len(results)):
        if max(score[i])>0.8:
          final_results.append(results[i])
        else:
          final_results.append("unknown")
      print(final_results)
      self.visualize(img,out_boxes,key,final_results)
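A possible end-to-end use of the class above, assuming enough labelled images per person to satisfy the classifier's n_neighbors=15; every path and label below is a placeholder:

fr = FaceRecognizer()
# Placeholder training data: (image path, person name) pairs.
training_samples = [("faces/alice_01.jpg", "alice"), ("faces/bob_01.jpg", "bob")]
X_train, y_train = [], []
for path, label in training_samples:
    img = fr.img_load(path)
    _, _, encodings = fr.predict_and_encode(img)
    for enc in encodings:
        X_train.append(enc)
        y_train.append(label)

fr.init_KNN(X_train, y_train)
fr.search_face(fr.img_load("faces/group_photo.jpg"))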
Code example #16
File: check.py  Project: aksarkabir/neural_net_loging
import os
import face_recognition_models as fm
import dlib
import skimage as sk
import numpy as np
import cv2 as cv
import csv
import simplejson as sj
import face_recognition
import socket
import random
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1', 5000))

mdl = fm.pose_predictor_model_location()

frm = fm.face_recognition_model_location()

cnn = dlib.face_recognition_model_v1(frm)

fd = dlib.get_frontal_face_detector()

im = cv.imread('test.jpg')

hi = (200, 200)

im = cv.resize(im, hi, interpolation=cv.INTER_AREA)
df = fd(im, 1)

if len(df) != 0:
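    # NOTE: the original snippet is truncated here. A plausible continuation
    # (an assumption, mirroring vec_data in example #3): encode the detected
    # face and send the 128-d descriptor to the server connected on port 5000.
    fpose = dlib.shape_predictor(mdl)
    pl = fpose(im, df[0])
    vec = cnn.compute_face_descriptor(im, pl)
    sock.sendall(sj.dumps(list(vec)).encode('utf-8'))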
Code example #17
File: dlib.py  Project: haoyu592/AI-
 def set_model_path(self):
     """ Model path handled by face_recognition_models """
     model_path = face_recognition_models.pose_predictor_model_location()
     logger.debug("Loading model: '%s'", model_path)
     return model_path
Code example #18
File: dlib.py  Project: xuduofeng/thug-memes
 def __init__(self, config, *args, **kwargs):
     self._landmarks_model = face_recognition_models.pose_predictor_model_location(
     )
     super().__init__(config, *args, **kwargs)
Code example #19
    def saveEncodings(self, verbose=True):

        # initialize face encoding parameters
        ImageFile.LOAD_TRUNCATED_IMAGES = True
        predictor_68_point_model = face_recognition_models.pose_predictor_model_location(
        )
        pose_predictor_68_point = dlib.shape_predictor(
            predictor_68_point_model)
        predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location(
        )
        pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)
        face_recognition_model = face_recognition_models.face_recognition_model_location(
        )
        face_encoder = dlib.face_recognition_model_v1(face_recognition_model)

        pose_predictor = pose_predictor_68_point

        # initialize a text file for saving images where faces are not found
        with open('waste_files.txt', 'w') as waste:
            waste.write("Images which are not suitable for training are-\n")

            try:
                # load previously saved encodings
                pickle_in = open(self.base_dir + "known_face_encodings.pickle",
                                 "rb")
                known_face_encodings = pickle.load(pickle_in)
                pickle_in.close()

                pickle_in = open(self.base_dir + "known_face_names.pickle",
                                 "rb")
                known_face_names = pickle.load(pickle_in)
                pickle_in.close()
                # filter out faces which are already trained
                temp = []
                for name in self.names:
                    if name not in known_face_names:
                        temp.append(name)

                self.names = temp

            except:
                # declare encodings as empty
                known_face_encodings = []
                known_face_names = []
                #self.names = ['1231', '1232', '1234', '1238', '1242']#'1237', '1239','1235',

            print("[INFO] Encoding... ", self.names)
            if self.names != []:
                # looping through names to be trained
                for name in tqdm(self.names):
                    # clear the lists for new user
                    face_images = []
                    faces_locations = []
                    face_names = []

                    # load images of person to be trained
                    base = self.user_images
                    name = name.strip()
                    base = os.path.join(base, name)
                    # looping through images of person to be trained
                    for img_path in glob.glob(os.path.join(base, "*.jpg")):
                        # read image
                        image_data = cv2.imread(img_path)

                        ###############
                        # check face using resnet
                        ###############
                        blob = cv2.dnn.blobFromImage(image_data, 1.0,
                                                     (300, 300),
                                                     (104.0, 177.0, 123.0))
                        self.net2.setInput(blob)
                        detections = self.net2.forward()
                        (h, w) = image_data.shape[:2]
                        confidences = []
                        boxes = []
                        for i in range(0, detections.shape[2]):
                            confidence = detections[0, 0, i, 2]
                            if confidence > 0.98:
                                box = detections[0, 0, i, 3:7] * np.array(
                                    [w, h, w, h])
                                box = box.astype("int")
                                # startX, startY, endX, endY
                                confidences.append(float(confidence))
                                boxes.append([
                                    box[0], box[1], box[2] - box[0],
                                    box[3] - box[1]
                                ])

                        if len(boxes) > 0:
                            pass
                        else:
                            waste.write(img_path + "\n")
                            #print("[INFO] Image {} not suitable for training: Resnet filtered out".format(img_path))
                            continue

                        ###############
                        # face detection using yolo
                        ###############
                        # load model parameters
                        blob = cv2.dnn.blobFromImage(image_data,
                                                     1 / 255, (416, 416),
                                                     [0, 0, 0],
                                                     1,
                                                     crop=False)
                        self.net.setInput(blob)
                        # fetch predictions from model/network
                        layers_names = self.net.getLayerNames()
                        outs = self.net.forward([
                            layers_names[i[0] - 1]
                            for i in self.net.getUnconnectedOutLayers()
                        ])
                        # fetch size of image
                        (frame_height, frame_width) = image_data.shape[:2]
                        # declare overall confidence list
                        confidences = []
                        # declare bounding boxes list
                        boxes = []
                        face_locations = []
                        # looping through model predictions/ predictions for each grid cell
                        for out in outs:
                            # looping through detectors outputs for grid cell
                            for detection in out:
                                # fetch classifier probabilities for different classes
                                scores = detection[5:]
                                # fetch maximum probabilty class
                                class_id = np.argmax(scores)
                                # define confidence as maximum probability
                                confidence = scores[class_id]
                                # filter predictions based on confidence threshold
                                if confidence > self.yolo_conf_threshold:
                                    # fetch bounding box dimensions
                                    center_x = int(detection[0] * frame_width)
                                    center_y = int(detection[1] * frame_height)
                                    width = int(detection[2] * frame_width)
                                    height = int(detection[3] * frame_height)
                                    left = int(center_x - width / 2)
                                    top = int(center_y - height / 2)
                                    # append confidence in confidences list
                                    confidences.append(float(confidence))
                                    # append bounding box in bounding boxes list
                                    boxes.append([left, top, width, height])

                        # perform non maximum suppression of overlapping images
                        indices = cv2.dnn.NMSBoxes(boxes, confidences,
                                                   self.yolo_conf_threshold,
                                                   self.nms_threshold)

                        # fetch faces bounding boxes
                        for i in indices:
                            i = i[0]
                            box = boxes[i]
                            left = box[0]
                            top = box[1]
                            width = box[2]
                            height = box[3]
                            face_locations.append(
                                np.array([
                                    top, left + width +
                                    (width * self.margin // 100), top + height,
                                    left - (width * self.margin // 100)
                                ]))

                        if len(face_locations) != 1:
                            waste.write(img_path + "\n")
                            # If there are no people (or too many people) in a training image, skip the image.
                            if verbose:
                                pass
                                #print("[INFO] Image {} not suitable for training: {}".format(img_path, "Didn't find a face" if len(face_locations) < 1 else "Found more than one face"))
                        else:
                            for face_location in face_locations:
                                if min(face_location) < 0:
                                    pass
                                    #print("[INFO] Image {} not suitable for training: Face is not in Boundary of Image".format(img_path))
                                else:
                                    ######################################################
                                    # histogram equalization
                                    frame1 = image_data[
                                        face_location[0]:face_location[2],
                                        face_location[3]:face_location[1], :]
                                    img_to_yuv = cv2.cvtColor(
                                        frame1, cv2.COLOR_BGR2YUV)
                                    img_to_yuv[:, :, 0] = cv2.equalizeHist(
                                        img_to_yuv[:, :, 0])
                                    frame1 = cv2.cvtColor(
                                        img_to_yuv, cv2.COLOR_YUV2BGR)

                                    image_data[
                                        face_location[0]:face_location[2],
                                        face_location[3]:
                                        face_location[1], :] = frame1
                                    ###################################################
                                    # Add face encoding for current image to the training set
                                    faces_locations.append(face_locations[0])
                                    face_images.append(image_data)
                                    face_names.append(name)

                    faces_locations = [
                        self._css_to_rect(face_location)
                        for face_location in faces_locations
                    ]

                    raw_landmarks = []
                    for face_image, face_location in zip(
                            face_images, faces_locations):
                        faces = dlib.full_object_detections()
                        faces.append(pose_predictor(face_image, face_location))
                        raw_landmarks.append(faces)

                    if len(faces_locations) < 120:
                        print(
                            "[INFO]: %s skipped as total number of proper images are <120."
                            % name)
                        continue

                    # generate encoding for captured faces of user
                    encodings = list(np.array(face_encoder.compute_face_descriptor(batch_img=face_images, batch_faces=raw_landmarks, num_jitters=20))\
                                     .reshape((len(face_images), 128)))

                    # append new data in old data
                    known_face_encodings += encodings
                    known_face_names += face_names

                    # save the encodings after every iteration of distinct class
                    pickle_out = open(
                        self.base_dir + "known_face_names.pickle", "wb")
                    pickle.dump(known_face_names, pickle_out)
                    pickle_out.close()
                    pickle_out = open(
                        self.base_dir + "known_face_encodings.pickle", "wb")
                    pickle.dump(known_face_encodings, pickle_out)
                    pickle_out.close()
                    print("[INFO]: %s saved!" % name)

            else:
                print("Encoding Skipped!\n")
Code example #20
import scipy.misc
import dlib
import numpy as np
import face_recognition_models as frm

face_detector = dlib.get_frontal_face_detector()

predictor_68_point_model = frm.pose_predictor_model_location()
pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

predictor_5_point_model = frm.pose_predictor_five_point_model_location()
pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)

cnn_face_detection_model = frm.cnn_face_detector_model_location()
cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

face_recognition_model = frm.face_recognition_model_location()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


def rect_to_css(rect):
    return rect.top(), rect.right(), rect.bottom(), rect.left()


def css_to_rect(css):
    return dlib.rectangle(css[3], css[0], css[1], css[2])


def trim_css_to_bounds(css, image_shape):
    return max(css[0],
               0), min(css[1],
                       image_shape[1]), min(css[2],
                                            image_shape[0]), max(css[3], 0)
Code example #21
File: api.py  Project: 13221325403/face_recognition
import scipy.misc
import dlib
import numpy as np

try:
    import face_recognition_models
except:
    print("Please install `face_recognition_models` with this command before using `face_recognition`:")
    print()
    print("pip install git+https://github.com/ageitgey/face_recognition_models")
    quit()

face_detector = dlib.get_frontal_face_detector()

predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)

cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()
cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

face_recognition_model = face_recognition_models.face_recognition_model_location()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


def _rect_to_css(rect):
    """
    Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order
Code example #22
import os

import dlib
import face_recognition
import face_recognition_models

image_path = "/Users/Daniel/Desktop/pips1.jpg"
print(
    "attempting to load image from path: {}. \nCurrent working dir: {}".format(
        image_path, os.getcwd()))
image = face_recognition.load_image_file(
    image_path)  #load image as numpy array in RGB format
#image = image[::-1]
for i, row in enumerate(image):
    for j, p in enumerate(row):
        image[i][j] = p[::-1]
'''
for i, row in enumerate(image):
    image[i] = row[::-1]
'''
predictor_path = face_recognition_models.pose_predictor_model_location(
)  # 68 points
face_rec_model_path = face_recognition_models.face_recognition_model_location()

# Load all the models we need: a detector to find the faces, a shape predictor
# to find face landmarks so we can precisely localize the face, and finally the
# face recognition model.
print("\nLoading models: ")
#detector_path = face_recognition_models.cnn_face_detector_model_location()
#detector = dlib.cnn_face_detection_model_v1(detector_path)
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
# facerec = dlib.face_recognition_model_v1(face_rec_model_path)

print("\nGetting locations of faces: ")
locations = detector(image)
print("Getting landmarks")
Code example #23
 def set_model_path(self):
     """ Model path handled by face_recognition_models """
     return face_recognition_models.pose_predictor_model_location()
Code example #24
import PIL.Image
import dlib
import numpy as np
import face_recognition_models

face_detector = dlib.get_frontal_face_detector()
pose_predictor_68_point = dlib.shape_predictor(
    face_recognition_models.pose_predictor_model_location())
pose_predictor_5_point = dlib.shape_predictor(
    face_recognition_models.pose_predictor_five_point_model_location())
cnn_face_detector = dlib.cnn_face_detection_model_v1(
    face_recognition_models.cnn_face_detector_model_location())
face_encoder = dlib.face_recognition_model_v1(
    face_recognition_models.face_recognition_model_location())


def _rect_to_css(rect):
    return rect.top(), rect.right(), rect.bottom(), rect.left()


def _css_to_rect(css):
    return dlib.rectangle(css[3], css[0], css[1], css[2])


def _trim_css_to_bounds(css, image_shape):
    return max(css[0],
               0), min(css[1],
                       image_shape[1]), min(css[2],
                                            image_shape[0]), max(css[3], 0)

Code example #25
File: api.py  Project: sashacmc/face_recognition
import dlib
from PIL import ImageFile

try:
    import face_recognition_models
except Exception:
    print(
        "Please install `face_recognition_models` with this command before using `face_recognition`:\n"
    )
    print(
        "pip install git+https://github.com/ageitgey/face_recognition_models")
    quit()

ImageFile.LOAD_TRUNCATED_IMAGES = True

face_detector = dlib.get_frontal_face_detector()

predictor_68_point_model = face_recognition_models.pose_predictor_model_location(
)
pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location(
)
pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)

cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location(
)
cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

face_recognition_model = face_recognition_models.face_recognition_model_location(
)
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)

Code example #26
File: face_desc_gen.py  Project: nqtinh/DoAnTotNghiep
#  @Project FCloudConsole
#  @Homepage https://fsmartstore.com
#  @Copyright (c) 2018 Fujinet Systems JSC. All Rights Reserved.
#  @License All resources of the source code are owned by 'Fujinet Systems JSC'.
#  Intentionally infringing, stealing, exchanging, or trading in all of the
#  resources below without our consent, is a violation of intellectual property
#  rights. So, if you accidentally receive this source code, please send an email
#  to [email protected], so we can find the best solution for this problem.

import dlib
import face_recognition_models as frm
import numpy as np

from .face_det.face_det_mtcnn import FaceDetectorMTCNN

_pose_predictor = dlib.shape_predictor(frm.pose_predictor_model_location())

_face_encoder = dlib.face_recognition_model_v1(
    frm.face_recognition_model_location())


class FaceRecCoreV1:
    DfFaceRecConfid = 0.71

    @classmethod
    def gen_face_desc(cls,
                      np_image,
                      face_rect=None,
                      num_jitters=1,
                      detector=FaceDetectorMTCNN):
        """