import dlib
import cv2 as cv
import face_recognition_models as fm


def vec_data(path):
    # model files shipped with the face_recognition_models package
    mdl = fm.pose_predictor_model_location()
    frm = fm.face_recognition_model_location()

    # dlib face descriptor network and HOG-based frontal face detector
    cnn = dlib.face_recognition_model_v1(frm)
    fd = dlib.get_frontal_face_detector()

    # read and shrink the image before detection
    im = cv.imread(path)
    hi = (200, 200)
    im = cv.resize(im, hi, interpolation=cv.INTER_AREA)

    df = fd(im, 1)
    a = []
    if len(df) != 0:
        # landmarks for the first detected face, then its 128-D descriptor
        fpose = dlib.shape_predictor(mdl)
        pl = fpose(im, df[0])
        vec = cnn.compute_face_descriptor(im, pl)
        for z in range(len(vec)):
            a.append(vec[z])
        return a
    else:
        return a
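A minimal usage sketch for vec_data above, assuming two hypothetical local images person_a.jpg and person_b.jpg; it compares their 128-D descriptors with the Euclidean distance and the 0.6 threshold conventionally used with dlib's face recognition model.

import numpy as np

enc_a = vec_data('person_a.jpg')  # hypothetical paths; vec_data returns [] when no face is found
enc_b = vec_data('person_b.jpg')

if enc_a and enc_b:
    distance = np.linalg.norm(np.array(enc_a) - np.array(enc_b))
    print("same person" if distance <= 0.6 else "different person", distance)
else:
    print("no face detected in at least one image")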
Example #2
    def encode_filter(filter_files):
        images = []
        faces = []

        FACE_ALIGNMENT = FaceAlignment(LandmarksType._2D,
                                       enable_cuda=True,
                                       flip_input=False)
        for i, filter_file in enumerate(filter_files):
            images.append(skimage.io.imread(str(filter_file)))
            faces.append(FACE_ALIGNMENT.get_landmarks(images[i]))
        FACE_ALIGNMENT = None

        face_recognition_model = face_recognition_models.face_recognition_model_location(
        )
        face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
        for i, face in enumerate(faces):
            if face is None:
                print('Warning: {} has no face.'.format(filter_files[i]))
                continue
            if len(face) > 1:
                print('Warning: {} has more than one face.'.format(
                    filter_files[i]))

            parts = []
            for p in face[0]:
                parts.append(dlib.point(int(p[0]), int(p[1])))
            # bounding rectangle around the landmark points, required by full_object_detection
            xs = [p.x for p in parts]
            ys = [p.y for p in parts]
            rect = dlib.rectangle(min(xs), min(ys), max(xs), max(ys))
            raw_landmark_set = dlib.full_object_detection(rect, parts)
            yield numpy.array(
                face_encoder.compute_face_descriptor(images[i],
                                                     raw_landmark_set, 1))
Example #3
	def __init__(self,landmark_model="large"):
		"""
			Load previously encoded faces and their features
		"""
		with open('../../data/face_recognize_features.json') as f:
			faces = json.load(f)

		face_list = []
		face_name_list = []
		for face in faces: 
			for encoding in face['encodings']:
				face_list.append(np.array(encoding))
				face_name_list.append(face['name'])

		self.face_list = np.array(face_list)
		self.face_name_list = face_name_list

		if landmark_model == "small":
			predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
			self.pose_predictor = dlib.shape_predictor(predictor_5_point_model)
		else:
			predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
			self.pose_predictor = dlib.shape_predictor(predictor_68_point_model)
		
		face_recognition_model = face_recognition_models.face_recognition_model_location()
		self.face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
Example #4
    def __init__(self, filepath):
        self.path = filepath
        self.detector = dlib.get_frontal_face_detector()
        self.win = dlib.image_window()
        self.predictor_model = face_recognition_models.pose_predictor_model_location(
        )
        self.pose_predictor = dlib.shape_predictor(self.predictor_model)

        self.face_recognition_model = face_recognition_models.face_recognition_model_location(
        )
        self.face_encoder = dlib.face_recognition_model_v1(
            self.face_recognition_model)
Example #5
def init_engine(mode='image', num_face=10, mask='all'):
    if mask == 'recognize':
        predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
        pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)
        #predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
        #pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)
        face_recognition_model = face_recognition_models.face_recognition_model_location()
        face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
        return pose_predictor_5_point, face_encoder
    else:
        face_detector = dlib.get_frontal_face_detector()
        return face_detector
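A small usage sketch for init_engine above: the 'recognize' branch returns the 5-point predictor and the encoder, while any other mask returns only the HOG detector. The image path is a hypothetical placeholder.

import cv2
import numpy as np

pose_predictor, face_encoder = init_engine(mask='recognize')
detector = init_engine(mask='all')

img = cv2.imread('sample.jpg')  # hypothetical path
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
for rect in detector(rgb, 1):
    shape = pose_predictor(rgb, rect)
    descriptor = np.array(face_encoder.compute_face_descriptor(rgb, shape))
    print(descriptor.shape)  # (128,)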
Example #6
    def __init__(self, gpu):
        cuda.set_device(gpu)  # assumption: `cuda` refers to torch.cuda in the original module
        face_detector = dlib.get_frontal_face_detector()

        predictor_68_point_model = face_recognition_models.pose_predictor_model_location(
        )
        pose_predictor_68_point = dlib.shape_predictor(
            predictor_68_point_model)

        cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location(
        )
        self.cnn_face_detector = dlib.cnn_face_detection_model_v1(
            cnn_face_detection_model)

        face_recognition_model = face_recognition_models.face_recognition_model_location(
        )
        face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
Example #7
    def __init__(self, ignore_gender=False):
        self.file_log = open("log/file_log", "a")

        self.img_size = 64
        self.model = WideResNet(self.img_size, depth=16, k=8)()
        self.model.load_weights(
            os.path.join("pretrained_models", "weights.28-3.73.hdf5"))

        self.image = None

        self.detector = dlib.get_frontal_face_detector()
        self.face_recognition_model = face_recognition_models.face_recognition_model_location(
        )
        self.face_encoder = dlib.face_recognition_model_v1(
            self.face_recognition_model)

        self.list_face = []
        self.preload_list_face = True
        self.logs = []
        self.ignore_gender = ignore_gender
Example #8
    def __init__(self,
                 employees,
                 resolution=(320, 240),
                 model_type="small",
                 nb_iters=1):
        # nb_iters - higher values are more precise but slower
        # model_type - "small" / "large"; "large" is more accurate but slower
        self.camera = picamera.PiCamera()
        self.camera.resolution = resolution
        self.np_output = np.zeros((resolution[1], resolution[0], 3),
                                  dtype=np.uint8)
        self.model = frm.face_recognition_model_location()
        self.encoder = dlib.face_recognition_model_v1(self.model)
        self.face_detector = dlib.get_frontal_face_detector()
        self.model_type = model_type
        self.nb_iters = nb_iters
        self.employees = employees

        self.predictor = frm.pose_predictor_five_point_model_location()
        if self.model_type == "large":
            self.predictor = frm.pose_predictor_model_location()

        self.load_faces()
Example #9
import dlib
import face_recognition_models
import time
import os

this_file_dir, _ = os.path.split(__file__)
landmarks_file_dir = os.path.join(this_file_dir,'shape_predictor_68_face_landmarks.dat')

# HOG face detector using the built-in dlib class
FACE_DETECTOR = dlib.get_frontal_face_detector()

# download the required pre-trained face detection model here:
# http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
PREDICTOR_MODEL = landmarks_file_dir
FACE_POSE_PREDICTOR = dlib.shape_predictor(PREDICTOR_MODEL)

FACE_RECOGNITION_MODEL = face_recognition_models.face_recognition_model_location()
FACE_ENCODER = dlib.face_recognition_model_v1(FACE_RECOGNITION_MODEL)


def _detect_faces(image, upsample_num_times):
    """
    private function for detect faces using dlib
        :param image: 
        :param upsample_num_times: 
    """
    # Run the HOG face detector on the image data.
    return FACE_DETECTOR(image, upsample_num_times)


def detect_faces(image, upsample_num_times=1):
    return _detect_faces(image, upsample_num_times)
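A hedged sketch showing how the module-level FACE_POSE_PREDICTOR and FACE_ENCODER above could be combined with detect_faces to produce one 128-D descriptor per face; encode_faces is a hypothetical helper, not part of the original module.

import numpy as np

def encode_faces(image, upsample_num_times=1, num_jitters=1):
    """Return a 128-D descriptor for every face detected in `image` (an RGB numpy array)."""
    encodings = []
    for rect in detect_faces(image, upsample_num_times):
        landmarks = FACE_POSE_PREDICTOR(image, rect)
        descriptor = FACE_ENCODER.compute_face_descriptor(image, landmarks, num_jitters)
        encodings.append(np.array(descriptor))
    return encodings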
Example #10
import dlib
import face_recognition
import face_recognition_models

video_path = "../video_clips/21_JUMP_STREET_DVS20.avi"
name_mappings = {"channing_tatum": "Jenko"}
actor_name = "channing_tatum"
actor_kb = []

print("video path: " + video_path)
print("name_mappings: \n\t{}".format(name_mappings))
print("actor_name: {} (single value for this test)".format(actor_name))

tatum_image_path = "./frame_1.png"
tatum_image = face_recognition.load_image_file(
    tatum_image_path)  #load image as numpy array in RGB format

predictor_path = face_recognition_models.pose_predictor_model_location()
face_rec_model_path = face_recognition_models.face_recognition_model_location()

# Load all the models we need: a detector to find the faces, a shape predictor
# to find face landmarks so we can precisely localize the face, and finally the
# face recognition model.
detector = dlib.get_frontal_face_detector()  #HOG-Based
sp = dlib.shape_predictor(predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)

locations = detector(tatum_image)
landmarks = sp(tatum_image, locations[0])

# Using location of face, and shape predictor, generate the 128-Vector encoding of the face (the face descriptor)

encoding = facerec.compute_face_descriptor(tatum_image, landmarks)
Example #11
    def saveEncodings(self, verbose=True):

        # initialize face encoding parameters
        ImageFile.LOAD_TRUNCATED_IMAGES = True
        predictor_68_point_model = face_recognition_models.pose_predictor_model_location(
        )
        pose_predictor_68_point = dlib.shape_predictor(
            predictor_68_point_model)
        predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location(
        )
        pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)
        face_recognition_model = face_recognition_models.face_recognition_model_location(
        )
        face_encoder = dlib.face_recognition_model_v1(face_recognition_model)

        pose_predictor = pose_predictor_68_point

        # initialize a text file for saving images where faces are not found
        with open('waste_files.txt', 'w') as waste:
            waste.write("Images that are not suitable for training:\n")

            try:
                # load previously saved encodings
                pickle_in = open(self.base_dir + "known_face_encodings.pickle",
                                 "rb")
                known_face_encodings = pickle.load(pickle_in)
                pickle_in.close()

                pickle_in = open(self.base_dir + "known_face_names.pickle",
                                 "rb")
                known_face_names = pickle.load(pickle_in)
                pickle_in.close()
                # filter out faces which are already trained
                temp = []
                for name in self.names:
                    if name not in known_face_names:
                        temp.append(name)

                self.names = temp

            except Exception:
                # declare encodings as empty
                known_face_encodings = []
                known_face_names = []
                #self.names = ['1231', '1232', '1234', '1238', '1242']#'1237', '1239','1235',

            print("[INFO] Encoding... ", self.names)
            if self.names != []:
                # looping through names to be trained
                for name in tqdm(self.names):
                    # clear the lists for new user
                    face_images = []
                    faces_locations = []
                    face_names = []

                    # load images of person to be trained
                    base = self.user_images
                    name = name.strip()
                    base = os.path.join(base, name)
                    # looping through images of person to be trained
                    for img_path in glob.glob(os.path.join(base, "*.jpg")):
                        # read image
                        image_data = cv2.imread(img_path)

                        ###############
                        # check face using resnet
                        ###############
                        blob = cv2.dnn.blobFromImage(image_data, 1.0,
                                                     (300, 300),
                                                     (104.0, 177.0, 123.0))
                        self.net2.setInput(blob)
                        detections = self.net2.forward()
                        (h, w) = image_data.shape[:2]
                        confidences = []
                        boxes = []
                        for i in range(0, detections.shape[2]):
                            confidence = detections[0, 0, i, 2]
                            if confidence > 0.98:
                                box = detections[0, 0, i, 3:7] * np.array(
                                    [w, h, w, h])
                                box = box.astype("int")
                                # startX, startY, endX, endY
                                confidences.append(float(confidence))
                                boxes.append([
                                    box[0], box[1], box[2] - box[0],
                                    box[3] - box[1]
                                ])

                        if len(boxes) > 0:
                            pass
                        else:
                            waste.write(img_path + "\n")
                            #print("[INFO] Image {} not suitable for training: Resnet filtered out".format(img_path))
                            continue

                        ###############
                        # face detection using yolo
                        ###############
                        # load model parameters
                        blob = cv2.dnn.blobFromImage(image_data,
                                                     1 / 255, (416, 416),
                                                     [0, 0, 0],
                                                     1,
                                                     crop=False)
                        self.net.setInput(blob)
                        # fetch predictions from model/network
                        layers_names = self.net.getLayerNames()
                        outs = self.net.forward([
                            layers_names[i[0] - 1]
                            for i in self.net.getUnconnectedOutLayers()
                        ])
                        # fetch size of image
                        (frame_height, frame_width) = image_data.shape[:2]
                        # declare overall confidence list
                        confidences = []
                        # declare bounding boxes list
                        boxes = []
                        face_locations = []
                        # looping through model predictions/ predictions for each grid cell
                        for out in outs:
                            # looping through detectors outputs for grid cell
                            for detection in out:
                                # fetch classifier probabilities for different classes
                                scores = detection[5:]
                                # fetch the class with maximum probability
                                class_id = np.argmax(scores)
                                # define confidence as maximum probability
                                confidence = scores[class_id]
                                # filter predictions based on confidence threshold
                                if confidence > self.yolo_conf_threshold:
                                    # fetch bounding box dimensions
                                    center_x = int(detection[0] * frame_width)
                                    center_y = int(detection[1] * frame_height)
                                    width = int(detection[2] * frame_width)
                                    height = int(detection[3] * frame_height)
                                    left = int(center_x - width / 2)
                                    top = int(center_y - height / 2)
                                    # append confidence in confidences list
                                    confidences.append(float(confidence))
                                    # append bounding box in bounding boxes list
                                    boxes.append([left, top, width, height])

                        # perform non-maximum suppression of overlapping boxes
                        indices = cv2.dnn.NMSBoxes(boxes, confidences,
                                                   self.yolo_conf_threshold,
                                                   self.nms_threshold)

                        # fetch faces bounding boxes
                        for i in indices:
                            i = i[0]
                            box = boxes[i]
                            left = box[0]
                            top = box[1]
                            width = box[2]
                            height = box[3]
                            face_locations.append(
                                np.array([
                                    top, left + width +
                                    (width * self.margin // 100), top + height,
                                    left - (width * self.margin // 100)
                                ]))

                        if len(face_locations) != 1:
                            waste.write(img_path + "\n")
                            # If there are no people (or too many people) in a training image, skip the image.
                            if verbose:
                                pass
                                #print("[INFO] Image {} not suitable for training: {}".format(img_path, "Didn't find a face" if len(face_locations) < 1 else "Found more than one face"))
                        else:
                            for face_location in face_locations:
                                if min(face_location) < 0:
                                    pass
                                    #print("[INFO] Image {} not suitable for training: Face is not in Boundary of Image".format(img_path))
                                else:
                                    ######################################################
                                    # histogram equalization
                                    frame1 = image_data[
                                        face_location[0]:face_location[2],
                                        face_location[3]:face_location[1], :]
                                    img_to_yuv = cv2.cvtColor(
                                        frame1, cv2.COLOR_BGR2YUV)
                                    img_to_yuv[:, :, 0] = cv2.equalizeHist(
                                        img_to_yuv[:, :, 0])
                                    frame1 = cv2.cvtColor(
                                        img_to_yuv, cv2.COLOR_YUV2BGR)

                                    image_data[
                                        face_location[0]:face_location[2],
                                        face_location[3]:
                                        face_location[1], :] = frame1
                                    ###################################################
                                    # Add face encoding for current image to the training set
                                    faces_locations.append(face_locations[0])
                                    face_images.append(image_data)
                                    face_names.append(name)

                    faces_locations = [
                        self._css_to_rect(face_location)
                        for face_location in faces_locations
                    ]

                    raw_landmarks = []
                    for face_image, face_location in zip(
                            face_images, faces_locations):
                        faces = dlib.full_object_detections()
                        faces.append(pose_predictor(face_image, face_location))
                        raw_landmarks.append(faces)

                    if len(faces_locations) < 120:
                        print(
                            "[INFO]: %s skipped as the total number of proper images is < 120."
                            % name)
                        continue

                    # generate encoding for captured faces of user
                    encodings = list(np.array(face_encoder.compute_face_descriptor(batch_img=face_images, batch_faces=raw_landmarks, num_jitters=20))\
                                     .reshape((len(face_images), 128)))

                    # append new data in old data
                    known_face_encodings += encodings
                    known_face_names += face_names

                    # save the encodings after every iteration of distinct class
                    pickle_out = open(
                        self.base_dir + "known_face_names.pickle", "wb")
                    pickle.dump(known_face_names, pickle_out)
                    pickle_out.close()
                    pickle_out = open(
                        self.base_dir + "known_face_encodings.pickle", "wb")
                    pickle.dump(known_face_encodings, pickle_out)
                    pickle_out.close()
                    print("[INFO]: %s saved!" % name)

            else:
                print("Encoding Skipped!\n")
Example #12
def run(video_path, actor_kb, stride=3):

    # setup the face detector & encoder for use on each frame
    detector = dlib.get_frontal_face_detector()
    shape_predictor_path = face_recognition_models.pose_predictor_model_location(
    )
    sp = dlib.shape_predictor(shape_predictor_path)
    facerec_model_path = face_recognition_models.face_recognition_model_location(
    )
    recognizer = dlib.face_recognition_model_v1(facerec_model_path)

    # Add face reference encodings to our version of KB
    # TODO: untested

    for actor_name, actor_info in actor_kb.items():

        image = cv2.imread(actor_info["image_path"])
        face_rgb = image[:, :, ::-1]

        locations = detector(face_rgb)
        # Assume there's only one face in the reference image
        detection_object = sp(face_rgb, locations[0])
        encoding = recognizer.compute_face_descriptor(face_rgb, detection_object,
                                                      REC_JITTER)
        actor_kb[actor_name]["encoding"] = encoding

    # split record list into lists of the columns (keeping order) for input into recognizer functions
    # maintaining order between the lists for easy referencing later on
    # eg: known_encodings[1] is from the same record as known_characters[1], etc
    known_encodings = []
    known_characters = []
    counts = []
    for actor_name, record in actor_kb.items():
        known_encodings.append(record['encoding'])
        known_characters.append(record['character'])
        counts.append(0)

    # Open the input movie file
    input_movie = cv2.VideoCapture(video_path)
    length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))
    FPS = input_movie.get(cv2.CAP_PROP_FPS)
    width = int(input_movie.get(cv2.CAP_PROP_FRAME_WIDTH))  #1280
    height = int(input_movie.get(cv2.CAP_PROP_FRAME_HEIGHT))  #720

    # Initialize some variables
    face_locations = []
    detected_faces_encodings = []
    recognized_faces_names = []
    frame_number = 0
    for actor_name, actor_info in actor_kb.items():
        actor_kb[actor_name]["count"] = 0

    frames_analysed = 0
    print("\n\trecognizer: Finished Setup, entering loop!\n\tINPUT VIDEO: {}".
          format(video_path))
    while True:
        # READ FRAME IN
        # Grab a single frame of video
        ret, frame = input_movie.read()

        # Quit when the input video file ends
        if not ret:
            break

        frame_number += 1
        if not frame_number % stride == 0:
            continue

        frames_analysed += 1

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_frame = frame[:, :, ::-1]
        frame_copy = frame.copy()
        # BEGIN FACE DETECTION & ENCODING OVER THE FRAME. (find ALL faces in the frame. NB: Encoding is like analysing a face. Recognition is encoding + comparing to some kb of encodings)
        locations = detector(rgb_frame)
        full_object_detections = []
        for location in locations:
            full_object_detections.append(
                sp(rgb_frame, location)
            )  # An array of type full_object_detection[]. This object contains fields like the landmark points, but we pass the full thing to the recognizer
        detected_faces_encodings = []
        # For each detected face, compute the face descriptor
        # Jitter can improve quality of encoding, but increases the time taken in direct proportion to the number of jitters (jitter of 100 -> 100x as long)
        if REC_JITTER == 0:
            for landmarks in full_object_detections:
                detected_faces_encodings.append(
                    recognizer.compute_face_descriptor(rgb_frame, landmarks))
        else:
            for landmarks in full_object_detections:
                detected_faces_encodings.append(
                    recognizer.compute_face_descriptor(rgb_frame, landmarks,
                                                       REC_JITTER))
        # print("Done. Got encodings for {} faces, from {} object detections".format(len(detected_faces_encodings),len(full_object_detections)))
        # print("\nStarting to compare the encodings in knowledge base to those found in the image")

        # COMPARE FACES FOUND IN IMAGE TO KNOWN FACES, THEN SAVE NAMES OF THOSE RECOGNIZED
        recognized_faces_names = []
        encodings_match = []
        for count, face_encoding in enumerate(detected_faces_encodings):

            # calculate Euclidean distance (similarity) between this face encoding and each known face encoding.
            encodings_match = [
                np.linalg.norm(
                    np.array(known_encodings[i]) - np.array(face_encoding)) <=
                COMPARISON_TOLERANCE for i in range(0, len(known_encodings))
            ]
            # finally, match any recognized encoding to the associated name.
            name = None
            for i in range(0, len(encodings_match)):
                if encodings_match[i]:
                    name = known_characters[i]
                    counts[i] += 1
                    break
            recognized_faces_names.append(name)
        print("\n***Recognized: {}".format(recognized_faces_names))
        #print("\nDone Comparing")
        #print("\tCurrent count: ")
        #for i, character_name in enumerate(known_characters):
        #    print("\t{} : {}".format(character_name , counts[i]))
        # FINISHED RECOGNIZING

        if __name__ == "__main__":
            write_sample_frames(locations, frame_copy, full_object_detections,
                                frame_number)

        #print("{}, {}".format(frame_number/FPS, recognized_faces_names))
    #print("\n\tRecognizer: Finished processing {}".format(video_path))
    input_movie.release()
    cv2.destroyAllWindows()

    # Store counts for each actor in an updated knowledge base
    characters_to_actors = {}
    for actor_name, info in actor_kb.items():
        characters_to_actors[info["character"]] = actor_name

    for i, name in enumerate(known_characters):
        actor_kb[characters_to_actors[name]]["count"] = counts[i]
        del actor_kb[characters_to_actors[name]]["encoding"]
    print(
        "\tRecognizer: getPercentages:run:\n\t\ttotal_frames == {}\n\t\tStride == {}\n\t\tReturning tuple: (actor_kb, frames_analysed) == ({},{})"
        .format(frame_number, stride, actor_kb, frames_analysed))
    input("Continue?")
    return (actor_kb, frames_analysed, frame_number)
Example #13
class FaceRecognizer:
  
  ImageFile.LOAD_TRUNCATED_IMAGES = True

  
  predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
  pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

  face_recognition_model = face_recognition_models.face_recognition_model_location()
  face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
  
  detector = MTCNN()
  
  def __init__ (self):
    self.clf = None
    return
  
  def img_load(self,img_dir):
    img = imageio.imread(img_dir)
    img = np.asarray(img)
    return img
  
  def _raw_face_landmarks(self, face_image, face_locations=None, model="large"):
    pose_predictor = self.pose_predictor_68_point
    return [pose_predictor(face_image, face_location) for face_location in face_locations]


  def face_encodings(self, face_image, known_face_locations=None, num_jitters=2):
    """
    Given an image, return the 128-dimension face encoding for each face in the image.
    :param face_image: The image that contains one or more faces
    :param known_face_locations: Optional - the bounding boxes of each face if you already know them.
    :param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower)
    :return: A list of 128-dimensional face encodings (one for each face in the image)
    """
    raw_landmarks = self._raw_face_landmarks(face_image, known_face_locations)
    return [np.array(self.face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks]
  
  
  def predict_and_encode(self, img, confidence=0.9, ratio=30, visua=False):
    predict = self.detector.detect_faces(img)
    #print(predict)
    out_boxes = [ item['box'] for item in predict if item["confidence"]> confidence and item["box"][3]>img.shape[0]/ratio]
    out_boxes = dlib.rectangles([dlib.rectangle(a,b,a+c,b+d) for a,b,c,d in out_boxes ])
    #print(out_boxes)
    key = [item['keypoints'] for item in predict if item["confidence"]> confidence and item["box"][3]>img.shape[0]/ratio]
    img_encoded = self.face_encodings(img,out_boxes)
    #print(key)
    if visua:
      self.visualize(img,out_boxes,key)
    return out_boxes, key, img_encoded
  
  
  def visualize(self, img,out_boxes,key,name = []):
    img2 = img.copy()
    plt.figure(figsize=(15, 15))
    for i, c in list(enumerate(out_boxes)):
      box = out_boxes[i]
      cv2.rectangle(img2,(box.left(),box.top()),(box.right(),box.bottom()),(0,255,0),int(img.shape[1]/150))
      if len(name) != 0:
        plt.text(box.left(),box.top(),name[i],fontsize = img.shape[0]/50, color = 'red')
      keyx = [item[0] for item in key[i].values()]
      keyy = [item[1] for item in key[i].values()]
      #print(keyx,keyy)
      #plt.plot(keyx,keyy,'bo')
    plt.axis('off')
    plt.imshow(img2)
    #plt.savefig("/content/drive/My Drive/predict.png", bbox_inches='tight', pad_inches=0)
    plt.show()
  def init_KNN(self, X_train, y_train):
    clf = neighbors.KNeighborsClassifier(n_neighbors = 15, p = 2, weights = 'distance', n_jobs = -1)
    clf.fit(X_train, y_train)
    self.clf = clf
  def search_face(self, img):
    out_boxes, key, img_encoded = self.predict_and_encode(img)
    print(len(img_encoded))
    if len(img_encoded) == 0:
      print("cannot search: no face found")
    else:
      results = self.clf.predict(img_encoded)
      score = self.clf.predict_proba(img_encoded)
      print(results)
      #print(score)
      final_results = []
      for i in range (len(results)):
        if max(score[i])>0.8:
          final_results.append(results[i])
        else:
          final_results.append("unknown")
      print(final_results)
      self.visualize(img,out_boxes,key,final_results)
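A brief usage sketch for the FaceRecognizer class above, with hypothetical image paths; it mirrors the class's own flow: encode labelled reference faces, fit the KNN index, then search a group photo. Note that init_KNN configures n_neighbors=15, so predictions need at least 15 training encodings in total.

fr = FaceRecognizer()

# hypothetical labelled reference images (several per person)
labelled_images = {
    "alice": ["alice_01.jpg", "alice_02.jpg"],
    "bob": ["bob_01.jpg", "bob_02.jpg"],
}

X_train, y_train = [], []
for name, paths in labelled_images.items():
    for path in paths:
        _, _, encodings = fr.predict_and_encode(fr.img_load(path))
        for enc in encodings:
            X_train.append(enc)
            y_train.append(name)

fr.init_KNN(X_train, y_train)
fr.search_face(fr.img_load("group_photo.jpg"))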
Example #14
def filter_face(input_dir, output_dir, processn, alignments, fps,
                filter_encodings, workerqueue):
    face_recognition_model = face_recognition_models.face_recognition_model_location(
    )
    face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
    filter_threshold = 0.56

    def encode_one_face_local(image, faces):
        for points in faces:
            parts = []
            for p in points:
                parts.append(dlib.point(int(p[0]), int(p[1])))
            # bounding rectangle around the landmark points, required by full_object_detection
            xs = [p.x for p in parts]
            ys = [p.y for p in parts]
            rect = dlib.rectangle(min(xs), min(ys), max(xs), max(ys))
            raw_landmark_set = dlib.full_object_detection(rect, parts)
            yield numpy.array(
                face_encoder.compute_face_descriptor(image, raw_landmark_set,
                                                     1))

    container = av.open(input_dir)
    stream = container.streams.video[0]
    container.seek(int(alignments[0][0] * stream.time_base * 1000000))
    fps = 1 / fps

    idx = 0
    length = len(alignments)

    frame = next(container.decode(video=0))
    timenext = frame.pts * stream.time_base + fps

    def process_frame():
        nonlocal idx
        while (idx < length) and (alignments[idx][0] < frame.pts):
            idx += 1
        face_files = []
        faces = []
        while (idx < length) and (frame.pts == alignments[idx][0]):
            face_file = output_dir / alignments[idx][1]
            if face_file.exists():
                face_files.append(face_file)
                faces.append(
                    numpy.array(alignments[idx][3]).reshape(
                        (-1, 2)).astype(int))
            idx += 1
        if len(face_files) > 0:
            scores = []
            encodings = list(
                encode_one_face_local(frame.to_nd_array(format='rgb24'),
                                      faces))
            min_score = 1
            for (face_file, encoding) in zip(face_files, encodings):
                score = numpy.linalg.norm(filter_encodings - encoding, axis=1)
                scores.append(score)
                t = score.min()
                if t < min_score:
                    min_score = t
                if t > filter_threshold:
                    face_file.replace(output_dir / 'filter1' / face_file.name)
            if len(face_files) > 1:
                for i, face_file in enumerate(face_files):
                    if face_file.exists() and scores[i].min() > min_score:
                        face_file.replace(output_dir / 'filter2' /
                                          face_file.name)
            workerqueue.put((processn, idx, face_files, scores))

    process_frame()

    for frame in container.decode(video=0):
        timenow = frame.pts * stream.time_base
        if (timenow >= timenext):
            timenext += fps
            process_frame()
            if idx >= length:
                break

    workerqueue.put(None)
Example #15
import dlib
import face_recognition_models as fm
import skimage as sk
import numpy as np
import cv2 as cv
import csv
import simplejson as sj
import face_recognition
import socket
import random
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1', 5000))

mdl = fm.pose_predictor_model_location()

frm = fm.face_recognition_model_location()

cnn = dlib.face_recognition_model_v1(frm)

fd = dlib.get_frontal_face_detector()

im = cv.imread('test.jpg')

hi = (200, 200)

im = cv.resize(im, hi, interpolation=cv.INTER_AREA)
df = fd(im, 1)

if len(df) != 0:
    fpose = dlib.shape_predictor(mdl)
    pl = fpose(im, df[0])
Example #16
import dlib

try:
    import face_recognition_models
except Exception:
    print("Please install `face_recognition_models` with this command before using `face_recognition`:")
    print()
    print("pip install git+https://github.com/ageitgey/face_recognition_models")
    quit()

face_detector = dlib.get_frontal_face_detector()

predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)

cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()
cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

face_recognition_model = face_recognition_models.face_recognition_model_location()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


def _rect_to_css(rect):
    """
    Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order

    :param rect: a dlib 'rect' object
    :return: a plain tuple representation of the rect in (top, right, bottom, left) order
    """
    return rect.top(), rect.right(), rect.bottom(), rect.left()


def _css_to_rect(css):
    """
    Convert a tuple in (top, right, bottom, left) order to a dlib `rect` object

    :param css: plain tuple representation of the rect in (top, right, bottom, left) order
    :return: a dlib `rect` object
    """
    return dlib.rectangle(css[3], css[0], css[1], css[2])
Example #17
face_detector = dlib.get_frontal_face_detector()

predictor_68_point_model = face_recognition_models.pose_predictor_model_location(
)
pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location(
)
pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)

cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location(
)
cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

face_recognition_model = face_recognition_models.face_recognition_model_location(
)
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


def _rect_to_css(rect):
    """
    Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order

    :param rect: a dlib 'rect' object
    :return: a plain tuple representation of the rect in (top, right, bottom, left) order
    """
    return rect.top(), rect.right(), rect.bottom(), rect.left()


def _css_to_rect(css):
    """
    Convert a tuple in (top, right, bottom, left) order to a dlib `rect` object

    :param css: plain tuple representation of the rect in (top, right, bottom, left) order
    :return: a dlib `rect` object
    """
    return dlib.rectangle(css[3], css[0], css[1], css[2])
Example #18
def main(mode='test', img_path='def'):
    t = time.perf_counter()
    classes = [
        'MXG', 'Sanaken', 'Zofinka', 'Toalk', 'Zissxzirsziiss', 'kiasummer'
    ]

    known_face_encodes = [
        np.loadtxt(MAIN_PATH + '/persons/MXG/fv.txt'),
        np.loadtxt(MAIN_PATH + '/persons/Sanaken/fv.txt'),
        np.loadtxt(MAIN_PATH + '/persons/Zofinka/fv.txt'),
        np.loadtxt(MAIN_PATH + '/persons/Toalk/fv.txt'),
        np.loadtxt(MAIN_PATH + '/persons/Zissxzirsziiss/fv.txt'),
        np.loadtxt(MAIN_PATH + '/persons/kiasummer/fv.txt')
    ]

    known_face_encodes = np.reshape(known_face_encodes, (6, 5, 128))

    # get image
    if img_path == 'def':
        image = cv2.imread('team.jpg', 1)
    else:
        image = cv2.imread(img_path, 1)

    # output
    init_align_faces = []
    out_arr = []

    # get bboxes
    fd = FaceDetector()

    conf, faceboxes, mboxes = fd.get_faceboxes(image)

    color = [255, 255, 255]

    # border widths; set them all to 120
    top, bottom, left, right = [120] * 4

    image = cv2.copyMakeBorder(image,
                               top,
                               bottom,
                               left,
                               right,
                               cv2.BORDER_CONSTANT,
                               value=color)
    bbox_mark_image = image.copy()

    # get alignment model
    predictor_model = MAIN_PATH + "/models/shape_predictor_68_face_landmarks.dat"
    face_pose_predictor = dlib.shape_predictor(predictor_model)
    face_aligner = openface.AlignDlib(predictor_model)

    # init predection model
    predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location(
    )
    pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)
    face_recognition_model = face_recognition_models.face_recognition_model_location(
    )
    face_encoder = dlib.face_recognition_model_v1(face_recognition_model)

    for i in range(len(faceboxes)):
        # get dlib rectangle from facebox
        face_rect = dlib.rectangle(faceboxes[i][0], faceboxes[i][1],
                                   faceboxes[i][2], faceboxes[i][3])

        # Get the face's pose
        pose_landmarks = face_pose_predictor(image, face_rect)

        # Use openface to calculate and perform the face alignment
        alignedFace = face_aligner.align(
            534,
            image,
            face_rect,
            landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
        alignedFace_out = cv2.resize(alignedFace,
                                     (faceboxes[i][2] - faceboxes[i][0],
                                      faceboxes[i][3] - faceboxes[i][1]))
        initFace_out = image[faceboxes[i][1]:faceboxes[i][3],
                             faceboxes[i][0]:faceboxes[i][2]]
        init_align_faces.append([initFace_out, alignedFace_out])

        # draw marks
        parts = pose_landmarks.parts()
        FaceDetector.draw_marks(bbox_mark_image, parts)

        # get face landmarks for feature extraction
        landmark_set = pose_predictor_5_point(
            alignedFace,
            dlib.rectangle(0, 0, alignedFace.shape[1], alignedFace.shape[0]))

        # get feature vector
        feature_vector = np.array(
            face_encoder.compute_face_descriptor(alignedFace, landmark_set, 1))

        # known_face_encode = np.loadtxt('persons/MXG/fv.txt')
        ind = compare_faces(known_face_encodes, feature_vector)
        if (ind != -1):
            face_class = classes[ind]
            colour = (0, 255, 0)
        else:
            face_class = "Unknown"
            colour = (0, 0, 255)
        sh = max(image.shape[0], image.shape[1])
        mult = sh / 500
        if mult < 1:
            mult = 1
        cv2.putText(image, face_class, (faceboxes[i][0], faceboxes[i][1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5 * mult, colour, 1)
        cv2.rectangle(bbox_mark_image, (faceboxes[i][0], faceboxes[i][1]),
                      (faceboxes[i][2], faceboxes[i][3]), (0, 255, 0))
        cv2.rectangle(image, (faceboxes[i][0], faceboxes[i][1]),
                      (faceboxes[i][2], faceboxes[i][3]), (0, 255, 0))
        #cv2.rectangle(image, (mboxes[i][0], mboxes[i][1]), (mboxes[i][2], mboxes[i][3]), (0, 255, 0))
        #out_arr.append({'x1': faceboxes[i][0], 'y1': faceboxes[i][1], 'x2': faceboxes[i][2], 'y2': faceboxes[i][3], 'class': face_class, 'conf': conf[i]})
        out_arr.append({
            'x1': mboxes[i][0],
            'y1': mboxes[i][1],
            'x2': mboxes[i][2],
            'y2': mboxes[i][3],
            'class': face_class,
            'conf': conf[i]
        })

    t = time.perf_counter() - t
    out_imgs = [image, bbox_mark_image, init_align_faces, t]
    if mode == 'test':
        cv2.imshow("Preview", image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    if mode == 'metr':
        #cv2.imshow("Preview", image)
        #cv2.waitKey(0)
        #cv2.destroyAllWindows()
        return out_arr
    if mode == 'process':
        return out_imgs
    if mode == 'def':
        return out_imgs