def test_partial_face_locations(self):
        img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama_partial_face.jpg'))
        detected_faces = api.face_locations(img)

        self.assertEqual(len(detected_faces), 1)
        self.assertEqual(detected_faces[0], (142, 191, 365, 0))

        img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama_partial_face2.jpg'))
        detected_faces = api.face_locations(img)

        self.assertEqual(len(detected_faces), 1)
        self.assertEqual(detected_faces[0], (142, 551, 409, 349))
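The assertions above rely on face_locations returning boxes in (top, right, bottom, left) order; the 0 in the first tuple marks a face cut off at the left edge of the image. A minimal sketch for converting such a box to the more common (x, y, width, height) form:

def css_to_xywh(box):
    # face_recognition returns (top, right, bottom, left); convert to (x, y, w, h)
    top, right, bottom, left = box
    return left, top, right - left, bottom - top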
Example #3
def main(file, model):
    im = PIL.Image.open(file)
    im = im.convert("RGB")
    image = np.array(im)

    locations = fr.face_locations(image,
                                  number_of_times_to_upsample=1,
                                  model=model)
    encodings = fr.face_encodings(image,
                                  known_face_locations=locations,
                                  model="default")
    faces = []

    for i, encoding in enumerate(encodings):
        width = locations[i][1] - locations[i][3]
        height = locations[i][2] - locations[i][0]

        x = round((locations[i][3] + (width / 2)) / im.width, percision)
        y = round((locations[i][0] + (height / 2)) / im.height, percision)
        w = round((width * scaling) / im.width, percision)
        h = round((height * scaling) / im.height, percision)
        id = str(uuid.uuid4())

        faces.append({
            "id": id,
            "x": x,
            "y": y,
            "w": w,
            "h": h,
            "detector": "face_recognition@" + face_recognition.__version__,
            "confidence": 1,
            "encoding": list(encoding)
        })

    print(json.dumps(faces))
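A possible way to run main above, assuming fr is an alias for face_recognition and that percision (spelling kept from the source) and scaling are module-level settings defined elsewhere in the original script:

import json
import uuid

import numpy as np
import PIL.Image
import face_recognition as fr

percision = 6   # assumed: digits kept when rounding normalized coordinates
scaling = 1.0   # assumed: factor applied to box width/height

main("group_photo.jpg", "hog")  # hypothetical input image; prints a JSON list of faces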
def video_detect_and_blur(img, input_path, output_path, model):
    if (os.stat(input_path + img).st_size != 0):
        name = img[:img.rfind('.')]
        unknown_image = face_recognition.load_image_file(input_path + img)
        face_locations = face_recognition.face_locations(
            unknown_image, number_of_times_to_upsample=0, model=model)
        image = cv2.imread(input_path + img)
        for face_location in face_locations:
            top, right, bottom, left = face_location
            sub_face = image[top:bottom, left:right]
            # apply a Gaussian blur on this new rectangle image
            sub_face = cv2.GaussianBlur(sub_face, (51, 51), 75)
            # merge this blurry rectangle to our final image
            image[top:top + sub_face.shape[0],
                  left:left + sub_face.shape[1]] = sub_face
            # cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
        # print(output_path + BLURRED_DIR + img)
        cv2.imwrite(output_path + BLURRED_DIR + img, image)
        with open(output_path + INFO_DIR + name + '.csv',
                  'w',
                  newline='',
                  encoding="utf-8") as csvfile:
            fieldnames = ['location_id', 'top', 'left', 'bottom', 'right']
            writer = csv.writer(csvfile)
            writer.writerow(fieldnames)
            for (idx, loc) in enumerate(face_locations):
                top, right, bottom, left = loc
                writer.writerow([
                    'id_' + str(idx),
                    str(top),
                    str(left),
                    str(bottom),
                    str(right)
                ])
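A sketch of driving video_detect_and_blur over a folder of extracted frames; BLURRED_DIR and INFO_DIR are module-level constants in the source, so the values below are assumptions:

import os

BLURRED_DIR = 'blurred/'   # assumed value
INFO_DIR = 'info/'         # assumed value

input_path, output_path = 'frames/', 'out/'
os.makedirs(output_path + BLURRED_DIR, exist_ok=True)
os.makedirs(output_path + INFO_DIR, exist_ok=True)
for frame_name in sorted(os.listdir(input_path)):
    video_detect_and_blur(frame_name, input_path, output_path, 'hog')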
    def test_face_loc(self):
        img = api.load_image_file(
            os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
        detected_faces = api.face_locations(img)

        self.assertEqual(len(detected_faces), 1)
        self.assertEqual(detected_faces[0], (142, 617, 409, 349))
Example #6
def test_image(image_to_check, model):
    unknown_image = face_recognition.load_image_file(image_to_check)
    face_locations = face_recognition.face_locations(
        unknown_image, number_of_times_to_upsample=0, model=model)

    for face_location in face_locations:
        print_result(image_to_check, face_location)
Example #7
def main(image_to_check, cpus, model):
    for img_file in image_files_in_folder(image_to_check):
        unknown_image = face_recognition.load_image_file(img_file)
        face_locations = face_recognition.face_locations(
            unknown_image, number_of_times_to_upsample=0, model=model)

        for faceLocation in face_locations:
            #            print_result(img_file, faceLocation)
            try:
                with Image.open(img_file) as im:
                    print("Processing {}...".format(img_file))
                    top, right, bottom, left = faceLocation
                    myuuid = uuid.uuid1()

                    print("Face Location: {}".format(faceLocation))
                    print("Format: {0}\tSize: {1}\tMode: {2}".format(
                        im.format, im.size, im.mode))
                    im1 = im.crop((left, top, right, bottom))
                    print("Format: {0}\tSize: {1}\tMode: {2}".format(
                        im1.format, im1.size, im1.mode))

                    im1.save("./" + str(myuuid) + "." + str(im.format).lower(),
                             im.format)

            except IOError as err:
                print("Unable to load image: {} ({})".format(
                    img_file, err.errno))
                sys.exit(1)
Example #8
def identityRecognition(testimg, known_face_encodings, known_face_IDs, Threshold):
    # testimg: current capture, the face to be matched
    # known_face_encodings: collection of known face encodings
    # known_face_IDs: collection of known face labels
    face_locations = face_recognition.face_locations(testimg)  # detect all faces in the image
    # Handling multiple faces: the core idea is to keep only the largest one
    max_face_id = 0
    face_locations_select = []
    if len(face_locations) > 1:
        for face_idx in range(len(face_locations)):
            if (face_locations[face_idx][2] > face_locations[max_face_id][2]) and (face_locations[face_idx][3] > face_locations[max_face_id][3]):
                max_face_id = face_idx
        face_locations_select.append(face_locations[max_face_id])  # was face_locations[face_idx], a stale loop index
    else:
        face_locations_select = face_locations
    face_encodings = face_recognition.face_encodings(testimg, face_locations_select)  # encode the selected faces
    retname, retscore = np.array(0), np.array(0)
    top_k_idx = []
    for face_encoding in face_encodings:
        matches, score, top_k_idx = compare_faces(known_face_encodings, face_encoding, Threshold, top_k_num=3)
        retname, retscore = np.array(0), np.array(0)
        if True in matches:
            # first_match_index = matches.index(True)
            # name = known_face_IDs[first_match_index]
            known_face_IDs_np = np.array(known_face_IDs)
            name = known_face_IDs_np[top_k_idx]
            if score > retscore:
                retname = name
                retscore = score
    return retname, top_k_idx
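The multi-face branch above keeps only one detection, but it compares raw bottom/left coordinates rather than sizes. A sketch of an area-based alternative for picking the largest box:

def largest_face(face_locations):
    # boxes are (top, right, bottom, left); area = height * width
    return max(face_locations, key=lambda b: (b[2] - b[0]) * (b[1] - b[3]))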
Example #9
def anonymize_faces(img, min_faces=1, strokes=3):
    faces = face_recognition.face_locations(img)
    im = Image.fromarray(img)
    draw = ImageDraw.Draw(im)

    if len(faces) <= min_faces:
        show = -1
    else:
        show = int(random.uniform(0, len(faces)))

    for i, f in enumerate(faces):
        if i == show:
            continue
        fx = (f[3], f[1] + 1)
        fy = (f[0], f[2] + 1)
        fy = fy[0] - (fy[1] - fy[0]) // 4, fy[1]

        strike_sz = (fy[1] - fy[0]) // strokes
        irx, iry = int(random.uniform(-10, 10)), int(random.uniform(-4, 4))
        for oy in range(0, strokes * strike_sz, strike_sz):
            rx, ry = irx + int(random.uniform(-5, 5)), iry + int(
                random.uniform(-3, 3))
            draw.polygon([(fx[0] + rx, fy[0] + oy),
                          (fx[0], fy[0] + oy + strike_sz),
                          (fx[1], fy[0] + oy + strike_sz + ry),
                          (fx[1] + rx, fy[0] + oy + ry)],
                         fill=0)
    return np.array(im)
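A minimal usage sketch for anonymize_faces, reusing the imports the function depends on; crowd.jpg is a hypothetical input file:

import face_recognition
from PIL import Image

img = face_recognition.load_image_file("crowd.jpg")  # hypothetical file
out = anonymize_faces(img, min_faces=1, strokes=3)
Image.fromarray(out).save("crowd_anonymized.jpg")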
Example #10
    def test_face_locations(self):
        img = api.load_image_file(
            os.path.join(os.path.dirname(__file__), "test_images",
                         "obama.jpg"))
        detected_faces = api.face_locations(img)

        assert len(detected_faces) == 1
        assert detected_faces[0] == (142, 617, 409, 349)
    def test_cnn_face_locations(self):
        img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
        detected_faces = api.face_locations(img, model="cnn")

        self.assertEqual(len(detected_faces), 1)
        self.assertAlmostEqual(detected_faces[0][0], 144, delta=25)
        self.assertAlmostEqual(detected_faces[0][1], 608, delta=25)
        self.assertAlmostEqual(detected_faces[0][2], 389, delta=25)
        self.assertAlmostEqual(detected_faces[0][3], 363, delta=25)
Example #13
def test_image( unknown_image):
    with open('./names.txt') as f:
        known_names = f.read().splitlines()

    known_face_encodings = np.loadtxt('encodings.txt', dtype='float')
    unknown_image = cv2.cvtColor(unknown_image, cv2.COLOR_BGR2RGB)
    img = cv2.cvtColor(unknown_image, cv2.COLOR_BGR2GRAY)  # grayscale copy (unused below)
    print("Image location start")
    # Find locations of faces in the image
    start_time = time.time()  # was undefined here; time the detection step
    face_locations = face_recognition.face_locations(unknown_image, number_of_times_to_upsample=2, model="hog")
    print("Face detection--- %s seconds ---" % (time.time() - start_time))
    print("locations", face_locations)
    if not face_locations:
        print("No face detected")
    unknown_encodings = []
    username = "******"

    if face_locations:
        # for i, face_rect in enumerate(face_locations):
        #     top, right, bottom,left = face_rect
        #     d = dlib.rectangle(left,top, right, bottom )
        #     alignedFace=face_aligner.align(200, unknown_image,d, landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
        #     alignedFace=cv2.cvtColor( alignedFace, cv2.COLOR_BGR2RGB )
        #     cv2.imwrite("alignedfaces/aligned_face__{}_.jpg".format(i), alignedFace)
        #     unknown_encoding = face_recognition.face_encodings(alignedFace)
        #     unknown_encodings.extend(unknown_encoding)
        print(unknown_encodings)
        unknown_encodings = face_recognition.face_encodings(unknown_image,face_locations)
        pil_image = Image.fromarray(unknown_image)
        draw = ImageDraw.Draw(pil_image)
        count=0
        for (top, right, bottom, left), face_encoding in zip(face_locations, unknown_encodings):
            print(face_encoding)
            with open('encodings.txt', 'a+') as encodingsfile:
                for x in face_encoding:
                    encodingsfile.write("%f " % x)
                encodingsfile.write("\n")
            username = input("Enter name of Unknown : ")  # raw_input in the original Python 2 code

            # making output annotation for face
            draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))
            text_width, text_height = draw.textsize(username)
            draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))
            draw.text((left + 6, bottom - text_height - 5), username, fill=(255, 255, 255, 255))

            known_names.append(username)
            with open('names.txt', 'w') as namefile:
                for item in known_names:
                    namefile.write("%s\n" % item)
        del draw
        pil_image.show()
        return username
def unrecognize_names(unidentified_image_dir):
    unidentified_face_encodings = []
    # Loop through each unidentified image
    for img_path in image_files_in_folder(unidentified_image_dir):
        image = face_recognition.load_image_file(img_path)
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(
            image, number_of_times_to_upsample=2)
        face_encodings = face_recognition.face_encodings(image,
                                                         face_locations,
                                                         num_jitters=10)
        unidentified_face_encodings.append(face_encodings)
    return unidentified_face_encodings
    def _trainImage(self, unknown_image, username="******"):
        with open(RECOGNISED_NAMES) as f:
            self.knownNames = f.read().splitlines()

        unknown_image = cv2.cvtColor(unknown_image, cv2.COLOR_BGR2RGB)
        img = cv2.cvtColor(unknown_image, cv2.COLOR_BGR2GRAY)

        face_locations = face_recognition.face_locations(
            unknown_image, number_of_times_to_upsample=2, model="hog")
        print "Locations : ", face_locations
        if not face_locations: print "No face detected"
        unknown_encodings = []

        if face_locations:
            print unknown_encodings
            unknown_encodings = face_recognition.face_encodings(
                unknown_image, face_locations)
            pil_image = Image.fromarray(unknown_image)
            draw = ImageDraw.Draw(pil_image)
            for (top, right, bottom,
                 left), face_encoding in zip(face_locations,
                                             unknown_encodings):
                with open(RECOGNISED_ENCODINGS, 'a+') as encodingsfile:
                    for x in face_encoding:
                        encodingsfile.write("%f " % x)
                    encodingsfile.write("\n")

                # making output annotation for face
                draw.rectangle(((left, top), (right, bottom)),
                               outline=(0, 0, 255))
                text_width, text_height = draw.textsize(username)
                draw.rectangle(
                    ((left, bottom - text_height - 10), (right, bottom)),
                    fill=(0, 0, 255),
                    outline=(0, 0, 255))
                draw.text((left + 6, bottom - text_height - 5),
                          username,
                          fill=(255, 255, 255, 255))

                self.knownNames.append(username)
                with open(RECOGNISED_NAMES, 'w') as namefile:
                    for item in self.knownNames:
                        namefile.write("%s\n" % item)
            del draw

            # If one wants to see image at runtime
            # pil_image.show()
            return (True, pil_image)

        return (False, None)
Example #16
def get_coordinates(image_path):
    img = face_recognition.load_image_file(image_path)
    locations = face_recognition.face_locations(img)
    location_json = []
    for coordinates in locations:
        json_dict = {
            'ymin': coordinates[0],
            'xmin': coordinates[1],
            'ymax': coordinates[2],
            'xmax': coordinates[3]
        }

        location_json.append(json_dict)
    return locations, location_json
Example #17
def recognize_faces(face_image_path):
    # Read image from disk
    image = api.load_image_file(face_image_path)

    # Get face locations in image
    face_locations = api.face_locations(image, model="hog")

    # Create encodings from image
    face_encodings = api.face_encodings(image, known_face_locations=face_locations)

    # Get encodings from DB
    existing_face_encoding_objects = [list(g) for _, g in groupby(FaceEncoding.query.order_by(FaceEncoding.player_id).all(), attrgetter('player_id'))]

    outputs = []

    for i, face_encoding in enumerate(face_encodings):
        distances = {}

        for face_encoding_group in existing_face_encoding_objects:
            # Calculate distance to every encoding for this group (person)
            existing_face_encodings = np.array([fe.encoding for fe in face_encoding_group])
            face_distances = api.face_distance(existing_face_encodings, face_encoding)

            distances[face_encoding_group[0].player] = sum(face_distances) / len(face_distances)

        recognized_player = None
        if len(distances) > 0:
            recognized_player = min(distances, key=distances.get)
            if distances[recognized_player] > THRESHOLD:
                recognized_player = None

        # Create a crop of the face
        top, right, bottom, left = face_locations[i]
        image_crop = image[top:bottom, left:right]

        im = Image.fromarray(image_crop)
        with io.BytesIO() as output:
            im.save(output, format="JPEG")
            contents = output.getvalue()

        outputs.append(
            {
                "image": base64.b64encode(contents).decode("utf-8"),
                "embedding": face_encoding.tolist(),
                "player": recognized_player.serialize() if recognized_player else None
            })

    return outputs
def test():
    # loop over unknown faces
    print("processing unknown faces")
    for filename in os.listdir(UNKNOWN_FACES_DIR):
        # load the image
        print(f"Filename {filename}", end="")
        image = FR.load_image_file(f"{UNKNOWN_FACES_DIR}/{filename}")

        # Find the location of the faces
        locations = FR.face_locations(image, model=MODEL)

        # pass locations to face_encodings to cut down on processing time
        encodings = FR.face_encodings(image, locations)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        print(f",found {len(encodings)} face(s)")

        for face_encoding, face_location in zip(encodings, locations):
            results = FR.compare_faces(known_faces, face_encoding, TOLERENCE)
            match = None

            if True in results:
                match = known_names[results.index(True)]
                print(f"Match Found {match}")
                # dimensions of where the face is
                top_left = (face_location[3], face_location[0])
                bottom_right = (face_location[1], face_location[2])

                # draw rectangle on image
                color = [0, 255, 0]
                cv2.rectangle(image, top_left, bottom_right, color, FRAME_THICKNESS)

                top_left = (face_location[3], face_location[0])
                bottom_right = (face_location[1], face_location[2] + 22)
                # cv2.rectangle(image, top_left, bottom_right, color, cv2.FILLED)
                cv2.putText(
                    image,
                    match,
                    (face_location[3] + 10, face_location[2] + 15),
                    cv2.FONT_HERSHEY_COMPLEX,
                    0.5,
                    (200, 200, 200),
                    FONT_THICKNESS,
                )

        cv2.imshow(filename, image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #19
def test_image(image_to_check, model):
    unknown_image = face_recognition.load_image_file(image_to_check)
    face_locations = face_recognition.face_locations(
        unknown_image, number_of_times_to_upsample=0, model=model)

    i = 0
    for face_location in face_locations:
        print_result(image_to_check, face_location)
        try:
            with Image.open(image_to_check) as im:
                top, right, bottom, left = face_location
                # PIL's crop expects (left, upper, right, lower)
                region = im.crop((left, top, right, bottom))
                fname = "/tmp/test_image_" + str(i) + ".jpeg"
                print("Saving: {}".format(fname))
                region.save(fname, "JPEG")  # was im.save, which wrote the uncropped image
                i = i + 1
        except IOError:
            print("IOError")
def detect_faces_in_image(file_stream):
    # Load the image uploaded by the user
    img = face_recognition.load_image_file(file_stream)
    # Encode the faces found in the uploaded image
    face_encodings = face_recognition.face_encodings(img)

    encodings = []
    for face_encoding in face_encodings:
        encodings.append(face_encoding.tolist())

    face_locations = face_recognition.face_locations(
        img, number_of_times_to_upsample=0, model="hog")

    # face_landmarks = face_recognition.face_landmarks(img, face_locations=face_locations, model='large')

    # Output the recognition result as a JSON key-value structure
    result = {"encodings": encodings, "locations": face_locations}
    return jsonify(result)
Example #21
def save_keypoints_image(image_path, input_folder, output_folder, model):
    image = face_recognition.load_image_file(image_path)
    locations = face_recognition.face_locations(image, model=model)
    landmarks = face_recognition.face_landmarks(image,
                                                face_locations=locations,
                                                return_dict=False)

    num_faces = landmarks.shape[0]
    if num_faces == 0:
        return

    out_path = move_path(image_path, input_folder, output_folder)
    out_path = out_path.with_suffix('')
    out_path.mkdir(parents=True, exist_ok=True)

    for face_idx in range(num_faces):
        file_path = Path(out_path, F"{face_idx}.npy")
        landmark_array = landmarks[face_idx]
        np.save(file_path, landmark_array)

    print(F"finished image {out_path}")
Example #22
def save_keypoints_video(video_path, input_folder, output_folder, model,
                         tolerance):
    video = cv2.VideoCapture(str(video_path))

    out_path = move_path(video_path, input_folder, output_folder)
    out_path = out_path.with_suffix('')
    known_faces = []
    frame_count = 0
    while video.isOpened():
        ret, frame = video.read()
        if not ret:
            break

        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        locations = face_recognition.face_locations(frame, model=model)
        encodings, landmarks = face_recognition.face_encodings_and_landmarks(
            frame, known_face_locations=locations)

        num_faces = landmarks.shape[0]
        if num_faces == 0:
            continue

        for face_idx, (encoding,
                       landmark_array) in enumerate(zip(encodings, landmarks)):
            distances = face_recognition.face_distance(known_faces, encoding)
            if len(distances) == 0 or distances.min() > tolerance:
                known_idx = len(known_faces)
                known_faces.append(encoding)
            else:
                known_idx = int(np.argmin(distances))
                known_faces[known_idx] = np.mean(
                    [known_faces[known_idx], encoding], axis=0)

            file_path = Path(out_path, F"{known_idx}", F"{frame_count}.npy")
            file_path.parent.mkdir(parents=True, exist_ok=True)
            np.save(file_path, landmark_array)

        frame_count += 1

    print(F"finished video {out_path}")
    def _predict(self, frame_number, frame, knn_clf, known_names,
                 known_face_encodings):

        unknown_image = frame[:, :, ::-1]
        if max(unknown_image.shape) > 1600:
            pil_image = Image.fromarray(unknown_image)
            pil_image.thumbnail((1600, 1600), PIL.Image.LANCZOS)  # (1600, 0) in the original would raise
            unknown_image = np.array(pil_image)  # was np.array(pil_img), an undefined name

        img = cv2.cvtColor(unknown_image, cv2.COLOR_RGB2GRAY)
        pil_image = Image.fromarray(unknown_image)

        self.start_time = time.time()
        face_locations = face_recognition.face_locations(
            img, number_of_times_to_upsample=2, model="hog")
        print "Face detection--- %s seconds ---" % (time.time() -
                                                    self.start_time)

        if not face_locations:
            return False

        if face_locations:
            print(face_locations)
            unknown_encodings = face_recognition.face_encodings(
                unknown_image, face_locations)

            print "amy", type(unknown_encodings[0])
            closest_distances = knn_clf.kneighbors(unknown_encodings,
                                                   n_neighbors=1)

            matches = [
                closest_distances[0][i][0] <= DIST_THRESHOLD
                for i in range(len(face_locations))
            ]

            return [(pred, loc, encoding, rec) if rec else
                    ("unknown", loc, encoding, rec)
                    for pred, encoding, loc, rec in zip(
                        knn_clf.predict(unknown_encodings), unknown_encodings,
                        face_locations, matches)]
Example #24
def main():
    vs = VideoStream()
    vs.start()
    names, known_encodings = load_known_faces('./faces/known_faces')
    print(len(known_encodings))
    while vs.isOpened():
        image = vs.read()

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        face_locations = fr.face_locations(image, model='hog')
        img_face_encodings = fr.face_encodings(image, face_locations)
        match_matrix = [
            fr.compare_faces(known_encodings, f, tolerance=0.6)
            for f in img_face_encodings
        ]
        print(match_matrix)
        img_with_faces = draw_bbox_on_img(image, face_locations)

        cv2.imshow('frame', img_with_faces)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    vs.close()
    cv2.destroyAllWindows()
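load_known_faces and draw_bbox_on_img are helpers from the surrounding project and are not shown; a plausible sketch of the loader, matching its use above (parallel lists of names and encodings, one face per image, filename stem as the name):

import os
import face_recognition as fr

def load_known_faces(folder):
    # hypothetical reimplementation for illustration only
    names, encodings = [], []
    for fname in os.listdir(folder):
        image = fr.load_image_file(os.path.join(folder, fname))
        encs = fr.face_encodings(image)
        if encs:  # skip images with no detectable face
            names.append(os.path.splitext(fname)[0])
            encodings.append(encs[0])
    return names, encodings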
unknown_count = 0

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()

    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(
            rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings,
                                                     face_encoding,
                                                     tolerance=0.35)
            name = "Unknown"

            # # If a match was found in known_face_encodings, just use the first one.
            # if True in matches:
            #     first_match_index = matches.index(True)
            #     name = known_face_ids[first_match_index]
def test_image(image_to_check, model):
    unknown_image = face_recognition.load_image_file(image_to_check)
    face_locations = face_recognition.face_locations(unknown_image, number_of_times_to_upsample=0, model=model)

    for face_location in face_locations:
        print_result(image_to_check, face_location)
model = Face(train=False)
#model.load_weights('./face_weights/face_weights.26-val_loss-3.85-val_age_loss-3.08-val_gender_loss-0.22-val_race_loss-0.55.utk.h5')
model.load_weights(
    './face_weights/face_weights.18-val_loss-3.86-val_age_loss-3.05-val_gender_loss-0.22-val_race_loss-0.59.utk.h5'
)

gender_labels = ['Male', 'Female']
race_labels = ['Whites', 'Blacks', 'Asian', 'Indian', 'Others']
#https://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Rothe_DEX_Deep_EXpectation_ICCV_2015_paper.pdf
age_labels = np.reshape(np.arange(1, 94), (93, 1))

demo_image = cv2.imread('./demo_images/15_1_0_20170109213421667.jpg.chip.jpg')
image_h, image_w = demo_image.shape[0], demo_image.shape[1]
margin = 0.01

face_locations = face_recognition.face_locations(demo_image, model='hog')

if len(face_locations) > 0:
    face_batch = np.empty((len(face_locations), 200, 200, 3))

    # add face images into batch
    for i, rect in enumerate(face_locations):
        # crop with a margin
        top, bottom, left, right = rect[0], rect[2], rect[3], rect[1]
        top = max(int(top - image_h * margin), 0)
        left = max(int(left - image_w * margin), 0)
        bottom = min(int(bottom + image_h * margin), image_h - 1)
        right = min(int(right + image_w * margin), image_w - 1)

        face_img = demo_image[top:bottom, left:right, :]
        face_img = cv2.resize(face_img, (200, 200))
Example #28
from __future__ import print_function
import shutil
import os
import face_recognition.api as face_recognition

if __name__ == "__main__":
    path_folder = r'C:\Users\tinnvt\Documents\BasicML\Project\Face Recognition\BML_Face_Recognition\images'
    path_remove_images = r'C:\Users\tinnvt\Documents\BasicML\Project\Face Recognition\BML_Face_Recognition\removed_images'
    for sub_folder_name in os.listdir(path_folder):
        sub_folder_path = os.path.join(path_folder, sub_folder_name)
        sub_folder_remove = os.path.join(path_remove_images, sub_folder_name)
        if not os.path.exists(sub_folder_remove):
            os.mkdir(sub_folder_remove)

        for img_name in os.listdir(sub_folder_path):
            try:
                path_img = os.path.join(sub_folder_path, img_name)

                unknown_image = face_recognition.load_image_file(path_img)
                face_locations = face_recognition.face_locations(
                    unknown_image, number_of_times_to_upsample=1, model="hog")
                if len(face_locations) == 0:
                    print('No faces in this image!', path_img)
                    shutil.move(path_img,
                                os.path.join(sub_folder_remove, img_name))
            except Exception as e:
                print(f'Image {img_name} has error {e}')
Example #29
def analyseFootage(clipname):
    CLIP_PATH = FOOTAGES_PATH + "/" + clipname

    if os.path.isfile(CLIP_PATH) == False:
        return 0

    #Load the known face IDs and encodings for facial recognition
    try:
        with open(os.path.join(STORAGE_PATH, "known_face_ids.pickle"),
                  "rb") as fp:
            known_face_ids = pickle.load(fp)
        with open(os.path.join(STORAGE_PATH, "known_face_encodings.pickle"),
                  "rb") as fp:
            known_face_encodings = pickle.load(fp)
    except:
        known_face_encodings = []
        known_face_ids = []

    #Start the Video Stream
    fvs = FileVideoStream(CLIP_PATH).start()
    time.sleep(1.0)

    print("[INFO] Loading the facial detector")
    detector = dlib.get_frontal_face_detector()
    #predictor = dlib.shape_predictor(LANDMARK_PATH)
    #fa = FaceAligner(predictor, desiredFaceWidth = 96)
    name = "Unknown"
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    #sanity_count = 0
    unknown_count = 0
    marked = True

    print("[INFO] Initializing CCTV Footage")
    while fvs.more():
        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale (while still retaining 3
        # channels)
        frame = fvs.read()

        if frame is None:
            break

        frame = imutils.resize(frame, width=600)

        frame = adjust_gamma(frame, gamma=1.5)
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        #To store the faces
        #This will detect all the images in the current frame, and it will return the coordinates of the faces
        #Takes in image and some other parameter for accurate result
        faces = detector(gray_frame, 0)
        #In above 'faces' variable there can be multiple faces so we have to get each and every face and draw a rectangle around it.

        #sampleNum = sampleNum+1
        for face in faces:
            #num_frames = num_frames + 1
            #print("inside for loop")

            if face is None:
                print("face is none")
                continue

            #face_aligned = fa.align(frame,gray_frame,face)
            #face_aligned = imutils.resize(face_aligned ,width = 600)

            if process_this_frame:
                # Find all the faces and face encodings in the current frame of video
                face_locations = face_recognition.face_locations(frame)
                face_encodings = face_recognition.face_encodings(
                    frame, face_locations)

                face_names = []
                for face_encoding in face_encodings:
                    # See if the face is a match for the known face(s)
                    matches = face_recognition.compare_faces(
                        known_face_encodings, face_encoding, tolerance=0.35)
                    name = "Unknown"

                    # # If a match was found in known_face_encodings, just use the first one.
                    # if True in matches:
                    #     first_match_index = matches.index(True)
                    #     name = known_face_ids[first_match_index]

                    # Or instead, use the known face with the smallest distance to the new face
                    face_distances = face_recognition.face_distance(
                        known_face_encodings, face_encoding)
                    # print(face_distances)
                    try:
                        best_match_index = np.argmin(face_distances)
                        if matches[best_match_index]:
                            name = known_face_ids[best_match_index]
                    except:
                        # print("No students have been marked")
                        #video_capture.release()
                        #cv2.destroyAllWindows()

                        marked = False
                        #return marked
                    #if matches[best_match_index]:
                    #    name = known_face_ids[best_match_index]

                    face_names.append(name)

            if name == "Unknown":
                unknown_count += 1
            else:
                unknown_count = 0

            if unknown_count == 600:
                # video_capture.release()
                # cv2.destroyAllWindows()
                # print("You haven't been registered")
                marked = False
                unknown_count = 0
                break

            process_this_frame = not process_this_frame

            for (top, right, bottom,
                 left), name in zip(face_locations, face_names):

                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255),
                              1)

                # Draw a label with a name below the face
                cv2.rectangle(frame, (left, bottom + 15), (right, bottom),
                              (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom + 15), font, 0.4,
                            (255, 255, 255), 1)

        #Showing the image in another window
        #Creates a window with window name "Face" and with the image img
        #cv2.imshow("Video feed (PRESS Q TO QUIT",frame)
        frame = cv2.imencode('.jpg', frame)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

        #if cv2.waitKey(1) == ord("q") :
        #    break

    print("here")
    # do a bit of cleanup
    cv2.destroyAllWindows()
    #fvs.stop()
    return
Example #30
    def test_face_locations(self):
        img = api.load_image_file(os.path.join(os.path.dirname(__file__), "test_images", "obama.jpg"))
        detected_faces = api.face_locations(img)

        assert len(detected_faces) == 1
        assert detected_faces[0] == (142, 617, 409, 349)
    def test_face_locations(self):
        img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
        detected_faces = api.face_locations(img)

        self.assertEqual(len(detected_faces), 1)
        self.assertEqual(detected_faces[0], (142, 617, 409, 349))
Example #32
    pool.starmap(test_image, function_parameters)


known_face_names, known_face_encodings = scan_known_people('./knownmans')
face_count = 0

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_frame = frame[:, :, ::-1]

    # Find all the faces and face encodings in the frame of video
    face_locations = face_recognition.face_locations(rgb_frame, model="cnn")
    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

    # Loop through each face in this frame of video
    for (top, right, bottom,
         left), face_encoding in zip(face_locations, face_encodings):
        # See if the face is a match for the known face(s)
        matches = face_recognition.compare_faces(known_face_encodings,
                                                 face_encoding,
                                                 tolerance=0.30)

        name = "Unknown"

        # If a match was found in known_face_encodings, just use the first one.
        if True in matches:
            first_match_index = matches.index(True)
Example #33
import cv2
import face_recognition.api as face_recognition
import PIL.Image
import numpy as np
import logging as log
import datetime as dt
from time import sleep
import os

dir = os.path.dirname(os.path.abspath(__file__))
image_dir = os.path.join(dir, "Images")

for root, dirs, files in os.walk(image_dir):
    for file in files:

        if file.endswith(("png", "jpg", "jpeg")):

            path = os.path.join(root, file)
            label = os.path.split(path)[-1]
            label, var = label.split('.')
            img = cv2.imread(path)
            resized_image = cv2.resize(img, (700, 700))
            faces = face_recognition.face_locations(resized_image)
            if len(faces) > 0:  # the original tested >= 0, which is always true
                print(len(faces))
                print(path)
def mark_your_attendance(location, known_face_encodings, known_face_ids):

    mpl.rcParams['toolbar'] = 'None'

    if (os.path.exists(DB_PATH)):
        #rdbms='sqlite'
        #conn = psycopg2.connect(DB_PATH)
        #c=conn.cursor()
        rdbms = 'postgresql'
        conn = psycopg2.connect(host="localhost",
                                database="face_rec_db",
                                user="******",
                                password="******")
        c = conn.cursor()
    else:
        #os.mknod(DB_PATH)
        conn = sqlite3.connect(DB_PATH)
        c = conn.cursor()
        c.execute('''CREATE TABLE IF NOT EXISTS ATTENDANCE
         (ID        TEXT   NOT NULL,
         TIMESTAMP  TEXT       NOT NULL,
         LOCATION  TEXT);''')
        conn.commit()

    name = "Unknown"
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    sanity_count = 0
    unknown_count = 0
    marked = True

    video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    #_,frame = video_capture.read()

    while True:
        # Grab a single frame of video
        _, frame = video_capture.read()

        #Applying face enhancement steps
        frame = imageEnhancement.adjust_gamma(frame, gamma=1.5)

        # Resize frame of video to 1/4 size for faster face recognition processing
        #small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        small_frame = frame
        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(known_face_encodings,
                                                         face_encoding,
                                                         tolerance=0.35)
                name = "Unknown"

                # # If a match was found in known_face_encodings, just use the first one.
                # if True in matches:
                #     first_match_index = matches.index(True)
                #     name = known_face_ids[first_match_index]

                # Or instead, use the known face with the smallest distance to the new face
                face_distances = face_recognition.face_distance(
                    known_face_encodings, face_encoding)
                # print(face_distances)
                try:
                    best_match_index = np.argmin(face_distances)
                except:
                    # print("No students have been marked")
                    video_capture.release()
                    cv2.destroyAllWindows()
                    marked = False
                    return marked
                if matches[best_match_index]:
                    name = known_face_ids[best_match_index]

                face_names.append(name)

        if name == "Unknown":
            unknown_count += 1
        else:
            unknown_count = 0

        if unknown_count == 600:
            # video_capture.release()
            # cv2.destroyAllWindows()
            # print("You haven't been registered")
            marked = False
            unknown_count = 0
            break

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            #top *= 4
            #right *= 4
            #bottom *= 4
            #left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.7,
                        (255, 255, 255), 1)

        # print("AFTER SHOWING")
        # Hit 'q' on the keyboard to quit!
        if (sanity_count == 0):
            prev_name = name
            sanity_count += 1

        elif sanity_count < 60:
            if (prev_name == name and name != "Unknown"):
                sanity_count += 1
                prev_name = name
            else:
                sanity_count = 0

        elif sanity_count == 60:
            # print("Face registered")
            # video_capture.release()
            # cv2.destroyAllWindows()
            sanity_count = 0
            # now = datetime.now()
            # if(entry_or_exit==0):
            #     c.execute("INSERT INTO ATTENDANCE VALUES (?,datetime('now'),'IN');",(name, ))
            # else:
            #     c.execute("INSERT INTO ATTENDANCE VALUES (?,datetime('now'),'OUT');",(name, ))

            if (rdbms == 'sqlite'):
                c.execute(
                    "INSERT INTO ATTENDANCE VALUES (?,datetime('now'),?);", (
                        name,
                        location,
                    ))
            elif (rdbms == 'postgresql'):
                c.execute("INSERT INTO attendance VALUES (%s,now(),%s);",
                          (name, location))
            conn.commit()

            break

        #OpenCV's implementation to show an image in window(doesn't work on production server)
        #cv2.imshow("Marking Attendance (PRESS Q TO QUIT)",frame)

        #Encoding the frame to be stream into browser
        frame = cv2.imencode('.jpg', frame)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

        #if cv2.waitKey(20) == ord("q"):
        #    break

    # Release handle to the webcam

    #plt.close()
    video_capture.release()
    cv2.destroyAllWindows()
    conn.close()

    return marked
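mark_your_attendance yields multipart JPEG chunks, which matches Flask's usual MJPEG streaming pattern; a hedged sketch of how such a generator is typically wired to a route (the route name and argument values are assumptions):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/mark_attendance')
def mark_attendance_feed():
    # hypothetical wiring: stream annotated frames to the browser;
    # known_face_encodings / known_face_ids are loaded elsewhere
    return Response(
        mark_your_attendance('main-gate', known_face_encodings, known_face_ids),
        mimetype='multipart/x-mixed-replace; boundary=frame')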
Example #35
video_capture = cv2.VideoCapture(0)
process_this_frame = True
while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(
            rgb_small_frame, number_of_times_to_upsample=2)
        face_encodings = face_recognition.face_encodings(
            rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(x_train,
                                                     face_encoding,
                                                     tolerance=0.5)
            name = "Unknown"

            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = y_lables[first_match_index]