Example #1
def rekognition_thread(pipeline: queue.Queue):
    detector = face_detector.FaceDetector()
    while True:
        if stop_cap_event.is_set():
            logging.info(f"rekog thread detected stop capture")
            break
        try:
            frame = pipeline.get(block=True, timeout=2)
            _, frame_png = cv2.imencode('.png', frame)
            frame_bytes = frame_png.tobytes()
            if vir_lock.get_state() is lock.Locked:
                prob = detector.detect_face_from_bytes(frame_bytes)
                if prob > 0.75:
                    logging.info(f'Probability: {prob}  Face detected')
                    # check if this is an authorized person
                    try:
                        with open('images/Joe-Benczarski.jpg', 'rb') as tgt:
                            compare_bytes = tgt.read()
                    except OSError:
                        logging.error("cannot open reference image")
                        continue  # without the reference image, skip the comparison
                    auth = detector.compare_face_from_bytes(
                        frame_bytes, compare_bytes, 99.5)
                    if auth:
                        logging.info(f"Detected authorized person")
                        # Unlock the door
                        logging.info(vir_lock.unlock())
                        mqtt_send_state("unlock")
                else:
                    logging.info(f'Probability: {prob}  No face detected')
        except queue.Empty:
            logging.debug(f"rekognition queue empty")
Example #2
    def __init__(self):
        rospy.init_node('face_detector_node')
        self.faces = []
        self.keypoint_arrays = []
        self.bridge = CvBridge()
        self.debug = rospy.get_param('~debug', default=False)
        self.image_sub_topic_name = rospy.get_param(
            '~image_sub_topic_name', default='/kinect/qhd/image_color_rect')
        use_gpu = rospy.get_param('~use_gpu', default=True)
        self.use_compressed_image = rospy.get_param('~use_compressed_image',
                                                    default=False)
        self.face_detector = face_detector.FaceDetector(use_gpu)
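The parameters read above would normally feed a subscriber created elsewhere in the node; a minimal sketch of what that might look like, assuming a detect() method on FaceDetector and the standard sensor_msgs/Image message (the method names here are illustrative, not from the original):

    # from sensor_msgs.msg import Image
    def start(self):
        # Subscribe to the configured color-image topic; queue_size=1 keeps
        # the callback working on the freshest frame.
        self.image_sub = rospy.Subscriber(self.image_sub_topic_name, Image,
                                          self._image_callback, queue_size=1)

    def _image_callback(self, msg):
        # CvBridge converts the ROS image message into an OpenCV BGR array.
        frame = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
        self.faces = self.face_detector.detect(frame)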
Example #3
    def __init__(self, test_path="", image_format="jpeg", roi=200):
        self.test_path = test_path
        self.image_format = image_format
        self.roi = roi
        self.net = cv2.dnn.readNetFromCaffe(
            "./detection_models/deploy.prototxt.txt",
            "./detection_models/res10_300x300_ssd_iter_140000.caffemodel")
        self.face_detector = face_detector.FaceDetector()
        self.global_frame = 0
        self.temp_path = os.path.join(test_path, "temp")

        # Recreate a clean temp directory on every run.
        if os.path.isdir(self.temp_path):
            shutil.rmtree(self.temp_path)

        os.mkdir(self.temp_path)
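Example #3 only loads the res10 SSD Caffe model; the forward pass that usually accompanies this network looks roughly like the sketch below. The 300x300 input size and BGR mean values are the ones conventionally used with this model, assumed here rather than taken from the original:

import cv2
import numpy as np

def detect_faces_dnn(net, frame, conf_threshold=0.5):
    h, w = frame.shape[:2]
    # Build a 300x300 blob with the model's training-time mean values.
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
                                 (300, 300), (104.0, 177.0, 123.0))
    net.setInput(blob)
    detections = net.forward()
    boxes = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > conf_threshold:
            # Coordinates come back normalized; scale them to pixels.
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            boxes.append(box.astype(int))
    return boxes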
Example #4
def main(args):
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind('tcp://*:%d' % args.port)

    face_detector = detector.FaceDetector(args.detect_multiple_faces)

    while True:
        img = socket.recv()
        # np.fromstring is deprecated for binary data; frombuffer is the
        # supported equivalent.
        npimg = np.frombuffer(img, dtype=np.uint8)
        frame = cv2.imdecode(npimg, 1)
        if frame is not None:
            faces = face_detector.detect(frame)
            response = json.dumps(faces.tolist())
            socket.send_string(response)
        else:
            # A REP socket must answer every request, or the REQ peer blocks.
            socket.send_string(json.dumps([]))
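A matching client for this REP server might look like the sketch below; the endpoint, port, and image path are illustrative assumptions:

import json

import cv2
import zmq

context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect('tcp://localhost:5555')  # assumed; must match args.port

# Encode a frame to JPEG bytes, send it, and read back the detected boxes.
frame = cv2.imread('test.jpg')
ok, encoded = cv2.imencode('.jpg', frame)
socket.send(encoded.tobytes())
faces = json.loads(socket.recv_string())
print(faces)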
Example #5
    def run(self):

        ret, frame = self.video_capture.read()  # Initialize the last_frame
        self.last_frame = frame
        fc = face_detector.FaceDetector()

        while True:

            frame = self.__get_next_frame()

            got_face, frame = fc.get_faces(frame, self.background)

            state = self.cs.get_state(got_face)
            self.__do_state(state, frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        # When everything is done, release the capture
        self.video_capture.release()
        cv2.destroyAllWindows()
Example #6
metrics = {}

for detector in detectors:
    metrics[detector] = {
        'TP': 0,
        'FP': 0,
        'FN': 0,
        'average_IOU': 0,
        'speed': 0,
        'precision': [],
        'recall': [],
        'accuracy': []
    }

Detection = namedtuple("Detection", ["image_path", "gt", "pred"])
detector4 = face_detector.FaceDetector()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))
mtcnn = MTCNN(keep_all=True, device=device)
cnn_face_detector = dlib.cnn_face_detection_model_v1(
    'mmod_human_face_detector.dat')


def ellipse_to_bb(major_radius, minor_radius, xcenter, ycenter,
                  angle):  # convert an elliptical face annotation to a bounding box
    # Half-extents of the axis-aligned box that encloses a rotated ellipse.
    ux = minor_radius * cos(angle)
    uy = minor_radius * sin(angle)
    vx = major_radius * cos(angle + pi / 2)
    vy = major_radius * sin(angle + pi / 2)  # was cos(); the vertical term needs sin
    bbox_halfwidth = sqrt(ux * ux + vx * vx)
    bbox_halfheight = sqrt(uy * uy + vy * vy)
    return (xcenter - bbox_halfwidth, ycenter - bbox_halfheight,
            xcenter + bbox_halfwidth, ycenter + bbox_halfheight)
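The TP/FP/FN counters above imply matching predictions against ground truth by overlap; a minimal sketch of the IoU computation such an evaluation typically relies on, assuming (x1, y1, x2, y2) boxes like the ones ellipse_to_bb produces:

def bb_intersection_over_union(box_a, box_b):
    # Overlap rectangle between the two boxes.
    xa = max(box_a[0], box_b[0])
    ya = max(box_a[1], box_b[1])
    xb = min(box_a[2], box_b[2])
    yb = min(box_a[3], box_b[3])
    inter = max(0, xb - xa) * max(0, yb - ya)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    # IoU is intersection over union; guard against degenerate boxes.
    return inter / union if union > 0 else 0.0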
Example #7
    def test_detect_positive(self):
        photo = 'images/cap1.jpg'
        c = face_detector.FaceDetector()
        self.assertEqual(c.detect_face_from_file(photo), True)
Example #8
    def test_compare(self):
        src = 'images/cap1.jpg'
        tgt = 'images/Joe-Benczarski.jpg'
        c = face_detector.FaceDetector()
        self.assertEqual(c.compare_face_from_file(src, tgt), True)
Example #9
    def test_detect_negative3(self):
        photo = 'images/framepic1.jpg'
        c = face_detector.FaceDetector()
        self.assertEqual(c.detect_face_from_file(photo), False)
Example #10
def get_subdirectories(a_dir):
    return [
        name for name in os.listdir(a_dir)
        if os.path.isdir(os.path.join(a_dir, name))
    ]


def is_blurry(image_path, threshold=90):
    image = cv2.imread(image_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return cv2.Laplacian(gray, cv2.CV_64F).var() < threshold


if __name__ == "__main__":

    detector = face_detector.FaceDetector(image_size=128)

    if not os.path.exists(faces_folder):
        os.makedirs(faces_folder)
    if not os.path.exists(blurry_folder):
        os.makedirs(blurry_folder)

    subdirs = get_subdirectories(raw_folder)

    for subdir in subdirs:
        faces_dir = os.path.join(faces_folder, subdir)
        if not os.path.exists(faces_dir):
            os.makedirs(faces_dir)

        raw_dir = os.path.join(raw_folder, subdir)
        files = os.listdir(raw_dir)
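A quick way to sanity-check the variance-of-Laplacian threshold that is_blurry uses: blurring an image should drop the score sharply. A minimal sketch (the sample path is an assumption):

import cv2

image = cv2.imread('sample.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
sharp_score = cv2.Laplacian(gray, cv2.CV_64F).var()

blurred = cv2.GaussianBlur(gray, (15, 15), 0)
blurry_score = cv2.Laplacian(blurred, cv2.CV_64F).var()

# The blurred copy should score far below the sharp original, which is
# what the default threshold of 90 in is_blurry relies on.
print(sharp_score, blurry_score)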
Example #11
def main(args):
    PREVIEW_WINDOW = 'preview'
    QUIT_KEY = 27  # Escape
    ANCHOR_KEY = ord(' ')
    WIDTH = 640
    THRESHOLD = 0.85

    face_detector = detector.FaceDetector(args.detect_multiple_faces)
    face_extractor = extractor.FaceExtractor(160, 44)
    face_embedder = embedder.FaceEmbedder(args.model)

    cv2.namedWindow(PREVIEW_WINDOW)
    vc = cv2.VideoCapture(0)

    doCapture = True
    anchor_embedding = None
    with tf.Session() as sess:
        while doCapture:
            rval, frame = vc.read()
            if frame is None:
                continue

            height = int(frame.shape[0] * WIDTH / frame.shape[1])
            frame = cv2.resize(frame, (WIDTH, height))  # resize the frame
            image = cv2.cvtColor(
                frame, cv2.COLOR_BGR2RGB)  # convert frame to RGB space

            # analysis
            boxes = face_detector.detect(image)
            faces = face_extractor.extract(image, boxes)
            embeddings = face_embedder.compute(sess, faces)

            # rendering of results
            for i, (x1, y1, x2, y2) in enumerate(boxes):
                if isinstance(anchor_embedding, np.ndarray):
                    distance = facenet.distance([embeddings[i]],
                                                [anchor_embedding])
                    if distance[0] <= THRESHOLD:
                        color = (0, 255, 0)
                    else:
                        color = (0, 0, 255)
                    frame = cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
                    frame = cv2.putText(frame,
                                        "{:.6f}".format(distance[0]),
                                        (x1, y1 - 10),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color,
                                        1, cv2.LINE_AA)
                else:
                    frame = cv2.rectangle(frame, (x1, y1), (x2, y2),
                                          (0, 0, 255), 2)

            cv2.imshow(PREVIEW_WINDOW, frame)

            # key handling
            key = cv2.waitKey(1)
            if key == QUIT_KEY or cv2.getWindowProperty(
                    PREVIEW_WINDOW, cv2.WND_PROP_VISIBLE) < 1:
                doCapture = False
            if key == ANCHOR_KEY and len(embeddings) > 0:
                anchor_embedding = embeddings[0]

    cv2.destroyWindow(PREVIEW_WINDOW)
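The anchor comparison above reduces to a distance between two embedding vectors; assuming facenet's default metric, it is roughly the squared Euclidean distance sketched below:

import numpy as np

def embedding_distance(e1, e2):
    # Squared Euclidean distance between two embeddings; smaller means
    # more similar, and a match is declared below THRESHOLD (0.85 above).
    diff = np.subtract(e1, e2)
    return np.sum(np.square(diff))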
Example #12
import time
import sys
import cv2
import frame_creator
# The next two aliases are assumptions; the original import lines for
# ut and fd were not shown in this excerpt.
import user_tracker as ut
import face_detector as fd

user_name = '1'
save_face = False
default_save_path = './dataset/2/'
data_set_path = './dataset/data_' + user_name + '.txt'
rgb_video_path = './dataset/rgb_' + user_name + '.avi'
depth_video_path = './dataset/depth_' + user_name + '.avi'

user_tracker = ut.UserTracker(rgb_video_path,
                              depth_video_path,
                              data_set_path)
face_detector = fd.FaceDetector()


beginning_time = 0
user_counter = 0
while user_tracker.frame_exist():
    face_detected, faces = face_detector.get_faces(user_tracker.get_current_frame())
    if save_face and face_detected:
        for face in faces:
            user_counter += 1
            save_path = default_save_path + str(user_counter) + '.jpg'
            # print(save_path)
            cv2.imwrite(save_path, face)
            cv2.waitKey(1)
    # print(time.time() - beginning_time)
    # user_tracker.show_frames()
Example #13
def recognition_thread(frame_buffer, person_id_queue, led1_event, led2_event,
                       stop):
    import face_recognition
    #     from tensorflow.keras.preprocessing.image import img_to_array

    detector = face_detector.FaceDetector()

    with open("models/trained_knn_model.clf", 'rb') as f:
        face_encodings_database = pickle.load(f)

    authorized_users = {}
    face_cnt = 1
    unknown_faces_save_cnt = FACES_SAVE_INTERVAL

    #    if LIVENESS_TEST:
    #        liveness_net, liveness_labels = liveness_detector_init()

    for class_dir in os.listdir("media/Faces/"):
        face = cv2.imread(
            os.path.join("media/Faces/", class_dir, "face_ID.jpg"))
        face = cv2.resize(face, (360, 480))
        f = open(os.path.join("media/Faces/", class_dir, "cardID.txt"), "r")
        ID = int(f.read())
        f.close()

        authorized_users[class_dir] = KnownPerson(user_name=class_dir,
                                                  user_id=ID,
                                                  face_image=face)

    print("Loaded ID for {} users".format(len(authorized_users)))

    fps = 0
    fps_max = 10
    t0 = time.time()

    while True:
        if stop.is_set():
            break

        if frame_buffer.empty():
            time.sleep(0.01)
            continue

        door_id, door_frame = frame_buffer.get()
        height, width, channels = door_frame.shape
        rgb_small_rgb = detector.get_rgb_image(door_frame)
        found_faces = detector.detect(rgb_small_rgb)

        now_date = datetime.now()
        faces_save_dir = 'Logs/{}_{}_{}_door_{}'.format(
            now_date.day, now_date.month, now_date.year, door_id)
        unknown_faces_save_dir = '{}/Unknown'.format(faces_save_dir)
        if not os.path.isdir(faces_save_dir):
            os.mkdir(faces_save_dir)
            os.mkdir(unknown_faces_save_dir)

        found_real_faces = []
        scale_x = width / detector.inference_size[0]
        scale_y = height / detector.inference_size[1]
        for obj in found_faces:
            bbox = obj.bbox
            x0, y0 = int(bbox.xmin), int(bbox.ymin)
            x1, y1 = int(bbox.xmax), int(bbox.ymax)
            w = x1 - x0
            h = y1 - y0
            x0 = int(x0 + w / 10)
            y0 = int(y0 + h / 4)
            x1 = int(x1 - w / 10)
            y1 = int(y1)
            someone_face = door_frame[int(y0 * scale_y):int(y1 * scale_y),
                                      int(x0 * scale_x):int(x1 * scale_x)]

            #            if LIVENESS_TEST:
            #                (label, prob) = predict_tftrt(liveness_net, face, liveness_labels)
            #            else:
            label = 'real'
            prob = 1.0

            if (label == 'real') & (prob > 0.85):
                found_real_faces.append((y0, x1, y1, x0))
                if unknown_faces_save_cnt <= 0:
                    if LOG_UNKNOWN_FACES:
                        cv2.imwrite(
                            "{}/{}_{}_{}_{}.jpg".format(
                                unknown_faces_save_dir, now_date.hour,
                                now_date.minute, now_date.second, face_cnt),
                            someone_face)
                        face_cnt += 1
        if unknown_faces_save_cnt <= 0:
            unknown_faces_save_cnt = FACES_SAVE_INTERVAL
        else:
            unknown_faces_save_cnt -= 1

        if found_real_faces:
            if door_id == 0:
                led1_event.set()
            elif door_id == 1:
                led2_event.set()

            face_encodings = face_recognition.face_encodings(
                rgb_small_rgb, known_face_locations=found_real_faces)
            closest_distances = face_encodings_database.kneighbors(
                face_encodings, n_neighbors=1)
            are_matches = [
                closest_distances[0][i][0] <= FACE_RECOGNITION_ALLOWED_DISTANCE
                for i in range(len(found_real_faces))
            ]

            for predicted_user, face_location, found in \
                    zip(face_encodings_database.predict(face_encodings), found_real_faces, are_matches):
                y0, x1, y1, x0 = face_location
                if found:
                    person = authorized_users.get(predicted_user)
                    if person is not None:
                        if person.save_cnt <= 0:
                            if LOG_KNOWN_USERS:
                                someone_face = door_frame[y0:y1, x0:x1]
                                user_path = '{}/{}'.format(
                                    faces_save_dir, predicted_user)
                                if not os.path.isdir(user_path):
                                    os.mkdir(user_path)
                                cv2.imwrite(
                                    "{}/{}_{}_{}_{}.jpg".format(
                                        user_path, now_date.hour,
                                        now_date.minute, now_date.second,
                                        face_cnt), someone_face)
                                face_cnt += 1
                                # was item assignment on the object; attribute
                                # access matches the rest of the code
                                person.save_cnt = FACES_SAVE_INTERVAL
                        else:
                            person.save_cnt -= 1

                        person.last_seen = datetime.now()

                        if person.first_seen != datetime(1, 1, 1):
                            person.seen_frames += 1
                            if (datetime.now() -
                                    person.first_seen_this_interaction >
                                    timedelta(minutes=5)):
                                person.first_seen_this_interaction = datetime.now()
                                person.seen_frames = 0
                        else:
                            person.first_seen = datetime.now()
                            person.first_seen_this_interaction = datetime.now()
                else:
                    if unknown_faces_save_cnt <= 0:
                        if LOG_UNKNOWN_FACES:
                            someone_face = door_frame[y0:y1, x0:x1]
                            cv2.imwrite(
                                "{}/{}_{}_{}_{}.jpg".format(
                                    unknown_faces_save_dir, now_date.hour,
                                    now_date.minute, now_date.second,
                                    face_cnt), someone_face)
                            face_cnt += 1

            if unknown_faces_save_cnt <= 0:
                unknown_faces_save_cnt = FACES_SAVE_INTERVAL
            else:
                unknown_faces_save_cnt -= 1

        visitors_data = []
        for known_user in authorized_users:
            person = authorized_users[known_user]
            if datetime.now() - person.last_seen > timedelta(
                    seconds=ROBUST_SEEN_INTERVAL):
                person.seen_frames = 0
            if person.seen_frames > ROBUST_SEEN_TIMES:
                # user_name matches the keyword used when KnownPerson is built
                visitors_data.append((person.user_name, person.user_id,
                                      person.last_seen))
        if len(visitors_data) > 0:
            if person_id_queue.empty():
                person_id_queue.put((door_id, visitors_data))

        if fps == fps_max:
            print('Recognition done in {}ms (average)'.format(
                (time.time() - t0) * 1000 / fps))
            t0 = time.time()
            fps = 0
        else:
            fps += 1
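Example #13 loads a pre-trained KNN classifier from models/trained_knn_model.clf; a minimal sketch of how such a model could be produced with scikit-learn and face_recognition, reusing the media/Faces/ layout from the example (parameters are assumptions):

import os
import pickle

import face_recognition
from sklearn.neighbors import KNeighborsClassifier

encodings, labels = [], []
for person in os.listdir('media/Faces/'):
    image = face_recognition.load_image_file(
        os.path.join('media/Faces/', person, 'face_ID.jpg'))
    boxes = face_recognition.face_locations(image)
    if boxes:
        encodings.append(face_recognition.face_encodings(
            image, known_face_locations=boxes)[0])
        labels.append(person)

# n_neighbors=1 matches the kneighbors(..., n_neighbors=1) call in the thread.
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(encodings, labels)

with open('models/trained_knn_model.clf', 'wb') as f:
    pickle.dump(knn, f)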