Code Example #1
import cv2
from detection.FaceDetector import FaceDetector
from recognition.FaceRecognition import FaceRecognition
from classifier.FaceClassifier import FaceClassifier

def index_to_name(ind):
    names = ['goutham', 'lokesh', 'milind', 'nishant', 'pranjal', 'sumanth']
    return names[ind]
def _update_attendance(attendance_list):
    '''
    Writes the current attendance list to args.attendance_file, one name per line.

    Args:
        attendance_list (list): names to record, one per line.

    '''
    with open(args.attendance_file,"w") as f:
        attendance_list = [a + "\n" for a in attendance_list]
        f.writelines(attendance_list)

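# NOTE: this fragment assumes an argparse namespace `args` (attendance_file,
# trained_classifier, camera) and checkpoint paths `detect_ckpt` / `recog_ckpt`
# defined earlier in the original script; they are not shown here.
# Load the detector, the embedding model, and several pre-trained classifiers
# (SVM, k-NN with k=7 and k=5, random forest) for comparison.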
face_detector = FaceDetector(PATH_TO_CKPT=detect_ckpt)
face_recognition = FaceRecognition(PATH_TO_CKPT=recog_ckpt)
face_classfier = FaceClassifier(args.trained_classifier)
face_classfier_svm = FaceClassifier('./classifier/trained_svm.pkl')
face_classfier_knn7 = FaceClassifier('./classifier/knn_7.pkl')
face_classfier_knn5 = FaceClassifier('./classifier/knn_5.pkl')
face_classfier_rf = FaceClassifier('./classifier/random_forests.pkl')

#video_capture = cv2.VideoCapture(args.camera)
video_capture = cv2.VideoCapture(0)
print('Start Recognition!')
prevTime = 0
count = 0
attendance_list = []
skip = 0
while True:
Code Example #2
import cv2
import time
import numpy as np
from detection.FaceDetector import FaceDetector
from recognition.FaceRecognition import FaceRecognition
from classifier.FaceClassifier import FaceClassifier

face_detector = FaceDetector()
face_recognition = FaceRecognition()
face_classfier = FaceClassifier('./classifier/trained_classifier.pkl')
video_capture = cv2.VideoCapture(0)

print('Start Recognition!')
prevTime = 0
while True:
    ret, frame = video_capture.read()
    if not ret:  # stop if a frame could not be read from the camera
        break
    frame = cv2.resize(frame, (0, 0), fx=1, fy=1)  # resize frame (optional)
    #time.sleep(1)
    curTime = time.time()  # calc fps
    find_results = []

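    # Keep only the RGB channels, run the detector, and retain boxes whose
    # confidence score exceeds the 0.3 threshold.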
    frame = frame[:, :, 0:3]
    boxes, scores = face_detector.detect(frame)
    face_boxes = boxes[np.argwhere(scores>0.3).reshape(-1)]
    face_scores = scores[np.argwhere(scores>0.3).reshape(-1)]
    print('Detected_FaceNum: %d' % len(face_boxes))

    if len(face_boxes) > 0:
        for i in range(len(face_boxes)):
            box = face_boxes[i]
            cropped_face = frame[box[0]:box[2], box[1]:box[3], :]
Code Example #3
import os

from detection.FaceDetector import FaceDetector
from recognition.FaceRecognition import FaceRecognition
from classifier.FaceClassifier import FaceClassifier


if os.name == 'nt':
    SPERATOR = '\\'
else:
    SPERATOR = '/'

BASE_DIR = os.path.dirname(__file__) + SPERATOR

PROFILE_ROUND = '.4f'
FACE_DETEC_THRESHOLD = 0.5

face_detector = FaceDetector()
face_recognition = FaceRecognition()


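# Simple container pairing a face embedding (rep) with an identity label.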
class Face:

    def __init__(self, rep, identity):
        self.rep = rep
        self.identity = identity

    def __repr__(self):
        return "{{id: {}, rep[0:10]: {}}}".format(
            str(self.identity),
            self.rep[0:10],
        )
Code Example #4
def detect_faces():
    global people_folders, vector_size, embeddings, embeddings_ids, number_imgs_list

    from embeddings.FaceEmbeddings import FaceEmbeddings
    from detection.FaceDetector import FaceDetector

    face_detector = FaceDetector()
    face_recognition = FaceEmbeddings()
    vector_size = face_recognition.get_embedding_size()

    print("\nExecutando deteccao facial")

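    # Walk every person folder, detect a face in each image, and store the
    # resulting embedding together with the person's name and image index;
    # unreadable or face-less images are counted in number_imgs_list.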
    if people_folders is None:
        people_folders = [f.path for f in scandir(input_folder) if f.is_dir()]
    assert len(people_folders) >= 1

    prog_bar = tqdm(total=len(people_folders), desc="Detectando", position=0, unit="pessoas")

    for person_path in people_folders:

        person_name = path.basename(person_path)
        person_imgs_path = [f.path for f in scandir(person_path) if f.is_file()]

        # if output_folder is not None:
        #     curr_output = path.join(output_folder, person_name)
        #     makedirs(curr_output, exist_ok=True)

        number_imgs_list.append([person_name, len(person_imgs_path), 0])

        for i, img_path in enumerate(person_imgs_path):
            try:
                img = Image.open(img_path)
            except (OSError, IOError):
                tqdm.write('Open image file failed: ' + img_path)
                number_imgs_list[-1][-1] += 1
                continue

            if img is None:
                tqdm.write('Open image file failed: ' + img_path)
                number_imgs_list[-1][-1] += 1
                continue

            image_torch, score = face_detector.extract_face(img)
            # image_torch, score = face_detector.extract_face(img, save_path=path.join(curr_output, str(i) + "a.jpg"))

            if image_torch is None or score is None or score < 0.5:
                tqdm.write(f'No face found in {img_path}')
                if score is not None:
                    tqdm.write(f'(Score: {score})')
                number_imgs_list[-1][-1] += 1
                continue

            embeddings.append(face_recognition.describe(image_torch))
            embeddings_ids.append([str(person_name), i])
        prog_bar.update(1)
    prog_bar.close()

    vector_size = face_recognition.get_embedding_size()
    embeddings_to_df()
    people_to_df()

    del people_folders
Code Example #5
import cv2
import numpy as np
from recognition.FaceRecognition import FaceRecognition
from detection.FaceDetector import FaceDetector

face_detector = FaceDetector()
face_recognition = FaceRecognition()
image_files = ['./media/1.jpg', './media/2.jpg']
for input_str in image_files:

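    # Detect faces in the image, crop each one to 160x160, and print the
    # descriptor produced by the embedding network; each crop is shown in a
    # window until a key is pressed.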
    img = cv2.imread(input_str)
    boxes, scores = face_detector.detect(img)
    face_boxes = boxes[np.argwhere(scores > 0.5).reshape(-1)]
    print('Number of faces in image:', len(face_boxes))
    for box in face_boxes:
        cropped_face = img[box[0]:box[2], box[1]:box[3], :]
        cropped_face = cv2.resize(cropped_face, (160, 160),
                                  interpolation=cv2.INTER_AREA)

        print('Face descriptor:')
        print(face_recognition.recognize(cropped_face), '\n')
        cv2.imshow('image', cropped_face)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Code Example #6
def main(args):
    face_detector = FaceDetector()
    face_recognition = FaceRecognition(args.model)
    face_classfier = FaceClassifier(args.classifier_filename)
    video_capture = cv2.VideoCapture(args.video_input)
    output_file = './media/result/' + os.path.basename(
        args.video_input) + '_result.avi'

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(
        output_file, fourcc, 24.0,
        (int(video_capture.get(3)), int(video_capture.get(4))))

    print('Start Recognition!')
    prevTime = 0
    while video_capture.isOpened():
        ret, frame = video_capture.read()
        if not ret:  # stop when the video ends or a frame cannot be read
            break

        curTime = time.time()  # calc fps
        find_results = []

        frame = frame[:, :, 0:3]
        boxes, scores = face_detector.detect(frame)
        face_boxes = boxes[np.argwhere(scores > 0.3).reshape(-1)]
        face_scores = scores[np.argwhere(scores > 0.3).reshape(-1)]
        print('Detected_FaceNum: %d' % len(face_boxes))

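        # Crop each detected face, embed it, classify the embedding, and draw
        # the bounding box with the predicted name under it.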
        if len(face_boxes) > 0:
            for i in range(len(face_boxes)):
                box = face_boxes[i]
                cropped_face = frame[box[0]:box[2], box[1]:box[3], :]
                cropped_face = cv2.resize(cropped_face, (160, 160),
                                          interpolation=cv2.INTER_AREA)
                feature = face_recognition.recognize(cropped_face)
                name = face_classfier.classify(feature)

                cv2.rectangle(frame, (box[1], box[0]), (box[3], box[2]),
                              (0, 255, 0), 2)

                # plot result idx under box
                text_x = box[1]
                text_y = box[2] + 20
                cv2.putText(frame,
                            name, (text_x, text_y),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL,
                            1, (0, 0, 255),
                            thickness=1,
                            lineType=2)
        else:
            print('Unable to align')

        sec = curTime - prevTime
        prevTime = curTime
        fps = 1 / (sec)
        fps_text = 'FPS: %2.3f' % fps
        text_fps_x = len(frame[0]) - 150
        text_fps_y = 20
        cv2.putText(frame,
                    fps_text, (text_fps_x, text_fps_y),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL,
                    1, (0, 0, 0),
                    thickness=1,
                    lineType=2)

        out.write(frame)

    video_capture.release()
    out.release()
    cv2.destroyAllWindows()
Code Example #7
import time

import cv2
import numpy as np

from classifier.FaceClassifier import FaceClassifier
from detection.FaceDetector import FaceDetector
from embeddings.FaceEmbeddings import FaceEmbeddings

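# The classifier file is chosen by the name typed at the prompt;
# e.g. entering "knn" loads ./classifier/knn_classifier.pkl.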
model = input("Choose a model> ")
# model = "knn"

face_detector = FaceDetector()
face_recognition = FaceEmbeddings()
face_classifier = FaceClassifier(
    f'./classifier/{model.lower()}_classifier.pkl')

video_capture = cv2.VideoCapture(0)
prevTime = 0
cv2.namedWindow("Video", cv2.WINDOW_NORMAL)

print('Start Recognition!')
while True:

    ret, frame = video_capture.read()
    if not ret:  # stop if a frame could not be read from the camera
        break

    frame = cv2.resize(frame, (0, 0), fx=1, fy=1)  # resize frame (optional)
    curTime = time.time()  # calc fps