Code Example #1
def smart_move(key):
    relayer.command(key)
    face_finder = FaceFinder()

    for _ in range(COUNTS):
        if face_finder.face_detected():
            relayer.command("move arms")
        face_finder.show(update=False)
Code Example #2
    def __init__(self, recognizer_path, retina=False, on_gpu=False, emotions=False):
        self.on_gpu = on_gpu

        if retina:
            self.finder = FaceFinder(on_gpu=on_gpu)
        else:
            self.finder = None

        if emotions:
            self.emotions = Emanalisis(on_gpu=on_gpu, path_to_classifier="net_714.pth", finder=self.finder)
        else:
            self.emotions = None

        self.recog = Recognizer(finder=self.finder)
        self.recog.load_model(recognizer_path)
        self.clust = Clusterizer(samples=5)
Code Example #3
File: rofl.py  Project: kayatovsky/face-recognition
    def __init__(self,
                 recognizer_path,
                 retina=False,
                 on_gpu=False,
                 emotions=False,
                 confidence_threshold=0.02,
                 top_k=5000,
                 nms_threshold=0.4,
                 keep_top_k=750,
                 vis_thres=0.6,
                 network='resnet50',
                 distance_threshold=0.4,
                 samples=5,
                 eps=0.3):
        self.on_gpu = on_gpu

        if retina:
            self.finder = FaceFinder(on_gpu=on_gpu,
                                     confidence_threshold=confidence_threshold,
                                     top_k=top_k,
                                     nms_threshold=nms_threshold,
                                     keep_top_k=keep_top_k,
                                     vis_thres=vis_thres,
                                     network=network)
        else:
            self.finder = None

        if emotions:
            self.emotions = Emanalisis(on_gpu=on_gpu,
                                       path_to_classifier="net_714.pth",
                                       finder=self.finder)
        else:
            self.emotions = None

        self.recognizer_retrained = True
        self.recog = Recognizer(finder=self.finder,
                                distance_threshold=distance_threshold)
        self.recog.load_model(recognizer_path)
        self.clust = Clusterizer(samples=samples, eps=eps)
        self.em_labels = [
            'ANGRY', 'DISGUST', 'FEAR', 'HAPPY', 'SAD', 'SURPRISE', 'NEUTRAL'
        ]
Code Example #4
def smart_camera():
    COUNT = 15
    face_finder = FaceFinder()
    for _ in range(COUNT):
        face_finder.show()
    face_finder.shutdown()
    responder.default()
Code Example #5
class InvaderDetectorWithFace:
    def __init__(self):
        self.face_finder = FaceFinder()
        self.recognizer = FaceRecognizer()
        self.recognizer.load('trainer.yml')
        self.security_trigger = Trigger(20, lambda similarity: similarity > 80)

    def on_enabled_start(self):
        pass

    def on_disabled_update(self):
        pass

    def detect(self, frame):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.face_finder.find(gray)

        best_face = None
        best_confidence = float('inf')  # LBPH-style confidence: lower means a closer match
        for coordinates in faces:
            region_of_interest = get_region_of_interest(gray, coordinates)
            id_, conf = self.recognizer.predict(region_of_interest)

            if conf < best_confidence:
                best_face = region_of_interest
                best_confidence = conf

            print('{}, {}, {}'.format(datetime.now(), id_, conf))

            # save_region_of_interest(gray, coordinates)
            self.highlight_face(frame, coordinates)

        if best_face is not None:
            if self.security_trigger.update(best_confidence):
                print('Face does not match!')

                return True

        return False

    def highlight_face(self, image, coordinates, color=(0, 0, 255)):
        x, y, w, h = coordinates
        x_end = x + w
        y_end = y + h
        stroke = 2
        cv2.rectangle(image, (x, y), (x_end, y_end), color, stroke)
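A minimal sketch of how this detector might be driven from a live camera; the capture loop below is our addition and assumes only the InvaderDetectorWithFace class defined above plus OpenCV:

import cv2

# hypothetical driver loop for the detector above
detector = InvaderDetectorWithFace()
cap = cv2.VideoCapture(0)  # default webcam

while True:
    ret, frame = cap.read()
    if not ret:
        break
    if detector.detect(frame):  # True once the security trigger fires
        print('Possible invader detected')
    cv2.imshow('invader-detector', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
        break

cap.release()
cv2.destroyAllWindows()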
Code Example #6
def main():
    # Components
    render_loop = RenderLoop()
    video_feed = VideoFeed()
    face_finder = FaceFinder()
    ad_feed = AdFeed()

    video_feed.add_listener(face_finder)  # face finder listens to video feed frames
    ad_feed.add_listener(render_loop)     # render loop listens to the ad feed
    face_finder.add_listener(ad_feed)     # ad feed listens to the face finder
 
    # daemon threads will not keep the process alive once the main render loop returns
    video_feed.daemon = True
    face_finder.daemon = True
    ad_feed.daemon = True
    render_loop.daemon = True
 
    # start threads
    face_finder.start()
    video_feed.start()
    ad_feed.start()

    render_loop.run()
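The wiring above implies that each component is a thread that pushes its output to registered listeners. A plausible minimal base class for that pattern is sketched below; the name Broadcaster and its process() hook are our assumptions, not the project's actual interface:

import threading
import queue

class Broadcaster(threading.Thread):
    # hypothetical base class matching the add_listener/daemon/start usage above
    def __init__(self):
        super().__init__()
        self.listeners = []
        self.inbox = queue.Queue()

    def add_listener(self, listener):
        # a listener is another Broadcaster whose inbox receives our output
        self.listeners.append(listener)

    def emit(self, item):
        for listener in self.listeners:
            listener.inbox.put(item)

    def run(self):
        # consume items, transform them, and forward the result downstream
        while True:
            self.emit(self.process(self.inbox.get()))

    def process(self, item):
        raise NotImplementedError

Under this reading, VideoFeed would override run() to produce frames rather than consume an inbox, while FaceFinder and AdFeed would override process().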
Code Example #7
from gpiozero import MotionSensor
from robot_modules import Listener, Responder, Relayer, Directive
from face_finder import FaceFinder
from helper import get_response

COUNTS = 1000
TRIGGER_WORD = "super"
PIR_PIN = 40

directive = Directive(TRIGGER_WORD)
face_finder = FaceFinder()
pir = MotionSensor(PIR_PIN)
listener = Listener()  # listens to microphone and outputs text
responder = Responder()  # plays video on screen
relayer = Relayer()  # communicates commands to arduino, gets feedback from arduino


def smart_camera():
    face_finder.start()

    for _ in range(COUNTS):
        face_finder.run()
        face_finder.show()

        # Listener presumably exposes a listen() method returning recognized text
        phrase = listener.listen()

        if "cheese" in phrase:
            face_finder.run()
Code Example #8
    def __init__(self):
        self.face_finder = FaceFinder()
        self.recognizer = FaceRecognizer()
        self.recognizer.load('trainer.yml')
        self.security_trigger = Trigger(20, lambda similarity: similarity > 80)
Code Example #9
File: rofl.py  Project: kayatovsky/face-recognition
class ROFL:
    def __init__(self,
                 recognizer_path,
                 retina=False,
                 on_gpu=False,
                 emotions=False,
                 confidence_threshold=0.02,
                 top_k=5000,
                 nms_threshold=0.4,
                 keep_top_k=750,
                 vis_thres=0.6,
                 network='resnet50',
                 distance_threshold=0.4,
                 samples=5,
                 eps=0.3):
        self.on_gpu = on_gpu

        if retina:
            self.finder = FaceFinder(on_gpu=on_gpu,
                                     confidence_threshold=confidence_threshold,
                                     top_k=top_k,
                                     nms_threshold=nms_threshold,
                                     keep_top_k=keep_top_k,
                                     vis_thres=vis_thres,
                                     network=network)
        else:
            self.finder = None

        if emotions:
            self.emotions = Emanalisis(on_gpu=on_gpu,
                                       path_to_classifier="net_714.pth",
                                       finder=self.finder)
        else:
            self.emotions = None

        self.recognizer_retrained = True
        self.recog = Recognizer(finder=self.finder,
                                distance_threshold=distance_threshold)
        self.recog.load_model(recognizer_path)
        self.clust = Clusterizer(samples=samples, eps=eps)
        self.em_labels = [
            'ANGRY', 'DISGUST', 'FEAR', 'HAPPY', 'SAD', 'SURPRISE', 'NEUTRAL'
        ]

    def load_video(self, video, fps_factor):
        """Load a video for analysis.

        :param video: string, name of the file
        :param fps_factor: int/float, keep every fps_factor-th frame,
            mainly used to lower the number of frames analysed
        :returns: array of frames and the original FPS of the video
        """
        cap = cv2.VideoCapture(video)
        out_arr = []
        i = 0
        while True:
            ret, frame = cap.read()
            if not ret:  # stop at end of stream instead of appending a None frame
                break
            if i % fps_factor == 0:
                out_arr.append(frame)
            i += 1
        fps = cap.get(cv2.CAP_PROP_FPS)
        cap.release()
        return np.asarray(out_arr), fps

    def analyse(self,
                img_arr,
                recognize=False,
                emotions=False,
                one_array=False):
        face_predictions = []
        em_predictions = []
        i = 1
        for img in img_arr:
            if i == 2:
                t = time.time()
            face_loc = self.finder.detect_faces(img)
            if recognize:
                face_predictions.append(
                    self.recog.predict(img, X_face_locations=face_loc))
            if emotions:
                em_predictions.append(
                    self.emotions.classify_emotions(img,
                                                    face_locations=face_loc))
            if i == 2:
                t = (time.time() - t) * len(img_arr)
                m, s = divmod(t, 60)
                print("Approximately %d minutes and %d seconds to make predictions" % (m, s))
            print("%.1f%% of video is done" % (i / len(img_arr) * 100))
            i += 1
        if one_array:
            out_array = []
            if recognize and emotions:
                for em, face in zip(em_predictions, face_predictions):
                    buf = []
                    for e, f in zip(em, face):
                        buf.append(
                            (e[1], self.em_labels[np.argmax(e[0])], f[0]))
                    out_array.append(buf)
            elif recognize:
                for face in face_predictions:
                    buf = []
                    for f in face:
                        buf.append((f[1], None, f[0]))
                    out_array.append(buf)
            elif emotions:
                for em in em_predictions:
                    buf = []
                    for e in em:
                        buf.append(
                            (e[1], self.em_labels[np.argmax(e[0])], None))
                    out_array.append(buf)
            return out_array

        return face_predictions, em_predictions

    def find_emotions(self, img_arr):
        predictions = []
        i = 1
        for img in img_arr:
            if i == 2:
                t = time.time()
            predictions.append(self.emotions.classify_emotions(img))
            if i == 2:
                t = (time.time() - t) * len(img_arr)
                m, s = divmod(t, 60)
                print("Approximately %d minutes and %d seconds to find faces" % (m, s))
            print("%.1f%% of video is done" % (i / len(img_arr) * 100))
            i += 1
        return predictions

    def basic_run(self,
                  in_dir,
                  filename,
                  fps_factor=1,
                  recognize=False,
                  remember=False,
                  emotions=False):
        orig_img_arr, orig_fps = self.load_video(in_dir + "/" + filename,
                                                 fps_factor)
        new_fps = orig_fps / fps_factor

        face_predictions, em_predictions = self.analyse(orig_img_arr,
                                                        recognize=recognize,
                                                        emotions=emotions)

        img_arr = orig_img_arr  # fall back to the raw frames if nothing is drawn
        if recognize:
            img_arr = video_maker.boxes(orig_img_arr,
                                        predictions=face_predictions,
                                        headcount=True,
                                        faces_on=recognize)
        if emotions:
            img_arr = video_maker.emotion_boxes(orig_img_arr,
                                                em_predictions,
                                                headcount=True,
                                                faces_on=recognize)

        filename = video_maker.render("video_output", filename, img_arr,
                                      new_fps)

        if remember and recognize:
            for img, pred in zip(orig_img_arr, face_predictions):
                for name, (top, right, bottom, left) in pred:
                    if name == "unknown":
                        # save_img = cv2.cvtColor(img[top:bottom, right:left], cv2.COLOR_BGR2RGB)
                        save_img = img[top:bottom, left:right]
                        # cv2.imshow("Haha", save_img)
                        # cv2.waitKey(0)
                        cv2.imwrite(
                            "./strangers/" +
                            datetime.datetime.now().strftime("%d%m%Y%H%M%S%f")
                            + ".jpg", save_img)

            encode.encode_cluster_sf("./strangers", "./enc_cluster.pickle")
            self.clust.remember_strangers("./enc_cluster.pickle",
                                          "./known_faces")
        return filename

    def json_run(self,
                 in_dir,
                 filename,
                 fps_factor=1,
                 recognize=False,
                 remember=False,
                 emotions=False):
        orig_img_arr, orig_fps = self.load_video(in_dir + "/" + filename,
                                                 fps_factor)
        new_fps = orig_fps / fps_factor

        array = self.analyse(orig_img_arr,
                             recognize=recognize,
                             emotions=emotions,
                             one_array=True)

        # if recognize:
        #     img_arr = video_maker.boxes(orig_img_arr, predictions=face_predictions, headcount=True, faces_on=recognize)
        # if emotions:
        #     img_arr = video_maker.emotion_boxes(orig_img_arr, em_predictions, headcount=True, faces_on=recognize)

        recording = {
            "name": filename,
            "fps": new_fps,
            "config": {
                "confidence_threshold": self.finder.confidence_threshold,
                "top_k": self.finder.top_k,
                "nms_threshold": self.finder.nms_threshold,
                "keep_top_k": self.finder.keep_top_k,
                "vis_thres": self.finder.vis_thres,
                "network": self.finder.network,
                "distance_threshold": self.recog.distance_threshold,
                "samples": self.clust.clt.min_samples,
                "eps": self.clust.clt.eps,
                "fps_factor": fps_factor
            },
            "frames": array
        }

        with open('recordings/' + filename.split('.')[0] + '.json', 'w') as f:
            json.dump(recording, f)

        if remember and recognize:
            for img, pred in zip(orig_img_arr, array):
                for (top, right, bottom, left), em, name in pred:
                    if name == "unknown":
                        # save_img = cv2.cvtColor(img[top:bottom, right:left], cv2.COLOR_BGR2RGB)
                        save_img = img[top:bottom, left:right]
                        # cv2.imshow("Haha", save_img)
                        # cv2.waitKey(0)
                        cv2.imwrite(
                            "./strangers/" +
                            datetime.datetime.now().strftime("%d%m%Y%H%M%S%f")
                            + ".jpg", save_img)

            encode.encode_cluster_sf("./strangers", "./enc_cluster.pickle")
            self.clust.remember_strangers("./enc_cluster.pickle",
                                          "./known_faces")
        return recording

    async def async_run(self,
                        loop,
                        in_dir,
                        filename,
                        fps_factor=1,
                        recognize=False,
                        remember=False,
                        emotions=False):
        orig_img_arr, orig_fps = await loop.run_in_executor(
            None, self.load_video, in_dir + "/" + filename, fps_factor)
        # img_arr, orig_fps = self.load_video(in_dir + "/" + filename, fps_factor)
        new_fps = orig_fps / fps_factor
        face_predictions, em_predictions = await loop.run_in_executor(
            None, self.analyse, orig_img_arr, recognize, emotions)

        img_arr = video_maker.boxes(orig_img_arr,
                                    predictions=face_predictions,
                                    headcount=True,
                                    faces_on=recognize)

        filename = video_maker.render("video_output", filename, img_arr,
                                      new_fps)

        if remember:
            for img, pred in zip(orig_img_arr, face_predictions):  # crop from raw frames, not boxed ones
                for name, (top, right, bottom, left) in pred:
                    if name == "unknown":
                        # save_img = cv2.cvtColor(img[top:bottom, right:left], cv2.COLOR_BGR2RGB)
                        save_img = img[top:bottom, left:right]
                        # cv2.imshow("Haha", save_img)
                        # cv2.waitKey(0)
                        cv2.imwrite(
                            "./strangers/" +
                            datetime.datetime.now().strftime("%d%m%Y%H%M%S%f")
                            + ".jpg", save_img)

            # encode.encode_cluster("./strangers", "./enc_cluster.pickle")
            await loop.run_in_executor(None, encode.encode_cluster_sf,
                                       "./strangers", "./enc_cluster.pickle")
            await loop.run_in_executor(None, self.clust.remember_strangers,
                                       "./enc_cluster.pickle", "./known_faces")
            # self.clust.remember_strangers("./enc_cluster.pickle", "./known_faces")

        return filename

    def run_from_queue(self,
                       fps_factor=1,
                       recognize=False,
                       remember=False,
                       emotions=False):
        with open("queue.txt") as f:
            q = [line.strip() for line in f]
        filename = None
        if len(q) > 0:
            filename = self.basic_run("queue",
                                      q[0].split("/")[1],
                                      fps_factor=fps_factor,
                                      emotions=emotions,
                                      recognize=recognize,
                                      remember=remember)
            os.remove(q[0])
            q.pop(0)

        # rewrite the queue file with whatever is left (truncates it when empty)
        with open("queue.txt", "w") as f:
            for line in q:
                f.write(line + "\n")
        return filename

    # async def async_load_video(self, video, fps_factor):    # not sure about this one, will give it a try; TODO try to speed up video loading
    #     pass

    async def async_run_from_queue(self,
                                   loop,
                                   fps_factor=1,
                                   recognize=False,
                                   remember=False,
                                   emotions=False):
        with open("queue.txt") as f:
            q = [line.strip() for line in f]
        filename = None
        if len(q) > 0:
            filename = await self.async_run(loop,
                                            "queue",
                                            q[0].split("/")[1],
                                            fps_factor=fps_factor,
                                            emotions=emotions,
                                            recognize=recognize,
                                            remember=remember)
            os.remove(q[0])
            q.pop(0)

        # rewrite the queue file with whatever is left (truncates it when empty)
        with open("queue.txt", "w") as f:
            for line in q:
                f.write(line + "\n")
        return filename

    def update_queue(self, filename):
        f = open("queue.txt", "a")
        f.write(filename + "\n")
        f.close()

    def add_person(self, name, filename=None):
        os.mkdir('known_faces/' + name)
        if filename is not None:
            shutil.move(filename,
                        "known_faces/" + name + "/" + filename.split('/')[-1])
            self.recognizer_retrained = False

    def add_pics(self, name, filenames):
        for file in filenames:
            shutil.move(file,
                        "known_faces/" + name + "/" + file.split('/')[-1])
        self.recognizer_retrained = False
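For orientation, a hedged sketch of how the ROFL class above might be exercised end to end; the model path "recog_model.clf" and the "videos" directory are placeholders of ours, while the keyword arguments come straight from the constructor:

# hypothetical driver for the ROFL pipeline defined above
rofl = ROFL("recog_model.clf",   # placeholder path to a trained Recognizer model
            retina=True,         # build the RetinaFace-based FaceFinder
            on_gpu=False,
            emotions=False)

# analyse every 5th frame, draw recognition boxes, and save unknown faces
output_name = rofl.basic_run("videos", "meeting.mp4",
                             fps_factor=5,
                             recognize=True,
                             remember=True)
print("Rendered to", output_name)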
Code Example #10
from face_finder import FaceFinder
import os

FACE = "sudo fbi -T 1 -d /dev/fb0 -a -noverbose /home/pi/hellobot/images/default-eye.jpg"

face_finder = FaceFinder()

for _ in range(100):
    face_finder.show()

face_finder.shutdown()
os.system(FACE)
Code Example #11
def smart_camera():
    COUNT = 50
    face_finder = FaceFinder()
    for _ in range(COUNT):
        face_finder.show()
    face_finder.shutdown()
Code Example #12

if __name__ == "__main__":
    if len(sys.argv) > 2:
        action = sys.argv[1]
        target_name = sys.argv[2]
    else:
        print(
            "An action [add_images (init), train] and a target_name are both required"
        )
        sys.exit(1)  # without exiting, `action` below would be undefined

    if action == "add_images" or action == "init":
        img_urls = Har(target_name).get_media_urls('jpg')
        pool = Pool(processes=6)
        images_data = pool.map(request_img_data, img_urls)
        face_finder = FaceFinder(target_name)

        for i in range(len(images_data)):
            face_finder.save_face_from_data("{}.jpg".format(i), images_data[i])

        print(
            "Target images added for {}. Please double check that all these faces are in fact your target"
            .format(target_name))

    elif action == "train":
        print("training on {}".format(target_name))
        face_finder = FaceFinder(target_name)
        face_finder.learn_target()

    elif action == "scan_for":
        if len(sys.argv) < 3:
Code Example #13
class ROFL:

    def __init__(self, recognizer_path, retina=False, on_gpu=False, emotions=False):
        self.on_gpu = on_gpu

        if retina:
            self.finder = FaceFinder(on_gpu=on_gpu)
        else:
            self.finder = None

        if emotions:
            self.emotions = Emanalisis(on_gpu=on_gpu, path_to_classifier="net_714.pth", finder=self.finder)
        else:
            self.emotions = None

        self.recog = Recognizer(finder=self.finder)
        self.recog.load_model(recognizer_path)
        self.clust = Clusterizer(samples=5)

    def load_video(self, video, fps_factor):
        """Load a video for analysis.

        :param video: string, name of the file
        :param fps_factor: int/float, keep every fps_factor-th frame,
            mainly used to lower the number of frames analysed
        :returns: array of frames and the original FPS of the video
        """
        cap = cv2.VideoCapture(video)
        out_arr = []
        i = 0
        while True:
            ret, frame = cap.read()
            if not ret:  # stop at end of stream instead of appending a None frame
                break
            if i % fps_factor == 0:
                out_arr.append(frame)
            i += 1
        fps = cap.get(cv2.CAP_PROP_FPS)
        cap.release()
        return np.asarray(out_arr), fps

    def analyse(self, img_arr, recognize=False, emotions=False):
        face_predictions = []
        em_predictions = []
        i = 1
        for img in img_arr:
            if i == 2:
                t = time.time()
            face_loc = self.finder.detect_faces(img)
            if recognize:
                face_predictions.append(self.recog.predict(img, distance_threshold=0.5, X_face_locations=face_loc))
            if emotions:
                em_predictions.append(self.emotions.classify_emotions(img, face_locations=face_loc))
            if i == 2:
                t = (time.time() - t) * len(img_arr)
                m, s = divmod(t, 60)
                print("Approximately %d minutes and %d seconds to make predictions" % (m, s))
            print("%.1f%% of video is done" % (i / len(img_arr) * 100))
            i += 1
        return face_predictions, em_predictions

    def find_emotions(self, img_arr):
        predictions = []
        i = 1
        for img in img_arr:
            if i == 2:
                t = time.time()
            predictions.append(self.emotions.classify_emotions(img))
            if i == 2:
                t = (time.time() - t) * len(img_arr)
                m, s = divmod(t, 60)
                print("Approximately %d minutes and %d seconds to find faces" % (m, s))
            print("%.1f%% of video is done" % (i / len(img_arr) * 100))
            i += 1
        return predictions

    def basic_run(self, in_dir, filename, fps_factor=1, recognize=False, remember=False, emotions=False):
        img_arr, orig_fps = self.load_video(in_dir + "/" + filename, fps_factor)
        new_fps = orig_fps / fps_factor

        face_predictions, em_predictions = self.analyse(img_arr, recognize=recognize, emotions=emotions)

        if recognize:
            img_arr = video_maker.boxes(img_arr, predictions=face_predictions, headcount=True, faces_on=recognize)
        if emotions:
            img_arr = video_maker.emotion_boxes(img_arr, em_predictions, headcount=True, faces_on=recognize)

        filename = video_maker.render("video_output", filename, img_arr, new_fps)

        if remember:
            for img, pred in zip(img_arr, face_predictions):
                for name, (top, right, bottom, left) in pred:
                    if name == "unknown":
                        # save_img = cv2.cvtColor(img[top:bottom, right:left], cv2.COLOR_BGR2RGB)
                        save_img = img[top:bottom, left:right]
                        # cv2.imshow("Haha", save_img)
                        # cv2.waitKey(0)
                        cv2.imwrite("./strangers/" + datetime.datetime.now().strftime("%d%m%Y%H%M%S%f") + ".jpg",
                                    save_img)

            encode.encode_cluster("./strangers", "./enc_cluster.pickle")
            self.clust.remember_strangers("./enc_cluster.pickle", "./known_faces")

        return filename

    async def async_run(self, loop, in_dir, filename, fps_factor=1, recognize=False, remember=False, emotions=False):
        img_arr, orig_fps = await loop.run_in_executor(None, self.load_video, in_dir + "/" + filename, fps_factor)
        # img_arr, orig_fps = self.load_video(in_dir + "/" + filename, fps_factor)
        new_fps = orig_fps / fps_factor
        face_predictions, em_predictions = await loop.run_in_executor(None, self.analyse, img_arr,
                                                                      recognize, emotions)

        img_arr = video_maker.boxes(img_arr, predictions=face_predictions, headcount=True, faces_on=recognize,
                                    emotions_lapse=em_predictions)
        filename = video_maker.render("video_output", filename, img_arr, new_fps)

        if remember:
            for img, pred in zip(img_arr, face_predictions):
                for name, (top, right, bottom, left) in pred:
                    if name == "unknown":
                        # save_img = cv2.cvtColor(img[top:bottom, right:left], cv2.COLOR_BGR2RGB)
                        save_img = img[top:bottom, left:right]
                        # cv2.imshow("Haha", save_img)
                        # cv2.waitKey(0)
                        cv2.imwrite("./strangers/" + datetime.datetime.now().strftime("%d%m%Y%H%M%S%f") + ".jpg",
                                    save_img)

            # encode.encode_cluster("./strangers", "./enc_cluster.pickle")
            await loop.run_in_executor(None, encode.encode_cluster, "./strangers", "./enc_cluster.pickle")
            await loop.run_in_executor(None, self.clust.remember_strangers, "./enc_cluster.pickle", "./known_faces")
            # self.clust.remember_strangers("./enc_cluster.pickle", "./known_faces")

        return filename

    # def run_and_remember_strangers(self, in_dir, filename, fps_factor):
    #     img_arr, orig_fps = self.load_video(in_dir + "/" + filename, fps_factor)
    #     new_fps = orig_fps / fps_factor
    #     predictions = self.analyse(img_arr)
    #
    #     for img, pred in zip(img_arr, predictions):
    #         for name, (top, right, bottom, left) in pred:
    #             if name == "unknown":
    #                 # save_img = cv2.cvtColor(img[top:bottom, right:left], cv2.COLOR_BGR2RGB)
    #                 save_img = img[top:bottom, left:right]
    #                 # cv2.imshow("Haha", save_img)
    #                 # cv2.waitKey(0)
    #                 cv2.imwrite("./strangers/" + datetime.datetime.now().strftime("%d%m%Y%H%M%S%f") + ".jpg",
    #                             save_img)
    #
    #     encode.encode_cluster("./strangers", "./enc_cluster.pickle")
    #
    #     img_arr = video_maker.boxes(img_arr, predictions)
    #     filename = video_maker.render("video_output", filename, img_arr, new_fps)
    #     self.clust.remember_strangers("./enc_cluster.pickle", "./known_faces")
    #     return filename
    #
    # def prerun(self, in_dir, filename, fps_factor=1):
    #     img_arr, orig_fps = self.load_video(in_dir + "/" + filename, fps_factor)
    #     new_fps = orig_fps / fps_factor
    #     predictions = self.analyse(img_arr)
    #     return predictions, img_arr, new_fps
    #
    # def post_run(self, filename, predictions, img_arr, new_fps):
    #     for img, pred in zip(img_arr, predictions):
    #         for name, (top, right, bottom, left) in pred:
    #             if name == "unknown":
    #                 # save_img = cv2.cvtColor(img[top:bottom, right:left], cv2.COLOR_BGR2RGB)
    #                 save_img = img[top:bottom, left:right]
    #                 # cv2.imshow("Haha", save_img)
    #                 # cv2.waitKey(0)
    #                 cv2.imwrite("./strangers/" + datetime.datetime.now().strftime("%d%m%Y%H%M%S%f") + ".jpg",
    #                             save_img)
    #
    #     encode.encode_cluster("./strangers", "./enc_cluster.pickle")
    #
    #     img_arr = video_maker.boxes(img_arr, predictions)
    #     filename = video_maker.render("video_output", filename, img_arr, new_fps)
    #     self.clust.remember_strangers("./enc_cluster.pickle", "./strangers")
    #     return filename
    #
    # def run_emotions(self, in_dir, filename, fps_factor):
    #     img_arr, orig_fps = self.load_video(in_dir + "/" + filename, fps_factor)
    #     new_fps = orig_fps / fps_factor
    #     predictions = self.find_emotions(img_arr)
    #     img_arr = video_maker.emotion_boxes(img_arr, predictions, headcount=True)
    #     filename = video_maker.render("video_output", filename, img_arr, new_fps)
    #     return filename

    def run_from_queue(self, fps_factor=1, recognize=False, remember=False, emotions=False):
        with open("queue.txt") as f:
            q = [line.strip() for line in f]
        filename = None
        if len(q) > 0:
            filename = self.basic_run("queue", q[0].split("/")[1], fps_factor=fps_factor,
                                      emotions=emotions, recognize=recognize, remember=remember)
            os.remove(q[0])
            q.pop(0)

        # rewrite the queue file with whatever is left (truncates it when empty)
        with open("queue.txt", "w") as f:
            for line in q:
                f.write(line + "\n")
        return filename

    # async def async_load_video(self, video, fps_factor):    # not sure about this one, will give it a try; TODO try to speed up video loading
    #     pass

    async def async_run_from_queue(self, loop, fps_factor=1, recognize=False, remember=False, emotions=False):
        with open("queue.txt") as f:
            q = [line.strip() for line in f]
        filename = None
        if len(q) > 0:
            filename = await self.async_run(loop, "queue", q[0].split("/")[1], fps_factor=fps_factor,
                                            emotions=emotions, recognize=recognize, remember=remember)
            os.remove(q[0])
            q.pop(0)

        # rewrite the queue file with whatever is left (truncates it when empty)
        with open("queue.txt", "w") as f:
            for line in q:
                f.write(line + "\n")
        return filename

    def update_queue(self, filename):
        f = open("queue.txt", "a")
        f.write(filename + "\n")
        f.close()
Code Example #14
import cv2
import os

from face_finder import FaceFinder, get_region_of_interest
from face_recognizer import FaceRecognizer
from image_saver import ImageSaver

test_dir = os.path.join('dataset', 'test')

face_finder = FaceFinder()
recognizer = FaceRecognizer()
recognizer.load('trainer.yml')

for file in os.listdir(test_dir):
    if file.endswith('png') or file.endswith('jpg'):
        filepath = os.path.join(test_dir, file)
        image_array = cv2.imread(filepath)
        image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2GRAY)
        faces = face_finder.find(image_array)
        for coordinates in faces:
            region_of_interest = get_region_of_interest(
                image_array, coordinates)
            predicted_label, confidence = recognizer.predict(
                region_of_interest)
            print('{}: {}, {}'.format(file, predicted_label, confidence))