Example #1
    def test_train_with_olivetti(self):
        fc = FaceClassifier()
        self.assertEqual(len(fc.data.shape), 2)
        # data stored as 64*64 row vectors
        self.assertEqual(fc.data.shape[1], 64 * 64)
        # the Olivetti dataset contains 40 subjects
        self.assertEqual(len(np.unique(fc.labels)), 40)
        fc.train()
        # training stores the eigenface-space coordinates as a matrix (.W)
        self.assertEqual(len(fc.W.shape), 2)
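The test implies that FaceClassifier loads the Olivetti faces by default. A minimal sketch of the same invariants, checked directly against scikit-learn's loader (an assumption about where the data comes from):

# Sketch: the invariants above, verified against sklearn's Olivetti loader.
from sklearn.datasets import fetch_olivetti_faces
import numpy as np

faces = fetch_olivetti_faces()               # 400 images of 40 subjects
assert faces.data.shape == (400, 64 * 64)    # 64*64 row vectors
assert len(np.unique(faces.target)) == 40    # 40 subjects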
Example #2
def get_feature_extractor():
    # model for classification
    face_classifier = FaceClassifier()
    print('loading weights...')
    face_classifier.load_weight('../../temp/model_weight/epoch_92')
    model = face_classifier.model

    # for layer in model.layers:
    #     print(layer.name, layer.output.get_shape())

    # cut the network at 'avg_pool' to reuse the backbone as a feature extractor
    feature_extractor = Model(inputs=model.input,
                              outputs=model.get_layer('avg_pool').output)
    return feature_extractor
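A hedged usage sketch for the extractor above; the 224x224 RGB input size is an assumption carried over from the ResNet classifier in Example #11, and 'face.jpg' is a hypothetical file:

import cv2
import numpy as np

feature_extractor = get_feature_extractor()
img = cv2.resize(cv2.imread('face.jpg'), (224, 224)).astype(np.float32)
batch = np.expand_dims(img, axis=0)          # shape (1, 224, 224, 3)
features = feature_extractor.predict(batch)  # pooled features from 'avg_pool'
print(features.shape)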
Example #3
    def test_benchmark(self):
        img_dir = os.path.join(this_script_folder, 'images_yale')
        fc = FaceClassifier(ratio=.725)
        fc.add_img_data(img_dir)
        fc.benchmark()
        self.assertIsNotNone(fc.classification_report)
        print(fc.classification_report)
        fc.benchmark(imshow=True,
                     wait_time=0.8,
                     which_labels=[0, 5, 13, 28, 40])
Example #4
def train_classifier(data_folder_path="face_augmented"):
    print("train_classifier", "start")
    X = []
    y = []
    print("train_classifier", "embed_faces", "start")
    for label in tqdm(FileUtils.list_top_folders_names(data_folder_path)):
        faces = []
        for file_name in FileUtils.list_top_files_names(FileUtils.join(data_folder_path, label)):
            face = Image.open(FileUtils.join(data_folder_path, label, file_name))
            faces.append(face)
        embeds = ImageEmbedder.embeds(faces)
        X.extend(embeds)
        y += [label] * len(faces)
    print("train_classifier", "embed_faces", "completed")

    print("train_classifier", "fit_classifier", "start")
    FaceClassifier.fit(numpy.array(X), numpy.array(y))
    print("train_classifier", "fit_classifier", "completed")
Example #5
    def test_export_import(self):
        img_dir = os.path.join(this_script_folder, 'images_yale')
        fc = FaceClassifier()
        fc.add_img_data(img_dir)
        # write as pickle files
        fc.export()

        fc2 = FaceClassifier(data_pkl='/tmp/data.pkl',
                             target_pkl='/tmp/labels.pkl')
        self.assertEqual(len(np.unique(fc2.labels)), 41)
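The constructor arguments in the test suggest that export() writes its pickles to /tmp by default; a quick check under that assumption, run after fc.export():

import os

# assumed default export paths, inferred from the constructor arguments above
assert os.path.exists('/tmp/data.pkl')
assert os.path.exists('/tmp/labels.pkl')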
Example #6
def test_classifier(frame):
    import cv2 as cv
    from face_classifier import FaceClassifier
    classifier = FaceClassifier()
    gray = classifier.to_gray_scale(frame)
    cv.imshow('gray', gray)
    faces = classifier.detect(gray)
    classifier.draw_retangles(frame, faces)
    cv.imshow('faces', frame)
    cv.waitKey(0)
    cv.destroyAllWindows()
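A minimal driver for test_classifier, assuming a webcam at index 0:

import cv2 as cv

capture = cv.VideoCapture(0)
ok, frame = capture.read()   # grab a single frame
capture.release()
if ok:
    test_classifier(frame)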
Example #7
    def test_train_with_subject(self):
        img_dir = os.path.join(this_script_folder, 'images_yale')
        fc = FaceClassifier()
        fc.add_img_data(img_dir)
        # data stored as 64*64 row vectors
        self.assertEqual(fc.data.shape[1], 64 * 64)
        # 40 Olivetti subjects + 1 added subject
        self.assertEqual(len(np.unique(fc.labels)), 41)
        fc.train()
        # training stores the eigenface-space coordinates as a matrix (.W)
        self.assertEqual(len(fc.W.shape), 2)
Example #8
def start_recognize_faces_stream():
    capture = cv2.VideoCapture(0)
    while True:
        _, frame = capture.read()
        pil_frame = ImageUtils.cv2_to_pillow_image(frame)
        time_label = []
        current_time = time.time()
        faces, bounding_boxes = FaceExtracter.extracts(pil_frame, return_bounding_boxes=True)
        extract_time = time.time() - current_time
        time_label.append("extract_time: {}".format(extract_time))

        if faces:
            for bounding_box in bounding_boxes:
                left, top, right, bottom = bounding_box.astype(int)
                cv2.rectangle(frame, (left, top), (right, bottom), (255, 255, 255))

            current_time = time.time()
            faces_embeddings = ImageEmbedder.embeds(faces)
            embed_time = time.time() - current_time
            time_label.append("embed_time: {}".format(embed_time))

            current_time = time.time()
            labels, confidents = FaceClassifier.classifies(faces_embeddings)
            classify_time = time.time() - current_time
            time_label.append("classify_time: {}".format(classify_time))

            for (label, confident, bounding_box) in zip(labels, confidents, bounding_boxes):
                left, top, right, bottom = bounding_box.astype(int)
                cv2.putText(frame,  f"{label}[{confident}]", (left, max(top - 5, 0)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))

        cv2.putText(frame, ",".join(time_label), (8, 24), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

        cv2.imshow("Video Stream", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    capture.release()
    cv2.destroyAllWindows()
Example #9
class APP(tk.Frame):
    def __init__(self, master=tk.Tk(), size="400x300"):
        super().__init__(master)
        self.master = master
        # window close hook
        master.protocol("WM_DELETE_WINDOW", self.on_closing)
        # size of the root window
        self.master.geometry(size)
        # webcam instance
        # self.webcam = Webcam()

        # face detector
        self.classifier = FaceClassifier()

        self.service = FaceService()

        # initialize visual components on the window
        self.initialize_gui()
        # handler to stop streams
        self.webcam_subscription = None
        self.api_subscription = None
        self.service_subscription = None
        # temporary usage
        self._timer = 1

    def initialize_gui(self):
        """ draw visual items on the window
        """
        # title
        self.master.title("Webcam")

        # allowing the widget to take the full space of the root window
        self.pack(fill=tk.BOTH, expand=1)

        # label as container to display video
        self.image_container = tk.Label(self)
        # self.image_container.place(x=0, y=0)
        self.image_container.pack()

        # label as container to display text
        self.text_container = tk.Label(self, 
            text='age:18 \n gender:Male', 
            justify=tk.CENTER)
        self.text_container.pack(side='bottom')

    def _detect_and_draw(self, frame):
        """ 
        step 1. convert cv_frame to gray scale
        step 2. detect faces in the gray frame
        step 3. draw rectangles over the face locations 
        """
        gray = self.classifier.to_gray_scale(frame)
        faces = self.classifier.detect(gray)
        modified_frame = self.classifier.draw_retangles(frame, faces)
        return self._convert_frame_to_pil_image(modified_frame)

    def _convert_frame_to_pil_image(self, frame):
        """ convert a cv_frame to PIL image """
        cv_img = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
        return Image.fromarray(cv_img)
        
    def update_image(self, pil_image):
        """ update UI with PIL image """
        new_img = ImageTk.PhotoImage(pil_image)
        # keep a reference so the PhotoImage is not garbage-collected
        self.image_container.image = new_img
        self.image_container.configure(image=new_img)

    def analyze_face(self, face):
        """
        call remote API service for an analyzed face
        """
        # mimics a remote API service
        time.sleep(0.2)
        self._timer += 1
        data = {
            'age': self._timer,
            'gender': 'F'
        }
        return Observable.just(json.dumps(data)) 

    def update_text(self, json_text):
        """ update UI with json_text """
        self.text_container.config(text=json_text)

    def start(self):
        """ call it to let the GUI run """
        self._start_stream()
        # self._stream()
        self.mainloop()
        print('out of mainloop')

    def _stream(self):
        # pool_scheduler = ThreadPoolScheduler(2)
        scheduler = TkinterScheduler(self.master)
        self.service_subscription = self.service.as_observable() \
            .map(lambda frame: self._convert_frame_to_pil_image(frame)) \
            .observe_on(scheduler) \
            .subscribe(
                on_next = lambda pil_image: self.master.after(0, lambda: self.update_image(pil_image)),
                on_error=print)
        
    def _start_stream(self):
        # setup source
        # pool_scheduler = ThreadPoolScheduler(2)
        ui_scheduler = TkinterScheduler(self.master)
        # self.service_subscription = self.service.as_observable()

        self.service_subscription = self.service.as_observable() \
            .map(self._convert_frame_to_pil_image) \
            .observe_on(ui_scheduler) \
            .subscribe(
                on_next = lambda pil: self.master.after(0, lambda: self.update_image(pil)),
                on_error=print)

        self.api_subscription = self.service.api_stream() \
            .subscribe(
                on_next = lambda rst: self.master.after(0, lambda: self.update_text(json.dumps(rst))),
                on_error=print
            )
       
    def on_closing(self):
        prefix = 'app.on_closing'
        print('{} - stop streams'.format(prefix))
        if self.api_subscription:
            print('{} - stop api stream'.format(prefix))
            self.api_subscription.dispose()
        if self.service_subscription:
            print('{} - stop webcam stream'.format(prefix))
            self.service_subscription.dispose()
      
        print('{} - destroy windows'.format(prefix))
        self.master.destroy()
        cv.destroyAllWindows()
        print('{} - system exit'.format(prefix))
        time.sleep(5)
        sys.exit()
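A hypothetical entry point for the APP class above (a sketch; the class creates its own tk.Tk() root when none is passed):

if __name__ == '__main__':
    app = APP()
    app.start()   # blocks in mainloop() until the window is closed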
Example #10
 def test_show_album(self):
     fc = FaceClassifier()
     fc.show_album(wait_time=.1)
Example #11
class ImageAnalyzer:
    def __init__(self):
        self.face_detector = dlib.get_frontal_face_detector()
        self.face_classifier = FaceClassifier()
        self.face_classifier.load_weight(
            '../temp/model_weight/keras/resnet/epoch_95')

    def analyze(self, image_path):
        """
        :param image_path:
        :return: detects, idol_ids
        """
        io_image = io.imread(image_path)

        # face detection
        try:
            detects = self.face_detector(io_image, 1)
        except RuntimeError:
            print('detection failed. skip')
            return None, None

        cv2_image = data.load_image(image_path)
        if detects:
            idol_ids = self.classify(detects, cv2_image)
            return detects, idol_ids
        else:
            return None, None

    def classify(self, detects, image):
        """classify cropped faces"""
        idol_ids = []
        for i, d in enumerate(detects):
            cropped = image[d.top():d.bottom(), d.left():d.right()]

            # all box coordinates must be positive (box not clipped at the edge)
            in_image = (d.left() > 0 and d.top() > 0
                        and d.right() > 0 and d.bottom() > 0)

            # exclude small faces
            size_threshold = 64
            enough_size = d.right() - d.left() > size_threshold

            if in_image and enough_size:
                resized = cv2.resize(cropped, (224, 224))
                probability = self.face_classifier.predict(resized)
                idol_id = int(np.argmax(probability))
                idol_ids.append(idol_id)
            else:
                idol_ids.append(None)
        return idol_ids

    def draw_face_detection_result_to_image(self, detects, image):
        """
        :type image: cv2.Image
        """

        if detects is None or len(detects) == 0:
            return image

        print('detected face num', len(detects))

        image_for_draw = image.copy()

        for i, d in enumerate(detects):
            cropped = image[d.top():d.bottom(), d.left():d.right()]

            # all box coordinates must be positive (box not clipped at the edge)
            in_image = (d.left() > 0 and d.top() > 0
                        and d.right() > 0 and d.bottom() > 0)

            # exclude too small faces
            size_threshold = 64
            enough_size = d.right() - d.left() > size_threshold

            if in_image and enough_size:
                resized = cv2.resize(cropped, (224, 224))
                cv2.imshow(str(i), resized)
                probability = self.face_classifier.predict(resized)
                label = np.argmax(probability)
                probability_max = np.max(probability)
                an_idol = idol.get_idol(int(label))

                # draw circle
                pos0 = (d.left(), d.top())
                pos1 = (d.right(), d.bottom())
                member_color = color.color_code_to_bgr_tuple(
                    an_idol.member_color)
                x_center = (d.left() + d.right()) / 2
                y_center = (d.top() + d.bottom()) / 2
                center = (int(x_center), int(y_center))
                radius = int(center[0] - pos0[0])
                thickness = 2
                cv2.circle(img=image_for_draw,
                           center=center,
                           radius=radius,
                           color=member_color,
                           thickness=thickness)

                font = cv2.FONT_HERSHEY_SIMPLEX
                # text = an_idol.alphabet_name() + ' {0:10.2f}'.format(probability_max)
                text = an_idol.alphabet_name() + ' ' + str(probability_max)
                # shadow text
                cv2.putText(img=image_for_draw,
                            text=text,
                            org=(pos0[0] + 1, pos0[1] + 1),
                            fontFace=font,
                            fontScale=1,
                            color=(0, 0, 0),
                            lineType=cv2.LINE_AA)
                # color text
                cv2.putText(img=image_for_draw,
                            text=text,
                            org=pos0,
                            fontFace=font,
                            fontScale=1,
                            color=member_color,
                            lineType=cv2.LINE_AA)

            else:
                print('skipped by pre-check:', in_image, enough_size)
        return image_for_draw

    def test(self):
        image_path = PROJECT_ROOT + '/resources/test/cute1.jpg'
        image = cv2.imread(image_path)
        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        dets = self.face_detector(image_rgb, 1)
        print("Number of faces detected: {}".format(len(dets)))
        for i, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                i, d.left(), d.top(), d.right(), d.bottom()))

        win = dlib.image_window()
        win.clear_overlay()
        win.set_image(image_rgb)
        win.add_overlay(dets)
        dlib.hit_enter_to_continue()
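A hypothetical driver for ImageAnalyzer, mirroring its own test() method; the image path and the separate cv2.imread load are assumptions:

analyzer = ImageAnalyzer()
detects, idol_ids = analyzer.analyze('photos/sample.jpg')   # hypothetical path
if detects:
    image = cv2.imread('photos/sample.jpg')
    annotated = analyzer.draw_face_detection_result_to_image(detects, image)
    cv2.imshow('result', annotated)
    cv2.waitKey(0)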
Example #12
class FaceService:
    def __init__(self):
        self._debug = conf['debug'] or False
        self._cv_debug = conf['cv_debug'] or False
        self.webcam = Webcam()
        self.tracker = Tracker()
        self.detector = FaceClassifier()
        self.last_tick = cv.getTickCount()
        self.got_face = False

        self.sample_stream = self.webcam.as_observable() \
            .sample(1000) \
            .do_action(lambda f: self._debug and print('sampled at {}'.format(datetime.now()))) \
            .map(self.detect) \
            .publish() \
            .auto_connect()

    def as_observable(self):
        # frame stream
        frame_stream = self.webcam.as_observable()

        # detect stream
        detect_stream = self.sample_stream \
            .do_action(self.init_tracker)

        return frame_stream \
            .combine_latest(detect_stream, self.merge_detect_result)

    def api_stream(self):
        return self.sample_stream \
            .filter(lambda rst: rst['box'] is not None) \
            .map(lambda rst: rst['frame']) \
            .flat_map(FaceApi().get_result_as_observable)

    # def track_or_detect(self, data):
    #     self._debug and print('merge')
    #     box = None
    #     frame = None
    #     if isinstance(data, dict):
    #         # detect result
    #         box = data['box']
    #         frame = data['frame']
    #         self._debug and print('detect result {}'.format(box))
    #         if box:
    #             self.tracker.set_bounding(frame, box)
    #         else:
    #             return frame
    #     else:
    #         self._debug and print('frame')
    #         # frame
    #         frame = data

    #     if self.got_face:
    #         ok, box_now = self.tracker.update_frame(frame)
    #         self._debug and print('update box={}'.format(box_now))
    #         if ok:
    #             box = [int(x) for x in box_now]
    #         else:
    #             return frame

    #         # update box
    #         self._debug and print('before draw box={}'.format(box))
    #         frame_now = self.detector.draw_retangles(frame, [box])
    #         return frame_now
    #     else:
    #         return frame

    def detect(self, frame):
        self._debug and print('try to detect')
        gray = self.detector.to_gray_scale(frame)
        boxes = self.detector.detect(gray)
        result = {'box': None, 'frame': frame}
        if len(boxes) == 0:
            self._debug and print('detect failed at {}'.format(datetime.now()))
            self.got_face = False
        else:
            # detect succeeded
            # merge into tracker
            result['box'] = [x for x in boxes[0]]
            self.got_face = True
            self._debug and print("face detected at {}".format(result['box']))
        return result

    def init_tracker(self, data):
        self._debug and print('init_tracker')
        if data['box']:
            box = data['box']
            frame = data['frame']
            self._debug and print('  tracker.init box={}'.format(box))
            self.tracker = Tracker()
            self.tracker.set_bounding(frame, box)

    def merge_detect_result(self, frame, result):
        if result['box']:
            # detected a face, update the frame
            ok, box = self.tracker.update_frame(frame)
            if ok:
                box = [int(x) for x in box]
                # self._debug and print('update box={}'.format(box))
                frame_now = self.detector.draw_retangles(frame, [box])
                # Display tracker type on frame
                cv.putText(frame, self.tracker.tracker_type + " Tracker",
                           (100, 20), cv.FONT_HERSHEY_SIMPLEX, 0.75,
                           (50, 170, 50), 2)
                return frame_now
            else:
                return frame
        else:
            return frame
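A minimal consumer sketch for FaceService, mirroring how APP._start_stream in Example #9 subscribes; the frame handler here is an assumption:

service = FaceService()
subscription = service.as_observable().subscribe(
    on_next=lambda frame: print('frame', frame.shape),
    on_error=print)
# ... later, stop the stream
subscription.dispose()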