Code Example #1
    def run(self):

        y_labels = []
        x_train = []
        cur_id = 0
        label_ids = {}

        # fetching images from dataset for training
        for root, dirs, files in os.walk(self.face_images_dataset_dir):

            # FIXME - add talkative settings in prefs !!!
            # if our robot is too talkative, emit this signal
            g_emitter().emit_signal_to_feed_mouth("checking %s" %
                                                  os.path.basename(root))

            for file in files:
                # check file extension for image files
                extension = os.path.splitext(file)[1]
                if extension in [".jpg", ".jpeg", ".png"]:
                    full_path = os.path.join(root, file)
                    label = os.path.basename(root).replace(" ", "-").lower()

                    if label not in label_ids:
                        label_ids[label] = cur_id
                        cur_id += 1

                    img_id = label_ids[label]
                    log.debug("FaceTrainer :: %s - %s - %s" %
                              (str(label), str(img_id), str(full_path)))

                    self.processing_image.emit(label, full_path)

                    # convert image to grayscale
                    pil_image = Image.open(full_path).convert("L")

                    # convert grayscale image to a numpy array
                    image_array = np.array(pil_image, "uint8")

                    # detect faces (scale factor 1.3, minimum 5 neighbours)
                    faces = self.face_cascade.detectMultiScale(
                        image_array, 1.3, 5)

                    for (x, y, w, h) in faces:
                        # region of interest: crop out the detected
                        # face and use it as a training sample
                        roi = image_array[y:y + h, x:x + w]
                        x_train.append(roi)
                        y_labels.append(img_id)

        # save trained labels
        with open("dataset/face_trainer_labels.pickle", "wb") as f:
            pickle.dump(label_ids, f)

        self.recognizer.train(x_train, np.array(y_labels))
        self.recognizer.save("dataset/face_trainer.yml")

        self.face_training_finished.emit()
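
For context, here is a minimal sketch of how the attributes used by run() above might be initialized. The source does not show this constructor; the cascade file path and the LBPH recognizer (from opencv-contrib-python, consistent with the .train()/.save("*.yml") calls above) are assumptions, and the signal signatures are inferred from how they are emitted.

from PyQt5 import QtCore
import cv2

class FaceTrainer(QtCore.QThread):
    # signals emitted by run(); signatures inferred from usage above
    processing_image = QtCore.pyqtSignal(str, str)
    face_training_finished = QtCore.pyqtSignal()

    def __init__(self, dataset_dir):
        super().__init__()
        self.face_images_dataset_dir = dataset_dir
        # Haar cascade shipped with OpenCV (exact file is an assumption)
        self.face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
        # LBPH recognizer matches the .train()/.save("face_trainer.yml")
        # calls in run(); it accepts a list of grayscale face arrays
        self.recognizer = cv2.face.LBPHFaceRecognizer_create()
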
Code Example #2
    def run(self):
        while True:
            if not self.queue.empty():
                text = self.queue.get()

                # ignore empty/None texts
                if not text:
                    continue
                log.info("Mouth speaking text: %s" % text)

                # tell face to change mouth animations to speaking
                # before the blocking speak_text() call, so the
                # animation plays while the audio does
                g_emitter().emit_signal_to_set_speaking_state()
                speak_text(text)

                time.sleep(.1)
            else:
                # tell face to change mouth animations to idle
                time.sleep(.2)
                g_emitter().emit_signal_to_set_idle_state()
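
The loop above assumes a queue that is filled by a feed_text() slot (connected to the feed_mouth signal in Code Example #3) and a blocking speak_text() helper. Neither is shown in the source; a minimal sketch, with pyttsx3 as an assumed TTS backend:

import queue
from PyQt5 import QtCore
import pyttsx3

class Mouth(QtCore.QThread):
    def __init__(self):
        super().__init__()
        self.queue = queue.Queue()  # texts waiting to be spoken

    def feed_text(self, text):
        # slot connected to g_emitter().feed_mouth (see Code Example #3)
        self.queue.put(text)

def speak_text(text):
    # blocking text-to-speech call; pyttsx3 is an assumption here,
    # any synchronous TTS engine would fit the run() loop above
    engine = pyttsx3.init()
    engine.say(text)
    engine.runAndWait()
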
Code Example #3
    def __init__(self):
        QWidget.__init__(self)

        # loading ui from xml
        uic.loadUi(os.path.join(DIRPATH, 'app.ui'), self)

        # FIXME - libpng warning: iCCP: known incorrect sRGB profile
        self.setWindowIcon(QIcon("./images/robot_icon.png"))

        # keep the window fixed sized
        self.setFixedSize(self.size())

        # button event handlers
        self.btnStartCaptureForVideoAnalysis.clicked.connect(
            self.start_capture_for_video_analysis)
        self.btnStopCaptureForVideoAnalysis.clicked.connect(
            self.stop_capture_for_video_analysis)

        self.btnChooseClassifierXML.clicked.connect(
            self.choose_classifier_file)

        self.btnChooseImage.clicked.connect(self.choose_image_for_analysis)

        self.setup_tray_menu()

        # add camera ids (0-10) to both combo boxes
        for i in range(0, 11):
            self.cboxCameraIds.addItem(str(i))
            self.cboxCameraIds1.addItem(str(i))

        # setting up handlers for menubar actions
        self.actionAbout.triggered.connect(self.about)
        self.actionExit.triggered.connect(qApp.quit)
        self.actionPreferences.triggered.connect(self.show_preferences)

        # video analysis image widget
        self.img_widget_vid_analysis = ImageWidget()
        self.hlayoutVideoAnalysis.addWidget(self.img_widget_vid_analysis)

        # face training image widget
        self.img_widget_face_training = ImageWidget()
        self.hlayoutFaceTrainingImg.addWidget(self.img_widget_face_training)

        # face identification image widget
        self.img_widget_identify_face = ImageWidget()
        self.hlayoutIdentifyFace.addWidget(self.img_widget_identify_face)

        # image analysis image widget
        self.img_widget_img_analysis = ImageWidget()
        self.hlayoutImageAnalysis.addWidget(self.img_widget_img_analysis)
        img = cv2.imread("images/human.png")
        self.img_widget_img_analysis.handle_image_data(img)

        self.vid_capture = VideoCapture()
        self.vid_capture.got_image_data_from_camera.connect(
            self.process_image_data_from_camera)

        self.highlight_faces = self.chkHighlightFaces.isChecked()
        self.chkHighlightFaces.stateChanged.connect(
            self.highlight_faces_checkbox_changed)
        self.chckGrayscale.stateChanged.connect(
            self.grayscale_checkbox_changed)

        # face trainer dataset browser btn handler
        self.btnBrowseDatasetForFaceTrainer.clicked.connect(
            self.browse_dataset_for_face_trainer)
        self.btnBrowseClassifierForFaceTrainer.clicked.connect(
            self.browse_classifier_file_for_face_trainer)
        self.btnStartFaceTrainer.clicked.connect(self.start_face_trainer)

        self.btnBrowseIdentifyFace.clicked.connect(self.browse_identify_face)

        self.btnTalk.clicked.connect(self.lets_talk)

        # create and start robot
        self.robot = Robot(self.lblRobot)

        self.mouth = Mouth()

        # connect global signals to slots
        g_emitter().feed_mouth.connect(self.mouth.feed_text)
        g_emitter().set_speaking_state.connect(self.robot.set_speaking_state)
        g_emitter().set_idle_state.connect(self.robot.set_idle_state)

        self.robot.start()
        self.mouth.start()
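
The g_emitter() calls used throughout these examples imply a process-wide QObject that owns the cross-thread signals. Its implementation is not shown in the source; a minimal sketch consistent with the signal names and emit_signal_to_* helpers used above:

from PyQt5 import QtCore

class _Emitter(QtCore.QObject):
    # signals referenced in the examples above
    feed_mouth = QtCore.pyqtSignal(str)
    set_speaking_state = QtCore.pyqtSignal()
    set_idle_state = QtCore.pyqtSignal()

    def emit_signal_to_feed_mouth(self, text):
        self.feed_mouth.emit(text)

    def emit_signal_to_set_speaking_state(self):
        self.set_speaking_state.emit()

    def emit_signal_to_set_idle_state(self):
        self.set_idle_state.emit()

_instance = None

def g_emitter():
    # module-level singleton so any thread reaches the same emitter
    global _instance
    if _instance is None:
        _instance = _Emitter()
    return _instance
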
Code Example #4
    def face_training_finished(self):
        self.lblFaceTrainingStatus.setText("FACE TRAINING FINISHED")
        g_emitter().emit_signal_to_feed_mouth("face training finished")
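
Once training has finished, the artifacts written in Code Example #1 can be loaded back for recognition. The source does not show this step; a hypothetical sketch using the standard OpenCV LBPH API, with the inverted label map as an assumption:

import pickle
import cv2

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("dataset/face_trainer.yml")  # model saved by FaceTrainer

# invert the {label: id} dict saved during training into {id: label}
with open("dataset/face_trainer_labels.pickle", "rb") as f:
    labels = {img_id: label for label, img_id in pickle.load(f).items()}

# roi would be a grayscale face region, as produced in Code Example #1:
# img_id, confidence = recognizer.predict(roi)
# print(labels[img_id], confidence)
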
Code Example #5
    def lets_talk(self):
        text = self.teTalk.toPlainText()
        self.teTalk.setText("")
        g_emitter().emit_signal_to_feed_mouth(text)
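
Taken together, these pieces form one pipeline: the Talk button (like the trainer's status updates in Code Example #4) feeds text through the feed_mouth signal into the Mouth thread's queue, and the Mouth thread speaks it while toggling the robot face between speaking and idle animation states. Routing everything through the global emitter keeps the GUI thread and the worker threads decoupled.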