class ReceptionRobot:
    """Face-recognition reception robot.

    Watches the camera, recognizes known persons and greets each one by
    voice.  Also supports registering a new person (GUI questionnaire +
    short face-capture session) and retraining the recognition model.
    """

    N_EPOCH = 100
    BATCH_SIZE = 96
    RECOGNITION_IMAGE_SIZE = (64, 64)
    SAVE_IMAGE_SIZE = (64, 64)
    # Number of face images collected when registering a new person.
    PERSON_IMAGES_NUM = 10
    PLATFORM = Platform.macos.value

    def __init__(self):
        self.load_face_recognition_model()
        self.app = QApplication(sys.argv)

    def run(self):
        """Start the face recognition system."""
        self.say("おはようございます")
        self.capture()

    def capture(self):
        """Capture video and greet every continuously recognized person.

        A person is greeted only when recognized in each of the last
        three frames, filtering out one-off misrecognitions.
        """
        def loop_func(**kwargs):
            history = kwargs.get('past_recognized_persons')
            history.append(self.recognize(kwargs.get('face_imgs')))
            # Keep only the three most recent recognition results.
            if len(history) > 3:
                history.pop(0)
            # Intersect ids over the last three frames: only persons seen
            # in every recent frame survive the filter.
            person_ids = Person.objects().distinct(field='_id')
            for persons in history[-3:]:
                frame_ids = {person._id for person in persons}
                person_ids = list(set(person_ids) & frame_ids)
            for person_id in person_ids:
                self.greet(Person.objects(_id=person_id).first())

        face_capture.FaceCapture.capture(
            loop_func=loop_func,
            continue_condition_func=lambda **kwargs: False,
            break_condition_func=lambda **kwargs: False,
        )

    def recognize(self, imgs):
        """Recognize detected faces.

        Args:
            imgs: list of RGB face images, each a ``numpy.ndarray`` of
                shape (height, width, 3) with unknown height/width.
        Returns:
            list of ``Person`` documents, one per input image.
        """
        person_id_indexes = self.face_recognizer.recognize(imgs)
        return [
            Person.objects(
                _id=self.face_recognizer.person_ids[index]).first()
            for index in person_id_indexes
        ]

    def greet(self, person):
        """Greet a recognized person by nickname."""
        self.say("こんにちは。{}さん".format(person.nickname))

    @classmethod
    def say(cls, message, speaker="kyoko"):
        """Speak ``message`` with the platform's text-to-speech backend."""
        cls.print(message)
        if cls.PLATFORM == Platform.macos.value:
            cls.say_macos(message, speaker=speaker)
        elif cls.PLATFORM == Platform.linux.value:
            cls.say_linux(message, speaker=speaker)

    @classmethod
    def say_macos(cls, message, speaker="kyoko"):
        """Synthesize, robotize and play ``message`` on macOS.

        The text is rendered to an audio file, converted to a robotic
        voice with sox (echo chain + pitch/tempo shift), then played.

        Args:
            message: text to speak; also used as the audio file name.
            speaker: voice name passed to the synthesizer.
        Returns:
            False when the configured talk application is unknown,
            otherwise None.
        """
        # Local import: only needed here, for shell-quoting user-derived
        # text (nicknames reach this command line via greet()).
        import shlex

        talk_application = 'default'
        voice_output_dir = os.path.dirname(__file__) + "/voices/"
        wav_filename = "{voice_dir}{message}.wav".format(
            voice_dir=voice_output_dir, message=message)

        # Text to voice.
        if talk_application == 'default':
            voice_filename = "{voice_output_dir}{message}.aiff".format(
                voice_output_dir=voice_output_dir, message=message
            )
            say_command = "say -v {speaker} {message} -o {voice_file}".format(
                speaker=shlex.quote(speaker),
                message=shlex.quote(message),
                voice_file=shlex.quote(voice_filename))
        elif talk_application == 'open-jtalk':
            dict_dir = '/usr/local/Cellar/open-jtalk/1.09/dic'
            voice_dir = '/usr/local/Cellar/open-jtalk/1.09/voice/mei' \
                        '/mei_normal.htsvoice'
            voice_filename = "{voice_output_dir}{message}.wav".format(
                voice_output_dir=voice_output_dir, message=message
            )
            say_command = 'open_jtalk -x {dict_dir} -m {voice_dir} ' \
                          '-ow {voice_filename} ' \
                          '{message}'.format(
                message=shlex.quote(message), dict_dir=dict_dir,
                voice_dir=voice_dir,
                voice_filename=shlex.quote(voice_filename))
            print(say_command)
        else:
            return False

        # Convert to robotic voice.
        pitch = 350
        tempo = 1.15
        sox_convert_command = (
            "sox {voice_file} {wav_file} "
            "echo 0.8 0.8 5 0.7 "
            "echo 0.8 0.7 6 0.7 "
            "echo 0.8 0.7 10 0.7 "
            "echo 0.8 0.8 12 0.7 "
            "echo 0.8 0.88 30 0.7 "
            "pitch {pitch} tempo {tempo}".format(
                voice_file=shlex.quote(voice_filename),
                wav_file=shlex.quote(wav_filename),
                pitch=pitch, tempo=tempo,
            )
        )
        play_command = "play {wav_file}".format(
            wav_file=shlex.quote(wav_filename))
        os.system(say_command)
        os.system(sox_convert_command)
        os.system(play_command)

    @classmethod
    def say_linux(cls, message="", speaker="kyoko"):
        """Linux text-to-speech backend (not implemented yet).

        Accepts the same arguments as :meth:`say_macos` so that
        :meth:`say` can dispatch to it without raising TypeError.
        """

    @classmethod
    def print(cls, message):
        """Print ``message`` to the application interface.

        Args:
            message: text to display.
        """
        print(message)

    def load_face_recognition_model(self):
        """Build the recognizer for all known persons and load its model.

        Returns:
            The loaded face recognition model.
        """
        person_ids = [obj._id for obj in Person.objects]
        self.face_recognizer = FaceRecognizer(
            n_epoch=self.N_EPOCH,
            batch_size=self.BATCH_SIZE,
            image_size=self.RECOGNITION_IMAGE_SIZE,
            person_ids=person_ids,
            person_num=len(person_ids),
        )
        self.face_recognizer.generate_filename()
        print(self.face_recognizer.filename)
        self.face_recognition_model = self.face_recognizer.load_model()
        return self.face_recognition_model

    @classmethod
    def save_person_capture(cls, max_faces=100):
        """Ask for a person's identity, record face images and save them.

        Args:
            max_faces: kept for backward compatibility — the capture
                actually stops after ``PERSON_IMAGES_NUM`` face images.
        """
        nickname, last_name, first_name, company = \
            cls.ask_personality()
        cls.say("{}さんのことを覚えたいので5秒ほどビデオを撮りますね。".format(nickname))
        cls.say("はい。とりまーす!")

        all_face_imgs = face_capture.FaceCapture.capture(
            loop_func=lambda **kwargs: True,
            # Skip frames that contain more than one face.
            continue_condition_func=lambda **kwargs: len(
                kwargs.get('face_positions')) > 1,
            # Stop once enough face images have been collected.
            break_condition_func=lambda **kwargs:
                len(kwargs.get('all_face_imgs')) > cls.PERSON_IMAGES_NUM)
        person_obj = Person(
            nickname=nickname,
            last_name=last_name,
            first_name=first_name,
            company=company,
        )
        person_obj.set_face_imgs(all_face_imgs, cls.SAVE_IMAGE_SIZE)
        cls.say("今{}さんのこと覚えてます。1分くらいかかるかもしれません。".format(
            nickname))
        person_obj.save()
        cls.say("{}さんのことバッチリ覚えました!またお会いしましょう。".format(
            nickname))

    @classmethod
    def ask_personality(cls):
        """Ask nickname, last/first name and company through a GUI form.

        Returns:
            Sequence of four strings:
            (nickname, last_name, first_name, company).
        """
        app = QApplication(sys.argv)
        window = Window(title='Opus',
                        labels=['ニックネーム', '姓', '名', '会社名'])
        cls.say("名前を教えてね。")
        # Block until the form window is closed.
        app.exit(app.exec_())
        return window.texts

    @classmethod
    def memorize_face(cls):
        """Train the face recognition model on the saved face images."""
        FaceRecognizer.fit(
            n_epoch=cls.N_EPOCH, batch_size=cls.BATCH_SIZE,
            image_size=cls.RECOGNITION_IMAGE_SIZE)