예제 #1
0
def say(player, words, lang='en-US', volume=60, pitch=130):
    """Say the given words with TTS.

    English-looking text is rendered with pico2wave into a temporary WAV
    which is played and then deleted; anything else falls back to jtalk.

    Args:
      player: To play the text-to-speech audio.
      words: string to say aloud.
      lang: language for the text-to-speech engine.
      volume: volume for the text-to-speech engine.
      pitch: pitch for the text-to-speech engine.
    """
    # Raw string: the pattern contains \s and other escapes that are
    # invalid in a plain string literal (SyntaxWarning on modern Python).
    english_words = re.match(r'^[a-zA-Z0-9_,.\s\!@#$%&\*\-=\+"\'\?]+$', words)
    print(words)

    if english_words:
        try:
            (fd, tts_wav) = tempfile.mkstemp(suffix='.wav', dir=TMP_DIR)
        except IOError:
            logger.exception('Using fallback directory for TTS output')
            (fd, tts_wav) = tempfile.mkstemp(suffix='.wav')
        # pico2wave writes the file itself; we only need the path.
        os.close(fd)
        # Wrap the text in the volume/pitch tags pico2wave understands.
        words = '<volume level="' + str(volume) + '"><pitch level="' + str(pitch) + \
                '">' + words + '</pitch></volume>'
        try:
            # BUG FIX: the lang parameter was ignored — 'en-US' was
            # hard-coded here even when the caller asked for another language.
            subprocess.call(
                ['pico2wave', '--lang', lang, '-w', tts_wav, words])
            player.play_wav(tts_wav)
        finally:
            # Always remove the temp WAV, even if playback fails.
            os.unlink(tts_wav)
    else:
        # Non-English text: hand off to Open JTalk.
        jtalk.jtalk(words)
예제 #2
0
def take_photo():
    """Capture a photo to 'face.jpg' with the Pi camera.

    Speaks a countdown via TTS, waits two seconds, then captures the frame.
    """
    with picamera.PiCamera() as camera:
        # Low resolution keeps capture fast; NOTE(review): (256, 196) looks
        # like a typo for (256, 192) — confirm the intended size.
        camera.resolution = (256, 196)
        camera.start_preview()
        # "Three, two, one. Click!" countdown spoken aloud.
        jtalk('さんー、にーー、いちー。ぱしゃり')
        time.sleep(2)
        camera.capture('face.jpg')
예제 #3
0
def speak(d, word, speaker):
    """Play a pre-recorded WAV for (difficulty, speaker, word); fall back to TTS.

    Args:
      speaker: in ['kubo','oshiro','nishida','yamada','zunko']
      d: in ['easy','normal','hard','reverse']
      word: string
    """
    # 'reverse' mode reuses the 'hard' recordings.
    dif = 'hard' if d == 'reverse' else d

    # Build the path once instead of three times.
    wav_path = './recorded/' + dif + '/' + speaker + '/' + word + '.wav'
    if os.path.exists(wav_path):
        print('play wave file')
        wf = wave.open(wav_path, 'r')
        p = pyaudio.PyAudio()
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        chunk = 1024
        data = wf.readframes(chunk)
        # BUG FIX: readframes() returns bytes, so `data != ''` was always
        # true on Python 3 and the loop never terminated. Test emptiness.
        while data:
            stream.write(data)
            data = wf.readframes(chunk)
        stream.close()
        wf.close()  # release the wave file handle (was leaked before)
        p.terminate()
    else:
        print('wave file not found')
        jtalk.jtalk(word.encode('utf-8'))
예제 #4
0
def main():
    """Start-up announcements: weather, then news, then launch the clock thread."""
    # NOTE(review): this whole branch is deliberately disabled (`if 0:`);
    # it used to play a start-up jingle and announce the current time.
    if 0:
        print ("boice timer active")
        print (datetime.now().strftime("%Y/%m/%d %H:%M:%S"))
        print (os.getcwd())
        print ("\n")

        url = AUDIO_URL + "Init/init_voice.wav"
        audio.play(url)

    # For now, just speak the time (part of the disabled block above).
        b = datetime.now()
        say_text = str(b.hour%12) + '時' + str(b.minute) + "分です"
        jtalk.jtalk(say_text)

    # Announce the weather (first two forecast entries).
    wea.say(2)

    # Read out the news.
    new = news_file.NewsClass()
    new.say_news("ねとらぼ",1)

# threads start: kick off the clock loop one second from now.
    t=threading.Timer(1,clock_func.clock)
    t.start()
예제 #5
0
    def say_topic(self, lim_):
        """Read aloud up to *lim_* topic titles from this news site."""
        jtalk.jtalk(self.SITE_NAME + "のニュースをお伝えします")
        for entry in self.topic_list[:lim_]:
            jtalk.jtalk(entry["title"])
        # Brief pause after the headlines finish.
        time.sleep(1)
예제 #6
0
def time_signal():
    """Play the chime jingle, then announce the current time via TTS."""
    # TODO: raise an exception when the WAV file does not exist.
    global now
    audio.play(main.AUDIO_URL + "Init/master.wav")
    announcement = "{}時{}分です".format(now.hour % 12, now.minute)
    jtalk.jtalk(announcement)
예제 #7
0
파일: main.py 프로젝트: i80486dx2/ent
def a_water(count):
    """Update the display and speak a watering message based on *count*."""
    thirsty = count == 0
    if thirsty:
        display("Test", "water_plant", "0")
        jtalk.jtalk("喉が乾いたよ")
    else:
        display("Test", "water_plant", "1")
        jtalk.jtalk("さようなら、人類")
예제 #8
0
def say(client, userdata, message):
    """MQTT callback: speak the 'text' field of a UTF-8 JSON payload via jtalk.

    Args:
      client: MQTT client instance (unused).
      userdata: user data set on the client (unused).
      message: MQTT message whose payload is UTF-8 encoded JSON.
    """
    data = json.loads(message.payload.decode('utf-8'))
    # Idiomatic membership test (was: `if not 'text' in data.keys()`).
    if 'text' not in data:
        return
    jtalk(data['text'])
예제 #9
0
 def cheez(self, params, username, filters):
     """Say "cheese!", pause, take a photo, and post it to the chat."""
     jtalk('はいっチーーズ!')
     sleep(2)
     now = datetime.now() 
     # Timestamped file name: img<day>T<hhmmss>_<microsec>.jpg
     # NOTE(review): %05d truncates nothing but microsecond can be 6 digits
     # wide — confirm the intended zero-padding.
     fname = 'img/img%02dT%02d%02d%02d_%05d.jpg' % (now.day, now.hour, now.minute, now.second, now.microsecond)
     
     picam.shoot(fname)
     # Message header carries the (CHEEZ) tag plus any active filters.
     header_str = ';'.join(['(CHEEZ)'] + filters)
     self.showMessage('%s;<img src="%s">' % (header_str,fname))
예제 #10
0
def uttr(ut_text, ut_way, chrome):
    """Utter *ut_text* through the engine chosen by *ut_way*.

    ut_way is "jtalk" for Open JTalk or "websp" for browser-based speech;
    anything else prints a diagnostic. Empty text is silently ignored.
    """
    if not ut_text:
        return
    print(ut_text)
    if ut_way == "jtalk":
        jtalk.jtalk(ut_text)
    elif ut_way == "websp":
        uttr_websp(chrome, ut_text)
    else:
        print("invalid utter way")
예제 #11
0
    def cheez(self, params, username, filters):
        """Say "cheese!", pause two seconds, snap a photo, and post it."""
        jtalk('はいっチーーズ!')
        sleep(2)
        ts = datetime.now()
        # File name is timestamped down to the microsecond.
        fname = 'img/img%02dT%02d%02d%02d_%05d.jpg' % (
            ts.day, ts.hour, ts.minute, ts.second, ts.microsecond)

        picam.shoot(fname)
        # Header: the (CHEEZ) tag followed by any active filter names.
        tags = ['(CHEEZ)'] + filters
        self.showMessage('%s;<img src="%s">' % (';'.join(tags), fname))
예제 #12
0
    def __say(self, serif):
        """Speak *serif* via TTS unless it repeats the most recent line."""
        self.__forget_last_serif()

        # Skip exact repeats of the last spoken line.
        if serif != self.__last_serif:
            self.exp.flash_eyes()
            jtalk.jtalk(serif)
            print("mugbot: {}".format(serif))
            self.exp.lighten_eyes()

            # Remember what was said and when, for repeat suppression.
            self.__last_serif = serif
            self.__last_serif_time = datetime.now()
예제 #13
0
def delete_task():
    """Cancel every scheduled task and announce the result via TTS.

    Returns:
        bool: always True (the voice command is considered handled).
    """
    global tasks
    # Truthiness test instead of `len(tasks) > 0`.
    if tasks:
        for task in tasks:
            # Each entry carries its timer at index 1; cancel it.
            task[1].cancel()
        tasks = []
        jtalk.jtalk("タスクを消去しました。", jtalk.MEI_NORMAL, True)
    else:
        jtalk.jtalk("タスクはありません。", jtalk.MEI_NORMAL, True)

    return True
예제 #14
0
def play_random(wordlist):
    """Toggle shuffle playback from the spoken command words.

    Returns True when music was playing and the mode was changed,
    False otherwise.
    """
    global player
    global is_player_playing
    if not is_player_playing:
        return False

    # A stop word turns shuffle off; any other command turns it on.
    if is_includes(wordlist, ComList.stop.value):
        player.random = False
        jtalk.jtalk("ランダム再生はオフです。", jtalk.MEI_NORMAL, True)
    else:
        player.random = True
        jtalk.jtalk("ランダム再生はオンです。", jtalk.MEI_NORMAL, True)
    return True
예제 #15
0
파일: main.py 프로젝트: i80486dx2/ent
def a_human():
    """Greet a detected human: speak hello, wiggle the car, then stop."""
    display("Test", "detect_human", "1")
    jtalk.jtalk("人間がいるよ")
    jtalk.jtalk("人間さん、こんにちは")
    # Wiggle pattern: alternate right/left turns with pauses, same order
    # and timing as the original hand-written sequence.
    moves = [
        (car_systems.go_Right, 0.5),
        (car_systems.go_Left, 1),
        (car_systems.go_Right, 1),
        (car_systems.go_Left, 1),
        (car_systems.go_Right, 0.5),
    ]
    for move, pause in moves:
        move()
        time.sleep(pause)
    car_systems.stop_Stop()
예제 #16
0
    def reaction_for_mentions(self):
        """Thread body: detect new mentions of this account and read them aloud."""
        result = self.api.mentions_timeline(count=5)
        # Oldest first, so announcements come in chronological order.
        for i, status in enumerate(reversed(result)):
            # Use the post time to decide whether the mention is new.
            if status.created_at > self.mention_threshold_time:
                self.mention_threshold_time = status.created_at
                print('---%3d---' % (i + 1))
                # BUG FIX: these were `print('... {}', format(x))` — the
                # stray comma passed two arguments to print instead of
                # formatting the string, so the literal '{}' was printed.
                print('TWEET user name : {}'.format(status.user.name))
                print('TWEET text      : {}'.format(status.text))

                jtalk.jtalk("{}様より、メンションが確認されました".format(status.user.name))
                jtalk.jtalk("{}".format(self.format_text(status.text)))
예제 #17
0
파일: talk.py 프로젝트: naoki0130/python
def roop_or_not(t):
    """Speak *t* and re-prompt until the user confirms with "y".

    If *t* is the "could not hear you" message, speak it and listen again
    without asking for confirmation. Loops until one "y" is entered.
    """
    i = 0
    # (removed a no-op `t = t` assignment)
    while i < 1:
        if t == "すみません 聞き取れませんでした もう一度お願いします":
            # Recognition failed: apologize and listen again.
            jtalk.jtalk(t)
            t = say_confirm()
        else:
            jtalk.jtalk(t)
            c = input("y or n:")
            if c == "y":
                # Confirmed: leave the loop after a short pause.
                i += 1
                time.sleep(1)
            else:
                # Rejected: capture a new utterance and try again.
                t = say_confirm()
예제 #18
0
def specific_time():
    """Announce any special-day entries matching today's month and day."""
    global now
    # For each configured event that falls on today's date, read aloud
    # what day it is plus its configured voice line.
    for entry in day_list.json_list:
        if entry["month"] == now.month and entry["day"] == now.day:
            jtalk.jtalk("今日は" + entry["what"] + "です")
            jtalk.jtalk(entry["voice"])
예제 #19
0
def reply_song_detail():
    """Speak the playlist, artist, and title of the song now playing.

    Returns True when something was playing (and was announced), else False.
    """
    global player
    global is_player_playing
    if not is_player_playing:
        return False

    # Assemble only the fragments that are actually known.
    fragments = []
    if player.now_playing_playlist:
        fragments.append(player.now_playing_playlist + " から、")
    if player.now_playing_artist:
        fragments.append(player.now_playing_artist + " の ")
    if player.now_playing_title:
        fragments.append(player.now_playing_title + " です。")
    voice = "".join(fragments)

    # Prefer Google TTS; fall back to the local jtalk voice when it fails.
    if not jtalk.google_tts(voice, True):
        jtalk.jtalk(voice, jtalk.MEI_NORMAL, True)
    return True
예제 #20
0
파일: main.py 프로젝트: i80486dx2/ent
def a_bright(data):
    """React to a brightness reading: extract the digits from *data*,
    show them, and either stop (bright) or take evasive action (dark)."""
    digits = re.sub(r"\D", "", data)
    display("Test", "brightness", digits)
    if int(digits) > 5:
        # Bright enough: comment on it and stop moving.
        jtalk.jtalk("お")
        jtalk.jtalk("明るい")
        car_systems.stop_Stop()
    else:
        # Got dark: complain and dodge.
        jtalk.jtalk("あれ")
        jtalk.jtalk("暗くなっちゃった")
        car_systems.avoid(300)
예제 #21
0
def extract_keywords(text):
    """Collect noun keywords from *text* with MeCab; speak on an interjection.

    Args:
      text: Japanese text to tokenize.

    Returns:
      list of surface strings — the nouns collected before an interjection
      stopped the scan (or all nouns if none was found).
    """
    tagger = MeCab.Tagger(
        '-Ochasen -d /usr/lib/mecab/dic/mecab-ipadic-neologd')
    # NOTE(review): parsing an empty string first looks like the well-known
    # mecab-python workaround for surface-string corruption — confirm.
    tagger.parse('')
    node = tagger.parseToNode(text)
    keyword = []

    while node:
        # 感動詞 = interjection: record it, speak, and stop scanning.
        if node.feature.split(',')[0] == '感動詞':
            keyword.append(node.surface)
            # NOTE(review): keyword[0] is the FIRST collected item, which may
            # be an earlier noun rather than this interjection — verify intent.
            print(keyword[0])
            jtalk.jtalk(keyword[0])
            break
        # 名詞 = noun: collect it as a keyword.
        elif node.feature.split(',')[0] == '名詞':
            keyword.append(node.surface)

        node = node.next

    return keyword
예제 #22
0
def play_radio(wordlist):
    """Start streaming the radio channel named in *wordlist*.

    Returns:
        bool: True when a known channel was found and playback started,
        False when no recognized word matched a channel.
    """
    print("Play radio!")
    channel = ""
    for word in wordlist:
        # Membership test on the dict itself; `.keys()` was redundant.
        if word in RADIO_CHANNEL:
            channel = RADIO_CHANNEL[word]
            break
    print("channel:" + channel)
    # Truthiness test instead of comparing against "".
    if not channel:
        return False

    stop_all()
    voice = RADIO_CHANNEL_NAME[channel] + "をかけますね!"
    print(voice)
    jtalk.jtalk(voice, jtalk.MEI_HAPPY)

    # Hand playback off to the shell script; do not wait for it.
    cmd = SCRIPT_DIR + "/play_radiko.sh" + " " + channel
    Popen(cmd.strip().split(" "))

    return True
예제 #23
0
    def say(self, say_range_):
        """Read aloud the weather and temperatures for the first
        *say_range_* forecast entries of self.forecasts.

        # TODO: varying the phrasing/endings would make this more fun.
        """
        jtalk.jtalk("本日の" + self.city + "の天気をお伝えします")

        # Read the whole forecast as one utterance so it flows smoothly,
        # which sounds more secretary-like than stopping per day.
        try:
            say_text = ""
            for data in self.forecasts[:say_range_]:
                # Separator between consecutive day entries.
                if say_text:
                    say_text += "、"
                say_text += data['dateLabel'] + "の天気は"
                try:
                    say_text += "最高気温" + data['temperature']['max'][
                        'celsius'] + "度、"
                except TypeError:
                    # Max temperature was null in the API response.
                    say_text += "最高気温不明、"

                try:
                    say_text += "最低気温" + data['temperature']['min'][
                        'celsius'] + "度"
                except TypeError:
                    # Min temperature was null in the API response.
                    say_text += "最低気温不明"
            # The original used for/else, but with no break in the loop the
            # else clause always ran — plain post-loop code is equivalent
            # and far less misleading.
            say_text += "です"
            jtalk.jtalk(say_text)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate instead of being swallowed.
            err_txt = "天気についてのエラーが発生しています"
            print(err_txt)
            jtalk.jtalk(err_txt)
예제 #24
0
    def timeline(self, _like_favo_threshold=-1, _search_num=5):
        """Display and read aloud new tweets from the home timeline.

        Args:
          _like_favo_threshold: only show tweets whose likes + retweets
            total at least this value (-1 shows everything).
          _search_num: number of most-recent statuses to examine.
        """
        result = self.api.home_timeline()
        for i, status in enumerate(reversed(result[:_search_num])):
            if status.user.notifications == True:
                print('広告がヒット {}'.format(status.user.notifications))


# NOTE(review): the continue below is commented out, so flagged accounts
# are still processed — confirm that is intended.
#                continue
# Use the post time to decide whether the tweet is new.
            if status.created_at > self.timeline_threshold_time:
                self.timeline_threshold_time = status.created_at

                # Grab like and retweet counts.
                f = status.favorite_count
                r = status.retweet_count

                # Only show sufficiently popular tweets.
                if f + r >= _like_favo_threshold:
                    print('---%3d---' % (i + 1))
                    print('TWEET user name : {}'.format(status.user.name))
                    print('TWEET text      : {}'.format(status.text))
                    #                print(status.favorite_count)
                    #                print(status.retweet_count)

                    jtalk.jtalk("タイムラインが更新されました")
                    jtalk.jtalk("{}様より".format(status.user.name))
                    jtalk.jtalk("{}".format(self.format_text(status.text)))
예제 #25
0
def main():
    """Webcam loop: 's' classifies the current frame and speaks the result,
    'q' quits and releases the camera.
    """
    while (True):
        ret, frame = cam.read()
        cv2.imshow("Show FLAME Image", frame)

        k = cv2.waitKey(1)
        if k == ord('s'):
            # Save the frame to disk, then (re)load it for classification.
            cv2.imwrite("output.png", frame)
            cv2.imread("output.png")

            # Build a one-image batch scaled to [0, 1).
            X = []
            img = load_img("./output.png",
                           target_size=(image_size, image_size))
            in_data = img_to_array(img)
            X.append(in_data)
            X = np.array(X)
            X = X.astype("float") / 256

            # NOTE(review): the model is rebuilt and the weights reloaded on
            # every key press — hoisting this out of the loop would be much
            # faster; confirm nothing depends on a fresh model per frame.
            #model = apple.build_model(X.shape[1:])
            #model = tr.build_model(X.shape[1:], nb_classes)
            model = build_model(X.shape[1:], nb_classes)
            #model.load_weights("./image/apple-model.h5")
            model.load_weights("model.h5")

            # Speak every category whose predicted probability exceeds 0.5.
            pre = model.predict(X)
            print(pre)
            for i in range(len(categories)):
                if pre[0][i] > 0.5:
                    print(categories[i])
                    text = u'これは' + categories[i] + u'だよ'
                    text = text.encode('utf-8')
                    jtalk.jtalk(text)

        elif k == ord('q'):
            break

    cam.release()
    cv2.destroyAllWindows()
예제 #26
0
    def word_search(self,
                    _word="",
                    _count=10,
                    _lang='ja',
                    _result_type='popular',
                    _address=None,
                    _range=5.0):
        """Search Twitter and read the results aloud.

        Args:
          _word: search keyword.
          _count: number of statuses to request.
          _lang: language filter for the search.
          _result_type: recent / popular / mixed (currently unused — see note).
          _address: a place name such as 東京墨田区; when given, the search
            is geo-restricted around it.
          _range: geo search radius in km.
        """
        # Nothing to do when neither keyword nor place was supplied.
        # (`is None` instead of `== None` throughout.)
        if _word == "" and _address is None: return
        # When a place name is given, resolve it to lat/lng via Google Maps.
        if _address is not None:
            geo_json = geophysics.get_geocode(_address)
            if geo_json is None: return
            lat = geo_json['results'][0]['geometry']['location']['lat']
            lng = geo_json['results'][0]['geometry']['location']['lng']
            geocode = "{},{},{}km".format(lat, lng, _range)
        else:
            geocode = None

        # NOTE: _result_type cannot be combined with geocode (and seemed to
        # return nothing when enabled), so it is deliberately not passed.
        result = self.api.search(q=_word,
                                 count=_count,
                                 lang=_lang,
                                 geocode=geocode)

        if len(result) == 0:
            # The search matched nothing.
            jtalk.jtalk("検索しましたが、該当するものはありませんでした")
        else:
            # Announce the hit count, then read each result.
            jtalk.jtalk("{}個のツイートがヒットしました".format(len(result)))
            for i, status in enumerate(reversed(result)):
                print('TWEET---%3d---' % (i + 1))
                # BUG FIX: the format string had no placeholder, so the
                # notifications flag was silently dropped from the output.
                print('広告がヒット {}'.format(status.user.notifications))
                # Skip flagged (ad-like) accounts; plain truthiness instead
                # of `== True`.
                if status.user.notifications: continue
                print('TWEET user name : {}'.format(status.user.name))
                print('TWEET text      : {}'.format(status.text))
                jtalk.jtalk("{}様より".format(status.user.name))
                jtalk.jtalk("{}".format(self.format_text(status.text)))
예제 #27
0
def play_news(wordlist):
    """Download the latest NHK radio-news podcast episode and start playing it.

    Args:
      wordlist: recognized words (unused; kept for the command interface).

    Returns:
      bool: always True (the command is considered handled).
    """

    voice = "最新のニュースを流しますね。"
    print(voice)
    jtalk.jtalk(voice, jtalk.MEI_NORMAL)

    # Fetch the podcast RSS feed with wget and wait for the download.
    path = HOME_DIR + "/.cache/news.xml"
    mp3path = HOME_DIR + "/.cache/news.mp3"
    cmd = "wget https://www.nhk.or.jp/r-news/podcast/nhkradionews.xml -O " + path
    c = Popen(cmd.strip().split(" "))
    c.wait()

    # NOTE(review): hard-coded tree indices assume the newest item's
    # enclosure sits at root[0][10][1] — fragile if the feed layout
    # changes; verify against the actual RSS structure.
    tree = ET.parse(path)
    root = tree.getroot()
    mp3 = root[0][10][1].attrib['url']
    cmd = "wget " + mp3 + " -O " + mp3path
    c = Popen(cmd.strip().split(" "))
    c.wait()

    # Stop anything currently playing, then hand the MP3 to VLC.
    stop_all()
    cmd = LAUNCH_VLC_CMD + " " + mp3path
    Popen(cmd.strip().split(" "))

    return True
예제 #28
0
def measure():
    """Read the ADC voltage and announce how full the container is."""
    volts = adc.convertVolts(adc.readAdc(0))
    print("adc_volt=" + str(volts))
    # Map the voltage band to a console label and a spoken phrase.
    if 0 < volts < 1.8:
        label, speech = "omoi", "たくさん"
    elif 1.8 <= volts < 2.5:
        label, speech = "maamaa", "はんぶんくらい にひゃくごじゅうカロリーぶん"
    else:
        label, speech = "karui", "からっぽ ごひゃくかろりーくらい"
    print(label)
    jtalk.jtalk(speech)
예제 #29
0
def play_repeat(wordlist):
    """Set the repeat mode (off / playlist / song) from the spoken words.

    Returns:
        bool: True when music was playing and the mode was set, else False.
    """
    global player
    global is_player_playing
    if is_player_playing:
        if is_includes(wordlist, ComList.stop.value):
            player.repeat = Repeat.none
            jtalk.jtalk("リピート再生はオフです。", jtalk.MEI_NORMAL, True)
        elif is_includes(wordlist, ComList.playlist.value):
            # BUG FIX: the two branches below assigned player.random (a
            # copy/paste from play_random); the first branch and every
            # spoken message show that repeat mode is what changes here.
            player.repeat = Repeat.playlist
            jtalk.jtalk("プレイリストをリピート再生します。", jtalk.MEI_NORMAL, True)
        else:
            player.repeat = Repeat.song
            jtalk.jtalk("この曲をリピート再生します。", jtalk.MEI_NORMAL, True)
        return True

    return False
예제 #30
0
def main():
    """Button-driven attendance loop.

    Photo button: photograph a face, identify the person via the Face API,
    toggle their in-room state, and announce/post the result.
    Exit button: mark everyone as out.
    Ctrl-C exits; GPIO state is always cleaned up.
    """
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(PHOTO_BUTTON, GPIO.IN)
    GPIO.setup(EXIT_BUTTON, GPIO.IN)
    project_one = FaceApi(SUBSCRIPTION_KEY, 'project-one')
    users = Users('users.json')

    try:
        while True:
            if GPIO.input(PHOTO_BUTTON) == GPIO.HIGH:
                # TODO: move to a subprocess so photos can be taken back to back.
                take_photo()
                photo_path = './face.jpg'
                try:
                    detected_faceid = project_one.detect_face_local_image(
                        open(photo_path, 'rb'))
                    identified_person = project_one.identify_person(
                        detected_faceid)
                    # identified_person_name = project_one.get_person_name_by_personId(identified_person[0]['personId'])
                    name = users.get_name_by_person_id(
                        identified_person[0]['personId'])
                except (IndexError, KeyError):
                    # The Face API could not identify a face in the photo;
                    # ask the user to retake it.
                    jtalk('もう一度撮影してください')
                    name = ''
                if name:
                    # Toggle the person's in-room state, persist it, then
                    # announce and post the change.
                    users.change_in_room_state(name)
                    users.dump_json()
                    ruby = users.get_ruby(name)
                    print(create_message(name, users.get_in_room(name)))
                    send_message(create_message(name, users.get_in_room(name)))
                    if name == 'やーしょー':
                        yasho_voice()
                    else:
                        jtalk(create_message(ruby, users.get_in_room(name)))
            elif GPIO.input(EXIT_BUTTON) == GPIO.HIGH:
                # Exit button: everyone leaves; persist and announce it.
                users.all_exit()
                users.dump_json()
                message = 'だれもいなくなった'
                jtalk(message)
                send_message(message)
    except KeyboardInterrupt:
        print('KeyboardInterrupt')
    finally:
        GPIO.cleanup()  # reset GPIO state on exit
예제 #31
0
파일: main.py 프로젝트: i80486dx2/ent
def listen_talk():
    """Continuously stream microphone audio to Google Cloud Speech and react
    to recognized phrases via TTS and the serial-connected robot body.
    """
    text, client, streaming_config = gcp_talk.get_talk()
    with text as stream:

        while not stream.closed:

            stream.audio_input = []

            audio_generator = stream.generator()

            requests = (speech.StreamingRecognizeRequest(audio_content=content)
                        for content in audio_generator)

            responses = client.streaming_recognize(streaming_config, requests)

            # Now, put the transcription responses to use.
            my_added_text = gcp_talk.listen_print_loop(responses, stream)
            if my_added_text is None:
                pass
            elif "おはよう" in my_added_text:
                # Greeting heard: gesture (serial command '3') and reply.
                ser.write('3'.encode('utf-8'))
                display("Test", "Speaking", "おはようございます")
                jtalk.jtalk("おはようございます")
            elif ("あなたの名前は" in my_added_text) or ("名前" in my_added_text):
                # Asked for its name: introduce itself.
                ser.write('3'.encode('utf-8'))
                display("Test", "Speaking", "おはようございますよろしくお願いします。")
                jtalk.jtalk("エントと申します。よろしくお願いします。")
            else:
                # Anything else: forward to the chat backend, speak the reply.
                ser.write('3'.encode('utf-8'))
                reply = send_message(my_added_text)
                display("Test", "Speaking", reply)
                jtalk.jtalk(reply)

            # Reset the stream bookkeeping so recognition restarts cleanly.
            if stream.result_end_time > 0:
                stream.final_request_end_time = stream.is_final_end_time
            stream.result_end_time = 0
            stream.last_audio_input = []
            stream.last_audio_input = stream.audio_input
            stream.audio_input = []
            stream.restart_counter = stream.restart_counter + 1
            stream.new_stream = True
            # Serial command '2' — presumably an idle/listening pose; verify.
            ser.write('2'.encode('utf-8'))
예제 #32
0
 def say(self, params, username, filters):
     """Speak all command parameters as one space-joined utterance."""
     text = ' '.join(params)
     jtalk(text)