Beispiel #1
0
def sensorCheck():
    """If something is within actionDistance: announce, record a short
    clip from the microphone to WAVE_OUTPUT_FILENAME and play it back.

    Relies on module-level globals: get_distance, actionDistance,
    bezelie, camera, audio (a pyaudio.PyAudio instance), FORMAT,
    CHANNELS, RATE, CHUNK, RECORD_SECONDS and WAVE_OUTPUT_FILENAME.
    """
    if get_distance() < actionDistance:
        bezelie.centering()
        # Announce "recording start" through the AquesTalk synthesizer.
        subprocess.call(
            '/home/pi/aquestalkpi/AquesTalkPi -s 120 "ろくおんかいし" | aplay',
            shell=True)
        camera.stop_preview()

        #   Recording
        print("recording...")
        stream = audio.open(
            format=FORMAT,
            channels=CHANNELS,
            rate=RATE,
            input=True,  # input (capture) mode
            input_device_index=0,  # device index of the microphone
            frames_per_buffer=CHUNK)
        frames = []
        for _ in range(int(RATE / CHUNK * RECORD_SECONDS)):
            frames.append(stream.read(CHUNK))
        print("finished recording")
        stream.stop_stream()  # stop the stream
        stream.close()  # release the stream

        # Write the captured frames out as a WAV file; the try/finally
        # guarantees the file is closed even if a setter raises.
        waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
        try:
            waveFile.setnchannels(CHANNELS)
            waveFile.setsampwidth(audio.get_sample_size(FORMAT))
            waveFile.setframerate(RATE)
            waveFile.writeframes(b''.join(frames))
        finally:
            waveFile.close()

        #   Play
        # BUG FIX: the original played hard-coded 'test.wav'; play the
        # file actually written above so recording and playback always
        # refer to the same path.
        subprocess.call('aplay ' + WAVE_OUTPUT_FILENAME, shell=True)

        camera.start_preview()
Beispiel #2
0
def sensorCheck():
    """If something is within actionDistance: greet, capture a still
    photo to /home/pi/Pictures/image<N>.jpg and bump the photo counter.

    Relies on module-level globals: get_distance, actionDistance,
    bezelie, camera and photoNo (the running photo index).
    """
    global photoNo
    if get_distance() < actionDistance:
        bezelie.centering()
        # Say hello ("こんにちわー") through the AquesTalk speech synthesizer.
        subprocess.call(
            '/home/pi/aquestalkpi/AquesTalkPi -s 120 "こんにちわー" | aplay',
            shell=True)
        # Preview must be stopped while capturing a still frame.
        camera.stop_preview()
        camera.capture('/home/pi/Pictures/image' + str(photoNo) + '.jpg')
        photoNo += 1
        #      camera.capture_continuous('image{counter}.jpg')
        camera.start_preview()
        # Brief pause so back-to-back sensor hits don't capture in a burst.
        time.sleep(0.5)
Beispiel #3
0
def sensorCheck():
    """If something is within actionDistance: record a short clip from
    the microphone, save it as a WAV file, send it to a speech
    recognition web service and tweet the recognized text.

    Relies on module-level globals: get_distance, actionDistance,
    bezelie, FORMAT, CHANNELS, RATE, CHUNK, RECORD_SECONDS,
    WAVE_OUTPUT_FILENAME, url, files and auth.
    """
    if get_distance() < actionDistance:
        bezelie.centering()

        #   Recording
        print("recording...")
        audio = pyaudio.PyAudio()  # fresh PyAudio instance per recording
        stream = audio.open(
            format=FORMAT,
            channels=CHANNELS,
            rate=RATE,
            input=True,  # input (capture) mode
            input_device_index=0,  # device index of the microphone
            frames_per_buffer=CHUNK)
        frames = []
        for _ in range(int(RATE / CHUNK * RECORD_SECONDS)):
            frames.append(stream.read(CHUNK))
        print("finished recording")
        stream.stop_stream()  # stop the stream
        stream.close()  # release the stream
        # BUG FIX: query the sample width BEFORE terminating the PyAudio
        # instance; the original called audio.terminate() first and then
        # used the already-terminated instance when writing the WAV.
        sampleWidth = audio.get_sample_size(FORMAT)
        audio.terminate()  # shut down the PyAudio instance

        # Write the captured frames out as a WAV file.
        waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
        try:
            waveFile.setnchannels(CHANNELS)
            waveFile.setsampwidth(sampleWidth)
            waveFile.setframerate(RATE)
            waveFile.writeframes(b''.join(frames))
        finally:
            waveFile.close()

        #   voice recognition
        # NOTE(review): `files` is not defined in this function --
        # presumably a module-level mapping pointing at the recorded WAV.
        # Confirm it is (re)built after each recording, otherwise a stale
        # file handle is uploaded.
        r = requests.post(url, files=files)
        message = r.json()['text']
        print(message)

        #   tweet
        api = tweepy.API(auth)
        api.update_status(status=message)
        time.sleep(1)
Beispiel #4
0
def pygame_imshow(array):
    """Show a BGR OpenCV image on the module-level pygame `screen`."""
    # Reverse the channel order (BGR -> RGB) before handing it to pygame.
    rgb = cv2.merge(cv2.split(array)[::-1])
    frame = pygame.surfarray.make_surface(rgb)
    # make_surface treats the first array axis as x, so rotate and mirror
    # the surface to restore the normal image orientation on screen.
    rotated = pygame.transform.rotate(frame, -90)
    oriented = pygame.transform.flip(rotated, True, False)
    screen.blit(oriented, (0, 0))
    pygame.display.flip()


# Haar-cascade face detector shipped with the OpenCV system package.
cascade_path = "/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml"
cascade = cv2.CascadeClassifier(cascade_path)

# Get Started
bezelie.centering()
yaw = 0    # current head yaw position
delta = 1  # yaw step per iteration

# Main Loop
with picamera.PiCamera() as camera:
    with picamera.array.PiRGBArray(camera) as stream:
        camera.resolution = (800, 480)  # match your display resolution
        camera.hflip = True  # horizontal flip; remove if not needed
        camera.vflip = True  # vertical flip; remove if not needed
        sleep(1)  # let the camera sensor settle before capturing

        while True:
            # Capture one frame into stream.array in BGR channel order.
            camera.capture(stream, 'bgr', use_video_port=True)
            # Convert to a grayscale image and assign it to gray
Beispiel #5
0
def _weight_trend(withings):
    """Return (latest, oldest) body weight in kg from the last 14
    Withings measurement groups.

    Measurement type 1 is body weight; groups without a weight reading
    are skipped.  Returns (0, 0) when no weight reading was found.
    """
    latest = 0
    oldest = 0
    for measure in withings.get_measures(limit=14):
        weight = measure.get_measure(1)
        if weight:
            if latest == 0:
                latest = weight  # first (most recent) reading seen
            oldest = weight      # keeps being overwritten -> last one wins
    return latest, oldest


def _photo_food_mid():
    """Take a photo, send it to the Google Vision API and return the
    "mid" of the top label annotation ("/m/06ht1" means instant noodles
    as used by the callers).
    """
    with picamera.PiCamera() as camera:
        camera.resolution = (800, 480)
        camera.rotation = 180
        camera.start_preview()
        sleep(1)
        bezelie.centering()
        talk("どれどれ、写真撮って確認するよ")
        camera.stop_preview()
        camera.capture('/home/pi/bezelie/' + 'detect.jpg')
        sleep(2)
    m = MoveThread()
    recogData = generatejson.imageRecog('image_detect.txt')
    # NOTE: you must append your Vision API key to the URL below.
    response = requests.post(
        url='https://vision.googleapis.com/v1/images:annotate?key=',
        data=recogData,
        headers={'Content-Type': 'application/json'})
    m.stop()
    jsondata = json.loads(response.text)
    return jsondata["responses"][0]["labelAnnotations"][0]["mid"]


def main():
    """Listen to Julius speech-recognition results over TCP and react:
    answer diet questions using Withings weight data, identify food via
    the Google Vision API, and make small talk.

    Runs until KeyboardInterrupt, then closes the TCP socket.
    """
    # TCP client connected to the Julius speech recognition server.
    # BUG FIX: kept in its own variable (`sock`); the original reused the
    # name `client` for both the socket and the Withings API client, so
    # the socket was clobbered and the final close() hit the wrong object.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(('localhost', 10500))
    bezelie.centering()
    sf = sock.makefile('')

    # One pattern per recognized keyword; group(1) is the confidence (CM).
    reHungry = re.compile(u'WHYPO WORD="空いた" .* CM="(\d\.\d*)"')
    reHow = re.compile(u'WHYPO WORD="いくつ" .* CM="(\d\.\d*)"')
    reEat = re.compile(u'WHYPO WORD="食べていい" .* CM="(\d\.\d*)"')
    reDir = re.compile(u'WHYPO WORD="傾向" .* CM="(\d\.\d*)"')
    reLove = re.compile(u'WHYPO WORD="好き" .* CM="(\d\.\d*)"')
    reWeight = re.compile(u'WHYPO WORD="体重" .* CM="(\d\.\d*)"')
    reBezelie = re.compile(u'WHYPO WORD="ベゼリー" .* CM="(\d\.\d*)"')
    client = getWithingsClient()  # Withings API client

    try:
        while True:
            line = sf.readline().decode('utf-8').strip("\n\r")
            word0 = reHungry.search(line)
            word1 = reHow.search(line)
            word2 = reEat.search(line)
            word3 = reDir.search(line)
            word4 = reLove.search(line)
            word5 = reWeight.search(line)
            word6 = reBezelie.search(line)
            if word0:
                # "I'm hungry" -- advise based on the weight trend.
                print(answer[0])
                if float(word0.group(1)) > 0.9:
                    m = MoveThread()
                    weightRec, weightOld = _weight_trend(client)
                    if weightRec > weightOld:
                        talk("体重増え気味だから、我慢しよう")
                    elif weightRec + 1 < weightOld:
                        talk("最近、体重凄く減ってるから無理しすぎないで、ちょっとだけ食べようか")
                    else:
                        talk("安心しちゃダメ、食べたらまた太るよ")
                    sleep(6)
                    m.stop()
            elif word1:
                # "How many (calories)?" -- photograph and identify the food.
                print(answer[1])
                if float(word1.group(1)) > 0.9:
                    mid = _photo_food_mid()
                    print(mid)
                    if mid == "/m/06ht1":
                        talk("カップラーメンだね" + "346キロカロリーだよ")
                    else:
                        talk("カロリーメートだね" + "200キロカロリーだよ")
            elif word2:
                # "May I eat this?" -- combine food identity and weight trend.
                print(answer[2])
                if float(word2.group(1)) > 0.9:
                    mid = _photo_food_mid()
                    m = MoveThread()
                    weightRec, weightOld = _weight_trend(client)
                    sleep(1)
                    m.stop()
                    if mid == "/m/06ht1":
                        if weightRec > weightOld:
                            talk("体重が増加傾向にあるのにカップラーメン食べちゃダメだよ")
                        else:
                            talk("体重は減少傾向だけど、カップラーメン食べすぎないほうがいいんじゃないかな")
                    else:
                        if weightRec > weightOld:
                            talk("体重が増加傾向だけど、カロリーメートで少し不足分を補ったほうがいいかもね")
                        else:
                            talk("体重が減少傾向だね、カロリーメートぐらいなら大丈夫かもね")
            elif word3:
                # "What's the trend?" -- report the weight difference.
                print(answer[3])
                if float(word3.group(1)) > 0.9:
                    m = MoveThread()
                    weightRec, weightOld = _weight_trend(client)
                    sleep(1)
                    m.stop()
                    diff = fabs(weightRec - weightOld)
                    if weightRec > weightOld:
                        talk("体重一週間前よりも" + str(diff) + "kg増え気味だね")
                    else:
                        talk("体重一週間前よりも" + str(diff) + "kg減ってるね")
            elif word4:
                # "I like you" -- reply in kind.
                print(answer[4])
                if float(word4.group(1)) > 0.9:
                    talk("ありがとう、僕も好きだよ")

            elif word5:
                # "What's my weight?" -- report the latest reading.
                print(answer[5])
                if float(word5.group(1)) > 0.9:
                    m = MoveThread()
                    weightRec, weightOld = _weight_trend(client)
                    sleep(2)
                    m.stop()
                    talk("現在" + str(weightRec) + "kgだね")
                    sleep(1)
                    talk("ちなみに、僕の体重はりんご一個分だよ")
            elif word6:
                # Called by name -- answer with a random greeting.
                print(answer[6])
                if float(word6.group(1)) > 0.9:
                    random128bitdata = os.urandom(16)
                    if int(binascii.hexlify(random128bitdata), 16) % 2 == 0:
                        talk("どうしたの")
                    else:
                        talk("なーにー")
            else:
                pass
    except KeyboardInterrupt:
        print("KeyboardInterrupt occured.")
        sock.close()  # close the Julius TCP connection