Example #1
def process_event(event):
    """Pretty prints events.

    Prints all events that occur with two spaces between each new
    conversation and a single space between turns of a conversation.

    Args:
        event(event.Event): The current event to process.
    """
    if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
        print()
        pixels.wakeup()

    print(event)

    if event.type == EventType.ON_END_OF_UTTERANCE:
        pixels.think()

    if event.type == EventType.ON_RESPONDING_STARTED:
        pixels.speak()

    if event.type == EventType.ON_CONVERSATION_TURN_FINISHED:
        pixels.off()
        if event.args and event.args['with_follow_on_turn']:
            pixels.listen()
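
Throughout these examples, pixels exposes wakeup() (optionally taking a direction), listen(), think(), speak(), and off(). To dry-run any of the snippets without ReSpeaker hardware, a minimal stand-in is enough; this stub is an assumption for testing, not the real pixels module:

# Hypothetical no-op stand-in for the real pixels module, for running the
# examples without LED hardware. Only the methods used in these examples exist.
class Pixels:
    def wakeup(self, direction=0):
        print('pixels: wakeup, direction={}'.format(direction))

    def listen(self):
        print('pixels: listen')

    def think(self):
        print('pixels: think')

    def speak(self):
        print('pixels: speak')

    def off(self):
        print('pixels: off')

pixels = Pixels()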
Example #2
    def record():
        stream.start_stream()
        pixels.wakeup()
        print("* recording")
        frames = []
        for i in range(0, int(RESPEAKER_RATE / CHUNK * RECORD_SECONDS)):
            data = stream.read(CHUNK)
            frames.append(data)
        print("* done recording")
        stream.stop_stream()
        print("start to send to baidu")
        pixels.off()
        # audio_data should be raw_data
        text = baidu.server_api(generator_list(frames))
        if text:
            try:
                text = json.loads(text)
                for t in text['result']:
                    print(t)
                    return t
            except KeyError:
                return "get nothing"
        else:
            print("get nothing")
            return "get nothing"
Example #3
def on_message(client, userdata, msg):
    #print("Message received on topic {0}: {1}".format(msg.topic, msg.payload))
    if "hotword" in msg.topic:
        print("==> hotword message {0}: {1}".format(msg.topic, msg.payload))
        if "detected" in msg.topic:
            pixels.wakeup()
        if msg.topic == "hermes/hotword/toggleOn":
            pixels.off()
    if "asr" in msg.topic:
        print("==> asr message {0}: {1}".format(msg.topic, msg.payload))
        if "textCaptured" in msg.topic:
            pixels.think()
    if "nlu" in msg.topic:
        print("==> nlu message {0}: {1}".format(msg.topic, msg.payload))
    if "tts" in msg.topic:
        print("==> tts message {0}: {1}".format(msg.topic, msg.payload))
        if msg.topic == "hermes/tts/say":
            pixels.speak()
    elif "intent" in msg.topic:
        print("==> intent message {0}: {1}".format(msg.topic, msg.payload))
        #intent_topic = msg.topic.split("/") 
        payload = json.loads(msg.payload)
        if "intent" in payload:
            name = payload["intent"]["intentName"]
            slots = payload["slots"]
            print("====> intent {0} detected with slots {1}".format(name, slots))   
Example #4
def process_event(event, device_id):
    """Pretty prints events.

    Prints all events that occur with two spaces between each new
    conversation and a single space between turns of a conversation.

    Args:
        event(event.Event): The current event to process.
        device_id(str): The device ID of the new instance.
    """
    if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
        print()
        pixels.wakeup()

    print(event)

    if event.type == EventType.ON_END_OF_UTTERANCE:
        pixels.think()

    if event.type == EventType.ON_RESPONDING_STARTED:
        pixels.speak()

    if event.type == EventType.ON_CONVERSATION_TURN_FINISHED:
        if event.args and event.args['with_follow_on_turn']:
            pixels.listen()
        else:
            pixels.off()
            print()

    if event.type == EventType.ON_DEVICE_ACTION:
        for command, params in process_device_actions(event, device_id):
            print('Do command', command, 'with params', str(params))
Example #5
def main(detect="", photo_file="", trans_lang=""):
    pixels.wakeup()
    if photo_file == "":
        photo_file = camera()
    pixels.off()

    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('vision', 'v1', credentials=credentials,
            discoveryServiceUrl=DISCOVERY_URL)

    with open(photo_file, 'rb') as image:
        image_content = base64.b64encode(image.read())
        if detect == "":  # No parameter
            DETECT = default_detect
        else:  # Parameter specified
            DETECT = [detect.upper()]

        result = ""
        bounds = []
        for DET in DETECT:
            # ... (the Vision API request is elided here; Example #31 shows
            # the complete version of this function) ...
            try:
                results = response['responses'][0][annotation]
                for res in results:
                    if DET in ["TEXT"]:
                        tlocale = res["locale"]
                        result += res["description"] + ", "
                        bounds += res["boundingPoly"]["vertices"]

                    elif DET in ["FACE"]:
                        if res["joyLikelihood"] == "VERY_LIKELY" or res["joyLikelihood"] == "LIKELY":
                            result += "Smile "
                        if res["angerLikelihood"] == "VERY_LIKELY" or res["angerLikelihood"] == "LIKELY":
                            result += "Angry "
                        if res["headwearLikelihood"] == "VERY_LIKELY" or res["headwearLikelihood"] == "LIKELY":
                            result += "Capped "
            except:
                result += "No " + DET + ", "
            pixels.off()
Example #6
def main():

    mic = MicrophoneRecorder(RATE, 100 * CHUNK)
    mic.start()

    ch = mic.get_frames()

    while True:
        ch = mic.get_frames()
        if len(ch) > 0:

            ch = b''.join(ch)
            ted = np.frombuffer(ch, np.int16)  # np.fromstring is deprecated

            ch = list_splice(ted, 4)

            ch1 = ch[0]  # Bottom Left
            ch2 = ch[1]  # Top Left
            ch3 = ch[2]  # Top Right
            ch4 = ch[3]  # Bottom Right

            ch1 = butter_bandpass_filter(ch1, LOW_CUT, HIGH_CUT, RATE, ORDER)
            ch2 = butter_bandpass_filter(ch2, LOW_CUT, HIGH_CUT, RATE, ORDER)
            ch3 = butter_bandpass_filter(ch3, LOW_CUT, HIGH_CUT, RATE, ORDER)
            ch4 = butter_bandpass_filter(ch4, LOW_CUT, HIGH_CUT, RATE, ORDER)

            temp = []
            for i in range(0, len(ch1)):
                temp.append(ch1[i])
                temp.append(ch2[i])
                temp.append(ch3[i])
                temp.append(ch4[i])

            direction = get_direction(np.array(temp))
            pixels.wakeup(direction)

            a1 = np.average((np.absolute(ch1)))
            a2 = np.average((np.absolute(ch2)))
            a3 = np.average((np.absolute(ch3)))
            a4 = np.average((np.absolute(ch4)))

            if (a1 > a2 and a1 > a3 and a1 > a4):
                print("Yes")
            else:
                print("No")

            print("Bottom Left: %f" % a1)
            print("Top Left: %f" % a2)
            print("Top Right : %f" % a3)
            print("Bottom Right: %f" % a4)

            #plot_4ch(ch1, ch2, ch3, ch4)
        else:
            pass

    mic.close()
Example #7
def main():
    global assistant

    pixels.wakeup()

    #    GPIO.setmode(GPIO.BCM)
    #    GPIO.setup(PIN_LAMP, GPIO.OUT)
    #    for p in PIN_BUTTON:
    #        GPIO.setup(p, GPIO.IN, GPIO.PUD_UP)
    #        if p == 13: # toggle button
    #            GPIO.add_event_detect(p, GPIO.BOTH, callback=procButton, \
    #                    bouncetime=BOUNCE_MSEC)
    #        else:
    #            GPIO.add_event_detect(p, GPIO.FALLING, callback=procButton, \
    #                    bouncetime=BOUNCE_MSEC)

    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--credentials',
                        type=existing_file,
                        metavar='OAUTH2_CREDENTIALS_FILE',
                        default=os.path.join(os.path.expanduser('~/.config'),
                                             'google-oauthlib-tool',
                                             'credentials.json'),
                        help='Path to store and read OAuth2 credentials')
    parser.add_argument('--device_model_id',
                        type=str,
                        metavar='DEVICE_MODEL_ID',
                        required=True,
                        help='The device model ID registered with Google')
    parser.add_argument(
        '--project_id',
        type=str,
        metavar='PROJECT_ID',
        required=False,
        help='The project ID used to register device instances.')
    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='%(prog)s ' + Assistant.__version_str__())

    args = parser.parse_args()
    with open(args.credentials, 'r') as f:
        credentials = google.oauth2.credentials.Credentials(token=None,
                                                            **json.load(f))

    with Assistant(credentials, args.device_model_id) as assistant:
        events = assistant.start()

        print('device_model_id:', args.device_model_id + '\n' + 'device_id:',
              assistant.device_id + '\n')

        if args.project_id:
            register_device(args.project_id, credentials, args.device_model_id,
                            assistant.device_id)

        for event in events:
            process_event(event, assistant.device_id)
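
Given the argparse setup above, a typical invocation would look something like this (the script name and IDs are placeholders):

python3 assistant.py --device_model_id my-model-id --project_id my-project-id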
Example #8
    def detect(self, keywords=None):
        self.decoder.end_utt()
        self.decoder.start_utt()
        pixels.off()

        self.detect_history.clear()

        self.detect_queue.queue.clear()
        self.status |= self.detecting_mask
        self.stream.start_stream()
        result = None
        logger.info('Start detecting')
        got = 0
        while not self.quit_event.is_set():
            size = self.detect_queue.qsize()
            if size > 4:
                logger.info('Too many delays, {} in queue'.format(size))

            data = self.detect_queue.get()
            self.detect_history.append(data)
            self.decoder.process_raw(data, False, False)

            hypothesis = self.decoder.hyp()
            if hypothesis:
                logger.info('Detected {}'.format(hypothesis.hypstr))
                if collecting_audio != 'no':
                    logger.debug(collecting_audio)
                    save_as_wav(b''.join(self.detect_history),
                                hypothesis.hypstr)
                self.detect_history.clear()
                if keywords:
                    for keyword in keywords:
                        if hypothesis.hypstr.find(keyword) >= 0:
                            result = hypothesis.hypstr
                            print(result)
                            pixels.wakeup()
                            got = 1
                            break
                    if got == 1:
                        break
                    else:
                        self.decoder.end_utt()
                        self.decoder.start_utt()
                        self.detect_history.clear()
                        pixels.off()
                else:
                    result = hypothesis.hypstr
                    print(result)
                    break

        self.status &= ~self.detecting_mask
        self.stop()

        return result
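
A hedged usage sketch: assuming the surrounding class has been constructed with its decoder and audio queues wired up, detect() blocks until a keyword is heard and returns the hypothesis text. The detector variable and keyword list below are assumptions:

# Hypothetical call site for the detect() method above.
result = detector.detect(keywords=['alexa'])
if result:
    print('keyword heard:', result)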
Example #9
def process_event(event, device_id):
    """Pretty prints events.

    Prints all events that occur with two spaces between each new
    conversation and a single space between turns of a conversation.

    Args:
        event(event.Event): The current event to process.
        device_id(str): The device ID of the new instance.
    """
    if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
        print()
        pixels.wakeup()

    print(event)

    if event.type == EventType.ON_END_OF_UTTERANCE:
        pixels.think()

    if event.type == EventType.ON_RESPONDING_STARTED:
        pixels.speak()

    if event.type == EventType.ON_CONVERSATION_TURN_FINISHED:
        if event.args and event.args['with_follow_on_turn']:
            pixels.listen()
        else:
            pixels.off()
            print()

    if event.type == EventType.ON_DEVICE_ACTION:
        for command, params in process_device_actions(event, device_id):
            print('Do command', command, 'with params', str(params))
            if command == "com.example.commands.Shutters":
                action = ''
                shutters_url = 'http://10.0.0.31:8181/json.htm'
                shutters_header = {'Authorization': 'Basic *******************'}
                if params['status'] == "CLOSE":
                    print('Closing shutters')
                    action = 'On'
                    shutters_params = {'type': 'command', 'param': 'switchlight', 'idx': '13', 'switchcmd': action }
                    r = requests.get(shutters_url, params=shutters_params, headers=shutters_header)
                    print(r.url)
                    print(r.status_code)
                if params['status'] == "OPEN":
                    print('Opening shutters')
                    action = 'Off'
                    shutters_params = {'type': 'command', 'param': 'switchlight', 'idx': '13', 'switchcmd': action }
                    r = requests.get(shutters_url, params=shutters_params, headers=shutters_header)
                    print(r.url)
                    print(r.status_code)
                    print(r.headers)
Example #10
    def on_detected(keyword):
        position = doa.get_direction()
        pixels.wakeup(position)
        print('detected {} at direction {}'.format(keyword, position))
        if position <= 90:
            ser.write(b"1000\n")
        elif position > 90 and position <= 180:
            ser.write(b"0100\n")
        elif position > 180 and position <= 270:
            ser.write(b"0010\n")
        else:
            ser.write(b"0001\n")

        vibrate_time = 0.5
        time.sleep(vibrate_time)
        ser.write(b"0000\n")
Example #11
def ActivateLeds(LedOn):

    if LedOn:
        pixels.wakeup()
        time.sleep(3)
        pixels.think()
        time.sleep(3)
        pixels.speak()
        time.sleep(6)
        pixels.off()
        time.sleep(3)

    else:
        pixels.off()
        time.sleep(1)
Example #12
 def record_time_stamp(self):
     while True:
         t_tuple = None
         if not DIRECTIONS_QUEUE.empty():
             t_tuple = DIRECTIONS_QUEUE.get()
             frames = t_tuple[0]
             time_recorded = t_tuple[1]
             try:
                 direction = self.get_direction_helper(frames)
                 pixels.wakeup(direction)
                 print(direction)
                 self.record_time_stamp_helper(
                     self.convert_time(time_recorded), direction)
             except Exception:
                 print('could not get direction')
                 continue
         else:
             time.sleep(5)
Example #13
    def on_detected(keyword):
        position = doa.get_direction()
        pixels.wakeup(position)
        print('detected {} at direction {}'.format(keyword, position))
        if position >= 30 and position <= 180:
            pwm.setPWM(0, 0, 175)
            pwm.setPWM(1, 0, 500)
        elif position > 180 and position <= 330:
            pwm.setPWM(0, 0, 560)
            pwm.setPWM(1, 0, 500)
        elif position > 330 or position < 30:
            pwm.setPWM(0, 0, 370)
            pwm.setPWM(1, 0, 6200)
        else:
            pwm.setPWM(0, 0, 370)
            pwm.setPWM(1, 0, 640)

        #talkassist.os.system("espeak 'may i help you'")
        print("How may I help you?")
        print("call google assistant here, delete this line.")
Example #14
def test_4mic():
    import signal
    import time
    from pixels import Pixels, pixels

    is_quit = threading.Event()

    def signal_handler(sig, num):
        is_quit.set()
        print('Quit')

    signal.signal(signal.SIGINT, signal_handler)

    with MicArray(16000, 4, 16000) as mic:
        for chunk in mic.read_chunks():
            direction = mic.get_direction(chunk)
            print(int(direction))
            pixels.wakeup(direction)

            if is_quit.is_set():
                break
Example #15
    def run(self):
        global moving, last_movement_timestamp, doa_valid
        src = Source(rate=16000, channels=4, frames_size=320)
        #ch1 = ChannelPicker(channels=4, pick=1)
        doa = DOA(rate=16000)
        #src.link(ch1)
        src.link(doa)
        src.recursive_start()

        self.running = True
        while self.running:
            try:
                time.sleep(1)
                current_timestamp = datetime.datetime.now()
                if doa_valid and (
                    (current_timestamp - last_movement_timestamp).seconds > 2):
                    position, amplitude = doa.get_direction()
                    if amplitude > 2000:
                        pixels.wakeup(position)
                        print(amplitude, position)
                        if position > 0 and position < 180:
                            pivot_right()
                            time.sleep(position / 200)
                            stop()
                        elif position >= 180 and position < 360:
                            pivot_left()
                            position = 360 - position
                            time.sleep(position / 200)
                            stop()
                        time.sleep(2)
                    else:
                        pixels.speak()
                else:
                    pixels.think()
            except Exception:
                print(sys.exc_info())

        src.recursive_stop()
Example #16
 def on_detected(keyword):
     position = doa.get_direction()
     pixels.wakeup(position)
     print('detected {} at direction {}'.format(keyword, position))
     src.stream.start()
     print("* recording")
     frames = []
     for i in range(0, int(RESPEAKER_RATE / CHUNK * RECORD_SECONDS)):
         data = stream.read(CHUNK)
         frames.append(data)
     print("* done recording")
     src.stream.stop()
     print("start to send to baidu")
     # audio_data should be raw_data
     text = baidu.server_api(generator_list(frames))
     if text:
         try:
             text = json.loads(text)
             for t in text['result']:
                 print(t)
         except KeyError: 
             print("get nothing")
     else:
         print("get nothing")
Example #17
def process_event(event, doa):
    """Pretty prints events.
    Prints all events that occur with two spaces between each new
    conversation and a single space between turns of a conversation.
    Args:
        event(event.Event): The current event to process.
    """
    global cam_flag

    set_angle(90)

    if event.type == EventType.ON_NO_RESPONSE:
        cam_flag = True

    if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
        print()
        GPIO.output(led_gnd_pin, True)
        if doa is not None:
            direction = doa.get_direction()
            print('detected voice at direction {}'.format(direction))
            pixels.wakeup(direction)
            if direction > 270 or direction < 90:
                set_angle(20)
            else:
                set_angle(175)

        if cam_flag:
            tstamp = str(int(time.time()))
            imgfile = "one-shot-" + tstamp + ".jpg"
            detfile = "det-shot-" + tstamp + ".jpg"
            camera.capture(imgfile)

            img = cv2.imread(imgfile)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = faceCascade.detectMultiScale(gray,
                                                 scaleFactor=1.2,
                                                 minNeighbors=5,
                                                 minSize=(20, 20))

            print('detected ' + str(len(faces)) + ' faces')

            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = img[y:y + h, x:x + w]

                eyes = eyeCascade.detectMultiScale(
                    roi_gray,
                    scaleFactor=1.5,
                    minNeighbors=5,
                    minSize=(5, 5),
                )

                for (ex, ey, ew, eh) in eyes:
                    cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh),
                                  (0, 255, 0), 2)

                smile = smileCascade.detectMultiScale(
                    roi_gray,
                    scaleFactor=1.5,
                    minNeighbors=15,
                    minSize=(25, 25),
                )

                for (xx, yy, ww, hh) in smile:
                    cv2.rectangle(roi_color, (xx, yy), (xx + ww, yy + hh),
                                  (0, 255, 0), 2)

            cv2.imwrite(detfile, img)


#            networking.upload_images("192.168.1.69", 8888, ["one-shot.jpg"])
#            cam_flag = False

    print(event)

    if ((event.type == EventType.ON_CONVERSATION_TURN_FINISHED and event.args
         and not event.args['with_follow_on_turn'])
            or (event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT)
            or (event.type == EventType.ON_NO_RESPONSE)):
        print()
        GPIO.output(led_gnd_pin, False)
        pixels.off()
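
The snippet uses faceCascade, eyeCascade, and smileCascade without showing how they are built. With a standard OpenCV install they would typically be loaded from the bundled Haar cascade files; the exact paths used in the original project are not shown, so treat this as an assumption:

import cv2

# Hypothetical setup for the cascade classifiers used above, loading the
# Haar cascade XML files that ship with opencv-python.
faceCascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
eyeCascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_eye.xml')
smileCascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_smile.xml')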
Example #18
import time
from pixels import Pixels, pixels
from alexa_led_pattern import AlexaLedPattern
from google_home_led_pattern import GoogleHomeLedPattern

if __name__ == '__main__':

    pixels.pattern = GoogleHomeLedPattern(show=pixels.show)

    while True:

        try:
            pixels.wakeup()
            time.sleep(3)
            pixels.think()
            time.sleep(3)
            pixels.speak()
            time.sleep(6)
            pixels.off()
            time.sleep(3)
        except KeyboardInterrupt:
            break


    pixels.off()
    time.sleep(1)
Example #19
import time
from pixels import Pixels, pixels
from alexa_led_pattern import AlexaLedPattern
from google_home_led_pattern import GoogleHomeLedPattern

if __name__ == '__main__':

    pixels.pattern = GoogleHomeLedPattern(show=pixels.show)

    while True:

        try:
            print('Wakeup')
            pixels.wakeup()
            time.sleep(3)
            print('think')
            pixels.think()
            time.sleep(3)
            print('speak')
            pixels.speak()
            time.sleep(6)
            print('off')
            pixels.off()
            time.sleep(3)
        except KeyboardInterrupt:
            break


    pixels.off()
    time.sleep(1)
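
Both scripts import AlexaLedPattern but only use the Google Home pattern; assuming the two pattern classes share the same constructor, switching the animation is a one-line change:

# Use the Alexa-style animation instead of the Google Home one.
pixels.pattern = AlexaLedPattern(show=pixels.show)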
Example #20
 def on_detected(keyword):
     position = doa.get_direction()
     pixels.wakeup(position)
     print('detected {} at direction {}'.format(keyword, position))
Example #21
def onMessage(client, userData, message):
    global lang

    intent = message.topic
    payload = json.loads(message.payload)

    if intent == HERMES_ON_HOTWORD:
        last_hotword = utils.read_file("hotword.txt")
        current_hotword = payload['modelId'].encode('utf-8')
        if last_hotword != current_hotword:
            utils.write_to_file("hotword.txt", current_hotword)

        if settings.USE_LEDS:
            pixels.wakeup()
        return

    elif intent == HERMES_SAY:
        if settings.USE_LEDS:
            pixels.speak()
        return

    elif intent == HERMES_CAPTURED:
        if settings.USE_LEDS:
            pixels.think()
        return

    elif intent == HERMES_START_LISTENING:
        if settings.USE_LEDS:
            pixels.listen()
        return

    elif intent == HERMES_HOTWORD_TOGGLE_ON:
        if settings.USE_LEDS:
            pixels.off()
        return

    global recipe, currentStep, timers, confirm, sessionId, product, tipIndex, fromIntent

    sessionId = payload['sessionId']

    ##### TODO: stabilize before re-enabling

    if intent == OPEN_RECIPE:
        print("INTENT : OPEN_RECIPE")
        if 'slots' not in payload:
            error(sessionId)
            return

        slotRecipeName = payload['slots'][0]['value']['value'].encode('utf-8')

        if recipe is not None and currentStep > 0:
            if confirm <= 0:
                confirm = 1
                endTalk(sessionId, text=lang['warningRecipeAlreadyOpen'])
                return
            else:
                for timer in timers:
                    timer.cancel()

                timers = {}
                confirm = 0
                currentStep = 0

        if any(product.lower() in ingredients
               for ingredients in tips_list_from_paprika):
            recipe_nb = len(tips_list_from_paprika[product.lower()])
            if recipe_nb == 1:
                for recipe in tips_list_from_paprika[product.lower()]:
                    continueSession(sessionId,
                                    "j'ai trouvé une astuce: " + recipe +
                                    ". Tu veux faire ça ?",
                                    intents=['Pierrot-app:validateQuestion'])
            elif recipe_nb == 2:
                askForTwoTips(getTipList)
        else:
            endTalk(sessionId, text=lang['noTipsForProduct'])
        fromIntent = "OPEN_RECIPE"

    elif intent == NEXT_STEP:
        print("INTENT : NEXT_STEP")
        if recipe is None:
            endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        else:
            if str(currentStep + 1) not in recipe['steps']:
                endTalk(sessionId, text=lang['recipeEnd'])
            else:
                currentStep += 1
                step = recipe['steps'][str(currentStep)]

                ask = False
                if type(step) is dict and currentStep not in timers:
                    ask = True
                    step = step['text']

                endTalk(sessionId, text=lang['nextStep'].format(step))
                if ask:
                    say(text=lang['timerAsk'])
        fromIntent = "NEXT_STEP"

    elif intent == INGREDIENTS:
        print("INTENT : INGREDIENTS")
        if recipe is None:
            endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        else:
            ingredients = ''
            for ingredient in recipe['ingredients']:
                ingredients += u"{}. ".format(ingredient)

            endTalk(sessionId,
                    text=lang['neededIngredients'].format(ingredients))
        fromIntent = "INGREDIENTS"

    elif intent == PREVIOUS_STEP:
        print("INTENT : PREVIOUS_STEP")
        if recipe is None:
            endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        else:
            if currentStep <= 1:
                endTalk(sessionId, text=lang['noPreviousStep'])
            else:
                currentStep -= 1
                step = recipe['steps'][str(currentStep)]

                ask = False
                timer = 0
                if type(step) is dict and currentStep not in timers:
                    ask = True
                    timer = step['timer']
                    step = step['text']

                endTalk(sessionId, text=lang['previousStepWas'].format(step))
                if ask:
                    say(text=lang['hadTimerAsk'].format(timer))
        fromIntent = "PREVIOUS_STEP"

    elif intent == REPEAT_STEP:
        print("INTENT : REPEAT_STEP")
        if recipe is None:
            endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        else:
            if currentStep < 1:
                ingredients = ''
                for ingredient in recipe['ingredients']:
                    ingredients += u"{}. ".format(ingredient)

                endTalk(sessionId,
                        text=lang['neededIngredients'].format(ingredients))
            else:
                step = recipe['steps'][str(currentStep)]
                endTalk(sessionId, text=lang['repeatStep'].format(step))
        fromIntent = "REPEAT_STEP"

    elif intent == ACTIVATE_TIMER:
        print("INTENT : ACTIVATE_TIMER")
        if recipe is None:
            endTalk(sessionId, text=lang['noTimerNotStarted'])
        else:
            step = recipe['steps'][str(currentStep)]

            if type(step) is not dict:
                endTalk(sessionId, text=lang['notTimerForThisStep'])
            elif currentStep in timers:
                endTalk(sessionId, text=lang['timerAlreadyRunning'])
            else:
                timer = Timer(int(step['timer']),
                              onTimeUp,
                              args=[currentStep, step])
                timer.start()
                timers[currentStep] = timer
                endTalk(sessionId, text=lang['timerConfirm'])
        fromIntent = "ACTIVATE_TIMER"

    elif intent == GET_FOOD:
        print("INTENT : GET_FOOD")
        sayNoSession(lang['searching'])
        asTalk = False
        tipIndex = 1
        product = payload["slots"][0]["rawValue"]
        if lastIntent == "ASK_FOR_TIP" or getAssistant() == "marin":
            currentStep = 0
            readTipsProposition()
        else:
            continueSession(sessionId=sessionId,
                            text=lang['cookNowOrKeep'].format(product),
                            intents=['Pierrot-app:nowOrLater'])
        fromIntent = "GET_FOOD"

    elif intent == ASK_FOR_TIP:
        print("INTENT : ASK_FOR_TIP")
        if product in getTipList():
            currentStep = 0
            tipIndex = 1
            continueSession(sessionId=sessionId,
                            text=lang['tipFor'].format(product),
                            intents=[
                                'Pierrot-app:validateQuestion',
                                'Pierrot-app:invalidateQuestion'
                            ])
        else:
            continueSession(sessionId=sessionId,
                            text=lang['tipForWhat'],
                            intents=['Pierrot-app:getFoodRequest'])
        fromIntent = "ASK_FOR_TIP"

    # elif intent == GET_FOOD_COOK_NOW:
    # 	product = payload['slots'][0]['value']['value'].encode('utf-8')
    # 	if any(product.lower() in ingredients for ingredients in recipe_ingredients):
    # 		# endTalk(sessionId=sessionId, text=lang['startRecipe'].format(food), intents=['openRecipe'])
    # 		readRecipe(sessionId, product, payload)
    # 	else:
    # 		endTalk(sessionId, text=lang['recipeNotFound'])

    elif intent == COOK_NOW_OR_KEEP:
        print("INTENT : COOK_NOW_OR_KEEP")
        # if recipe is None:
        # 	endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        # else:
        readTipsProposition()
        fromIntent = "COOK_NOW_OR_KEEP"

    elif intent == VALIDATE_QUESTION:
        print("INTENT : VALIDATE_QUESTION")
        if recipe is None:
            endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        elif fromIntent == "ASK_FOR_TIP":
            readTipsProposition()
        else:
            if currentStep != 0:
                currentStep += 1
                step = recipe['steps'][str(currentStep)]

                ask = False
                if type(step) is dict and currentStep not in timers:
                    ask = True
                    step = step['text']

                endTalk(sessionId, text=lang['nextStep'].format(step))
            else:
                ingredients = ''
                for ingredient in recipe['ingredients']:
                    ingredients += u"{}, ".format(ingredient)

                endTalk(sessionId,
                        text=lang['neededIngredients'].format(ingredients))
        fromIntent = "VALIDATE_QUESTION"

    elif intent == INVALIDATE_QUESTION:
        print("INTENT : INVALIDATE_QUESTION")
        if recipe is None:
            endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        elif fromIntent == "GET_FOOD" or fromIntent == "INVALIDATE_QUESTION" or fromIntent == "COOK_NOW_OR_KEEP" or fromIntent == "VALIDATE_QUESTION":
            readTipsProposition()
        elif lastIntent == "ASK_FOR_TIP":
            continueSession(sessionId=sessionId,
                            text=lang['tipForWhat'],
                            intents=['Pierrot-app:getFoodRequest'])
        fromIntent = "INVALIDATE_QUESTION"

    elif intent == START_RECIPE:
        print("INTENT : START_RECIPE")
        if recipe is None:
            endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        else:
            currentStep += 1
            step = recipe['steps'][str(currentStep)]

            ask = False
            if type(step) is dict and currentStep not in timers:
                ask = True
                step = step['text']

            endTalk(sessionId, text=lang['firstStep'].format(step))
            if ask:
                say(text=lang['timerAsk'])
        fromIntent = "START_RECIPE"

    elif intent == CANCEL:
        if settings.USE_LEDS:
            pixels.off()
        error(sessionId)
        mqttClient.loop_stop()
        mqttClient.disconnect()
        running = False

    elif intent == JOKE:
        sayNoSession(
            "Je ne crois pas qu'il y ai de bons ou de mauvais assistant. ")
Example #22
def detected_callback():
    print "收到了...."

    pixels.wakeup()
    time.sleep(3)
    pixels.off()
Example #23
 def on_detected(keyword):
     direction = doa.get_direction()
     logging.info('detected {} at direction {}'.format(keyword, direction))
     pixels.wakeup(direction)
     alexa.listen()
Example #24
def pixels_wakeup(client, userdata, msg):
  pixels.wakeup()
  time.sleep(3)
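
A sketch of how a callback like pixels_wakeup might be wired to a topic with paho-mqtt; the broker host and topic name are placeholders:

import paho.mqtt.client as mqtt

client = mqtt.Client()
# Route messages on a chosen topic to the pixels_wakeup callback above.
client.message_callback_add('respeaker/wakeup', pixels_wakeup)
client.connect('localhost', 1883, 60)
client.subscribe('respeaker/wakeup')
client.loop_forever()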
Example #25
def main():
    recognizer = aiy.cloudspeech.get_recognizer()
    recognizer.expect_phrase('turn off the light')
    recognizer.expect_phrase('turn on the light')
    recognizer.expect_phrase('blink')
    recognizer.expect_phrase('repeat after me')

    button = aiy.voicehat.get_button()
    led = aiy.voicehat.get_led()
    aiy.audio.get_recorder().start()

    aiy.i18n.set_language_code(speech_lang)

    for i in range(3):
        pixels.wakeup()
        time.sleep(1)
        pixels.off()

    while True:
        print('Press the button and speak')
        pixels.wakeup()
        button.wait_for_press()

        while True:
            print('Listening...')

            bye_words = ['goodbye', 'good bye', 'see you', 'bye bye']
            pixels.think()
            text = recognizer.recognize()
            if not text:
                print('Sorry but please say again in ' + speech_lang)
            else:
                pixels.listen()
                print('Speech: ' + text)
                trans_text = translate_text(text, trans_lang)
                trans_text = trans_text.replace("&#39;", "")
                print('Trans: ' + trans_text)
                pixels.off()
                pixels.listen()
                if trans_lang in aiy_lang:
                    aiy.audio.say(trans_text, trans_lang)
                elif trans_lang == "ja-JP":
                    os.system(
                        '~/AIY-projects-python/src/aquestalkpi/AquesTalkPi -g {} {} | aplay -D plughw:{},{}'
                        .format(VOLUME, trans_text, CARD, DEVICE))
                else:
                    print("No lang to say")

                if 'turn on the light' in text:
                    led.set_state(aiy.voicehat.LED.ON)
                elif 'turn off the light' in text:
                    led.set_state(aiy.voicehat.LED.OFF)
                elif 'blink' in text:
                    led.set_state(aiy.voicehat.LED.BLINK)
                elif 'repeat after me' in text:
                    to_repeat = text.replace('repeat after me', '', 1)
                    aiy.audio.say(to_repeat)

                keyw = ""
                for b in bye_words:
                    if text.find(b) > -1:
                        keyw = "bye"
                        break
                if keyw == "bye":
                    pixels.off()
                    break
                time.sleep(0.2)
                pixels.off()
Example #26
 def on_detected(keyword):
     position = doa.get_direction()
     pixels.wakeup(position)
     print('detected {} at direction {}'.format(keyword, position))
Example #27
 def on_detected(keyword):
     direction = doa.get_direction()
     logging.info('detected {} at direction {}'.format(keyword, direction))
     pixels.wakeup(direction)
Example #28
def pixels_wakeup(client, userdata, msg):
#  print("pixels_wakeup")
  pixels.wakeup()
  time.sleep(3)
Example #29
 def on_detected(keyword):
     direction = doa.get_direction()
     print('detected {} at direction {}'.format(keyword, direction))
     alexa.listen()
     pixels.wakeup(direction)
Example #30
c = 343
fs = 16000
nfft = 512


#Possible dos algorithms: SRP, MUSIC, TOPS, CSSM, WAVES
doa = pra.doa.algorithms['SRP'](R, fs, nfft, c=c)



plt.figure()
with MicArray(fs, 4, fs // 4) as mic:  # integer chunk size under Python 3
    start = time.time()
    for chunk in mic.read_chunks():
        #print(chunk.shape)
        #pixels.wakeup(np.random.randint(0, 360, 1))

        X = np.array([pra.stft(chunk[i::4], nfft, nfft//2, transform=np.fft.rfft).T for i in range(4)])
        doa.locate_sources(X, freq_range=[500, 3000])
        direction = doa.azimuth_recon / np.pi * 180
        print('Time: ', time.time()-start, ' Recovered azimuth: ', direction)
        pixels.wakeup(direction)
        #plt.close()
        #doa.polar_plt_dirac()
        #plt.draw()
        #plt.pause(0.0001)

        if is_quit.is_set():
            break
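
The array geometry R used by the SRP locator is assumed to be defined elsewhere. For a 4-microphone circular array like the ReSpeaker's (roughly 32 mm radius) it could plausibly be built with pyroomacoustics; the radius and orientation here are assumptions:

import pyroomacoustics as pra

# Hypothetical geometry for R: a 4-mic circular array of ~32 mm radius
# centred at the origin, as a 2 x 4 matrix of (x, y) coordinates.
R = pra.circular_2D_array(center=[0.0, 0.0], M=4, phi0=0.0, radius=0.032)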

Example #31
def main(detect="", photo_file="", trans_lang=""):
    pixels.wakeup()
    if photo_file == "":
        photo_file = camera()
    pixels.off()

    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('vision',
                              'v1',
                              credentials=credentials,
                              discoveryServiceUrl=DISCOVERY_URL)

    with open(photo_file, 'rb') as image:
        image_content = base64.b64encode(image.read())
        if detect == "":  #No parameter
            DETECT = default_detect
        else:  #Paremater specified
            DETECT = [detect.upper()]

        result = ""
        bounds = []
        tlocale = ""
        for DET in DETECT:
            pixels.listen()
            service_request = service.images().annotate(
                body={
                    'requests': [{
                        'image': {
                            'content': image_content.decode('UTF-8')
                        },
                        'features': [{
                            'type': DET + '_DETECTION',
                            'maxResults': default_max
                        }]
                    }]
                })
            response = service_request.execute()
            annotation = DET.lower() + 'Annotations'
            try:
                results = response['responses'][0][annotation]
                for res in results:
                    if DET in ["LABEL", "LOGO"]:
                        if res["score"] > 0.7:
                            result += res["description"] + ", "

                    elif DET in ["TEXT"]:
                        tlocale = res["locale"]
                        result += res["description"] + ", "
                        bounds += res["boundingPoly"]["vertices"]

                    elif DET in ["FACE"]:
                        if res["joyLikelihood"] == "VERY_LIKELY" or res[
                                "joyLikelihood"] == "LIKELY":
                            result += "Smile "
                        if res["angerLikelihood"] == "VERY_LIKELY" or res[
                                "angerLikelihood"] == "LIKELY":
                            result += "Angry "
                        if res["headwearLikelihood"] == "VERY_LIKELY" or res[
                                "headwearLikelihood"] == "LIKELY":
                            result += "Capped "

                    result += DET + ", "
            except:
                result += "No " + DET + ", "
            pixels.off()

        print('Result: ' + result)
        pixels.listen()
        if trans_lang:
            trans_text = translate_text(result, trans_lang)
            trans_text = trans_text.replace("&#39;", "")
            print('Trans: ' + trans_text)
            if trans_lang in aiy_lang:
                aiy.audio.say(trans_text, trans_lang)
            elif trans_lang == "ja-JP":
                os.system(aquest_dir +
                          ' -g {} {} | aplay -D plughw:{},{}'.format(
                              VOLUME, trans_text, CARD, DEVICE))
            else:
                aiy.audio.say('Nothing to trans!', 'en-US')

        else:  #trans_lang = null then default en-US
            aiy.audio.say(result, 'en-US')
        pixels.off()