Example #1
def main(detect="", photo_file="", trans_lang=""):
    pixels.wakeup()
    if photo_file == "":
        photo_file = camera()
    pixels.off()

    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('vision', 'v1', credentials=credentials,
            discoveryServiceUrl=DISCOVERY_URL)

    with open(photo_file, 'rb') as image:
        image_content = base64.b64encode(image.read())
        if detect == "":  # No parameter
            DETECT = default_detect
        else:  # Parameter specified
            DETECT = [detect.upper()]

        # ... (the per-feature request loop is truncated in this excerpt;
        # Example #30 below shows the complete version)
                    elif DET in ["TEXT"]:
                        tlocale = res["locale"]
                        result += res["description"] + ", "
                        bounds += res["boundingPoly"]["vertices"]

                    elif DET in ["FACE"]:
                        if res["joyLikelihood"] == "VERY_LIKELY" or res["joyLikelihood"] == "LIKELY":
                            result += "Smile "
                        if res["angerLikelihood"] == "VERY_LIKELY" or res["angerLikelihood"] == "LIKELY":
                            result += "Angry "
                        if res["headwearLikelihood"] == "VERY_LIKELY" or res["headwearLikelihood"] == "LIKELY":
                            result += "Capped "
            except:
                result += "No " + DET + ", "
            pixels.off()
Example #2
    def listen(self, duration=9, timeout=3):
        pixels.off()
        vad.reset()
        self.listen_countdown[0] = (duration * self.sample_rate +
                                    self.frames_per_buffer -
                                    1) / self.frames_per_buffer
        self.listen_countdown[1] = (timeout * self.sample_rate +
                                    self.frames_per_buffer -
                                    1) / self.frames_per_buffer
        self.end_silence = self.listen_countdown[1]
        self.num_speech = self.listen_countdown[0]
        self.listen_queue.queue.clear()
        self.status |= self.listening_mask
        self.start()

        logger.info('Start listening')

        def _listen():
            try:
                pixels.speak()
                data = self.listen_queue.get(timeout=timeout)
                while data and not self.quit_event.is_set():
                    yield data
                    data = self.listen_queue.get(timeout=timeout)
            except Queue.Empty:
                pass
                pixels.off()
            self.stop()

        self.stop()
        return _listen()
Example #3
def process_event(event, device_id):
    """Pretty prints events.

    Prints all events that occur with two spaces between each new
    conversation and a single space between turns of a conversation.

    Args:
        event(event.Event): The current event to process.
        device_id(str): The device ID of the new instance.
    """
    if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
        print()
        pixels.wakeup()

    print(event)

    if event.type == EventType.ON_END_OF_UTTERANCE:
        pixels.think()

    if event.type == EventType.ON_RESPONDING_STARTED:
        pixels.speak()

    if event.type == EventType.ON_CONVERSATION_TURN_FINISHED:
        if event.args and event.args['with_follow_on_turn']:
            pixels.listen()
        else:
            pixels.off()
            print()

    if event.type == EventType.ON_DEVICE_ACTION:
        for command, params in process_device_actions(event, device_id):
            print('Do command', command, 'with params', str(params))
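
This handler only reacts to events; it does not produce them. Below is a minimal sketch of how such a handler is typically wired into the google-assistant-library event loop; the credentials path and device model ID are placeholders, not values taken from the example.

import json
import google.oauth2.credentials
from google.assistant.library import Assistant

with open('/path/to/credentials.json', 'r') as f:  # placeholder path
    credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f))

with Assistant(credentials, 'my-device-model-id') as assistant:  # placeholder model id
    for event in assistant.start():
        process_event(event, assistant.device_id)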
Example #4
def process_event(event):
    """Pretty prints events.

    Prints all events that occur with two spaces between each new
    conversation and a single space between turns of a conversation.

    Args:
        event(event.Event): The current event to process.
    """
    if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
        print()
        pixels.wakeup()

    print(event)

    if event.type == EventType.ON_END_OF_UTTERANCE:
        pixels.think()

    if event.type == EventType.ON_RESPONDING_STARTED:
        pixels.speak()

    if event.type == EventType.ON_CONVERSATION_TURN_FINISHED:
        pixels.off()
        if event.args and event.args['with_follow_on_turn']:
            pixels.listen()
Example #5
def on_message(client, userdata, msg):
    #print("Message received on topic {0}: {1}".format(msg.topic, msg.payload))
    if "hotword" in msg.topic:
        print("==> hotword message {0}: {1}".format(msg.topic, msg.payload))
        if "detected" in msg.topic:
            pixels.wakeup()
        if msg.topic == "hermes/hotword/toggleOn":
            pixels.off()
    if "asr" in msg.topic:
        print("==> asr message {0}: {1}".format(msg.topic, msg.payload))
        if "textCaptured" in msg.topic:
            pixels.think()
    if "nlu" in msg.topic:
        print("==> nlu message {0}: {1}".format(msg.topic, msg.payload))
    if "tts" in msg.topic:
        print("==> tts message {0}: {1}".format(msg.topic, msg.payload))
        if msg.topic == "hermes/tts/say":
            pixels.speak()
    elif "intent" in msg.topic:
        print("==> intent message {0}: {1}".format(msg.topic, msg.payload))
        #intent_topic = msg.topic.split("/") 
        payload = json.loads(msg.payload)
        if "intent" in payload:
            name = payload["intent"]["intentName"]
            slots = payload["slots"]
            print("====> intent {0} detected with slots {1}".format(name, slots))   
Example #6
    def record():
        stream.start_stream()
        pixels.wakeup()
        print("* recording")
        frames = []
        for i in range(0, int(RESPEAKER_RATE / CHUNK * RECORD_SECONDS)):
            data = stream.read(CHUNK)
            frames.append(data)
        print("* done recording")
        stream.stop_stream()
        print("start to send to baidu")
        pixels.off()
        # audio_data should be raw_data
        text = baidu.server_api(generator_list(frames))
        if text:
            try:
                text = json.loads(text)
                for t in text['result']:
                    print(t)
                    return(t)
            except KeyError: 
                return("get nothing")
        else:
            print("get nothing")
            return("get nothing")
Example #7
def timer_end():
    pixels.off()
    time.sleep(1)
    pixels.on()
    #pixels.think()
    print("Timer end")
    global timer_alive
    timer_alive = 0
Example #8
 def _listen():
     try:
         pixels.speak()
         data = self.listen_queue.get(timeout=timeout)
         while data and not self.quit_event.is_set():
             yield data
             data = self.listen_queue.get(timeout=timeout)
     except Queue.Empty:
         pass
         pixels.off()
     self.stop()
Example #9
def playtts(msg):
    pixels.think()
    print('Sending to server ....')
    r = requests.post(url2, json=msg,  headers=headers2)
    pixels.off()
    links = json.loads(r.text)['message']
    if links:
        for link in links:
            if len(link)>0:
                link = host + link
                os.system('mplayer -volume 100 ' + link)
Example #10
    def detect(self, keywords=None):
        self.decoder.end_utt()
        self.decoder.start_utt()
        pixels.off()

        self.detect_history.clear()

        self.detect_queue.queue.clear()
        self.status |= self.detecting_mask
        self.stream.start_stream()
        result = None
        logger.info('Start detecting')
        got = 0
        while not self.quit_event.is_set():
            size = self.detect_queue.qsize()
            if size > 4:
                logger.info('Too many delays, {} in queue'.format(size))

            data = self.detect_queue.get()
            self.detect_history.append(data)
            self.decoder.process_raw(data, False, False)

            hypothesis = self.decoder.hyp()
            if hypothesis:
                logger.info('Detected {}'.format(hypothesis.hypstr))
                if collecting_audio != 'no':
                    logger.debug(collecting_audio)
                    save_as_wav(b''.join(self.detect_history),
                                hypothesis.hypstr)
                self.detect_history.clear()
                if keywords:
                    for keyword in keywords:
                        if hypothesis.hypstr.find(keyword) >= 0:
                            result = hypothesis.hypstr
                            print(result)
                            pixels.wakeup()
                            got = 1
                            break
                    if got == 1:
                        break
                    else:
                        self.decoder.end_utt()
                        self.decoder.start_utt()
                        self.detect_history.clear()
                        pixels.off()
                else:
                    result = hypothesis.hypstr
                    print(result)
                    break

        self.status &= ~self.detecting_mask
        self.stop()

        return result
Example #11
def on_message(client, userdata, msg):
    if msg.topic == "hermes/asr/startListening":
        pixels.think()

    if msg.topic == "hermes/audioServer/default/playFinished":
        pixels.off()

    if msg.topic == "hermes/asr/stopListening":
        pixels.off()

    if msg.topic == "hermes/tts/say":
        pixels.speak()
Example #12
def on_BLE():

    global timer_alive
    if (timer_alive == 1):
        return
    pixels.off()
    time.sleep(1)
    pixels.on()
    #pixels.speak()
    print("Timer start")
    timer = threading.Timer(30.0, timer_end)
    timer.start()
    timer_alive = 1
Example #13
def process_event(event, device_id):
    """Pretty prints events.

    Prints all events that occur with two spaces between each new
    conversation and a single space between turns of a conversation.

    Args:
        event(event.Event): The current event to process.
        device_id(str): The device ID of the new instance.
    """
    if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
        print()
        pixels.wakeup()

    print(event)

    if event.type == EventType.ON_END_OF_UTTERANCE:
        pixels.think()

    if event.type == EventType.ON_RESPONDING_STARTED:
        pixels.speak()

    if event.type == EventType.ON_CONVERSATION_TURN_FINISHED:
        if event.args and event.args['with_follow_on_turn']:
            pixels.listen()
        else:
            pixels.off()
            print()

    if event.type == EventType.ON_DEVICE_ACTION:
        for command, params in process_device_actions(event, device_id):
            print('Do command', command, 'with params', str(params))
            if command == "com.example.commands.Shutters":
                action = ''
                shutters_url = 'http://10.0.0.31:8181/json.htm'
                shutters_header = {'Authorization': 'Basic *******************'}
                if params['status'] == "CLOSE":
                    print('Closing shutters')
                    action = 'On'
                    shutters_params = {'type': 'command', 'param': 'switchlight', 'idx': '13', 'switchcmd': action }
                    r = requests.get(shutters_url, params=shutters_params, headers=shutters_header)
                    print(r.url)
                    print(r.status_code)
                if params['status'] == "OPEN":
                    print('Opening shutters')
                    action = 'Off'
                    shutters_params = {'type': 'command', 'param': 'switchlight', 'idx': '13', 'switchcmd': action }
                    r = requests.get(shutters_url, params=shutters_params, headers=shutters_header)
                    print(r.url)
                    print(r.status_code)
                    print(r.headers)
Example #14
def play_ack(num):
    global proc

    if proc is not None:
        proc.terminate()
    cmd = ['cvlc', '-q', '--play-and-exit', SOUND_ACK[num]]
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)

    if num == 0:
        pixels.off()
    if num == 1:
        pixels.think()
Example #15
def ActivateLeds(LedOn):

    if LedOn:
        pixels.wakeup()
        time.sleep(3)
        pixels.think()
        time.sleep(3)
        pixels.speak()
        time.sleep(6)
        pixels.off()
        time.sleep(3)

    else:
        LedOn = False
        pixels.off()
        time.sleep(1)
Example #16
def main():
    src = Source(rate=16000, frames_size=160, channels=2)
    ch0 = ChannelPicker(channels=src.channels, pick=0)
    ns = NS(rate=src.rate, channels=1)
    kws = KWS()
    doa = DOA(rate=16000, chunks=50)
    alexa = Alexa()

    alexa.state_listener.on_listening = pixels.listen
    alexa.state_listener.on_thinking = pixels.think
    alexa.state_listener.on_speaking = pixels.speak
    alexa.state_listener.on_finished = pixels.off


    # data flow between elements
    # ---------------------------
    # src -> ns -> kws -> alexa
    #    \
    #    doa
    src.pipeline(ch0, ns, kws, alexa)

    src.link(doa)

    def on_detected(keyword):
        direction = doa.get_direction()
        print('detected {} at direction {}'.format(keyword, direction))
        alexa.listen()
        pixels.wakeup(direction)

    kws.on_detected = on_detected

    is_quit = []
    def signal_handler(sig, frame):
        is_quit.append(True)
        print('quit')
    signal.signal(signal.SIGINT, signal_handler)

    src.pipeline_start()
    while not is_quit:
        time.sleep(1)

    src.pipeline_stop()
    pixels.off()

    # wait a second to allow other threads to exit
    time.sleep(1)
Example #17
def action_wrapper(hermes, intentMessage, conf):
    """ Write the body of the function that will be executed once the intent is recognized. 
    In your scope, you have the following objects : 
    - intentMessage : an object that represents the recognized intent
    - hermes : an object with methods to communicate with the MQTT bus following the hermes protocol. 
    - conf : a dictionary that holds the skills parameters you defined. 
      To access global parameters use conf['global']['parameterName']. For end-user parameters use conf['secret']['parameterName'] 
     
    Refer to the documentation for further details. 
    """ 
    
    result_sentence = ''
    GPIO.output(12, GPIO.LOW)
    #GPIO.output(47, GPIO.LOW)
    GPIO.output(13, GPIO.HIGH)
    
    pixels.speak()
    time.sleep(3)
    pixels.off()
    
    hermes.publish_end_session(intentMessage.session_id, result_sentence)
Example #18
def task(quit_event):
    mic = Microphone(quit_event=quit_event)
    while not quit_event.is_set():
        pixels.off()
        print("Waiting for wakeup word!")
        if mic.wakeup(['sen ơi','senoi','maioi','mai ơi']):                        
            print('waked up')
            f_wav=random.randint(1,8)
            mic.stop()
            os.system('aplay /dev/shm/waves/' + str(f_wav)+'.wav')           
            print("Speaking something ...")
            data=mic.listen(duration=6, timeout=1.5)
            mic.stop()
            out=json.loads(send_raw(data))
            trans=out['hypotheses'][0]['utterance'].encode('utf-8')
            print('Recognized output: ' + trans)
            if len(trans)>0:
                try:
                    jmsg=json.loads(msg)
                    jmsg['texts']=trans
                    playtts(jmsg)
                except:
                    print("TTS has some problem")
Example #19
def main():
    recognizer = aiy.cloudspeech.get_recognizer()
    recognizer.expect_phrase('turn off the light')
    recognizer.expect_phrase('turn on the light')
    recognizer.expect_phrase('blink')
    recognizer.expect_phrase('repeat after me')

    button = aiy.voicehat.get_button()
    led = aiy.voicehat.get_led()
    aiy.audio.get_recorder().start()

    aiy.i18n.set_language_code(speech_lang)

    for i in range(3):
        pixels.wakeup()
        time.sleep(1)
        pixels.off()

    while True:
        print('Press the button and speak')
        pixels.wakeup()
        button.wait_for_press()

        while True:
            print('Listening...')

            bye_words = ['goodbye', 'good bye', 'see you', 'bye bye']
            pixels.think()
            text = recognizer.recognize()
            if not text:
                print('Sorry but please say again in ' + speech_lang)
            else:
                pixels.listen()
                print('Speech: ' + text)
                trans_text = translate_text(text, trans_lang)
                trans_text = trans_text.replace("'", "")
                print('Trans: ' + trans_text)
                pixels.off()
                pixels.listen()
                if trans_lang in aiy_lang:
                    aiy.audio.say(trans_text, trans_lang)
                elif trans_lang == "ja-JP":
                    os.system(
                        '~/AIY-projects-python/src/aquestalkpi/AquesTalkPi -g {} {} | aplay -D plughw:{},{}'
                        .format(VOLUME, trans_text, CARD, DEVICE))
                else:
                    print("No lang to say")

                if 'turn on the light' in text:
                    led.set_state(aiy.voicehat.LED.ON)
                elif 'turn off the light' in text:
                    led.set_state(aiy.voicehat.LED.OFF)
                elif 'blink' in text:
                    led.set_state(aiy.voicehat.LED.BLINK)
                elif 'repeat after me' in text:
                    to_repeat = text.replace('repeat after me', '', 1)
                    aiy.audio.say(to_repeat)

                keyw = ""
                for b in bye_words:
                    if text.find(b) > -1:
                        keyw = "bye"
                        break
                if keyw == "bye":
                    pixels.off()
                    break
                time.sleep(0.2)
                pixels.off()
Example #20
def process_event(event, doa):
    """Pretty prints events.
    Prints all events that occur with two spaces between each new
    conversation and a single space between turns of a conversation.
    Args:
        event(event.Event): The current event to process.
    """
    global cam_flag

    set_angle(90)

    if event.type == EventType.ON_NO_RESPONSE:
        cam_flag = True

    if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
        print()
        GPIO.output(led_gnd_pin, True)
        if doa is not None:
            direction = doa.get_direction()
            print('detected voice at direction {}'.format(direction))
            pixels.wakeup(direction)
            if direction > 270 or direction < 90:
                set_angle(20)
            else:
                set_angle(175)

        if cam_flag:
            tstamp = str(int(time.time()))
            imgfile = "one-shot-" + tstamp + ".jpg"
            detfile = "det-shot-" + tstamp + ".jpg"
            camera.capture(imgfile)

            img = cv2.imread(imgfile)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = faceCascade.detectMultiScale(gray,
                                                 scaleFactor=1.2,
                                                 minNeighbors=5,
                                                 minSize=(20, 20))

            print('detected ' + str(len(faces)) + ' faces')

            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = img[y:y + h, x:x + w]

                eyes = eyeCascade.detectMultiScale(
                    roi_gray,
                    scaleFactor=1.5,
                    minNeighbors=5,
                    minSize=(5, 5),
                )

                for (ex, ey, ew, eh) in eyes:
                    cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh),
                                  (0, 255, 0), 2)

                smile = smileCascade.detectMultiScale(
                    roi_gray,
                    scaleFactor=1.5,
                    minNeighbors=15,
                    minSize=(25, 25),
                )

                for (xx, yy, ww, hh) in smile:
                    cv2.rectangle(roi_color, (xx, yy), (xx + ww, yy + hh),
                                  (0, 255, 0), 2)

            cv2.imwrite(detfile, img)


#            networking.upload_images("192.168.1.69", 8888, ["one-shot.jpg"])
#            cam_flag = False

    print(event)

    if ((event.type == EventType.ON_CONVERSATION_TURN_FINISHED and event.args
         and not event.args['with_follow_on_turn'])
            or (event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT)
            or (event.type == EventType.ON_NO_RESPONSE)):
        print()
        GPIO.output(led_gnd_pin, False)
        pixels.off()
Example #21
def handler_stop_signals(_signo, _stack_frame):
    # Raises SystemExit(0):
    pixels.off()
    time.sleep(3)
    sys.exit(0)
Example #22
def pixels_off(client, userdata, msg):
    pixels.off()
    time.sleep(3)
Example #23
def onMessage(client, userData, message):
    global lang

    intent = message.topic
    payload = json.loads(message.payload)

    if intent == HERMES_ON_HOTWORD:
        last_hotword = utils.read_file("hotword.txt")
        current_hotword = payload['modelId'].encode('utf-8')
        if last_hotword != current_hotword:
            utils.write_to_file("hotword.txt", current_hotword)

        if settings.USE_LEDS:
            pixels.wakeup()
        return

    elif intent == HERMES_SAY:
        if settings.USE_LEDS:
            pixels.speak()
        return

    elif intent == HERMES_CAPTURED:
        if settings.USE_LEDS:
            pixels.think()
        return

    elif intent == HERMES_START_LISTENING:
        if settings.USE_LEDS:
            pixels.listen()
        return

    elif intent == HERMES_HOTWORD_TOGGLE_ON:
        if settings.USE_LEDS:
            pixels.off()
        return

    global recipe, currentStep, timers, confirm, sessionId, product, tipIndex, fromIntent

    sessionId = payload['sessionId']

    ##### TODO: stabilize before re-enabling

    if intent == OPEN_RECIPE:
        print("INTENT : OPEN_RECIPE")
        if 'slots' not in payload:
            error(sessionId)
            return

        slotRecipeName = payload['slots'][0]['value']['value'].encode('utf-8')

        if recipe is not None and currentStep > 0:
            if confirm <= 0:
                confirm = 1
                endTalk(sessionId, text=lang['warningRecipeAlreadyOpen'])
                return
            else:
                for timer in timers:
                    timer.cancel()

                timers = {}
                confirm = 0
                currentStep = 0

        if any(product.lower() in ingredients
               for ingredients in tips_list_from_paprika):
            recipe_nb = len(tips_list_from_paprika[product.lower()])
            if recipe_nb == 1:
                for recipe in tips_list_from_paprika[product.lower()]:
                    continueSession(sessionId,
                                    "j'ai trouvé une astuce: " + recipe +
                                    ". Tu veux faire ça ?",
                                    intents=['Pierrot-app:validateQuestion'])
            elif recipe_nb == 2:
                askForTwoTips(getTipList)
        else:
            endTalk(sessionId, text=lang['noTipsForProduct'])
        fromIntent = "OPEN_RECIPE"

    elif intent == NEXT_STEP:
        print("INTENT : NEXT_STEP")
        if recipe is None:
            endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        else:
            if str(currentStep + 1) not in recipe['steps']:
                endTalk(sessionId, text=lang['recipeEnd'])
            else:
                currentStep += 1
                step = recipe['steps'][str(currentStep)]

                ask = False
                if type(step) is dict and currentStep not in timers:
                    ask = True
                    step = step['text']

                endTalk(sessionId, text=lang['nextStep'].format(step))
                if ask:
                    say(text=lang['timerAsk'])
        fromIntent = "NEXT_STEP"

    elif intent == INGREDIENTS:
        print("INTENT : INGREDIENTS")
        if recipe is None:
            endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        else:
            ingredients = ''
            for ingredient in recipe['ingredients']:
                ingredients += u"{}. ".format(ingredient)

            endTalk(sessionId,
                    text=lang['neededIngredients'].format(ingredients))
        fromIntent = "INGREDIENTS"

    elif intent == PREVIOUS_STEP:
        print("INTENT : PREVIOUS_STEP")
        if recipe is None:
            endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        else:
            if currentStep <= 1:
                endTalk(sessionId, text=lang['noPreviousStep'])
            else:
                currentStep -= 1
                step = recipe['steps'][str(currentStep)]

                ask = False
                timer = 0
                if type(step) is dict and currentStep not in timers:
                    ask = True
                    timer = step['timer']
                    step = step['text']

                endTalk(sessionId, text=lang['previousStepWas'].format(step))
                if ask:
                    say(text=lang['hadTimerAsk'].format(timer))
        fromIntent = "PREVIOUS_STEP"

    elif intent == REPEAT_STEP:
        print("INTENT : REPEAT_STEP")
        if recipe is None:
            endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        else:
            if currentStep < 1:
                ingredients = ''
                for ingredient in recipe['ingredients']:
                    ingredients += u"{}. ".format(ingredient)

                endTalk(sessionId,
                        text=lang['neededIngredients'].format(ingredients))
            else:
                step = recipe['steps'][str(currentStep)]
                endTalk(sessionId, text=lang['repeatStep'].format(step))
        fromIntent = "REPEAT_STEP"

    elif intent == ACTIVATE_TIMER:
        print("INTENT : ACTIVATE_TIMER")
        if recipe is None:
            endTalk(sessionId, text=lang['noTimerNotStarted'])
        else:
            step = recipe['steps'][str(currentStep)]

            if type(step) is not dict:
                endTalk(sessionId, text=lang['notTimerForThisStep'])
            elif currentStep in timers:
                endTalk(sessionId, text=lang['timerAlreadyRunning'])
            else:
                timer = Timer(int(step['timer']),
                              onTimeUp,
                              args=[currentStep, step])
                timer.start()
                timers[currentStep] = timer
                endTalk(sessionId, text=lang['timerConfirm'])
        fromIntent = "ACTIVATE_TIMER"

    elif intent == GET_FOOD:
        print("INTENT : GET_FOOD")
        sayNoSession(lang['searching'])
        asTalk = False
        tipIndex = 1
        product = payload["slots"][0]["rawValue"]
        if lastIntent == "ASK_FOR_TIP" or getAssistant() == "marin":
            currentStep = 0
            readTipsProposition()
        else:
            continueSession(sessionId=sessionId,
                            text=lang['cookNowOrKeep'].format(product),
                            intents=['Pierrot-app:nowOrLater'])
        fromIntent = "GET_FOOD"

    elif intent == ASK_FOR_TIP:
        print("INTENT : ASK_FOR_TIP")
        if product in getTipList():
            currentStep = 0
            tipIndex = 1
            continueSession(sessionId=sessionId,
                            text=lang['tipFor'].format(product),
                            intents=[
                                'Pierrot-app:validateQuestion',
                                'Pierrot-app:invalidateQuestion'
                            ])
        else:
            continueSession(sessionId=sessionId,
                            text=lang['tipForWhat'],
                            intents=['Pierrot-app:getFoodRequest'])
        fromIntent = "ASK_FOR_TIP"

    # elif intent == GET_FOOD_COOK_NOW:
    # 	product = payload['slots'][0]['value']['value'].encode('utf-8')
    # 	if any(product.lower() in ingredients for ingredients in recipe_ingredients):
    # 		# endTalk(sessionId=sessionId, text=lang['startRecipe'].format(food), intents=['openRecipe'])
    # 		readRecipe(sessionId, product, payload)
    # 	else:
    # 		endTalk(sessionId, text=lang['recipeNotFound'])

    elif intent == COOK_NOW_OR_KEEP:
        print("INTENT : COOK_NOW_OR_KEEP")
        # if recipe is None:
        # 	endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        # else:
        readTipsProposition()
        fromIntent = "COOK_NOW_OR_KEEP"

    elif intent == VALIDATE_QUESTION:
        print("INTENT : VALIDATE_QUESTION")
        if recipe is None:
            endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        elif fromIntent == "ASK_FOR_TIP":
            readTipsProposition()
        else:
            if currentStep != 0:
                currentStep += 1
                step = recipe['steps'][str(currentStep)]

                ask = False
                if type(step) is dict and currentStep not in timers:
                    ask = True
                    step = step['text']

                endTalk(sessionId, text=lang['nextStep'].format(step))
            else:
                ingredients = ''
                for ingredient in recipe['ingredients']:
                    ingredients += u"{}, ".format(ingredient)

                endTalk(sessionId,
                        text=lang['neededIngredients'].format(ingredients))
        fromIntent = "VALIDATE_QUESTION"

    elif intent == INVALIDATE_QUESTION:
        print("INTENT : INVALIDATE_QUESTION")
        if recipe is None:
            endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        elif fromIntent == "GET_FOOD" or fromIntent == "INVALIDATE_QUESTION" or fromIntent == "COOK_NOW_OR_KEEP" or fromIntent == "VALIDATE_QUESTION":
            readTipsProposition()
        elif lastIntent == "ASK_FOR_TIP":
            continueSession(sessionId=sessionId,
                            text=lang['tipForWhat'],
                            intents=['Pierrot-app:getFoodRequest'])
        fromIntent = "INVALIDATE_QUESTION"

    elif intent == START_RECIPE:
        print("INTENT : START_RECIPE")
        if recipe is None:
            endTalk(sessionId, text=lang['sorryNoRecipeOpen'])
        else:
            currentStep += 1
            step = recipe['steps'][str(currentStep)]

            ask = False
            if type(step) is dict and currentStep not in timers:
                ask = True
                step = step['text']

            endTalk(sessionId, text=lang['firstStep'].format(step))
            if ask:
                say(text=lang['timerAsk'])
        fromIntent = "START_RECIPE"

    elif intent == CANCEL:
        if settings.USE_LEDS:
            pixels.off()
        error(sessionId)
        mqttClient.loop_stop()
        mqttClient.disconnect()
        running = False

    elif intent == JOKE:
        sayNoSession(
            "Je ne crois pas qu'il y ai de bons ou de mauvais assistant. ")
Example #24
import time
from pixels import Pixels, pixels
from alexa_led_pattern import AlexaLedPattern
from google_home_led_pattern import GoogleHomeLedPattern

if __name__ == '__main__':

    pixels.pattern = GoogleHomeLedPattern(show=pixels.show)

    while True:

        try:
            pixels.wakeup()
            time.sleep(3)
            pixels.think()
            time.sleep(3)
            pixels.speak()
            time.sleep(6)
            pixels.off()
            time.sleep(3)
        except KeyboardInterrupt:
            break


    pixels.off()
    time.sleep(1)
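
AlexaLedPattern is imported above but never used; assuming it takes the same constructor argument as GoogleHomeLedPattern, the demo can presumably be switched to the Alexa-style animation with a single line:

    pixels.pattern = AlexaLedPattern(show=pixels.show)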
Example #25
    pixels.pattern = GoogleHomeLedPattern(show=pixels.show)

    while True:

        try:
            data =[255,0,0] * 3
            pixels.show(data)
            time.sleep(2)
            data =[0,255,0] * 3
            pixels.show(data)
            time.sleep(2)
            data =[0,0,255] * 3
            pixels.show(data)
            time.sleep(2)
            data =[255,0,255] * 3
            pixels.show(data)
            time.sleep(2)
            
            data =[0,0,0] * 3
            pixels.show(data)
            time.sleep(2)
            pixels.show(data)
            
            
        except KeyboardInterrupt:
            break


    pixels.off()
    time.sleep(1)
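
In this demo pixels.show() is given a flat [R, G, B, R, G, B, ...] list, one triple per LED (three LEDs here). A small hypothetical helper makes that layout explicit:

def fill(r, g, b, num_leds=3):
    # One [R, G, B] triple per LED, flattened into a single list.
    pixels.show([r, g, b] * num_leds)

fill(255, 128, 0)  # all three LEDs orange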
Example #26
def hamthucthi(row0_in_db, data, friendly_name_hass, sta):

    global player
    print('[MAIN] - THỰC THI TÁC VỤ')
    print('---------')
    processss.timlenhtrongdata(data)

    if row0_in_db == "KHỎE KHÔNG":
        if seed == 1:
            pixels.speak()
        answer(
            "em khỏe. ",
            "khỏe lắm anh ",
            "khỏe chứ anh. ",
        )
        if seed == 1:
            pixels.off()

# camera - rotation control
    elif row0_in_db == "CAMERA":
        print('vao camera')
        a = ptz.control('http://192.168.9.121', 'admin', 'Emilybro2013')
        datapreset = data
        datapreset = datapreset.split()
        iiii = 0
        while iiii < len(datapreset):
            if datapreset[iiii].isnumeric():
                preset_number = datapreset[iiii]
                break
            else:
                iiii += 1
        try:
            a.set_preset(preset_number)
        except:
            pass
        if 'LÊN' in data.upper():
            print('vao len')
            a.len()
            time.sleep(2)
            return
        if 'XUỐNG' in data.upper():
            a.xuong()
            time.sleep(2)
            return
        if 'TRÁI' in data.upper():
            a.trai()
            time.sleep(2)
            return
        if 'PHẢI' in data.upper():
            a.phai()
            time.sleep(2)
            return
# Lunar calendar
    elif row0_in_db == "ÂM LỊCH":
        check_day = []
        if seed == 1:
            pixels.speak()
        if 'MAI' in data:
            check_day = amlich.ngaymai()
        elif 'MỐT' in data:
            check_day = amlich.ngaymot()
        elif 'QUA' in data:
            check_day = amlich.homqua()
        elif 'NAY' in data:
            check_day = amlich.homnay()
        else:
            check_day = amlich.ngaykhac(data)
        amlich.kiemtra_amlich(check_day[0], check_day[1], check_day[2],
                              check_day[3], check_day[4])
        if seed == 1:
            pixels.off()

# Ask the day of the week
    elif row0_in_db == "THỨ MẤY":
        check_thu = []
        if seed == 1:
            pixels.speak()
        if 'MAI' in data:
            check_thu = thu.ngaymai()
        elif 'MỐT' in data:
            check_thu = thu.ngaymot()
        elif 'QUA' in data:
            check_thu = thu.homqua()
        elif 'NAY' in data:
            check_thu = thu.homnay()
        else:
            check_thu = thu.ngaykhac(data)

        result_thu = thu.kiemtra_thu(check_thu[0], check_thu[1], check_thu[2],
                                     check_thu[3], check_thu[4])
        speaking.speak(result_thu[0] + " là " + result_thu[1] + ' ' +
                       result_thu[2] + ' tháng ' + str(result_thu[3]))
        if seed == 1:
            pixels.off()
# Holidays
    elif row0_in_db == "NGÀY LỄ":
        if seed == 1:
            pixels.speak()
#		answer('không có chi. ','rất vui vì giúp được anh ',' đừng bận tâm ')
        ngayle_res = []
        ngayle_res = ngayle.ngayle_check(data)
        speaking.speak(ngayle_res[0] + ' Còn ' + str(ngayle_res[1]) +
                       ' ngày nữa là đến ' + ngayle_res[2] + '. Đó là ngày ' +
                       ngayle_res[3] + ' tháng ' + ngayle_res[4] + ' năm ' +
                       ngayle_res[5])
        if seed == 1:
            pixels.off()
# Thank you
    elif row0_in_db == "CẢM ƠN":
        if seed == 1:
            pixels.speak()
        answer('không có chi. ', 'rất vui vì giúp được anh ', ' đừng bận tâm ')
        if seed == 1:
            pixels.off()
#Gass
    elif row0_in_db == "BAO NHIÊU TUỔI":
        import textinput
        textinput.main()
#HELP
    elif row0_in_db == "TRỢ GIÚP":
        if seed == 1:
            pixels.speak()
        if 'THỜI TIẾT' in data:
            speaking.speak(
                'Có thể hỏi các câu hỏi bao gồm các từ như, Thời tiết hôm nay, ngày mai'
            )
        elif 'GIỜ' in data:
            speaking.speak('Đặt câu hỏi mấy giờ rồi')
        elif 'NGÀY' in data:
            speaking.speak(
                'Hỏi thứ mấy, ngày nào. Có thể hỏi ngày mai là thứ mấy, ngày 30 tháng 4 là thứ mấy, vv.'
            )
        elif 'ÂM LỊCH' in data:
            speaking.speak('Hỏi âm lịch hôm nay, ngày mai, ngày bất kì')
        elif 'DỊCH' in data:
            speaking.speak(
                'Dịch từ tHoặc dịch cả câu với cấu trức: Dịch câu sau sang tiếng nào đó. Sau đó chờ âm báo rồi đọc câu. Ngôn ngữ hỗ trợ: Việt, Anh, Trung, Nhật, Hàn.'
            )
        elif 'THÔNG TIN' in data:
            speaking.speak(
                'Có thể hỏi thông tin bằng câu với kết thúc là gì hoặc là ai. Ví dụ, Hồ Chí Minh là ai.'
            )
        elif 'LỆNH' in data:
            speaking.speak(
                'Có thể bật, tắt, điều chỉnh nhiệt độ máy lạnh .... bằng các câu lệnh đơn giản như: bật đèn, tắt đèn, ....'
            )
        elif 'NHẠC' in data:
            speaking.speak(
                'Dùng lệnh Phát rồi gọi tên bài hát, playlist muốn nghe. Muốn qua bài thì dùng lệnh Tiếp theo. Dừng với lệnh Dừng nhạc'
            )
        elif 'HẸN GIỜ' in data:
            speaking.speak(
                'Dùng lệnh hẹn giờ cộng thời gian, hủy với lệnh hủy hẹn giờ')
        elif 'NGÀY LỄ' in data:
            speaking.speak(
                'Hỏi còn bao nhiêu ngày nữa là đến ngày lễ. Các ngày lễ có sẵn bao gồm, Tết Tây, Tết ta, 30 tháng 4, trung thu, giỗ tổ, quốc khánh. '
            )
        else:
            speaking.speak(
                'Các lệnh thường dùng, Hỏi giờ, thời tiết, thứ ngày tháng, thông tin, lệnh, phát nhạc, hẹn giờ, dịch từ, dịch câu, âm lịch, ngày lễ. Dùng lệnh trợ giúp kèm theo các lệnh muốn tra cứu để được hướng dẫn chi tiết hơn.'
            )
        if seed == 1:
            pixels.off()
# Ask the time
    elif row0_in_db == "MẤY GIỜ":
        if seed == 1:
            pixels.speak()
        from time import ctime, strftime
        gio = strftime("%H")
        gio = list(gio)
        phut = strftime("%M")
        phut = list(phut)
        if gio[0] == '1':
            if gio[1] == '0':
                docgio = 'mười giờ '
            else:
                docgio = 'mười ' + gio[1] + ' giờ '
        elif gio[0] == '0':
            docgio = gio[1] + ' giờ '
        elif gio[0] == '2':
            if gio[1] == '0':
                docgio = 'hai mươi giờ '
            elif gio[1] == '1':
                docgio = 'hai mươi mốt giờ '
            else:
                docgio = 'hai mươi ' + gio[1] + ' giờ '

        if phut[0] == '0':
            docphut = phut[1] + ' phút '
        elif phut[0] == '1':
            if phut[1] == '0':
                docphut = ' mười phút '
            else:
                docphut = ' mười ' + phut[1] + ' phút '
        else:
            if phut[1] == '0':
                docphut = phut[0] + ' mươi phút '
            elif phut[1] == '1':
                docphut = phut[0] + ' mươi mốt phút '
            else:
                docphut = phut[0] + ' mươi ' + phut[1] + ' phút '

        speaking.speak("BÂY GIỜ LÀ " + docgio + docphut)
        if seed == 1:
            pixels.off()
# News (TTS)
    elif row0_in_db == "TIN TỨC":
        pixels.speak()
        tintuc.tintucmoi()

    elif row0_in_db == "XỔ SỐ":
        pixels.speak()
        print('Kết quả xổ số')
        loto.check(data)
# Jokes
    elif row0_in_db == "CƯỜI":
        pixels.speak()
        truyen = fun.truyen()
        speaking.speak(truyen)

# Ask the date
# if row0_in_db=="VỊ TRÍ":
#	 if "CỦA" in data:
#		 locationcua = data.find('CỦA')
#		 data = data[locationcua+4:len(data)]
#		 location = data.strip(" ")
#		 speaking.speak("đây là vị trí của  " + location )
#		 webbrowser.open("https://www.google.nl/maps/place/" + location + "/&amp;")
# elif row0_in_db == 'NHẮC':

    elif row0_in_db == "RADIO":
        pixels.speak()
        try:
            player.stop()
        except:
            pass
        player = radio.phat_radio(data)

    elif row0_in_db == "ĐI NGỦ":
        pass
    elif row0_in_db == "LÀ GÌ":
        if seed == 1:
            pixels.speak()

        def wifi(data):
            data = data[0:len(data) - 6]
            rep = wk.find_info(data)
            rep = rep.find_wiki()
            print(rep)
            rep = '. '.join(rep)
            speaking.speak('Theo wikipedia: ' + rep)

        speaking.speak('để em tìm xem nào')
        execute.run_thread(wifi, data)
        if seed == 1:
            pixels.off()

# Play video
    elif row0_in_db == "PHÁT":
        if seed == 1:
            pixels.speak()
        global spotipy
        try:
            player.stop()
        except:
            pass
        if sp_act == 1:
            spotipy = spot.play_current_playlist(s_user, s_id, s_secret,
                                                 'http://localhost:9999/',
                                                 data)
        else:
            player = radio.play_nhac(data, friendly_name_hass)
        if seed == 1:
            pixels.off()
    elif row0_in_db == "TIẾP THEO":
        try:
            player.stop()
        except:
            pass
        player = radio.phat_tiep_theo()
        if seed == 1:
            pixels.off()
# Google word translate
    elif row0_in_db == "CÓ NGHĨA":
        if seed == 1:
            pixels.speak()
        from googletrans import Translator
        translator = Translator()
        print(data)
        data = data.replace('TỪ ', '')
        data = data.replace('TRONG ', '')
        #		print ('Edit ' + data)
        # To Vietnamese
        if 'VIỆT' in data:
            m = re.search('(.+?) TIẾNG VIỆT', data)
            dataen = m.group(1)
            print(dataen)
            translations = translator.translate(dataen, dest='vi')
            print(translations.text)
            speaking.speak('Từ ')
            speaking.speaken(dataen)
            speaking.speak('trong tiếng việt nghĩa là: ' + translations.text)
#To English
        elif 'TIẾNG ANH' in data:
            m = re.search('(.+?) TIẾNG ANH', data)
            dataen = m.group(1)
            print(dataen)
            translations = translator.translate(dataen, dest='en')
            print(translations.text)
            speaking.speak('Từ ' + dataen + ' trong tiếng anh nghĩa là: ')
            speaking.speaken(translations.text)
# To Korean
        elif 'TIẾNG HÀN' in data:
            m = re.search('(.+?) TIẾNG HÀN', data)
            dataen = m.group(1)
            print(dataen)
            translations = translator.translate(dataen, dest='ko')
            print(translations.text)
            speaking.speak('Từ ' + dataen + ' trong tiếng Hàn nghĩa là: ')
            speaking.speakko(translations.text)
# To Japanese
        elif 'TIẾNG NHẬT' in data:
            m = re.search('(.+?) TIẾNG NHẬT', data)
            dataen = m.group(1)
            print(dataen)
            translations = translator.translate(dataen, dest='ja')
            print(translations.text)
            speaking.speak('Từ ' + dataen + ' trong tiếng Nhật nghĩa là: ')
            speaking.speakja(translations.text)
# To Chinese
        elif 'TIẾNG TRUNG' in data:
            m = re.search('(.+?) TIẾNG TRUNG', data)
            dataen = m.group(1)
            print(dataen)
            translations = translator.translate(dataen, dest='zh-cn')
            print(translations.text)
            speaking.speak('Từ ' + dataen + ' trong tiếng TRUNG nghĩa là: ')
            speaking.speakzh(translations.text)
# Google sentence translate
    elif row0_in_db == "DỊCH CÂU":
        from googletrans import Translator
        translator = Translator()
        continue_go = 1
        speaking.speak('OK, đọc câu cần dịch đi anh')
        more_data = processss.re_ask()
        print(more_data)
        #		def gconv (data,more_data):
        #			continue_go = 1
        #			empty = []
        if len(more_data) > 0:
            while True:
                print('Google translate: ' + data)
                print('Data translate: ' + more_data)
                #			   speaking.speak('')
                processss.mixer.music.load('resources/ding.wav')
                processss.mixer.music.play()
                if 'TIẾNG ANH' in data:
                    translations = translator.translate(more_data, dest='en')
                    print(translations.text)
                    speaking.speaken(translations.text)
                    continue_go = 1
                if 'TIẾNG VIỆT' in data:
                    translations = translator.translate(more_data, dest='vi')
                    print(translations.text)
                    speaking.speak(translations.text)
                    continue_go = 1
                if 'TIẾNG HÀN' in data:
                    translations = translator.translate(more_data, dest='ko')
                    print(translations.text)
                    speaking.speakko(translations.text)
                    continue_go = 1
                if 'TIẾNG TRUNG' in data:
                    translations = translator.translate(more_data,
                                                        dest='zh-cn')
                    print(translations.text)
                    speaking.speakzh(translations.text)
                    continue_go = 1
                if 'TIẾNG NHẬT' in data:
                    translations = translator.translate(more_data, dest='ja')
                    print(translations.text)
                    speaking.speakja(translations.text)
                    continue_go = 1
                if 'HỦY' in more_data:
                    speaking.speak('thoát khỏi chế độ dịch')
                    continue_go = 0
                    break

                else:
                    break

#		return more_data, continue_go
# Timer
    elif row0_in_db == "HẸN GIỜ":
        onoff.hen_gio(data)
    elif row0_in_db == "HỦY HẸN GIỜ":
        onoff.t1.cancel()
        speaking.speak("đã hủy hẹn giờ ")
    elif row0_in_db == "DỪNG":
        try:
            player.stop()
        except Exception as e:
            print(e)
            pass
        at = spot.spo(s_user, s_id, s_secret, 'http://localhost:9999/')
        sp = at.assign()
        de = sp.devices()
        de = de['devices']

        for des in de:
            print(des['id'])
            try:
                at.pause(sp, des['id'])
            except Exception as t:
                print(t)
                pass
    elif row0_in_db == "TO LÊN":
        pixels.speak()
        radio.to_len()
    elif row0_in_db == "NHỎ XUỐNG":
        pixels.speak()
        radio.nho_xuong()
    elif row0_in_db == "ÂM LƯỢNG":
        pixels.speak()
        vol_extract = radio.amluong(data)
        speaking.speak("thiết lập âm lượng mức " + str(vol_extract))
    elif row0_in_db == "THIẾT LẬP":
        pixels.speak()
        onoff.thietlap(friendly_name_hass, sta, data)
    elif row0_in_db == "MỞ":
        pixels.speak()
        onoff.on_mo(friendly_name_hass, data)
    elif row0_in_db == "TẮT":
        onoff.off_tat(friendly_name_hass, data)
    elif row0_in_db == "TÊN":
        if "EM" in data:
            answer(
                'em là BOT LB',
                'em là LB ',
                'em tên LB',
            )

    elif row0_in_db == "":
        speaking.speak('em không hiểu rồi đại ca ơi')
# Status
    elif row0_in_db == "TRẠNG THÁI":
        if seed == 1:
            pixels.speak()
        onoff.trangthai(sta)
        if seed == 1:
            pixels.off()
# Weather
    elif row0_in_db == "THỜI TIẾT":
        if seed == 1:
            pixels.speak()

        def wt(data):
            fio = weth.darksky_weather()
            if "HIỆN TẠI" in data or "HÔM NAY" in data:
                icon = ''
                current_weather = weth.darksky_currently(fio)
                if current_weather[2].upper() == 'RAIN':
                    icon = 'mưa'
                elif current_weather[2].upper() == 'PARTLY-CLOUDY-NIGHT':
                    icon = 'buổi tối trời nhiều mây'
                elif current_weather[2].upper() == 'PARTLY-CLOUDY-DAY':
                    icon = 'trời nhiều mây'
                elif current_weather[2].upper() == 'CLEAR-DAY':
                    icon = 'trời trong xanh'
                elif current_weather[2].upper() == 'CLEAR-NIGHT':
                    icon = 'đêm trời đẹp'
                elif current_weather[2].upper() == 'WIND':
                    icon = 'có gió lớn'
                elif current_weather[2].upper() == 'CLOUDY':
                    icon = 'trời nhiều mây'
                elif current_weather[2].upper() == 'FOG':
                    icon = 'trời nhiều sương mù'

                speaking.speak(current_weather[0] + current_weather[1] + icon)
            elif "NGÀY MAI" in data:
                hourly_weather = weth.darksky_hourly(fio)
                speaking.speak(' nhìn chung ' + hourly_weather)
            else:
                speaking.speak('em không hiểu')

        answer('đang kiểm tra thông tin thời tiết', 'để em kiểm tra',
               'em kiểm tra ngay')
        execute.run_thread(wt, data)
# RADIO NEWS
    elif row0_in_db == "TIN VẮN":
        import zing
        if seed == 1:
            pixels.speak()
        playlist = news.getlink(data)
        player = zing.phat_zing(playlist)
        speaking.speak('Đang chuẩn bị phát tin vắn radio')
        player.play()
        if 'TIẾP' in data:
            print('Next')
            player.next()
        if 'TRƯỚC' in data:
            print('Prev')
            player.previous()
        if seed == 1:
            pixels.off()


# ZING
    elif row0_in_db == "ZING":
        if seed == 1:
            pixels.speak()
        import zing, vlc
        playlist = zing.zing_song(data)
        player = zing.phat_zing(playlist)
        speaking.speak('Đang chuẩn bị phát top 100 ca khúc trên Zing')
        player.play()
        if 'TIẾP' in data:
            print('Next')
            player.next()
        if 'TRƯỚC' in data:
            print('Prev')
            player.previous()
        if seed == 1:
            pixels.off()

    else:
        if seed == 1:
            pixels.speak()
        answer('em không hiểu', 'em nghe không rõ', ' vui lòng nói lại đi')
        if seed == 1:
            pixels.off()
Example #27
 args = parser.parse_args()
 #model_name = args.model if args.model else ''
 #fname = args.image if args.image else ''
 exec_flag = args.exec_flag if args.exec_flag else 'B'
 print exec_flag #model_name, fname
 if exec_flag == "B":
   for i in range(3):
     GPIO.output(LED, GPIO.HIGH)
     pixels.listen()
     for pulse in range(50, 200, 1):
       wiringpi.pwmWrite(SERVO, pulse)
       time.sleep(delay_period)
     #GPIO.output(LED, GPIO.HIGH)
     #time.sleep(0.5)
     GPIO.output(LED, GPIO.LOW)
     pixels.off()
     time.sleep(0.5)
     
 GPIO.add_event_detect(BUTTON,GPIO.FALLING)
 while exec_flag != "N": #True:
     pixels.listen()
     time.sleep(0.5)
     pixels.off()
     #print "Press #" + str(BUTTON) + " button!"
     if GPIO.event_detected(BUTTON) or exec_flag == "Y":
         GPIO.remove_event_detect(BUTTON)
         now = time.time()
         count = 1 if exec_flag == "Y" else 0
         GPIO.add_event_detect(BUTTON,GPIO.RISING)
         while time.time() < now + hold_time:
             if GPIO.event_detected(BUTTON):
Example #28
def detected_callback():
    print "收到了...."

    pixels.wakeup()
    time.sleep(3)
    pixels.off()
Example #29
def pixels_off(client, userdata, msg):
#    print("pixels_off")
    pixels.off()
    time.sleep(3)
Example #30
def main(detect="", photo_file="", trans_lang=""):
    pixels.wakeup()
    if photo_file == "":
        photo_file = camera()
    pixels.off()

    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('vision',
                              'v1',
                              credentials=credentials,
                              discoveryServiceUrl=DISCOVERY_URL)

    with open(photo_file, 'rb') as image:
        image_content = base64.b64encode(image.read())
        if detect == "":  #No parameter
            DETECT = default_detect
        else:  # Parameter specified
            DETECT = [detect.upper()]

        result = ""
        bounds = []
        tlocale = ""
        for DET in DETECT:
            pixels.listen()
            service_request = service.images().annotate(
                body={
                    'requests': [{
                        'image': {
                            'content': image_content.decode('UTF-8')
                        },
                        'features': [{
                            'type': DET + '_DETECTION',
                            'maxResults': default_max
                        }]
                    }]
                })
            response = service_request.execute()
            annotation = DET.lower() + 'Annotations'
            try:
                results = response['responses'][0][annotation]
                for res in results:
                    if DET in ["LABEL", "LOGO"]:
                        if res["score"] > 0.7:
                            result += res["description"] + ", "

                    elif DET in ["TEXT"]:
                        tlocale = res["locale"]
                        result += res["description"] + ", "
                        bounds += res["boundingPoly"]["vertices"]

                    elif DET in ["FACE"]:
                        if res["joyLikelihood"] == "VERY_LIKELY" or res[
                                "joyLikelihood"] == "LIKELY":
                            result += "Smile "
                        if res["angerLikelihood"] == "VERY_LIKELY" or res[
                                "angerLikelihood"] == "LIKELY":
                            result += "Angry "
                        if res["headwearLikelihood"] == "VERY_LIKELY" or res[
                                "headwearLikelihood"] == "LIKELY":
                            rsult += "Capped "

                    result += DET + ", "
            except:
                result += "No " + DET + ", "
            pixels.off()

        print('Result: ' + result)
        pixels.listen()
        if trans_lang:
            trans_text = translate_text(result, trans_lang)
            trans_text = trans_text.replace("&#39;", "")
            print('Trans: ' + trans_text)
            if trans_lang in aiy_lang:
                aiy.audio.say(trans_text, trans_lang)
            elif trans_lang == "ja-JP":
                os.system(aquest_dir +
                          ' -g {} {} | aplay -D plughw:{},{}'.format(
                              VOLUME, trans_text, CARD, DEVICE))
            else:
                aiy.audio.say('Nothing to trans!', 'en-US')

        else:  #trans_lang = null then default en-US
            aiy.audio.say(result, 'en-US')
        pixels.off()