Example #1
# Imports assumed by this snippet (on older AIY images the Leds class lives in
# aiy.vision.leds). GetFaceId, GetUserId and InfoVendingMachine are project
# helpers defined elsewhere.
import argparse

from picamera import PiCamera

from aiy.leds import Leds
from aiy.vision.inference import CameraInference
from aiy.vision.models import face_detection

GREEN = (0x00, 0xFF, 0x00)
WHITE = (0xFF, 0xFF, 0xFF)


def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    leds = Leds()
    leds.reset()
    leds.update(Leds.privacy_on())

    noCustomerDelay = 0


    with PiCamera(sensor_mode=4, resolution=(1640, 1232)) as camera:
    # with PiCamera(sensor_mode=4, resolution=(1640, 1232), framerate=30) as camera:
    # with PiCamera() as camera:
        camera.start_preview()

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(result)) >= 1:
                    noCustomerDelay = 0
                    leds.update(Leds.rgb_on(GREEN))
                    # stream = io.BytesIO()
                    # camera.capture(stream, format='jpeg')
                    # stream.seek(0)
                    camera.capture('faces.jpg')

                    faces = GetFaceId('faces.jpg')
                    print(faces)
                    if len(faces) > 0:
                        result = GetUserId(faces[0])
                        print(result)

                        highestScore = 0
                        userId = ""
                        for face in result:
                            for candidate in face['candidates']:
                                if highestScore < candidate['confidence']:
                                    highestScore = candidate['confidence']
                                    userId = candidate['personId']


                        InfoVendingMachine("10", userId)

                        print(userId)
                    # break
                else:
                    if noCustomerDelay >= 30:
                        leds.update(Leds.rgb_on(WHITE))
                        InfoVendingMachine("10", '')
                        noCustomerDelay = 0
                    else:
                        noCustomerDelay += 1


        camera.stop_preview()

    leds.reset()
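
GetFaceId, GetUserId and InfoVendingMachine are project-specific helpers that this snippet does not include. Below is a minimal sketch of the first two, assuming the Microsoft Face API (detect + identify) with placeholder endpoint, key and person-group values; InfoVendingMachine is omitted because its backend is not shown.

import requests

# Hypothetical placeholders -- the original project defines these elsewhere.
FACE_API_ENDPOINT = 'https://westus.api.cognitive.microsoft.com/face/v1.0'
FACE_API_KEY = '<your-face-api-subscription-key>'
PERSON_GROUP_ID = '<your-person-group-id>'


def GetFaceId(image_path):
    """Detect faces in a local image and return the list of faceIds."""
    with open(image_path, 'rb') as f:
        response = requests.post(
            FACE_API_ENDPOINT + '/detect',
            params={'returnFaceId': 'true'},
            headers={'Ocp-Apim-Subscription-Key': FACE_API_KEY,
                     'Content-Type': 'application/octet-stream'},
            data=f.read())
    response.raise_for_status()
    return [face['faceId'] for face in response.json()]


def GetUserId(face_id):
    """Identify a detected face against a trained person group.

    Returns a list of results, each carrying a 'candidates' list of
    {'personId': ..., 'confidence': ...} entries, as used above.
    """
    response = requests.post(
        FACE_API_ENDPOINT + '/identify',
        headers={'Ocp-Apim-Subscription-Key': FACE_API_KEY},
        json={'personGroupId': PERSON_GROUP_ID, 'faceIds': [face_id]})
    response.raise_for_status()
    return response.json()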
Example #2
# Imports assumed by this snippet (legacy AIY Voice Kit API; Leds lives in
# aiy.leds on current AIY images).
import aiy.assistant.grpc
import aiy.audio
import aiy.voicehat

from aiy.leds import Leds


def main():

    status_ui = aiy.voicehat.get_status_ui()
    status_ui.status('starting')
    assistant = aiy.assistant.grpc.get_assistant()
    #button = aiy.voicehat.get_button()
    leds = Leds()
    with aiy.audio.get_recorder():
        while True:
            status_ui.status('ready')
            #print('Press the button and speak')
            #button.wait_for_press()
            leds.reset()
            status_ui.status('listening')
            print('Listening...')
            text, audio = assistant.recognize()
Example #3
# Imports assumed by this snippet; generate_filename, h264_to_mp4,
# upload_video_to_slack, SLACK_TOKEN and SLACK_CHANNEL_ID are defined elsewhere.
import datetime
from time import sleep

from picamera import PiCamera

from aiy.leds import Leds, PrivacyLed
from aiy.vision.inference import CameraInference
from aiy.vision.models import face_detection


def main():
    with PiCamera(resolution=(1640, 922)) as camera:
        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(result)) >= 1:
                    print("face detected!")
                    h264_file_path = generate_filename(datetime.datetime.now())

                    leds = Leds()
                    with PrivacyLed(leds):
                        camera.start_recording(h264_file_path, format='h264')
                        sleep(5)
                        camera.stop_recording()
                    leds.reset()

                    output_file_path = h264_to_mp4(h264_file_path)

                    upload_video_to_slack(output_file_path, SLACK_TOKEN,
                                          SLACK_CHANNEL_ID)
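
The helpers generate_filename, h264_to_mp4 and upload_video_to_slack are not part of the snippet. A possible sketch, assuming ffmpeg is installed and the Slack files.upload Web API is called through requests:

import subprocess

import requests


def generate_filename(timestamp):
    # e.g. 'capture_20180101_120000.h264'
    return timestamp.strftime('capture_%Y%m%d_%H%M%S.h264')


def h264_to_mp4(h264_file_path):
    # Wrap the raw H.264 stream in an MP4 container without re-encoding.
    mp4_file_path = h264_file_path.replace('.h264', '.mp4')
    subprocess.run(
        ['ffmpeg', '-y', '-framerate', '30', '-i', h264_file_path,
         '-c', 'copy', mp4_file_path],
        check=True)
    return mp4_file_path


def upload_video_to_slack(file_path, token, channel_id):
    # Post the MP4 to a Slack channel via the files.upload endpoint.
    with open(file_path, 'rb') as f:
        response = requests.post(
            'https://slack.com/api/files.upload',
            headers={'Authorization': 'Bearer {}'.format(token)},
            data={'channels': channel_id},
            files={'file': f})
    response.raise_for_status()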
Example #4
# Fragment of an LED demo script; the opening lines (imports, the GREEN/BLUE
# color constants, leds = Leds(), and the loop this first sleep belongs to)
# are not shown.
    time.sleep(0.1)

print('RGB: Blend between GREEN and BLUE for 3.2 seconds')
for i in range(32):
    leds.update(Leds.rgb_on(blend(BLUE, GREEN, i / 32)))
    time.sleep(0.1)

print('RGB: Off for 1 second')
leds.update(Leds.rgb_off())
time.sleep(1)

print('Privacy: On for 2 seconds')
with PrivacyLed(leds):
    time.sleep(2)

print('RGB: Solid GREEN for 2 seconds')
with RgbLeds(leds, Leds.rgb_on(GREEN)):
    time.sleep(2)

print('Custom configuration for 5 seconds')
leds.update({
    1: Leds.Channel(Leds.Channel.PATTERN, 128),  # Red channel
    2: Leds.Channel(Leds.Channel.OFF, 0),        # Green channel
    3: Leds.Channel(Leds.Channel.ON, 128),       # Blue channel
    4: Leds.Channel(Leds.Channel.PATTERN, 64),   # Privacy channel
})
time.sleep(5)

print('Done')
leds.reset()
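
The blend() helper used above is not shown in this fragment. A minimal sketch of a linear interpolation between two (R, G, B) tuples, matching how it is called above:

import math


def blend(color_a, color_b, alpha):
    """Blend two RGB tuples; alpha=1.0 returns color_a, alpha=0.0 returns color_b."""
    return tuple(math.ceil(alpha * a + (1.0 - alpha) * b)
                 for a, b in zip(color_a, color_b))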
Example #5
# Imports assumed by this snippet; RECORD_DURATION_SECONDS is defined elsewhere.
import http.client
import json
import os
import tempfile
import time
import urllib.parse

import aiy.audio

from aiy.leds import Leds, Pattern


def check_mic_works():
    temp_file, temp_path = tempfile.mkstemp(suffix='.wav')
    print(temp_file, temp_path)
    os.close(temp_file)

    ah = '3fa2f6d6-d7c2-4799-b97c-8b5789f9c898'
    bh = 'df211582-f284-42ef-b676-2a0592b45783'
    ma = 'f07dce9f-0f84-43dd-85e7-592ea4a9d0e0'
    yj = '4338b960-0de8-413e-b22b-8e3630f8553f'

    headers = {
        # Request headers
        #'Content-Type': 'multipart/form-data',
        'Content-type': 'multipart/form-data',
        'Sample-Rate': '16000',
        'Ocp-Apim-Subscription-Key': 'b7a5dab14dd54627b39b00a4e0a8d051',
    }
    headers2 = {
        'Ocp-Apim-Subscription-Key': 'b7a5dab14dd54627b39b00a4e0a8d051'
    }

    params = urllib.parse.urlencode({
        # Request parameters
        'shortAudio': 'true',
    })

    #    try:
    input("When you're ready, press enter and say 'Testing, 1 2 3'...")
    print('Recording...')
    aiy.audio.record_to_wave(temp_path, RECORD_DURATION_SECONDS)

    leds = Leds()
    leds.reset()
    try:
        url = 'westus.api.cognitive.microsoft.com'
        conn = http.client.HTTPSConnection(url)
        body = open(temp_path, 'rb')
        conn.request(
            "POST",
            "/spid/v1.0/identify?identificationProfileIds=3fa2f6d6-d7c2-4799-b97c-8b5789f9c898&%s"
            % params, body, headers)
        response = conn.getresponse()
        print(response.status)
        #print(response.headers)
        #data = response.read()
        #print(data)
        print(response.headers.get("Operation-Location"))
        conn.close()
        res = 0
        print("=========================identirying...======")
        next_string = response.headers.get("Operation-Location")
        next_url = next_string[len(url) + 8:]
        while res == 0:
            time.sleep(3)
            conn = http.client.HTTPSConnection(url)
            print(next_string)

            conn.request("GET", next_url, "body", headers2)
            response = conn.getresponse()
            print(response.status)
            myres = response.read()
            my_json_res = json.loads(myres.decode())
            print(my_json_res.get("status"))
            api_status = my_json_res.get("status")
            if api_status != "running" and api_status != "notstarted":
                json_res = my_json_res.get("processingResult")
                owner = json_res.get("identifiedProfileId")
                print(owner)
                print(ah)
                if owner == ah:
                    RED = (0xff, 0x00, 0x00)
                    leds.pattern = Pattern.blink(300)
                    leds.update(Leds.rgb_pattern(RED))
                    aiy.audio.say("hello ah Hyoung")
                    time.sleep(3)
                    leds.reset()
                else:
                    aiy.audio.say("Who Are You?")
                res = 1
            conn.close()

    except Exception as e:
        # A generic Exception has no errno/strerror attributes; print the error itself.
        print("Error: {}".format(e))