Example #1
0
def main():
    """Face detection camera inference example.

    Streams face detection results from the Vision Bonnet camera.  When a
    face is present, captures a still, looks the face up through the
    GetFaceId / GetUserId helpers and pushes the best-matching user id to
    the vending machine.  After 30 consecutive face-less frames the
    vending-machine display is reset.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    leds = Leds()
    leds.reset()
    leds.update(Leds.privacy_on())

    # Number of consecutive frames with no face seen.
    no_customer_delay = 0

    with PiCamera(sensor_mode=4, resolution=(1640, 1232)) as camera:
        camera.start_preview()

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if face_detection.get_faces(result):
                    no_customer_delay = 0
                    leds.update(Leds.rgb_on(GREEN))
                    camera.capture('faces.jpg')

                    faces = GetFaceId('faces.jpg')
                    print(faces)
                    if faces:
                        result = GetUserId(faces[0])
                        print(result)

                        # Pick the candidate with the highest confidence.
                        highest_score = 0
                        user_id = ""
                        for face in result:
                            for candidate in face['candidates']:
                                if highest_score < candidate['confidence']:
                                    # BUG FIX: the original never updated the
                                    # running best score, so the last
                                    # candidate won instead of the best one.
                                    highest_score = candidate['confidence']
                                    user_id = candidate['personId']

                        InfoVendingMachine("10", user_id)
                        print(user_id)
                else:
                    # Reset the display only after ~30 empty frames so a
                    # briefly-lost face does not flicker the UI.
                    if no_customer_delay >= 30:
                        leds.update(Leds.rgb_on(WHITE))
                        InfoVendingMachine("10", '')
                        no_customer_delay = 0
                    else:
                        no_customer_delay += 1

        camera.stop_preview()

    leds.reset()
Example #2
0
class MyLed:
    """Thin wrapper around the AIY ``Leds`` driver that logs every change.

    ``led`` is an (R, G, B) tuple with one 0x00-0xFF value per channel;
    the LED is switched to that color immediately on construction.
    """

    def __init__(self, led=(0x00, 0x00, 0x00)):
        self.logger = MyLogger(level=logging.INFO, get="LED")
        self.leds = Leds()
        self.leds.update(Leds.rgb_on(led))
        self.logger.logger.debug("Init LED drivers")

    def set_color(self, led):
        """Switch the RGB LED to the given (R, G, B) color."""
        self.leds.update(Leds.rgb_on(led))
        self.logger.logger.debug("set LED colors")

    def __enter__(self):
        """Support use as a context manager; the LED is already set up."""
        return self

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        """Turn the LED off (black) on exit.

        BUG FIX: the context-manager protocol calls __exit__ with three
        exception arguments; the original zero-argument signature raised
        TypeError inside any ``with`` block.  Defaults keep existing
        direct ``obj.__exit__()`` calls working.
        """
        led = (0x00, 0x00, 0x00)
        self.leds.update(Leds.rgb_on(led))
        self.logger.logger.debug("exit LED drivers")
Example #3
0
# RGB LED colors as (R, G, B) tuples, one byte (0x00-0xFF) per channel.
GREEN = (0x00, 0xFF, 0x00)
YELLOW = (0xFF, 0xFF, 0x00)
BLUE = (0x00, 0x00, 0xFF)
PURPLE = (0xFF, 0x00, 0xFF)
CYAN = (0x00, 0xFF, 0xFF)
WHITE = (0xFF, 0xFF, 0xFF)


def blend(color_a, color_b, alpha):
    """Return the alpha-weighted mix of two RGB colors.

    Each of the three channels is ``alpha * a + (1 - alpha) * b``,
    rounded up to the next integer.
    """
    channels = []
    for i in range(3):
        mixed = alpha * color_a[i] + (1.0 - alpha) * color_b[i]
        channels.append(math.ceil(mixed))
    return tuple(channels)


# Demo: cycle the RGB LED through solid colors, one second each.
# NOTE(review): RED is used below but not defined in this chunk —
# presumably defined with the other color constants; confirm.
leds = Leds()

print('RGB: Solid RED for 1 second')
leds.update(Leds.rgb_on(RED))
time.sleep(1)

print('RGB: Solid GREEN for 1 second')
leds.update(Leds.rgb_on(GREEN))
time.sleep(1)

print('RGB: Solid YELLOW for 1 second')
leds.update(Leds.rgb_on(YELLOW))
time.sleep(1)

print('RGB: Solid BLUE for 1 second')
leds.update(Leds.rgb_on(BLUE))
time.sleep(1)

print('RGB: Solid PURPLE for 1 second')
Example #4
0
# Startup jingle: C6, G5, E5, C5 quarter notes ('q' suffix = quarter note).
ready = [
    'C6q',
    'G5q',
    'E5q',
    'C5q',
]

# Play the jingle on the buzzer driven from GPIO 22.
player = aiy.toneplayer.TonePlayer(22)
player.play(*ready)

# Initialize the button (on the top of AIY Google Vision box)
button = Button(BUTTON_GPIO_PIN)

# Initialize LED (in the button on the top of AIY Google Vision box)
leds = Leds()
leds.update(Leds.rgb_off())

# Global variables
# Camera input resolution and the square crop size fed to the model.
input_img_width = 1640
input_img_height = 1232
output_img_size = 160
faces_buffer_size = 40
hand_gesture_buffer_size = 5
# Minimum confidence for accepting a detection.
threshold = 0.6

# Length of long buffer (to make a decision to de/activate app)
# and short buffer (to declare a specific hand gesture command)
long_buffer_length = 10
short_buffer_length = 3

# Number of seconds app waits for activation before going into face detection mode
Example #5
0
# Button on GPIO 23; holding it for 2 s is treated as a stop request below.
button = Button(23, hold_time=2, hold_repeat=False)
camera = PiCamera()
leds = Leds()

print('Script starting...')

camera.resolution = (512, 512)
# Give the camera sensor time to settle before starting the preview.
sleep(2)
camera.start_preview()
while True:
    try:
        if button.is_held is True:
            raise KeyboardInterrupt
        else:
            leds.update(Leds.rgb_on(Color.GREEN))
            button.wait_for_press()
            leds.pattern = Pattern.blink(2000)
            leds.update(Leds.privacy_on(5))
            for i in camera.capture_continuous(
                    '/home/pi/images/' +
                    'tire{timestamp:%Y-%m-%d-%H-%M-%S}.jpg'):
                print('Captured %s' % i)
                if button.is_held is True:
                    leds.pattern = Pattern.blink(300)
                    leds.update(Leds.privacy_off())
                    leds.update(Leds.rgb_pattern(Color.RED))
                    sleep(2)
                    leds.update(Leds.rgb_on(Color.GREEN))
                    break
                else:
# led colors — (R, G, B) tuples, one byte per channel
RED = (0xFF, 0x00, 0x00)
GREEN = (0x00, 0xFF, 0x00)
YELLOW = (0xFF, 0xFF, 0x00)
BLUE = (0x00, 0x00, 0xFF)
PURPLE = (0xFF, 0x00, 0xFF)
CYAN = (0x00, 0xFF, 0xFF)
WHITE = (0xFF, 0xFF, 0xFF)

# setup vars: button on GPIO 23 (2 s hold), camera, and LED driver
button = Button(23, hold_time=2)
camera = PiCamera()
leds = Leds()

# turn on the privacy light
leds.update(Leds.privacy_on())


def shutdown():
    """Turn off the privacy LED, blink red three times, then power off."""

    def _blink_red_once():
        # One 0.4 s red blink: 0.2 s on, 0.2 s off.
        leds.update(Leds.rgb_on(RED))
        time.sleep(0.2)
        leds.update(Leds.rgb_off())
        time.sleep(0.2)

    leds.update(Leds.privacy_off())
    camera.close()
    for _ in range(3):
        _blink_red_once()
    check_call(['sudo', 'poweroff'])


def capture():
    time.sleep(0.1)
# led colors — (R, G, B) tuples, one byte per channel
RED = (0xFF, 0x00, 0x00)
GREEN = (0x00, 0xFF, 0x00)
YELLOW = (0xFF, 0xFF, 0x00)
BLUE = (0x00, 0x00, 0xFF)
PURPLE = (0xFF, 0x00, 0xFF)
CYAN = (0x00, 0xFF, 0xFF)
WHITE = (0xFF, 0xFF, 0xFF)

# Button on GPIO 23; 3 s hold time.
button = Button(23, hold_time=3)
camera = PiCamera()
leds = Leds()

# turn privacy light on
leds.update(Leds.privacy_on())

def capture():
    """Flash the LED green and save one 1920x1080 still, named by timestamp."""
    # Green LED signals that a capture is in progress.
    leds.update(Leds.rgb_on(GREEN))
    time.sleep(0.5)
    camera.resolution = (1920, 1080)
    stamp = datetime.now().isoformat()
    camera.capture('/home/pi/Pictures/{}.jpg'.format(stamp))
    print('captured {}.jpg'.format(stamp))
    leds.update(Leds.rgb_off())

while True:
    button.when_released= capture
    n = input('push the button to capture. press any key to exit\n')
    if n:
Example #8
0
def check_mic_works():
    """Record a short clip and identify the speaker via the MS Speaker
    Recognition API.

    Records RECORD_DURATION_SECONDS of audio into a temporary WAV file,
    POSTs it to the Speaker Recognition 'identify' endpoint, then polls
    the returned Operation-Location every 3 s until the job leaves the
    running/notstarted states.  If the identified profile is the known
    'ah' profile, the LEDs blink red and a greeting is spoken; otherwise
    the device asks "Who Are You?".
    """
    temp_file, temp_path = tempfile.mkstemp(suffix='.wav')
    print(temp_file, temp_path)
    os.close(temp_file)

    # Known speaker identification profile ids.
    ah = '3fa2f6d6-d7c2-4799-b97c-8b5789f9c898'
    bh = 'df211582-f284-42ef-b676-2a0592b45783'
    ma = 'f07dce9f-0f84-43dd-85e7-592ea4a9d0e0'
    yj = '4338b960-0de8-413e-b22b-8e3630f8553f'

    # SECURITY: the subscription key is hard-coded; it should be loaded
    # from the environment or a config file, not committed to source.
    headers = {
        'Content-type': 'multipart/form-data',
        'Sample-Rate': '16000',
        'Ocp-Apim-Subscription-Key': 'b7a5dab14dd54627b39b00a4e0a8d051',
    }
    headers2 = {
        'Ocp-Apim-Subscription-Key': 'b7a5dab14dd54627b39b00a4e0a8d051'
    }

    params = urllib.parse.urlencode({
        # 'shortAudio' lets the service accept clips under 5 seconds.
        'shortAudio': 'true',
    })

    input("When you're ready, press enter and say 'Testing, 1 2 3'...")
    print('Recording...')
    aiy.audio.record_to_wave(temp_path, RECORD_DURATION_SECONDS)

    leds = Leds()
    leds.reset()
    try:
        url = 'westus.api.cognitive.microsoft.com'
        conn = http.client.HTTPSConnection(url)
        # BUG FIX: the WAV file handle was never closed; the context
        # manager releases it as soon as the request has been sent.
        with open(temp_path, 'rb') as body:
            conn.request(
                "POST",
                "/spid/v1.0/identify?identificationProfileIds=3fa2f6d6-d7c2-4799-b97c-8b5789f9c898&%s"
                % params, body, headers)
            response = conn.getresponse()
        print(response.status)
        print(response.headers.get("Operation-Location"))
        conn.close()
        res = 0
        print("=========================identirying...======")
        next_string = response.headers.get("Operation-Location")
        # Strip the 'https://<host>' prefix (len(url) + len('https://'))
        # to get the request path for the polling GET.
        next_url = next_string[len(url) + 8:]
        while res == 0:
            time.sleep(3)
            conn = http.client.HTTPSConnection(url)
            print(next_string)

            conn.request("GET", next_url, "body", headers2)
            response = conn.getresponse()
            print(response.status)
            myres = response.read()
            my_json_res = json.loads(myres.decode())
            print(my_json_res.get("status"))
            api_status = my_json_res.get("status")
            if api_status != "running" and api_status != "notstarted":
                json_res = my_json_res.get("processingResult")
                owner = json_res.get("identifiedProfileId")
                print(owner)
                print(ah)
                if owner == ah:
                    RED = (0xff, 0x00, 0x00)
                    leds.pattern = Pattern.blink(300)
                    leds.update(Leds.rgb_pattern(RED))
                    aiy.audio.say("hello ah Hyoung")
                    time.sleep(3)
                    leds.reset()
                else:
                    aiy.audio.say("Who Are You?")
                res = 1
            conn.close()

    except Exception as e:
        # BUG FIX: the original printed e.errno / e.strerror, attributes
        # most exception types lack — the handler itself then raised
        # AttributeError and masked the real error.
        print("[{0}] {1}".format(type(e).__name__, e))
    finally:
        # Remove the temporary recording in all cases.
        os.remove(temp_path)