Example #1
import sensor, time, tf, pyb

sensor.reset()
# Pixel format assumed to be RGB565; the original snippet begins mid-setup.
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.B64X64)

# Disable automatic exposure and gain
sensor.set_auto_exposure(False)
sensor.set_auto_gain(False)
sensor.set_contrast(0)
sensor.set_brightness(0)

# Wait two seconds for the settings to take effect.
sensor.skip_frames(time=2000)

# Create a clock object to track the FPS.
clock = time.clock()

# Load the TensorFlow Lite model
model = tf.load('whattolabel_resnet9.tflite')


def setLED(color='none'):
    """Function to set LED to a color (red, green, blue, none)
    """

    red_LED = pyb.LED(1)
    green_LED = pyb.LED(2)
    blue_LED = pyb.LED(3)

    if color == 'red':
        red_LED.on()
        green_LED.off()
        blue_LED.off()
    elif color == 'green':
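        # Assumed continuation: the snippet is truncated here, so the
        # remaining branches mirror the 'red' case above.
        red_LED.off()
        green_LED.on()
        blue_LED.off()
    elif color == 'blue':
        red_LED.off()
        green_LED.off()
        blue_LED.on()
    else:  # 'none' (or anything else) turns all LEDs off.
        red_LED.off()
        green_LED.off()
        blue_LED.off()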
Example #2
# Face Recognition
#
# Use this script to run a TensorFlow Lite image classifier on faces detected within an image.
# The classifier can perform facial recognition, expression detection, or similar tasks.

import sensor, image, time, tf

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

clock = time.clock()

net = tf.load("trained.tflite", load_to_fb=True)
labels = [l.rstrip('\n') for l in open("labels.txt")]

while (True):
    clock.tick()

    # Take a picture and brighten things up for the frontal face detector.
    img = sensor.snapshot().gamma_corr(contrast=1.5)

    # Returns a list of rects (x, y, w, h) where faces are.
    faces = img.find_features(image.HaarCascade("frontalface"))

    for f in faces:

        # Classify a face and get the class scores list
        scores = net.classify(img, roi=f)[0].output()
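        # Minimal completion of the truncated loop: report the
        # highest-scoring class for this face and mark the detection.
        max_idx = scores.index(max(scores))
        print("%s = %f" % (labels[max_idx], scores[max_idx]))
        img.draw_rectangle(f)

    print(clock.fps(), "fps")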
Example #3
# TensorFlow Lite Object Detection Example
#
# This example shows off object detection. Object detection is much more powerful than
# object classification: it can locate multiple objects in the image.

import sensor, image, time, os, tf

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240))  # Set 240x240 window.
sensor.skip_frames(time=2000)  # Let the camera adjust.

net = tf.load('<object_detection_network>', load_to_fb=True)
labels = []

try:  # Load labels if they exist
    labels = [line.rstrip('\n') for line in open("labels.txt")]
except:
    pass

colors = [  # Add more colors if you are detecting more than 7 classes at once.
    (255, 0, 0),
    (0, 255, 0),
    (255, 255, 0),
    (0, 0, 255),
    (255, 0, 255),
    (0, 255, 255),
    (255, 255, 255),
]
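# The snippet ends before the main loop. What follows is a minimal sketch of
# the usual detection loop, assuming the OpenMV tf detect() API with its
# default thresholds.
clock = time.clock()

while (True):
    clock.tick()

    img = sensor.snapshot()

    # detect() returns one list of detections per class; class 0 is the background.
    for i, detection_list in enumerate(net.detect(img)):
        if i == 0:
            continue
        for d in detection_list:
            img.draw_rectangle(d.rect(), color=colors[i % len(colors)])
            name = labels[i] if i < len(labels) else str(i)
            print(name, d.rect())

    print(clock.fps(), "fps")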
Example #4
import sensor, time, tf, pyb

sensor.reset()
# Pixel format assumed to be RGB565; the original snippet begins mid-setup.
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.B64X64)

# Disable automatic exposure and gain
sensor.set_auto_exposure(False)
sensor.set_auto_gain(False)
sensor.set_contrast(0)
sensor.set_brightness(0)

# Wait two seconds for the settings to take effect.
sensor.skip_frames(time=2000)

# Create a clock object to track the FPS.
clock = time.clock()

# Load the TensorFlow Lite model
model = tf.load('lightly_resnet9.tflite')


def setLED(color='none'):
    """Function to set LED to a color (red, green, blue, none)
    """

    red_LED = pyb.LED(1)
    green_LED = pyb.LED(2)
    blue_LED = pyb.LED(3)

    if color == 'red':
        red_LED.on()
        green_LED.off()
        blue_LED.off()
    elif color == 'green':
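        # Assumed continuation: the snippet is truncated here, so the
        # remaining branches mirror the 'red' case above.
        red_LED.off()
        green_LED.on()
        blue_LED.off()
    elif color == 'blue':
        red_LED.off()
        green_LED.off()
        blue_LED.on()
    else:  # 'none' (or anything else) turns all LEDs off.
        red_LED.off()
        green_LED.off()
        blue_LED.off()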
Example #5
import audio, time, tf, micro_speech, pyb
labels = ['Silence', 'Unknown', 'Yes', 'No']

led_red = pyb.LED(1)
led_green = pyb.LED(2)

model = tf.load('/model.tflite')
speech = micro_speech.MicroSpeech()
audio.init(channels=1, frequency=16000, gain=24, highpass=0.9883)

# Start audio streaming
audio.start_streaming(speech.audio_callback)

while (True):
    # Run micro-speech without a timeout and filter detections by label index.
    idx = speech.listen(model, timeout=0, threshold=0.78, filter=[2, 3])
    led = led_green if idx == 2 else led_red
    print(labels[idx])
    for i in range(0, 4):
        led.on()
        time.sleep_ms(25)
        led.off()
        time.sleep_ms(25)

# Stop streaming
audio.stop_streaming()
Example #6
# TensorFlow Lite Cats vs Dogs Classification Example
#
# Classify cats and dogs in view.

import sensor, image, time, os, tf

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240))  # Set 240x240 window.
sensor.skip_frames(time=2000)  # Let the camera adjust.

# Load the built-in cats vs dogs classification network (the network is built into the OpenMV Cam firmware in this repo).
net = tf.load('catsvsdogs_classification')
labels = ['cat', 'dog']

clock = time.clock()
while (True):
    clock.tick()

    img = sensor.snapshot()

    for obj in net.classify(img,
                            min_scale=1.0,
                            scale_mul=0.99,
                            x_overlap=0.0,
                            y_overlap=0.0):
        print("**********\nClassification at [x=%d,y=%d,w=%d,h=%d]" %
              obj.rect())
        for i in range(len(obj.output())):
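            # Minimal completion of the truncated loop: print each label
            # with its confidence score.
            print("%s = %f" % (labels[i], obj.output()[i]))

    print(clock.fps(), "fps")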