Example #1
File: openmv.py  Project: ya060505/final
def image_classification():
    sensor.reset()                         # Reset and initialize the sensor.
    sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
    sensor.set_windowing((240, 240))       # Set 240x240 window.
    sensor.skip_frames(time=2000)          # Let the camera adjust.

    labels = ['3', '4', '0', 'other']

    img = sensor.snapshot()

    for obj in tf.classify('/model_demo.tflite', img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
        img.draw_rectangle(obj.rect())
        img.draw_string(obj.x() + 3, obj.y() - 1, labels[obj.output().index(max(obj.output()))], mono_space=False)

    return labels[obj.output().index(max(obj.output()))]
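Note that `obj` is read again after the for loop, so the final return assumes tf.classify() yielded at least one result; with min_scale=1.0 and x/y_overlap=0.0 the whole 240x240 window is classified exactly once, so that assumption holds. A minimal caller sketch (the print is illustrative, not from the original project):

digit = image_classification()
print("Detected:", digit)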
Example #2
File: main.py  Project: AQZ0216/EE-2405
def image_classification():
    sensor.reset()  # Reset and initialize the sensor.
    sensor.set_pixformat(
        sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
    sensor.set_windowing((240, 240))  # Set 240x240 window.
    sensor.skip_frames(time=2000)  # Let the camera adjust.

    labels = ['3', '4', '0', 'other']

    img = sensor.snapshot()

    for obj in tf.classify('/model_demo.tflite',
                           img,
                           min_scale=1.0,
                           scale_mul=0.5,
                           x_overlap=0.0,
                           y_overlap=0.0):
        img.draw_rectangle(obj.rect())
        img.draw_string(obj.x() + 3,
                        obj.y() - 1,
                        labels[obj.output().index(max(obj.output()))],
                        mono_space=False)

    RED_LED_PIN = 1
    BLUE_LED_PIN = 3

    sensor.reset()  # Initialize the camera sensor.
    sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
    sensor.set_framesize(sensor.QVGA)  # or sensor.QQVGA (or others)
    sensor.skip_frames(time=2000)  # Let new settings take effect.

    pyb.LED(RED_LED_PIN).on()
    sensor.skip_frames(time=2000)  # Give the user time to get ready.

    pyb.LED(RED_LED_PIN).off()
    pyb.LED(BLUE_LED_PIN).on()

    print("You're on camera!")
    sensor.snapshot().save("example.jpg")  # or "example.bmp" (or others)

    pyb.LED(BLUE_LED_PIN).off()
    print("Done! Reset the camera to see the saved image.")

    return labels[obj.output().index(max(obj.output()))]
Example #3
def predict_cough(img):
    for obj in tf.classify(net,
                           img,
                           min_scale=1.0,
                           scale_mul=0.8,
                           x_overlap=0.5,
                           y_overlap=0.5):
        print("**********\nPredictions Cough at [x=%d,y=%d,w=%d,h=%d]" %
              obj.rect())
        img.draw_rectangle(obj.rect())

        # This combines the labels and confidence values into a list of tuples
        predictions_list = list(zip(labels, obj.output()))

        for i in range(len(predictions_list)):
            print("%s = %f" % (predictions_list[i][0], predictions_list[i][1]))

            # draw a rectangle and the predicted label on screen
            img.draw_rectangle(obj.rect())
            img.draw_string(obj.x() + 3,
                            obj.y() - 1,
                            labels[obj.output().index(max(obj.output()))],
                            mono_space=False)
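This excerpt references `net` and `labels` that are defined elsewhere in the project. A minimal calling sketch, assuming a model file and label list loaded the same way as in Example #6 below (the file names here are placeholders, not taken from the original):

import sensor, time, tf

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

net = "trained.tflite"                                        # placeholder model path
labels = [line.rstrip('\n') for line in open("labels.txt")]   # placeholder label file

while True:
    predict_cough(sensor.snapshot())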
Example #4
while(True):
    clock.tick()

    img = sensor.snapshot()

    # tf.classify() will run the network on an ROI in the image (or on the whole image if the ROI is not
    # specified). A classification score output vector will be generated for each location. At each scale the
    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
    # that the computational workload goes WAY up with more overlap. Finally, for multi-scale matching, after
    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50% each step.
    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...

    # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
    # y_overlap is not -1 the method will search in all vertical positions.

    # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
    # x_overlap is not -1 the method will search in all horizontal positions.

    # The default settings just do one detection... change them to search the image...
    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=-1, y_overlap=-1):
        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
        img.draw_rectangle(obj.rect())
        # This combines the labels and confidence values into a list of tuples
        # and then sorts that list by the confidence values.
        sorted_list = sorted(zip(labels, obj.output()), key=lambda x: x[1], reverse=True)
        for i in range(5):
            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
    print(clock.fps(), "fps")
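The comment block above explains how min_scale and scale_mul define the pyramid of detection-window sizes. As a rough illustration, the scales that would be searched can be enumerated with plain Python (this helper is hypothetical, not part of the OpenMV API):

# Hypothetical helper: list the window scales implied by min_scale and
# scale_mul, starting from the full window (scale 1.0).
def detection_scales(min_scale, scale_mul):
    scales = []
    s = 1.0
    while s >= min_scale:
        scales.append(s)
        s *= scale_mul
    return scales

print(detection_scales(0.25, 0.5))  # [1.0, 0.5, 0.25]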
Example #5
def Image_Classification_MNIST():
    sensor.skip_frames(time=2000)
    img = sensor.snapshot()
    img.save("Mission1_Snapshot.jpg")
    for obj in tf.classify('/model_demo.tflite', img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
        return str(labels[obj.output().index(max(obj.output()))])
Example #6
import sensor, time, tf

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(
    sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240))  # Set 240x240 window.
sensor.skip_frames(time=2000)  # Let the camera adjust.

net = "trained.tflite"
labels = [line.rstrip('\n') for line in open("labels.txt")]

clock = time.clock()
while (True):
    clock.tick()

    img = sensor.snapshot()

    for obj in tf.classify(net,
                           img,
                           min_scale=1.0,
                           scale_mul=0.8,
                           x_overlap=0.5,
                           y_overlap=0.5):
        print("**********\nPredictions at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
        img.draw_rectangle(obj.rect())
        # This combines the labels and confidence values into a list of tuples
        predictions_list = list(zip(labels, obj.output()))

        for i in range(len(predictions_list)):
            print("%s = %f" % (predictions_list[i][0], predictions_list[i][1]))

    print(clock.fps(), "fps")
Example #7
def person_detection(data):
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)
    scores = tf.classify("person_detection", sensor.snapshot())[0].output()
    return ['unsure', 'person', 'no_person'][scores.index(max(scores))].encode()
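The `data` argument and the encoded return value suggest this function is used as a remote-call handler. A minimal registration sketch, assuming the OpenMV rpc library is available on the board (the USB VCP transport is an assumption):

import sensor, tf, rpc

sensor.reset()

interface = rpc.rpc_usb_vcp_slave()            # assumed transport; UART/SPI slaves also exist
interface.register_callback(person_detection)  # expose the handler to the remote master
interface.loop()                               # serve person_detection() calls forever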
Example #8
File: 4_6_2.py  Project: wutongtoby/mbed14
import sensor, image, time, os, tf

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(
    sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240))  # Set 240x240 window.
sensor.skip_frames(time=2000)  # Let the camera adjust.

labels = ['3', '4', '0', 'other']

img = sensor.snapshot()

for obj in tf.classify('model_demo.tflite',
                       img,
                       min_scale=1.0,
                       scale_mul=0.5,
                       x_overlap=0.0,
                       y_overlap=0.0):
    for i in range(len(obj.output())):
        print("%s = %f" % (labels[i], obj.output()[i]))

img.draw_rectangle(obj.rect())
img.draw_string(obj.x() + 3,
                obj.y() - 1,
                labels[obj.output().index(max(obj.output()))],
                mono_space=False)
print("This is : ", labels[obj.output().index(max(obj.output()))])
Example #9
# Test Portenta - By: khairunnasulfahmi - Sun Nov 1 2020

import image, tf, os, time


def transfer(trigger, conf):
    time_current = time.localtime()
    time_s = '{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}'.format(
        time_current[0], time_current[1], time_current[2], time_current[3],
        time_current[4], time_current[5])

    data = {'data': str(time_s) + ", " + trigger + ", " + str(conf)}
    print(data['data'])


# Load the custom trained tflite model
net = "trained.tflite"
labels = [line.rstrip('\n') for line in open("labels.txt")]

print("starting...")
img = image.Image("test_img.PPM", copy_to_fb=True)

obj = tf.classify(net, img)
predictions_list = list(zip(labels, obj[0].output()))

for i in range(len(predictions_list)):
    print("%s = %f" % (predictions_list[i][0], predictions_list[i][1]))