def start_streaming(s):
    print ('Waiting for connections..')
    client, addr = s.accept()
    # set client socket timeout to 2s
    client.settimeout(2.0)
    print ('Connected to ' + addr[0] + ':' + str(addr[1]))

    # Read request from client
    data = client.recv(1024)
    # Should parse client request here

    # Send multipart header
    client.send("HTTP/1.1 200 OK\r\n" \
                "Server: OpenMV\r\n" \
                "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" \
                "Cache-Control: no-cache\r\n" \
                "Pragma: no-cache\r\n\r\n")

    # FPS clock
    clock = time.clock()

    # Start streaming images
    # NOTE: Disable IDE preview to increase streaming FPS.
    while (True):
        clock.tick() # Track elapsed milliseconds between snapshots().
        frame = sensor.snapshot()
        cframe = frame.compressed(quality=35)
        header = "\r\n--openmv\r\n" \
                 "Content-Type: image/jpeg\r\n"\
                 "Content-Length:"+str(cframe.size())+"\r\n\r\n"
        client.send(header)
        client.send(cframe)
        print(clock.fps())
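
The `start_streaming(s)` function above expects `s` to be an already-bound listening socket. A minimal setup sketch, assuming a WINC WiFi shield and placeholder credentials (following the stock OpenMV MJPEG-streamer examples; adjust SSID/KEY/PORT for your network):

import sensor, network, usocket, time

SSID = "my_network"  # placeholder: your access point
KEY = "my_password"  # placeholder
HOST = ""            # listen on all interfaces
PORT = 8080

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

wlan = network.WINC()  # WiFi shield; boards with built-in WiFi use network.WLAN()
wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)

s = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
s.bind([HOST, PORT])
s.listen(5)

while True:
    start_streaming(s)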
Example #2
def find_face():
    for i in range(0, 100):
        img = sensor.snapshot()
    while (True):
        img = sensor.snapshot()
        objects = img.find_features(face_cascade, threshold=0.65, scale=1.65)
        if objects:
            print (objects[0])
            img.draw_rectangle(objects[0])
            try:
                kpts1 = img.find_keypoints(threshold=32, normalized=False, roi=objects[0])
            except:
                continue
            if kpts1:
                img.draw_keypoints(kpts1)
                time.sleep(1000)
                return kpts1
Example #3
def test_color_bars():

    sensor.reset()
    # Set sensor settings
    sensor.set_brightness(0)
    sensor.set_saturation(0)
    sensor.set_gainceiling(8)
    sensor.set_contrast(2)

    # Set sensor pixel format
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.RGB565)

    # Enable colorbar test mode
    sensor.set_colorbar(True)

    # Skip a few frames to allow the sensor to settle down
    # Note: This takes more time when executed from the IDE.
    for i in range(0, 100):
        image = sensor.snapshot()

    # Color bars thresholds
    t = [lambda r, g, b: r < 50  and g < 50  and b < 50,   # Black
         lambda r, g, b: r < 50  and g < 50  and b > 200,  # Blue
         lambda r, g, b: r > 200 and g < 50  and b < 50,   # Red
         lambda r, g, b: r > 200 and g < 50  and b > 200,  # Purple
         lambda r, g, b: r < 50  and g > 200 and b < 50,   # Green
         lambda r, g, b: r < 50  and g > 200 and b > 200,  # Aqua
         lambda r, g, b: r > 200 and g > 200 and b < 50,   # Yellow
         lambda r, g, b: r > 200 and g > 200 and b > 200]  # White

    # The 320x240 image has 8 color bars, each approximately 40 pixels wide.
    # We start from the center of the frame buffer and average the values
    # of 10 sample pixels from the center of each color bar.
    for i in range(0, 8):
        avg = (0, 0, 0)
        idx = 40*i+20 # center of colorbars
        for off in range(0, 10): # avg 10 pixels
            rgb = image.get_pixel(idx+off, 120)
            avg = tuple(map(sum, zip(avg, rgb)))

        if not t[i](avg[0]/10, avg[1]/10, avg[2]/10):
            raise Exception("COLOR BARS TEST FAILED. "
            "BAR#(%d): RGB(%d,%d,%d)"%(i+1, avg[0]/10, avg[1]/10, avg[2]/10))

    print("COLOR BARS TEST PASSED...")
Example #4
def unittest(data_path, temp_path):
    import sensor
    sensor.reset()
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.GRAYSCALE)
    img = sensor.snapshot().clear()
    img.set_pixel(img.width()//2+50, 120, 255)
    img.set_pixel(img.width()//2-50, 120, 255)
    img.draw_line([img.width()//2-50, 50, img.width()//2+50, 50])
    img.draw_rectangle([img.width()//2-25, img.height()//2-25, 50, 50])
    img.draw_circle(img.width()//2, img.height()//2, 40)
    img.draw_string(11, 10, "HelloWorld!")
    img.draw_cross(img.width()//2, img.height()//2)
    sensor.flush()
    img.difference(data_path+"/drawing.pgm")
    stats = img.get_statistics()
    return (stats.max() == 0) and (stats.min() == 0)
Example #5
def find_face():
    global sensor, time
    # Load Haar Cascade
    face_cascade = HaarCascade("/frontalface.cascade")
    while (True):
        image = sensor.snapshot()
        objects = image.find_features(face_cascade, threshold=0.65, scale=1.85)
        if objects:
            print (objects[0])
            image.draw_rectangle(objects[0])
            try:
                kpts1 = image.find_keypoints(threshold=32, normalized=False, roi=objects[0])
            except:
                continue
            if kpts1:
                image.draw_keypoints(kpts1)
                time.sleep(1000)
                return kpts1
# Grayscale Light Removal
#
# This example shows off how to remove bright lights from the image.
# You can do this using the binary() method with the "zero=" argument.
#
# Removing bright lights from the image allows you to now use
# histeq() on the image without outliers from oversaturated
# parts of the image breaking the algorithm...

import sensor, image, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

thresholds = (220, 255)

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot().binary([thresholds], invert=False, zero=True)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
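
As the header comment notes, the point of zeroing the bright pixels is that histeq() can then run without oversaturated outliers. A minimal sketch of the combination, reusing the setup above:

while(True):
    clock.tick()
    img = sensor.snapshot()
    img.binary([thresholds], invert=False, zero=True) # zero out oversaturated pixels
    img.histeq() # histogram-equalize what remains
    print(clock.fps())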
Example #7
# TV Example
#
# Note: To run this example you will need a Wireless Video TV Shield for your OpenMV Cam.
#
# The Wireless Video TV Shield allows you to view your OpenMV Cam's frame buffer on the go.

import sensor, image, tv

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA)
tv.init() # Initialize the tv.
tv.channel(8) # For wireless video transmitter shield
while(True):
    tv.display(sensor.snapshot()) # Take a picture and display the image.
Example #8
Important:
    This script should be copied to the OpenMV Cam as `main.py`.

Source:
    https://github.com/openmv/openmv/blob/master/scripts/examples/02-Board-Control/usb_vcp.py

"""
import sensor
import ustruct
import pyb

usb_vcp = pyb.USB_VCP()
# Disable USB interrupt (CTRL-C by default) when sending raw data (i.e. images)
# See: https://docs.openmv.io/library/pyb.USB_VCP.html#pyb.USB_VCP.setinterrupt
usb_vcp.setinterrupt(-1)

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.VGA)
sensor.skip_frames(time=2000)  # wait for settings to take effect!

while True:

    command = usb_vcp.recv(4, timeout=5000)

    if command == b'snap':
        image = sensor.snapshot().compress()
        usb_vcp.send(ustruct.pack('<L', image.size()))
        usb_vcp.send(image)
Example #9
def rgb2gray(img, img_gray):
    # (Reconstructed loop header; the start of this function was truncated in
    # the source. It converts each RGB pixel to grayscale.)
    for x in range(img.width()):
        for y in range(img.height()):
            rgb = img.get_pixel(x, y)
            gray = image.rgb_to_grayscale(rgb)
            img_gray.set_pixel(x, y, gray)
            #grayFram[x,y] = gray
    a = img_gray.get_pixel(10, 10)
    print(a)
    return img_gray


path = "0.bmp"
img = image.Image(path, copy_to_fb=True)

sensor.reset()
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.skip_frames(10)
img_gray = sensor.snapshot()

#sensor.flush()

#time.sleep(100)

#roi = (10,10,10,10)
#thres = (49,174)
while (True):

    #clock.tick()

    #img_gray = sensor.snapshot()
    #a = img.get_pixel(10,10)
    #i = rgb2gray(img)
    #time.sleep(100)
    pass # everything in this loop is commented out; pass keeps it syntactically valid
Example #10
# Negative Example
#
# This example shows off negating the image. This is not a particularly
# useful method but it can come in handy once in a while.

import sensor, image, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot().negate()

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
Example #11
s.settimeout(1.0)

print ('Waiting for connections..')
client, addr = s.accept()
print ('Connected to ' + addr[0] + ':' + str(addr[1]))

# Read request from client
data = client.recv(1024)

# Should parse client request here

# Send multipart header
client.send("HTTP/1.1 200 OK\r\n"   \
            "Server: OpenMV\r\n"    \
            "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" \
            "Cache-Control: no-cache\r\n" \
            "Pragma: no-cache\r\n\r\n")

# FPS clock
clock = time.clock()
# Start streaming images
while (True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    frame = sensor.snapshot()
    client.send("\r\n--openmv\r\n"  \
                "Content-Type: image/jpeg\r\n"\
                "Content-Length:"+str(frame.size())+"\r\n\r\n")
    client.send(frame.compress(35))
    print(clock.fps())

client.close()
Example #12
import sensor, time, image
# Set sensor contrast
sensor.set_contrast(1)
# Set sensor brightness
sensor.set_brightness(-2)
# Set sensor to pixel format
sensor.set_pixformat(sensor.GRAYSCALE)

# Load template
template = image.Image("0:/template.pgm")

# Run template matching
while (True):
    image = sensor.snapshot()
    r = image.find_template(template, 0.75)
    if r:
        image.draw_rectangle(r)
        time.sleep(50)
Example #13
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(10) # Let new settings take effect.
sensor.set_whitebal(False) # Turn off white balance.

if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory

while(True):

    pyb.LED(RED_LED_PIN).on()
    print("About to save background image...")
    sensor.skip_frames(60) # Give the user time to get ready.

    pyb.LED(RED_LED_PIN).off()
    sensor.snapshot().save("temp/bg.bmp")
    print("Saved background image - Now detecting motion!")
    pyb.LED(BLUE_LED_PIN).on()

    diff = 10 # We'll say we detected motion after 10 frames of motion.
    while(diff):
        img = sensor.snapshot()
        img.difference("temp/bg.bmp")
        for blob_l in img.find_blobs([(20, 100, -128, 127, -128, 127)]):
            for blob in blob_l:
                # Over 100 pixels need to change to detect motion.
                if (diff and (blob[4] > 100)): diff -= 1

    m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())

    clock = time.clock() # Tracks FPS.
Example #14
lcd.rotation(0)

time.sleep(1)

count = 0
while (corgi85.wifi_check() == 0):
    print("WIFI Connecting")
    time.sleep(1)

print("\n\nWIFI Connected")

print("\n\nSet line Token:", token)
corgi85.LINE_setToken(token)  #set line Token

print("\n\nsend line image")
img = sensor.snapshot()  # camera capture image
lcd.display(img)  # lcd  display image
corgi85.LINE_notifyPicture(
    img, "CorgiDude LINE notify Picture")  # send image to line noti
time.sleep(3)

print("\n\nsend message to line noti: Hello From CorgiDude")
corgi85.LINE_notify("Hello From CorgiDude")
time.sleep(3)

print("\n\nsend line sticker")
corgi85.LINE_notifySticker(
    1, 1)  # detail : https://devdocs.line.me/files/sticker_list.pdf
time.sleep(3)

print("\n\nsend line sticker & message")
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
sensor.set_auto_whitebal(False)  # Turn off white balance.

if not "temp" in os.listdir(): os.mkdir("temp")  # Make a temp directory

while (True):

    pyb.LED(RED_LED_PIN).on()
    print("About to save background image...")
    sensor.skip_frames(time=2000)  # Give the user time to get ready.

    pyb.LED(RED_LED_PIN).off()
    sensor.snapshot().save("temp/bg.bmp")
    print("Saved background image - Now detecting motion!")
    pyb.LED(BLUE_LED_PIN).on()

    diff = 10  # We'll say we detected motion after 10 frames of motion.
    while (diff):
        img = sensor.snapshot()
        img.difference("temp/bg.bmp")
        stats = img.statistics()
        # Stats 5 is the max of the lighting color channel. The below code
        # triggers when the lighting max for the whole image goes above 20.
        # The lighting difference maximum should be zero normally.
        if (stats[5] > 20):
            diff -= 1

    g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True)
import machine, sensor, image, pyb

RED_LED_ID = 1

led = pyb.LED(RED_LED_ID)
rtc = pyb.RTC()
if (machine.reset_cause() != machine.DEEPSLEEP_RESET):
    rtc.datetime((2021, 3, 22, 1, 0, 11, 0, 0))  # When connecting LiPo

led.on()
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.WQXGA2)
sensor.skip_frames(time=2000)  # Let settings take effect
t = rtc.datetime()
y = str(t[0])
m = '%02d' % t[1]
d = '%02d' % t[2]
# w = '%d' % t[3]
h = '%02d' % t[4]
n = '%02d' % t[5]
s = '%02d' % t[6]
name = 'snapshot_' + y + '-' + m + '-' + d + 'T' + h + '-' + n + '-' + s + 'Z.jpg'
sensor.snapshot().save(name)  # 0.25 A
sensor.sleep(True)
sensor.shutdown(True)
rtc.wakeup(20000)  # ms
led.off()
machine.deepsleep()  # ~0.00 A, device will be reset on wakeup
Example #17
sensor.set_framesize(sensor.QQVGA)  # 160x120
sensor.skip_frames(time=500)  # Skip frames while the camera settles
sensor.set_auto_gain(False)  # Auto gain is usually disabled for color tracking; leaving it on shifts the thresholds
sensor.set_auto_whitebal(False)  # White balance is usually disabled for color tracking for the same reason
clock = time.clock()  # Create a clock object

uart = UART(3, 115200)
uart.init(115200, bits=8, parity=None, stop=1,
          timeout_char=1000)  # Initialize with the given parameters; timeout_char is the inter-character timeout in milliseconds


class ctrl_info(object):
    WorkMode = 0x03  # Blob-detection mode: 0x01 = fixed single-color detection, 0x02 = self-learning color detection, 0x03 = line following
    Threshold_index = 0x00  # Threshold index


ctrl = ctrl_info()  # Instantiate the control-info class
single_blob.InitSuccess_LED()  # Init complete: blink the green LED twice
'''----------------------------------------------- end of initialization ------------------------------------------------'''

while (True):

    clock.tick()  # Track the clock
    img = sensor.snapshot()  # thresholds is threshold tuple 0
    if ctrl.WorkMode == 0x01:
        single_blob.check_blob(img, ctrl, thresholds, threshold_index, uart)
    elif ctrl.WorkMode == 0x02:
        a = 0  # placeholder; not implemented yet
    elif ctrl.WorkMode == 0x03:
        find_line.find_line(img, ctrl, uart)
Example #18
sensor.set_contrast(3)
clock = time.clock()

thresholds = (0, 80)  # looking for dark blobs
sensor.set_windowing((40, 20, 80, 80))  # looking at the center only

smoothing_steps = 32
smoothing_index = 0
smoothing_values = [0.0 for x in range(smoothing_steps)]
initial_value = 66.6  # random value
initial_value_set = False
pluspi_value = 7.8
image_count = 0
while (True):
    clock.tick()
    img_orig = sensor.snapshot()

    img = img_orig.copy()

    # hide the center
    img.draw_circle(40, 40, 15, color=255, thickness=2, fill=True)

    line_coord = []
    elongation = []

    # look for the 2 sides of the clock hand
    for blob in img.find_blobs([thresholds],
                               pixels_threshold=20,
                               area_threshold=30,
                               merge=True,
                               margin=1):
Example #19
# Transform. https://en.wikipedia.org/wiki/Circle_Hough_Transform
#
# Note that the find_circles() method will only find circles which are completely
# inside of the image. Circles which go outside of the image/roi are ignored...

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # grayscale is faster
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot().lens_corr(1.8)

    # Circle objects have four values: x, y, r (radius), and magnitude. The
    # magnitude is the strength of the detection of the circle. Higher is
    # better...

    # `threshold` controls how many circles are found. Increase its value
    # to decrease the number of circles detected...

    # `x_margin`, `y_margin`, and `r_margin` control the merging of similar
    # circles in the x, y, and r (radius) directions.

    for c in img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10, r_margin = 10):
        img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0))
        print(c)
# Loop
###########

old_time = pyb.millis()

throttle_old_result = None
throttle_i_output = 0
throttle_output = THROTTLE_OFFSET

steering_old_result = None
steering_i_output = 0
steering_output = STEERING_OFFSET

while True:
    clock.tick()
    img = sensor.snapshot().histeq()

    if BINARY_VIEW: img = img.binary(COLOR_THRESHOLDS if COLOR_LINE_FOLLOWING else GRAYSCALE_THRESHOLDS)
    if BINARY_VIEW: img.erode(1, threshold = 3).dilate(1, threshold = 1)
    if DO_NOTHING: continue

    # We call get regression below to get a robust linear regression of the field of view.
    # This returns a line object which we can use to steer the robocar.
    line = img.get_regression(([(50, 100, -128, 127, -128, 127)] if BINARY_VIEW else COLOR_THRESHOLDS) if COLOR_LINE_FOLLOWING \
        else ([(127, 255)] if BINARY_VIEW else GRAYSCALE_THRESHOLDS), \
        area_threshold = AREA_THRESHOLD, pixels_threshold = PIXELS_THRESHOLD, \
        robust = True)

    print_string = ""
    if line and (line.magnitude() >= MAG_THRESHOLD):
        img.draw_line(line.line(), color = (127, 127, 127) if COLOR_LINE_FOLLOWING else 127)
# stages.
face_cascade = image.HaarCascade("frontalface", stages=25)

while(True):

    pyb.LED(RED_LED_PIN).on()
    print("About to start detecting faces...")
    sensor.skip_frames(time = 2000) # Give the user time to get ready.

    pyb.LED(RED_LED_PIN).off()
    print("Now detecting faces!")
    pyb.LED(BLUE_LED_PIN).on()

    diff = 10 # We'll say we detected a face after 10 frames.
    while(diff):
        img = sensor.snapshot()
        # Threshold can be between 0.0 and 1.0. A higher threshold results in a
        # higher detection rate with more false positives. The scale value
        # controls the matching scale allowing you to detect smaller faces.
        faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)

        if faces:
            diff -= 1
            for r in faces:
                img.draw_rectangle(r)

    m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())

    clock = time.clock() # Tracks FPS.
    print("You're on camera!")
    for i in range(200):
Example #22
# Grayscale Light Removal
#
# This example shows off how to remove bright lights from the image.
# You can do this using the binary() method with the "zero=" argument.
#
# Removing bright lights from the image allows you to now use
# histeq() on the image without outliers from oversaturated
# parts of the image breaking the algorithm...

import sensor, image, time

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

thresholds = (220, 255)

while (True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot().binary([thresholds], invert=False, zero=True)

    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
# up processing at the expense of accuracy. The frontalface HaarCascade has 25
# stages.
face_cascade = image.HaarCascade("frontalface", stages=25)

while(True):

    pyb.LED(RED_LED_PIN).on()
    print("About to start detecting faces...")
    sensor.skip_frames(60) # Give the user time to get ready.

    pyb.LED(RED_LED_PIN).off()
    print("Now detecting faces!")
    pyb.LED(BLUE_LED_PIN).on()

    diff = 10 # We'll say we detected a face after 10 frames.
    while(diff):
        img = sensor.snapshot()
        # Threshold can be between 0.0 and 1.0. A higher threshold results in a
        # higher detection rate with more false positives. The scale value
        # controls the matching scale allowing you to detect smaller faces.
        faces = img.find_features(face_cascade, threshold=0.5, scale=1.5)

        if faces:
            diff -= 1
            for r in faces:
                img.draw_rectangle(r)

    pyb.LED(BLUE_LED_PIN).off()
    print("Face detected! Saving image...")
    sensor.snapshot().save("snapshot-%d.jpg" % pyb.rng()) # Save Pic.
Example #24
        continue

sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA) #QVGA=320x240
sensor.run(1)

task = kpu.load(0x300000) # Load Model File from Flash
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
# Anchor data is for bbox, extracted from the training sets.
kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)

but_stu = 1

try:
    while(True):
        img = sensor.snapshot() # Take an image from sensor
        bbox = kpu.run_yolo2(task, img) # Run the detection routine
        if bbox:
            for i in bbox:
                print(i)
                img.draw_rectangle(i.rect())
        #lcd.display(img)

        if but_a.value() == 0 and but_stu == 1:
            if led_w.value() == 1:
                led_w.value(0)
            else:
                led_w.value(1)
            but_stu = 0
        if but_a.value() == 1 and but_stu == 0:
            but_stu = 1
Example #25
def draw_keypoints(img, kpts):
    if kpts:
        print(kpts)
        img.draw_keypoints(kpts)
        img = sensor.snapshot()
        time.sleep_ms(1000)
import sensor, image, pyb

RED_LED_PIN = 1
BLUE_LED_PIN = 3

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.QVGA)  # or sensor.QQVGA (or others)
sensor.set_windowing((100, 60))
sensor.skip_frames(time=2000)  # Let new settings take effect.

led = pyb.LED(1)
led2 = pyb.LED(2)
led3 = pyb.LED(3)

sensor.skip_frames(time=6000)  # Give the user time to get ready.

print("You're on camera!")
sensor.snapshot().save("exampleNUEVOH2EF.jpg")  # or "example.bmp" (or others)

pyb.LED(BLUE_LED_PIN).off()
print("Done! Reset the camera to see the saved image.")
Example #27
def jpeg_snapshot(data):
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    return sensor.snapshot().compress(quality=90).bytearray()
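
jpeg_snapshot(data) has the signature of an OpenMV rpc-library callback. A sketch of how such a callback is typically registered and served; the USB-VCP transport below is an assumption, pick the interface matching your link:

import rpc, sensor

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

interface = rpc.rpc_usb_vcp_slave() # assumption: USB VCP transport
interface.register_callback(jpeg_snapshot)
interface.loop() # serve remote calls forever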
sensor.reset()  # Initialize the sensor

sensor.set_pixformat(sensor.RGB565)  # use RGB565.
# Set the image color format: RGB565 (color) or GRAYSCALE (grayscale)

sensor.set_framesize(sensor.QQVGA)  # Use QQVGA for speed.
# Set the image resolution

sensor.skip_frames(10)  # Let the new settings take effect.
sensor.set_auto_whitebal(False)  # turn this off.
# Disable white balance. It is on by default and must be turned off for color tracking.
clock = time.clock()  # Track the FPS frame rate

while (True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    blobs = img.find_blobs([green_threshold])
    #find_blobs(thresholds, invert=False, roi=Auto): thresholds is a list of
    #color threshold tuples and must be wrapped in brackets [ ]. invert=1
    #inverts the thresholds (invert=False by default). roi restricts detection
    #to a rectangular region given as a tuple roi = (x, y, w, h): width w and
    #height h starting from the top-left corner (x, y); if unset it defaults to
    #the whole image. The function returns a list of blobs; for each blob, [0]
    #is the x coordinate of the bounding box's top-left corner, [1] the y, [2]
    #the width, [3] the height, [4] the pixel count, [5] the center x, [6] the
    #center y, [7] the rotation angle (in radians; a float, while the other
    #elements are integers), [8] the number of blobs intersecting this one, and
    #[9] the color code (which threshold produced this blob).
    if blobs:
        # If the target color was found
        for b in blobs:
Example #29
import sensor, time
sensor.set_pixformat(sensor.RGB565)
clock = time.clock()
while (True):
    clock.tick()
    # take snapshot
    image = sensor.snapshot()
    #get a binary image
    binary = image.threshold((255, 127, 127), 25)
    # run median filter
    binary.median(3)
    # detect blobs in image
    blobs = binary.find_blobs()
    # draw rectangles around detected blobs
    for r in blobs:
        image.draw_rectangle(r)
    print(clock.fps())
Example #30
if "names.txt" in os.listdir():
    with open("names.txt", 'r') as f:
        record_names = f.read().splitlines()
        print(record_names)

if "ftrs.txt" in os.listdir():
    with open("ftrs.txt", 'r') as f:
        record_ftrs = f.read().split('\n|||||\n')
        record_ftrs.pop()
        print(record_ftrs)

clock = time.clock()  # Initialize the system clock to compute the frame rate

while (1):  # main loop
    # check_key() # key-press detection
    img = sensor.snapshot()  # Grab a frame from the camera
    clock.tick()  # Record the time, used to compute the frame rate
    code = kpu.run_yolo2(task_fd, img)  # Run the face detection model to get face bounding boxes
    if code:  # If a face was detected
        for i in code:  # Iterate over the bounding boxes
            # Cut face and resize to 128x128
            a = img.draw_rectangle(i.rect())  # Draw the face box on screen
            face_cut = img.cut(i.x(), i.y(), i.w(), i.h())  # Crop the face region into face_cut
            face_cut_128 = face_cut.resize(128, 128)  # Resize the cropped face to 128x128 pixels
            a = face_cut_128.pix_to_ai()  # Convert the cropped image into the format the KPU accepts
            #a = img.draw_image(face_cut_128, (0,0))
            # Landmark for face 5 points
            fmap = kpu.forward(task_ld, face_cut_128)  # Run the 5-point facial landmark model
            plist = fmap[:]  # Get the keypoint predictions
            le = (i.x() + int(plist[0] * i.w() - 10),
THRESHOLD = (0, 100) # Grayscale threshold for dark things...
BINARY_VISIBLE = True # Does binary first so you can see what the linear regression
                      # is being run on... might lower FPS though.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQQVGA) # 80x60 (4,800 pixels) - O(N^2) max = 23,040,000.
sensor.skip_frames(time = 2000)     # WARNING: If you use QQVGA it may take seconds
clock = time.clock()                # to process a frame sometimes.

while(True):
    clock.tick()
    img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot()

    # Returns a line object similar to line objects returned by find_lines() and
    # find_line_segments(). You have x1(), y1(), x2(), y2(), length(),
    # theta() (rotation in degrees), rho(), and magnitude().
    #
    # magnitude() represents how well the linear regression worked. It means something
    # different for the robust linear regression. In general, the larger the value the
    # better...
    line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD], robust = True)

    if (line): img.draw_line(line.line(), color = 127)
    print("FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A"))

# About negative rho values:
#
Example #32
# sensor.set_framesize(sensor.VGA)
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.

curDir = uos.ilistdir()
iMax = 0
for i in curDir:
    if i[1] == 0x8000:
        fileName = i[0]
        #print(fileName[0:5])
        if fileName[0:3] == 'img':
            if int(fileName[3:-4]) >= iMax:
                iMax = int(fileName[3:-4]) + 1

streamFileName = 'img' + str(iMax) + '.bin'
print(streamFileName)

while (True):
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    #print(clock.fps())              # Note: OpenMV Cam runs about half as fast when connected
    # to the IDE. The FPS should increase once disconnected.
    #curTimeSec = utime.time()
    #print(utime.localtime(curTimeSec))
    curTime = utime.localtime()
    #n = pyb.rng() / (2 ** 30 - 1)
    n = urandom.getrandbits(10)
    print(curTime[5])
    print(curTime)
    print(n)
Example #33
# CMSIS CNN example.
import sensor, image, time, os

sensor.reset()                          # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_pixformat(sensor.RGB565)     # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)       # Set frame size to QVGA (320x240)
sensor.set_windowing((200, 200))        # Set 200x200 window.
sensor.skip_frames(time = 100)          # Wait for settings to take effect.
sensor.set_auto_gain(False)
sensor.set_auto_exposure(False)

labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

clock = time.clock()    # Create a clock object to track the FPS.
while(True):
    clock.tick()        # Update the FPS clock.
    img = sensor.snapshot().lens_corr(1.6)  # Take a picture and return the image.
    out = img.classify_object()
    # print label_id:confidence
    #for i in range(0, len(out)):
    #    print("%s:%d "%(labels[i], out[i]), end="")
    max_idx = out.index(max(out))
    print("%s : %0.2f%% "%(labels[max_idx], (out[max_idx]/128)*100))

    #print(clock.fps())             # Note: OpenMV Cam runs about half as fast when connected
                                    # to the IDE. The FPS should increase once disconnected.
Example #34
def get_image():
    if not obj.is_init:
        obj.init()
        obj.is_init = True
    return sensor.snapshot()
Example #35
# #!/usr/bin/env python2.7
# import sys, serial, struct
# port = '/dev/ttyACM0'
# sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
#             xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)
# sp.setDTR(True) # dsrdtr is ignored on Windows.
# sp.write("snap")
# sp.flush()
# size = struct.unpack('<L', sp.read(4))[0]
# img = sp.read(size)
# sp.close()
# 
# with open("img.jpg", "w") as f:
#     f.write(img)

import sensor, image, time, ustruct
from pyb import USB_VCP

usb = USB_VCP()
sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000)     # Wait for settings to take effect.

while(True):
    cmd = usb.recv(4, timeout=5000)
    if (cmd == b'snap'):
        img = sensor.snapshot().compress()
        usb.send(ustruct.pack("<L", img.size()))
        usb.send(img)
Example #36
if sensor.get_id() == sensor.OV7725:
    sensor.set_hmirror(True)
    sensor.set_vflip(True)
elif sensor.get_id() == sensor.OV5640:
    OV5640AF_Init()

sensor.skip_frames(time=2000)  # Let new settings take effect.

blue_led = LED(1)
KEY = Pin('C13', Pin.IN, Pin.PULL_DOWN)

print("You're on camera!")
keycount = 0
file_count = 0
while (True):
    sensor.snapshot()
    if KEY.value() == 1:
        while KEY.value() == 1:
            blue_led.on()
            sleep(50)
            blue_led.off()
            sleep(50)
            keycount += 1
            if keycount > 3:
                # Long-press K1 to start autofocus
                if sensor.get_id() == sensor.OV5640:
                    while KEY.value() == 1:
                        blue_led.on()
                        sleep(100)
                        blue_led.off()
                        sleep(100)
Example #37
		# Display on
		self.write_command(0x29)

if __name__ == "__main__":
	import sensor, time
	#from lcd import LCD

	# Reset sensor
	sensor.reset()

	# Sensor settings
	sensor.set_contrast(2)
	sensor.set_brightness(0)
	sensor.set_saturation(2)
	sensor.set_pixformat(sensor.RGB565)

	# LCD resolution (128x160)
	sensor.set_framesize(sensor.QQVGA2)

	# Init LCD
	lcd = LCD()
	lcd.clear(0x00)
	lcd.set_backlight(True)

	clock = time.clock()
	while (True):
		clock.tick()
		# Capture a frame and draw it to the LCD
		lcd.write_image(sensor.snapshot())
		print(clock.fps())
Example #38
from pid import PID
rho_pid = PID(p=0.4, i=0)
theta_pid = PID(p=0.001, i=0)
sensor.reset()
sensor.set_vflip(True)
sensor.set_hmirror(True)
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQQVGA) # 80x60 (4,800 pixels) - O(N^2) max = 23,040,000.
#sensor.set_windowing([0,20,80,40])
sensor.skip_frames(time = 2000)     # WARNING: If you use QQVGA it may take seconds
clock = time.clock()                # to process a frame sometimes.

while(True):
    clock.tick()
    time.sleep(100)
    img = sensor.snapshot().binary([THRESHOLD])
    line = img.get_regression([(100,100,0,0,0,0)], robust = True)
    if (line):
        rho_err = abs(line.rho())-img.width()/2
        if line.theta()>90:
            theta_err = line.theta()-180
        else:
            theta_err = line.theta()
        img.draw_line(line.line(), color = 127)
        #print(rho_err,line.magnitude(),rho_err)
        if line.magnitude()>8:
            #if -40<b_err<40 and -30<t_err<30:
            rho_output = rho_pid.get_pid(rho_err,1)
            theta_output = theta_pid.get_pid(theta_err,1)
            output = rho_output+theta_output
            #print(output)
# stages.
face_cascade = image.HaarCascade("frontalface", stages=25)

while(True):

    pyb.LED(RED_LED_PIN).on()
    print("About to start detecting faces...")
    sensor.skip_frames(time = 2000) # Give the user time to get ready.

    pyb.LED(RED_LED_PIN).off()
    print("Now detecting faces!")
    pyb.LED(BLUE_LED_PIN).on()

    diff = 10 # We'll say we detected a face after 10 frames.
    while(diff):
        img = sensor.snapshot()
        # Threshold can be between 0.0 and 1.0. A higher threshold results in a
        # higher detection rate with more false positives. The scale value
        # controls the matching scale allowing you to detect smaller faces.
        faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)

        if faces:
            diff -= 1
            for r in faces:
                img.draw_rectangle(r)

    g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True)

    clock = time.clock() # Tracks FPS.
    print("You're on camera!")
    for i in range(100):
Example #40
# Use this script to gather face images for building a TensorFlow dataset. This script
# automatically zooms in on the largest face in the field of view, which you can then
# save using the dataset editor.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

clock = time.clock()

largest_face = None
largest_face_timeout = 0

while (True):
    clock.tick()

    faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(
        image.HaarCascade("frontalface"))

    if faces:
        largest_face = max(faces, key=lambda f: f[2] * f[3])
        largest_face_timeout = 20

    if largest_face_timeout > 0:
        sensor.get_fb().crop(roi=largest_face)
        largest_face_timeout -= 1

    print(clock.fps())
Example #41
# Linear Polar Mapping Example
#
# This example shows off re-projecting the image using a linear polar
# transformation. Linear polar images are useful in that rotations
# become translations in the X direction and linear changes
# in scale become linear translations in the Y direction.

import sensor, image, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot().linpolar(reverse=False)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
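
A closely related re-projection is logpolar(), where changes in scale also become translations; a sketch under the same setup, assuming your firmware includes logpolar():

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot().logpolar(reverse=False)

    print(clock.fps())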
Example #42
# Cartoon Filter
#
# This example shows off a simple cartoon filter on images. The cartoon
# filter works by joining similar pixel areas of an image and replacing
# the pixels in those areas with the area mean.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA) # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    # seed_threshold controls the maximum area growth of a colored
    # region. Making this larger will merge more pixels.

    # floating_threshold controls the maximum pixel-to-pixel difference
    # when growing a region. Setting this very high will quickly combine
    # all pixels in the image. You should keep this small.

    # cartoon() will grow regions while both thresholds are satisfied...

    img = sensor.snapshot().cartoon(seed_threshold=0.05, floating_threshold=0.05)

    print(clock.fps())
Example #43
classes = ["sakura"]
print(uos.listdir("/sd/"))

task = kpu.load("/sd/YOLO_best_mAP_75.kmodel")
kpu.set_outputs(
    task, 0, 7, 7, 30
)  # The output shape must match the last layer of your model (before the Reshape layer)

anchor = (0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282,
          3.52778, 9.77052, 9.16828)
kpu.init_yolo2(task, 0.3, 0.3, 5, anchor)
#kpu.init_yolo2(task, 0.8, 0.9, 5, anchor)
print("start")
code = ""
while (True):
    img = sensor.snapshot()  #.rotation_corr(z_rotation=90.0)
    #a = img.pix_to_ai()
    code = kpu.run_yolo2(task, img)
    if code:
        for i in code:
            a = img.draw_rectangle(i.rect(), color=(0, 255, 0))
            a = img.draw_string(i.x(),
                                i.y(),
                                classes[i.classid()],
                                color=(255, 0, 0),
                                scale=3)
        a = lcd.display(img)
    else:
        a = lcd.display(img)
a = kpu.deinit(task)
Example #44
kernel_size = 1 # 3x3 kernel (reconstructed; the head of this snippet was truncated)
kernel = [-1, -1, -1, \
          -1, +8, -1, \
          -1, -1, -1]
# This is a high pass filter kernel. see here for more kernels:
# http://www.fmwconcepts.com/imagemagick/digital_image_filtering.pdf
thresholds = [(100, 255)] # grayscale thresholds

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

# On the OV7725 sensor, edge detection can be enhanced
# significantly by setting the sharpness/edge registers.
# Note: This will be implemented as a function later.
if (sensor.get_id() == sensor.OV7725):
    sensor.__write_reg(0xAC, 0xDF)
    sensor.__write_reg(0x8F, 0xFF)

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    img.morph(kernel_size, kernel)
    img.binary(thresholds)

    # Erode pixels with less than 2 neighbors using a 3x3 image kernel
    img.erode(1, threshold = 2)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
Example #45
led_control(0)

# configuration step
try:
    processing = True
    interface.loop()
except:
    pass

# Stabilization of the camera
sensor.skip_frames(time=2000)

# save the ref image used for the diff
data_fb = sensor.alloc_extra_fb(img_width, img_height, sensor.RGB565)
ref_img = sensor.alloc_extra_fb(img_width, img_height, sensor_format)
img = sensor.snapshot()
img.remap(data_fb, right=True, upside_down=True)
ref_img.replace(img)

# now add an additional part that will convey the mask info
sensor.set_windowing((int((sensor.width() - img_width) / 2) - 2,
                      int((sensor.height() - img_height) / 2), img_width,
                      img_height + mask_height))

# Serve forever
while True:
    try:
        processing = True
        while not pin4.value():
            pass
        # get the image and undistort it
Example #46
# transpose an image. Note that:
#
# vflip=False, hmirror=False, transpose=False -> 0 degree rotation
# vflip=True,  hmirror=False, transpose=True  -> 90 degree rotation
# vflip=True,  hmirror=True,  transpose=False -> 180 degree rotation
# vflip=False, hmirror=True,  transpose=True  -> 270 degree rotation

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)
clock = time.clock()

mills = pyb.millis()
counter = 0

while (True):
    clock.tick()

    img = sensor.snapshot().replace(vflip=(counter // 2) % 2,
                                    hmirror=(counter // 4) % 2,
                                    transpose=(counter // 8) % 2)

    if (pyb.millis() > (mills + 1000)):
        mills = pyb.millis()
        counter += 1

    print(clock.fps())
Example #47
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)  # use QQVGA for speed.
sensor.set_vflip(True)
sensor.set_hmirror(True)
sensor.set_auto_gain(True)  # do some calibration at the start
sensor.set_auto_whitebal(True)
sensor.skip_frames(time=0)  # Indoors, set time to 2000 to do a white balance calibration; outdoors this can be 0.
sensor.set_auto_gain(False)  # now turn off auto-calibration before we start color tracking
sensor.set_auto_whitebal(False)

while (True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot().histeq()  # Take a picture and return the image; histeq() does a histogram equalization to compensate for lighting changes
    print("FPS: ", clock.fps())
    centroid_sum = 0
    for r in ROIS:
        blobs = img.find_blobs([thresholds[threshold_index]],
                               roi=r[0:4],
                               merge=True)  # r[0:4] is roi tuple.
        if blobs:
            # Find the index of the blob with the most pixels.
            most_pixels = 0
            largest_blob = 0
            for i in range(len(blobs)):
                if blobs[i].pixels() > most_pixels:
                    most_pixels = blobs[i].pixels()
                    largest_blob = i
#
# This example demonstrates using frame differencing with your OpenMV Cam. It's
# called basic frame differencing because there's no background image update.
# So, as time passes the background image may change resulting in issues.

import sensor, image, pyb, os, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
sensor.set_auto_whitebal(False) # Turn off white balance.
clock = time.clock() # Tracks FPS.

if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory

print("About to save background image...")
sensor.skip_frames(time = 2000) # Give the user time to get ready.
sensor.snapshot().save("temp/bg.bmp")
print("Saved background image - Now frame differencing!")

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    # Replace the image with the "abs(NEW-OLD)" frame difference.
    img.difference("temp/bg.bmp")

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
Example #49
def test_image_processing():
    for i in range(0, 50):
        clock.tick()                    # Update the FPS clock.
        img = sensor.snapshot()         # Take a picture and return the image.
        img.find_edges(image.EDGE_CANNY, threshold=(50, 80))
# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.
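# BLOCK_W/BLOCK_H are used below but are missing from this excerpt; the
# values here are assumptions (power-of-2 patch sizes suit find_displacement's
# phase correlation).
BLOCK_W = 16 # pow2
BLOCK_H = 16 # pow2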

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
sensor.skip_frames(time = 2000)        # Wait for settings to take effect.
clock = time.clock()                   # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to run out of RAM now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    for y in range(0, sensor.height(), BLOCK_H):
        for x in range(0, sensor.width(), BLOCK_W):
            displacement = extra_fb.find_displacement(img, \
                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))

            # Below 0.1 or so (YMMV) and the results are just noise.
            if(displacement.response() > 0.1):
                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
                              color = 255) # color value assumed; the original call was truncated

sensor.reset() # Initialize the camera
sensor.set_pixformat(sensor.GRAYSCALE) # Grayscale format
sensor.set_framesize(sensor.QVGA) # QVGA (a smaller size would be faster)
sensor.set_windowing(ROI)
sensor.skip_frames(10) # Skip 10 frames so the new settings take effect
sensor.set_auto_whitebal(False)


clock = time.clock() # Track the frame rate


while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Get an image from the sensor
    pyb.LED(BLUE_LED_PIN).on()
    blobs = img.find_blobs([thresholds], pixels_threshold=3, area_threshold=3)


    if blobs:
        for b in blobs:
            img.draw_rectangle(b[0:4])
            img.draw_cross(b[5], b[6])
            # Split each 16-bit center coordinate into high/low bytes (masked
            # to 8 bits) and send a framed packet over the UART.
            H_xAngle = (b[5] >> 8) & 0xFF
            L_xAngle = b[5] & 0xFF
            H_yAngle = (b[6] >> 8) & 0xFF
            L_yAngle = b[6] & 0xFF
            uart_buf = bytearray([0x41, 0x42, L_xAngle, H_xAngle, L_yAngle, H_yAngle, 0x0d, 0x0a])
            uart.write(uart_buf)
Example #52
def person_detection(data):
    global net
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    scores = net.classify(sensor.snapshot())[0].output()
    return labels[scores.index(max(scores))].encode()
Example #53
sensor.reset()

# Set sensor settings
sensor.set_brightness(0)
sensor.set_saturation(0)
sensor.set_gainceiling(16)
sensor.set_contrast(1)
sensor.set_framesize(sensor.QVGA)

# Enable JPEG and set quality
sensor.set_pixformat(sensor.JPEG)
sensor.set_quality(98)

# Red LED
led = pyb.LED(1)

# Skip a few frames to allow the sensor to settle down
# Note: This takes more time when executed from the IDE.
for i in range(0, 30):
    sensor.snapshot()

# Turn on red LED and wait for a second
led.on()
time.sleep(1000)

# Write JPEG image to file
with open("/test.jpeg", "w") as f:
    f.write(sensor.snapshot())

led.off()
print("Reset the camera to see the saved image.")
sensor.set_hmirror(0)  # Disable horizontal mirroring
clock = time.clock()
lcd.init()

Found_centerline = False
Found_crossing = False
centerlines = 4
crossingsFound = 0
offCenterCrossing = False
faultcount = 0

while (True):
    lines = []
    crossings = []
    clock.tick()
    img = sensor.snapshot()  # Original picture, in color
    bw_img = img.copy().to_grayscale()
    bw_img.binary([THRESHOLD],
                  invert=True)  # Convert img to binary, black and white
    blobs = bw_img.find_blobs([(120, 256)])  # Find white spots
    middle_line = bw_img.find_blobs([(120, 256)],
                                    roi=(120, 238, 80,
                                         2))  # Used to count tiles

    if Found_centerline == True:
        tmp = img.draw_rectangle((0, 0, 10, 10),
                                 color=(0, 213, 140),
                                 fill=True)
    else:
        tmp = img.draw_rectangle((0, 0, 10, 10),
                                 color=(213, 140, 0),
Example #55
X_OFFSET = 0
Y_OFFSET = 0

ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in
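
# The *_ROTATION_DEGREE_RATE constants are referenced below but missing from
# this excerpt; the values here are assumptions (degrees added per frame).
X_ROTATION_DEGREE_RATE = 5
Y_ROTATION_DEGREE_RATE = 0.5
Z_ROTATION_DEGREE_RATE = 5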

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

x_rotation_counter = 0
y_rotation_counter = 0
z_rotation_counter = 0

while(True):
    clock.tick()

    img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \
                                          y_rotation = y_rotation_counter, \
                                          z_rotation = z_rotation_counter, \
                                          x_translation = X_OFFSET, \
                                          y_translation = Y_OFFSET, \
                                          zoom = ZOOM_AMOUNT)

    x_rotation_counter += X_ROTATION_DEGREE_RATE
    y_rotation_counter += Y_ROTATION_DEGREE_RATE
    z_rotation_counter += Z_ROTATION_DEGREE_RATE

    print(clock.fps())
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA)  # or sensor.QQVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
sensor.set_auto_whitebal(False)  # Turn off white balance.
clock = time.clock()  # Tracks FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to run out of RAM now. However,
# frame differencing doesn't use a lot of the extra space in the frame buffer.
# But, things like AprilTags do and won't work if you do this...
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(),
                                 sensor.RGB565)
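
# MIN_TRIGGER_THRESHOLD is used below but missing from this excerpt; the value
# here is an assumption (get_similarity() returns SSIM statistics in [-1, 1];
# tune the trigger for your scene).
MIN_TRIGGER_THRESHOLD = -0.4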

print("About to save background image...")
sensor.skip_frames(time=2000)  # Give the user time to get ready.
extra_fb.replace(sensor.snapshot())
print("Saved background image!")

while (True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.
    sim = img.get_similarity(extra_fb)
    change = "- Change -" if sim.min(
    ) < MIN_TRIGGER_THRESHOLD else "- No Change -"

    print(clock.fps(), change, sim)
Example #57
# Image Reader Example
#
# USE THIS EXAMPLE WITH A uSD CARD!
#
# This example shows how to use the Image Reader object to replay snapshots of what your
# OpenMV Cam saw, saved by the Image Writer object, for testing machine vision algorithms.

import sensor, image, time

snapshot_source = False # Set to true once finished to pull data from sensor.

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

img_reader = None if snapshot_source else image.ImageReader("/stream.bin")

while(True):
    clock.tick()
    img = sensor.snapshot() if snapshot_source else img_reader.next_frame(copy_to_fb=True, loop=True)
    # Do machine vision algorithms on the image here.

    print(clock.fps())
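
The /stream.bin file replayed above is produced by the companion Image Writer object. A minimal recording sketch (the frame count is arbitrary; requires a uSD card):

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

img_writer = image.ImageWriter("/stream.bin")
for i in range(200): # record a fixed number of frames to the uSD card
    clock.tick()
    img_writer.add_frame(sensor.snapshot())
    print(clock.fps())
img_writer.close()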
for i, color in enumerate(palette_source_colors):
    palette_source_color_image[i] = color

# Scale the image to palette width and smooth them
palette = image.Image(256, 1, sensor.RGB565)
palette.draw_image(palette_source_color_image,
                   0,
                   0,
                   x_scale=palette.width() /
                   palette_source_color_image.width())
palette.mean(int(palette.width() / palette_source_color_image.width() / 2))

while (True):
    clock.tick()

    img = sensor.snapshot()
    # Get a copy of grayscale image before converting to color
    img_copy = img.copy()

    img.to_rgb565()

    palette_boundary_inset = int(sensor.width() / 40)
    palette_scale_x = (sensor.width() -
                       palette_boundary_inset * 2) / palette.width()

    img.draw_image(img_copy, 0, 0, color_palette=palette)
    img.draw_image(palette,
                   palette_boundary_inset,
                   palette_boundary_inset,
                   x_scale=palette_scale_x,
                   y_scale=8)
Example #59
# Codes are or'ed together when "merge=True" for "find_blobs".

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
clock = time.clock()

# Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes.

while(True):
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True):
        if blob.code() == 3: # r/g code == (1 << 1) | (1 << 0)
            # These values depend on the blob not being circular - otherwise they will be shaky.
            if blob.elongation() > 0.5:
                img.draw_edges(blob.min_corners(), color=(255,0,0))
                img.draw_line(blob.major_axis_line(), color=(0,255,0))
                img.draw_line(blob.minor_axis_line(), color=(0,0,255))
            # These values are stable all the time.
            img.draw_rectangle(blob.rect())
            img.draw_cross(blob.cx(), blob.cy())
            # Note - the blob rotation is unique to 0-180 only.
            img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20)
    print(clock.fps())
Example #60
disp_drv.flush_cb = lv_h.flush
disp_drv.hor_res = 320
disp_drv.ver_res = 240
lv.disp_drv_register(disp_drv)

indev_drv = lv.indev_drv_t()
lv.indev_drv_init(indev_drv)
indev_drv.type = lv.INDEV_TYPE.POINTER
indev_drv.read_cb = lv_h.read
lv.indev_drv_register(indev_drv)

# lv.log_register_print_cb(lv_h.log)
lv.log_register_print_cb(
    lambda level, path, line, msg: print('%s(%d): %s' % (path, line, msg)))

snapshot = sensor.snapshot()

# Create a screen with a draggable image

scr = lv.obj()
img = lv.img(scr)
img_data = snapshot.to_bytes()
img.align(scr, lv.ALIGN.CENTER, 0, 0)
img_dsc = lv.img_dsc_t({
    'header': {
        'always_zero': 0,
        'w': snapshot.width(),
        'h': snapshot.height(),
        'cf': lv.img.CF.TRUE_COLOR
    },
    'data_size': len(img_data),